author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a (patch)
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_mir_build/src/thir
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_mir_build/src/thir')
-rw-r--r--  compiler/rustc_mir_build/src/thir/cx/expr.rs                   68
-rw-r--r--  compiler/rustc_mir_build/src/thir/cx/mod.rs                    17
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/check_match.rs      911
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs     148
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs 1923
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/mod.rs              404
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/usefulness.rs       512
-rw-r--r--  compiler/rustc_mir_build/src/thir/print.rs                     12
8 files changed, 2207 insertions(+), 1788 deletions(-)
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index 16a85d427..dfd39b512 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -191,11 +191,16 @@ impl<'tcx> Cx<'tcx> {
source: self.mirror_expr(source),
cast: PointerCoercion::ArrayToPointer,
}
- } else {
- // check whether this is casting an enum variant discriminant
- // to prevent cycles, we refer to the discriminant initializer
+ } else if let hir::ExprKind::Path(ref qpath) = source.kind
+ && let res = self.typeck_results().qpath_res(qpath, source.hir_id)
+ && let ty = self.typeck_results().node_type(source.hir_id)
+ && let ty::Adt(adt_def, args) = ty.kind()
+ && let Res::Def(DefKind::Ctor(CtorOf::Variant, CtorKind::Const), variant_ctor_id) = res
+ {
+ // Check whether this is casting an enum variant discriminant.
+ // To prevent cycles, we refer to the discriminant initializer,
// which is always an integer and thus doesn't need to know the
- // enum's layout (or its tag type) to compute it during const eval
+ // enum's layout (or its tag type) to compute it during const eval.
// Example:
// enum Foo {
// A,
@@ -204,21 +209,6 @@ impl<'tcx> Cx<'tcx> {
// The correct solution would be to add symbolic computations to miri,
// so we wouldn't have to compute and store the actual value
- let hir::ExprKind::Path(ref qpath) = source.kind else {
- return ExprKind::Cast { source: self.mirror_expr(source) };
- };
-
- let res = self.typeck_results().qpath_res(qpath, source.hir_id);
- let ty = self.typeck_results().node_type(source.hir_id);
- let ty::Adt(adt_def, args) = ty.kind() else {
- return ExprKind::Cast { source: self.mirror_expr(source) };
- };
-
- let Res::Def(DefKind::Ctor(CtorOf::Variant, CtorKind::Const), variant_ctor_id) = res
- else {
- return ExprKind::Cast { source: self.mirror_expr(source) };
- };
-
let idx = adt_def.variant_index_with_ctor_id(variant_ctor_id);
let (discr_did, discr_offset) = adt_def.discriminant_def_for_variant(idx);
@@ -255,6 +245,10 @@ impl<'tcx> Cx<'tcx> {
};
ExprKind::Cast { source }
+ } else {
+ // Default to `ExprKind::Cast` for all explicit casts.
+ // MIR building then picks the right MIR casts based on the types.
+ ExprKind::Cast { source: self.mirror_expr(source) }
}
}
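For reference, a minimal sketch (not part of the patch) of the cast this branch lowers: casting a const enum-variant constructor to an integer is resolved through the variant's discriminant initializer, so const eval never needs the enum's layout or tag type.

    enum Foo {
        A,
        B = 5,
    }

    fn main() {
        // Lowered via `Foo::B`'s discriminant initializer, as described in the comment above.
        let n = Foo::B as isize;
        assert_eq!(n, 5);
    }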
@@ -320,17 +314,23 @@ impl<'tcx> Cx<'tcx> {
reason: errors::RustcBoxAttrReason::Attributes,
});
} else if let Some(box_item) = tcx.lang_items().owned_box() {
- if let hir::ExprKind::Path(hir::QPath::TypeRelative(ty, fn_path)) = fun.kind
+ if let hir::ExprKind::Path(hir::QPath::TypeRelative(ty, fn_path)) =
+ fun.kind
&& let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = ty.kind
&& path.res.opt_def_id().is_some_and(|did| did == box_item)
&& fn_path.ident.name == sym::new
&& let [value] = args
{
- return Expr { temp_lifetime, ty: expr_ty, span: expr.span, kind: ExprKind::Box { value: self.mirror_expr(value) } }
+ return Expr {
+ temp_lifetime,
+ ty: expr_ty,
+ span: expr.span,
+ kind: ExprKind::Box { value: self.mirror_expr(value) },
+ };
} else {
tcx.sess.emit_err(errors::RustcBoxAttributeError {
span: expr.span,
- reason: errors::RustcBoxAttrReason::NotBoxNew
+ reason: errors::RustcBoxAttrReason::NotBoxNew,
});
}
} else {
@@ -343,17 +343,16 @@ impl<'tcx> Cx<'tcx> {
// Tuple-like ADTs are represented as ExprKind::Call. We convert them here.
let adt_data = if let hir::ExprKind::Path(ref qpath) = fun.kind
- && let Some(adt_def) = expr_ty.ty_adt_def() {
+ && let Some(adt_def) = expr_ty.ty_adt_def()
+ {
match qpath {
- hir::QPath::Resolved(_, ref path) => {
- match path.res {
- Res::Def(DefKind::Ctor(_, CtorKind::Fn), ctor_id) => {
- Some((adt_def, adt_def.variant_index_with_ctor_id(ctor_id)))
- }
- Res::SelfCtor(..) => Some((adt_def, FIRST_VARIANT)),
- _ => None,
+ hir::QPath::Resolved(_, ref path) => match path.res {
+ Res::Def(DefKind::Ctor(_, CtorKind::Fn), ctor_id) => {
+ Some((adt_def, adt_def.variant_index_with_ctor_id(ctor_id)))
}
- }
+ Res::SelfCtor(..) => Some((adt_def, FIRST_VARIANT)),
+ _ => None,
+ },
hir::QPath::TypeRelative(_ty, _) => {
if let Some((DefKind::Ctor(_, CtorKind::Fn), ctor_id)) =
self.typeck_results().type_dependent_def(fun.hir_id)
@@ -362,7 +361,6 @@ impl<'tcx> Cx<'tcx> {
} else {
None
}
-
}
_ => None,
}
@@ -570,8 +568,8 @@ impl<'tcx> Cx<'tcx> {
let closure_ty = self.typeck_results().expr_ty(expr);
let (def_id, args, movability) = match *closure_ty.kind() {
ty::Closure(def_id, args) => (def_id, UpvarArgs::Closure(args), None),
- ty::Generator(def_id, args, movability) => {
- (def_id, UpvarArgs::Generator(args), Some(movability))
+ ty::Coroutine(def_id, args, movability) => {
+ (def_id, UpvarArgs::Coroutine(args), Some(movability))
}
_ => {
span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty);
@@ -672,7 +670,7 @@ impl<'tcx> Cx<'tcx> {
hir::ExprKind::OffsetOf(_, _) => {
let data = self.typeck_results.offset_of_data();
let &(container, ref indices) = data.get(expr.hir_id).unwrap();
- let fields = tcx.mk_fields_from_iter(indices.iter().copied());
+ let fields = tcx.mk_offset_of_from_iter(indices.iter().copied());
ExprKind::OffsetOf { container, fields }
}
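The `OffsetOf` arm lowers the `offset_of!` surface macro; a sketch of its use follows (assuming this toolchain still gates it behind `#![feature(offset_of)]`):

    #![feature(offset_of)]

    #[repr(C)]
    struct Packet {
        tag: u8,
        len: u32,
    }

    fn main() {
        // Each field in the path becomes one entry in the `fields` list built above.
        let off = std::mem::offset_of!(Packet, len);
        println!("offset of `len`: {off}");
    }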
diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs
index d98cc76ad..b6adb383f 100644
--- a/compiler/rustc_mir_build/src/thir/cx/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs
@@ -37,7 +37,7 @@ pub(crate) fn thir_body(
// The resume argument may be missing, in that case we need to provide it here.
// It will always be `()` in this case.
- if tcx.def_kind(owner_def) == DefKind::Generator && body.params.is_empty() {
+ if tcx.def_kind(owner_def) == DefKind::Coroutine && body.params.is_empty() {
cx.thir.params.push(Param {
ty: Ty::new_unit(tcx),
pat: None,
@@ -148,11 +148,16 @@ impl<'tcx> Cx<'tcx> {
Some(env_param)
}
- DefKind::Generator => {
- let gen_ty = self.typeck_results.node_type(owner_id);
- let gen_param =
- Param { ty: gen_ty, pat: None, ty_span: None, self_kind: None, hir_id: None };
- Some(gen_param)
+ DefKind::Coroutine => {
+ let coroutine_ty = self.typeck_results.node_type(owner_id);
+ let coroutine_param = Param {
+ ty: coroutine_ty,
+ pat: None,
+ ty_span: None,
+ self_kind: None,
+ hir_id: None,
+ };
+ Some(coroutine_param)
}
_ => None,
}
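The renamed `DefKind::Coroutine` arm supplies the implicit resume parameter mentioned above; a sketch of a coroutine written without an explicit resume argument (assuming the nightly `coroutines` feature, renamed from `generators` in this release cycle):

    #![feature(coroutines)]

    fn main() {
        // No resume argument is written, so THIR building adds an implicit `()` parameter.
        let mut co = || {
            yield 1;
            yield 2;
            "done"
        };
        let _ = &mut co; // resuming it needs `Pin` and the `Coroutine` trait, omitted here
    }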
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index d440ca319..8c3d09c19 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -1,4 +1,4 @@
-use super::deconstruct_pat::{Constructor, DeconstructedPat};
+use super::deconstruct_pat::{Constructor, DeconstructedPat, WitnessPat};
use super::usefulness::{
compute_match_usefulness, MatchArm, MatchCheckCtxt, Reachability, UsefulnessReport,
};
@@ -9,9 +9,7 @@ use rustc_arena::TypedArena;
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_errors::{
- struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan,
-};
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def::*;
use rustc_hir::def_id::LocalDefId;
@@ -44,7 +42,7 @@ pub(crate) fn check_match(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), Err
for param in thir.params.iter() {
if let Some(box ref pattern) = param.pat {
- visitor.check_irrefutable(pattern, "function argument", None);
+ visitor.check_binding_is_irrefutable(pattern, "function argument", None);
}
}
visitor.error
@@ -58,7 +56,7 @@ fn create_e0004(
struct_span_err!(sess, sp, E0004, "{}", &error_message)
}
-#[derive(PartialEq)]
+#[derive(Debug, Copy, Clone, PartialEq)]
enum RefutableFlag {
Irrefutable,
Refutable,
@@ -68,24 +66,30 @@ use RefutableFlag::*;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum LetSource {
None,
+ PlainLet,
IfLet,
IfLetGuard,
LetElse,
WhileLet,
}
-struct MatchVisitor<'a, 'p, 'tcx> {
+struct MatchVisitor<'thir, 'p, 'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
- thir: &'a Thir<'tcx>,
+ thir: &'thir Thir<'tcx>,
lint_level: HirId,
let_source: LetSource,
pattern_arena: &'p TypedArena<DeconstructedPat<'p, 'tcx>>,
+    /// Tracks if we encountered an error while checking this body. The first function to
+    /// report it stores it here. Some functions return `Result` to allow callers to
+    /// short-circuit on error, but callers don't need to store it here again.
error: Result<(), ErrorGuaranteed>,
}
-impl<'a, 'tcx> Visitor<'a, 'tcx> for MatchVisitor<'a, '_, 'tcx> {
- fn thir(&self) -> &'a Thir<'tcx> {
+// Visitor for a thir body. This calls `check_match`, `check_let` and `check_let_chain` as
+// appropriate.
+impl<'thir, 'tcx> Visitor<'thir, 'tcx> for MatchVisitor<'thir, '_, 'tcx> {
+ fn thir(&self) -> &'thir Thir<'tcx> {
self.thir
}
@@ -100,7 +104,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for MatchVisitor<'a, '_, 'tcx> {
}
Some(Guard::IfLet(ref pat, expr)) => {
this.with_let_source(LetSource::IfLetGuard, |this| {
- this.check_let(pat, expr, LetSource::IfLetGuard, pat.span);
+ this.check_let(pat, Some(expr), pat.span);
this.visit_pat(pat);
this.visit_expr(&this.thir[expr]);
});
@@ -148,10 +152,18 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for MatchVisitor<'a, '_, 'tcx> {
self.check_match(scrutinee, arms, source, ex.span);
}
ExprKind::Let { box ref pat, expr } => {
- self.check_let(pat, expr, self.let_source, ex.span);
+ self.check_let(pat, Some(expr), ex.span);
}
- ExprKind::LogicalOp { op: LogicalOp::And, lhs, rhs } => {
- self.check_let_chain(self.let_source, ex.span, lhs, rhs);
+ ExprKind::LogicalOp { op: LogicalOp::And, .. }
+ if !matches!(self.let_source, LetSource::None) =>
+ {
+ let mut chain_refutabilities = Vec::new();
+ let Ok(()) = self.visit_land(ex, &mut chain_refutabilities) else { return };
+ // If at least one of the operands is a `let ... = ...`.
+ if chain_refutabilities.iter().any(|x| x.is_some()) {
+ self.check_let_chain(chain_refutabilities, ex.span);
+ }
+ return;
}
_ => {}
};
@@ -159,31 +171,27 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for MatchVisitor<'a, '_, 'tcx> {
}
fn visit_stmt(&mut self, stmt: &Stmt<'tcx>) {
- let old_lint_level = self.lint_level;
match stmt.kind {
StmtKind::Let {
box ref pattern, initializer, else_block, lint_level, span, ..
} => {
- if let LintLevel::Explicit(lint_level) = lint_level {
- self.lint_level = lint_level;
- }
-
- if let Some(initializer) = initializer && else_block.is_some() {
- self.check_let(pattern, initializer, LetSource::LetElse, span);
- }
-
- if else_block.is_none() {
- self.check_irrefutable(pattern, "local binding", Some(span));
- }
+ self.with_lint_level(lint_level, |this| {
+ let let_source =
+ if else_block.is_some() { LetSource::LetElse } else { LetSource::PlainLet };
+ this.with_let_source(let_source, |this| {
+ this.check_let(pattern, initializer, span)
+ });
+ visit::walk_stmt(this, stmt);
+ });
+ }
+ StmtKind::Expr { .. } => {
+ visit::walk_stmt(self, stmt);
}
- _ => {}
}
- visit::walk_stmt(self, stmt);
- self.lint_level = old_lint_level;
}
}
-impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
+impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> {
#[instrument(level = "trace", skip(self, f))]
fn with_let_source(&mut self, let_source: LetSource, f: impl FnOnce(&mut Self)) {
let old_let_source = self.let_source;
@@ -192,49 +200,127 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
self.let_source = old_let_source;
}
- fn with_lint_level(&mut self, new_lint_level: LintLevel, f: impl FnOnce(&mut Self)) {
+ fn with_lint_level<T>(
+ &mut self,
+ new_lint_level: LintLevel,
+ f: impl FnOnce(&mut Self) -> T,
+ ) -> T {
if let LintLevel::Explicit(hir_id) = new_lint_level {
let old_lint_level = self.lint_level;
self.lint_level = hir_id;
- f(self);
+ let ret = f(self);
self.lint_level = old_lint_level;
+ ret
} else {
- f(self);
+ f(self)
}
}
- fn check_patterns(&self, pat: &Pat<'tcx>, rf: RefutableFlag) {
- pat.walk_always(|pat| check_borrow_conflicts_in_at_patterns(self, pat));
- check_for_bindings_named_same_as_variants(self, pat, rf);
+ /// Visit a nested chain of `&&`. Used for if-let chains. This must call `visit_expr` on the
+ /// subexpressions we are not handling ourselves.
+ fn visit_land(
+ &mut self,
+ ex: &Expr<'tcx>,
+ accumulator: &mut Vec<Option<(Span, RefutableFlag)>>,
+ ) -> Result<(), ErrorGuaranteed> {
+ match ex.kind {
+ ExprKind::Scope { value, lint_level, .. } => self.with_lint_level(lint_level, |this| {
+ this.visit_land(&this.thir[value], accumulator)
+ }),
+ ExprKind::LogicalOp { op: LogicalOp::And, lhs, rhs } => {
+ // We recurse into the lhs only, because `&&` chains associate to the left.
+ let res_lhs = self.visit_land(&self.thir[lhs], accumulator);
+ let res_rhs = self.visit_land_rhs(&self.thir[rhs])?;
+ accumulator.push(res_rhs);
+ res_lhs
+ }
+ _ => {
+ let res = self.visit_land_rhs(ex)?;
+ accumulator.push(res);
+ Ok(())
+ }
+ }
+ }
+
+ /// Visit the right-hand-side of a `&&`. Used for if-let chains. Returns `Some` if the
+ /// expression was ultimately a `let ... = ...`, and `None` if it was a normal boolean
+ /// expression. This must call `visit_expr` on the subexpressions we are not handling ourselves.
+ fn visit_land_rhs(
+ &mut self,
+ ex: &Expr<'tcx>,
+ ) -> Result<Option<(Span, RefutableFlag)>, ErrorGuaranteed> {
+ match ex.kind {
+ ExprKind::Scope { value, lint_level, .. } => {
+ self.with_lint_level(lint_level, |this| this.visit_land_rhs(&this.thir[value]))
+ }
+ ExprKind::Let { box ref pat, expr } => {
+ self.with_let_source(LetSource::None, |this| {
+ this.visit_expr(&this.thir()[expr]);
+ });
+ Ok(Some((ex.span, self.is_let_irrefutable(pat)?)))
+ }
+ _ => {
+ self.with_let_source(LetSource::None, |this| {
+ this.visit_expr(ex);
+ });
+ Ok(None)
+ }
+ }
}
fn lower_pattern(
- &self,
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
- pattern: &Pat<'tcx>,
- ) -> &'p DeconstructedPat<'p, 'tcx> {
- cx.pattern_arena.alloc(DeconstructedPat::from_pat(cx, &pattern))
+ &mut self,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ pat: &Pat<'tcx>,
+ ) -> Result<&'p DeconstructedPat<'p, 'tcx>, ErrorGuaranteed> {
+ if let Err(err) = pat.pat_error_reported() {
+ self.error = Err(err);
+ Err(err)
+ } else {
+ // Check the pattern for some things unrelated to exhaustiveness.
+ let refutable = if cx.refutable { Refutable } else { Irrefutable };
+ pat.walk_always(|pat| check_borrow_conflicts_in_at_patterns(self, pat));
+ pat.walk_always(|pat| check_for_bindings_named_same_as_variants(self, pat, refutable));
+ Ok(cx.pattern_arena.alloc(DeconstructedPat::from_pat(cx, pat)))
+ }
}
- fn new_cx(&self, hir_id: HirId, refutable: bool) -> MatchCheckCtxt<'p, 'tcx> {
+ fn new_cx(
+ &self,
+ refutability: RefutableFlag,
+ match_span: Option<Span>,
+ ) -> MatchCheckCtxt<'p, 'tcx> {
+ let refutable = match refutability {
+ Irrefutable => false,
+ Refutable => true,
+ };
MatchCheckCtxt {
tcx: self.tcx,
param_env: self.param_env,
- module: self.tcx.parent_module(hir_id).to_def_id(),
+ module: self.tcx.parent_module(self.lint_level).to_def_id(),
pattern_arena: &self.pattern_arena,
+ match_span,
refutable,
}
}
#[instrument(level = "trace", skip(self))]
- fn check_let(&mut self, pat: &Pat<'tcx>, scrutinee: ExprId, source: LetSource, span: Span) {
- if let LetSource::None = source {
- return;
+ fn check_let(&mut self, pat: &Pat<'tcx>, scrutinee: Option<ExprId>, span: Span) {
+ assert!(self.let_source != LetSource::None);
+ if let LetSource::PlainLet = self.let_source {
+ self.check_binding_is_irrefutable(pat, "local binding", Some(span))
+ } else {
+ let Ok(refutability) = self.is_let_irrefutable(pat) else { return };
+ if matches!(refutability, Irrefutable) {
+ report_irrefutable_let_patterns(
+ self.tcx,
+ self.lint_level,
+ self.let_source,
+ 1,
+ span,
+ );
+ }
}
- self.check_patterns(pat, Refutable);
- let mut cx = self.new_cx(self.lint_level, true);
- let tpat = self.lower_pattern(&mut cx, pat);
- self.check_let_reachability(&mut cx, self.lint_level, source, tpat, span);
}
fn check_match(
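A sketch of the chains `visit_land` walks (assuming the still-unstable `#![feature(let_chains)]`): `&&` associates to the left, so the walk recurses into the left operand and classifies each right-hand operand as either a `let` (with its refutability) or a plain boolean, recorded as `None` in the accumulator.

    #![feature(let_chains)]

    fn main() {
        let (a, b, flag) = (Some(1), Some(2), true);
        // Parsed as `((let Some(x) = a && flag) && let Some(y) = b)`.
        if let Some(x) = a && flag && let Some(y) = b {
            println!("{x} {y}");
        }
    }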
@@ -244,32 +330,25 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
source: hir::MatchSource,
expr_span: Span,
) {
- let mut cx = self.new_cx(self.lint_level, true);
+ let cx = self.new_cx(Refutable, Some(expr_span));
+ let mut tarms = Vec::with_capacity(arms.len());
for &arm in arms {
- // Check the arm for some things unrelated to exhaustiveness.
let arm = &self.thir.arms[arm];
- self.with_lint_level(arm.lint_level, |this| {
- this.check_patterns(&arm.pattern, Refutable);
+ let got_error = self.with_lint_level(arm.lint_level, |this| {
+ let Ok(pat) = this.lower_pattern(&cx, &arm.pattern) else { return true };
+ let arm = MatchArm { pat, hir_id: this.lint_level, has_guard: arm.guard.is_some() };
+ tarms.push(arm);
+ false
});
+ if got_error {
+ return;
+ }
}
- let tarms: Vec<_> = arms
- .iter()
- .map(|&arm| {
- let arm = &self.thir.arms[arm];
- let hir_id = match arm.lint_level {
- LintLevel::Explicit(hir_id) => hir_id,
- LintLevel::Inherited => self.lint_level,
- };
- let pat = self.lower_pattern(&mut cx, &arm.pattern);
- MatchArm { pat, hir_id, has_guard: arm.guard.is_some() }
- })
- .collect();
-
let scrut = &self.thir[scrut];
let scrut_ty = scrut.ty;
- let report = compute_match_usefulness(&cx, &tarms, self.lint_level, scrut_ty);
+ let report = compute_match_usefulness(&cx, &tarms, self.lint_level, scrut_ty, scrut.span);
match source {
// Don't report arm reachability of desugared `match $iter.into_iter() { iter => .. }`
@@ -293,107 +372,39 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
debug_assert_eq!(pat.span.desugaring_kind(), Some(DesugaringKind::ForLoop));
let PatKind::Variant { ref subpatterns, .. } = pat.kind else { bug!() };
let [pat_field] = &subpatterns[..] else { bug!() };
- self.check_irrefutable(&pat_field.pattern, "`for` loop binding", None);
+ self.check_binding_is_irrefutable(&pat_field.pattern, "`for` loop binding", None);
} else {
- self.error = Err(non_exhaustive_match(
+ self.error = Err(report_non_exhaustive_match(
&cx, self.thir, scrut_ty, scrut.span, witnesses, arms, expr_span,
));
}
}
}
- fn check_let_reachability(
- &mut self,
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
- pat_id: HirId,
- source: LetSource,
- pat: &'p DeconstructedPat<'p, 'tcx>,
- span: Span,
- ) {
- if is_let_irrefutable(cx, pat_id, pat) {
- irrefutable_let_patterns(cx.tcx, pat_id, source, 1, span);
- }
- }
-
#[instrument(level = "trace", skip(self))]
fn check_let_chain(
&mut self,
- let_source: LetSource,
- top_expr_span: Span,
- mut lhs: ExprId,
- rhs: ExprId,
+ chain_refutabilities: Vec<Option<(Span, RefutableFlag)>>,
+ whole_chain_span: Span,
) {
- if let LetSource::None = let_source {
- return;
- }
-
- // Lint level enclosing the next `lhs`.
- let mut cur_lint_level = self.lint_level;
-
- // Obtain the refutabilities of all exprs in the chain,
- // and record chain members that aren't let exprs.
- let mut chain_refutabilities = Vec::new();
-
- let add = |expr: ExprId, mut local_lint_level| {
- // `local_lint_level` is the lint level enclosing the pattern inside `expr`.
- let mut expr = &self.thir[expr];
- debug!(?expr, ?local_lint_level, "add");
- // Fast-forward through scopes.
- while let ExprKind::Scope { value, lint_level, .. } = expr.kind {
- if let LintLevel::Explicit(hir_id) = lint_level {
- local_lint_level = hir_id
- }
- expr = &self.thir[value];
- }
- debug!(?expr, ?local_lint_level, "after scopes");
- match expr.kind {
- ExprKind::Let { box ref pat, expr: _ } => {
- let mut ncx = self.new_cx(local_lint_level, true);
- let tpat = self.lower_pattern(&mut ncx, pat);
- let refutable = !is_let_irrefutable(&mut ncx, local_lint_level, tpat);
- Some((expr.span, refutable))
- }
- _ => None,
- }
- };
-
- // Let chains recurse on the left, so we start by adding the rightmost.
- chain_refutabilities.push(add(rhs, cur_lint_level));
-
- loop {
- while let ExprKind::Scope { value, lint_level, .. } = self.thir[lhs].kind {
- if let LintLevel::Explicit(hir_id) = lint_level {
- cur_lint_level = hir_id
- }
- lhs = value;
- }
- if let ExprKind::LogicalOp { op: LogicalOp::And, lhs: new_lhs, rhs: expr } =
- self.thir[lhs].kind
- {
- chain_refutabilities.push(add(expr, cur_lint_level));
- lhs = new_lhs;
- } else {
- chain_refutabilities.push(add(lhs, cur_lint_level));
- break;
- }
- }
- debug!(?chain_refutabilities);
- chain_refutabilities.reverse();
+ assert!(self.let_source != LetSource::None);
- // Third, emit the actual warnings.
- if chain_refutabilities.iter().all(|r| matches!(*r, Some((_, false)))) {
+ if chain_refutabilities.iter().all(|r| matches!(*r, Some((_, Irrefutable)))) {
// The entire chain is made up of irrefutable `let` statements
- irrefutable_let_patterns(
+ report_irrefutable_let_patterns(
self.tcx,
self.lint_level,
- let_source,
+ self.let_source,
chain_refutabilities.len(),
- top_expr_span,
+ whole_chain_span,
);
return;
}
- if let Some(until) = chain_refutabilities.iter().position(|r| !matches!(*r, Some((_, false)))) && until > 0 {
+ if let Some(until) =
+ chain_refutabilities.iter().position(|r| !matches!(*r, Some((_, Irrefutable))))
+ && until > 0
+ {
// The chain has a non-zero prefix of irrefutable `let` statements.
// Check if the let source is while, for there is no alternative place to put a prefix,
@@ -402,43 +413,71 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
// so can't always be moved out.
// FIXME: Add checking whether the bindings are actually used in the prefix,
// and lint if they are not.
- if !matches!(let_source, LetSource::WhileLet | LetSource::IfLetGuard) {
+ if !matches!(self.let_source, LetSource::WhileLet | LetSource::IfLetGuard) {
// Emit the lint
let prefix = &chain_refutabilities[..until];
let span_start = prefix[0].unwrap().0;
let span_end = prefix.last().unwrap().unwrap().0;
let span = span_start.to(span_end);
let count = prefix.len();
- self.tcx.emit_spanned_lint(IRREFUTABLE_LET_PATTERNS, self.lint_level, span, LeadingIrrefutableLetPatterns { count });
+ self.tcx.emit_spanned_lint(
+ IRREFUTABLE_LET_PATTERNS,
+ self.lint_level,
+ span,
+ LeadingIrrefutableLetPatterns { count },
+ );
}
}
- if let Some(from) = chain_refutabilities.iter().rposition(|r| !matches!(*r, Some((_, false)))) && from != (chain_refutabilities.len() - 1) {
+ if let Some(from) =
+ chain_refutabilities.iter().rposition(|r| !matches!(*r, Some((_, Irrefutable))))
+ && from != (chain_refutabilities.len() - 1)
+ {
// The chain has a non-empty suffix of irrefutable `let` statements
let suffix = &chain_refutabilities[from + 1..];
let span_start = suffix[0].unwrap().0;
let span_end = suffix.last().unwrap().unwrap().0;
let span = span_start.to(span_end);
let count = suffix.len();
- self.tcx.emit_spanned_lint(IRREFUTABLE_LET_PATTERNS, self.lint_level, span, TrailingIrrefutableLetPatterns { count });
+ self.tcx.emit_spanned_lint(
+ IRREFUTABLE_LET_PATTERNS,
+ self.lint_level,
+ span,
+ TrailingIrrefutableLetPatterns { count },
+ );
}
}
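A sketch of the prefix case handled above (same `let_chains` assumption): an irrefutable `let` at the start of an `if let` chain always matches and could live outside the condition, so it triggers the leading-irrefutable-patterns lint.

    #![feature(let_chains)]

    fn main() {
        let opt = Some(3);
        // `let x = 1` always matches; the chain lints via `irrefutable_let_patterns`
        // (leading irrefutable pattern), since the binding could move out of the `if`.
        if let x = 1 && let Some(y) = opt {
            println!("{x} {y}");
        }
    }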
- #[instrument(level = "trace", skip(self))]
- fn check_irrefutable(&mut self, pat: &Pat<'tcx>, origin: &str, sp: Option<Span>) {
- let mut cx = self.new_cx(self.lint_level, false);
+ fn analyze_binding(
+ &mut self,
+ pat: &Pat<'tcx>,
+ refutability: RefutableFlag,
+ ) -> Result<(MatchCheckCtxt<'p, 'tcx>, UsefulnessReport<'p, 'tcx>), ErrorGuaranteed> {
+ let cx = self.new_cx(refutability, None);
+ let pat = self.lower_pattern(&cx, pat)?;
+ let arms = [MatchArm { pat, hir_id: self.lint_level, has_guard: false }];
+ let report = compute_match_usefulness(&cx, &arms, self.lint_level, pat.ty(), pat.span());
+ Ok((cx, report))
+ }
- let pattern = self.lower_pattern(&mut cx, pat);
- let pattern_ty = pattern.ty();
- let arm = MatchArm { pat: pattern, hir_id: self.lint_level, has_guard: false };
- let report = compute_match_usefulness(&cx, &[arm], self.lint_level, pattern_ty);
+ fn is_let_irrefutable(&mut self, pat: &Pat<'tcx>) -> Result<RefutableFlag, ErrorGuaranteed> {
+ let (cx, report) = self.analyze_binding(pat, Refutable)?;
+ // Report if the pattern is unreachable, which can only occur when the type is uninhabited.
+ // This also reports unreachable sub-patterns.
+ report_arm_reachability(&cx, &report);
+ // If the list of witnesses is empty, the match is exhaustive, i.e. the `if let` pattern is
+ // irrefutable.
+ Ok(if report.non_exhaustiveness_witnesses.is_empty() { Irrefutable } else { Refutable })
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn check_binding_is_irrefutable(&mut self, pat: &Pat<'tcx>, origin: &str, sp: Option<Span>) {
+ let pattern_ty = pat.ty;
- // Note: we ignore whether the pattern is unreachable (i.e. whether the type is empty). We
- // only care about exhaustiveness here.
+ let Ok((cx, report)) = self.analyze_binding(pat, Irrefutable) else { return };
let witnesses = report.non_exhaustiveness_witnesses;
if witnesses.is_empty() {
// The pattern is irrefutable.
- self.check_patterns(pat, Irrefutable);
return;
}
@@ -448,23 +487,21 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
let mut interpreted_as_const = None;
if let PatKind::Constant { .. }
- | PatKind::AscribeUserType {
- subpattern: box Pat { kind: PatKind::Constant { .. }, .. },
- ..
- } = pat.kind
+ | PatKind::AscribeUserType {
+ subpattern: box Pat { kind: PatKind::Constant { .. }, .. },
+ ..
+ } = pat.kind
&& let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(pat.span)
{
// If the pattern to match is an integer literal:
if snippet.chars().all(|c| c.is_digit(10)) {
// Then give a suggestion, the user might've meant to create a binding instead.
misc_suggestion = Some(MiscPatternSuggestion::AttemptedIntegerLiteral {
- start_span: pat.span.shrink_to_lo()
+ start_span: pat.span.shrink_to_lo(),
});
} else if snippet.chars().all(|c| c.is_alphanumeric() || c == '_') {
- interpreted_as_const = Some(InterpretedAsConst {
- span: pat.span,
- variable: snippet,
- });
+ interpreted_as_const =
+ Some(InterpretedAsConst { span: pat.span, variable: snippet });
}
}
@@ -487,34 +524,23 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
});
};
- let adt_defined_here = try {
- let ty = pattern_ty.peel_refs();
- let ty::Adt(def, _) = ty.kind() else { None? };
- let adt_def_span = cx.tcx.hir().get_if_local(def.did())?.ident()?.span;
- let mut variants = vec![];
-
- for span in maybe_point_at_variant(&cx, *def, witnesses.iter().take(5)) {
- variants.push(Variant { span });
- }
- AdtDefinedHere { adt_def_span, ty, variants }
- };
+ let adt_defined_here = report_adt_defined_here(self.tcx, pattern_ty, &witnesses, false);
// Emit an extra note if the first uncovered witness would be uninhabited
// if we disregard visibility.
- let witness_1_is_privately_uninhabited =
- if cx.tcx.features().exhaustive_patterns
- && let Some(witness_1) = witnesses.get(0)
- && let ty::Adt(adt, args) = witness_1.ty().kind()
- && adt.is_enum()
- && let Constructor::Variant(variant_index) = witness_1.ctor()
- {
- let variant = adt.variant(*variant_index);
- let inhabited = variant.inhabited_predicate(cx.tcx, *adt).instantiate(cx.tcx, args);
- assert!(inhabited.apply(cx.tcx, cx.param_env, cx.module));
- !inhabited.apply_ignore_module(cx.tcx, cx.param_env)
- } else {
- false
- };
+ let witness_1_is_privately_uninhabited = if self.tcx.features().exhaustive_patterns
+ && let Some(witness_1) = witnesses.get(0)
+ && let ty::Adt(adt, args) = witness_1.ty().kind()
+ && adt.is_enum()
+ && let Constructor::Variant(variant_index) = witness_1.ctor()
+ {
+ let variant = adt.variant(*variant_index);
+ let inhabited = variant.inhabited_predicate(self.tcx, *adt).instantiate(self.tcx, args);
+ assert!(inhabited.apply(self.tcx, cx.param_env, cx.module));
+ !inhabited.apply_ignore_module(self.tcx, cx.param_env)
+ } else {
+ false
+ };
self.error = Err(self.tcx.sess.emit_err(PatternNotCovered {
span: pat.span,
@@ -532,69 +558,154 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
}
}
-fn check_for_bindings_named_same_as_variants(
- cx: &MatchVisitor<'_, '_, '_>,
- pat: &Pat<'_>,
- rf: RefutableFlag,
-) {
- pat.walk_always(|p| {
- if let PatKind::Binding {
- name,
- mode: BindingMode::ByValue,
- mutability: Mutability::Not,
- subpattern: None,
- ty,
- ..
- } = p.kind
- && let ty::Adt(edef, _) = ty.peel_refs().kind()
- && edef.is_enum()
- && edef.variants().iter().any(|variant| {
- variant.name == name && variant.ctor_kind() == Some(CtorKind::Const)
- })
- {
- let variant_count = edef.variants().len();
- let ty_path = with_no_trimmed_paths!({
- cx.tcx.def_path_str(edef.did())
+/// Check if a by-value binding is by-move. That is, check if the binding's type is not `Copy`.
+/// Check that there are no borrow or move conflicts in `binding @ subpat` patterns.
+///
+/// For example, this would reject:
+/// - `ref x @ Some(ref mut y)`,
+/// - `ref mut x @ Some(ref y)`,
+/// - `ref mut x @ Some(ref mut y)`,
+/// - `ref mut? x @ Some(y)`, and
+/// - `x @ Some(ref mut? y)`.
+///
+/// This analysis is *not* subsumed by NLL.
+fn check_borrow_conflicts_in_at_patterns<'tcx>(cx: &MatchVisitor<'_, '_, 'tcx>, pat: &Pat<'tcx>) {
+ // Extract `sub` in `binding @ sub`.
+ let PatKind::Binding { name, mode, ty, subpattern: Some(box ref sub), .. } = pat.kind else {
+ return;
+ };
+
+ let is_binding_by_move = |ty: Ty<'tcx>| !ty.is_copy_modulo_regions(cx.tcx, cx.param_env);
+
+ let sess = cx.tcx.sess;
+
+ // Get the binding move, extract the mutability if by-ref.
+ let mut_outer = match mode {
+ BindingMode::ByValue if is_binding_by_move(ty) => {
+ // We have `x @ pat` where `x` is by-move. Reject all borrows in `pat`.
+ let mut conflicts_ref = Vec::new();
+ sub.each_binding(|_, mode, _, span| match mode {
+ BindingMode::ByValue => {}
+ BindingMode::ByRef(_) => conflicts_ref.push(span),
});
- cx.tcx.emit_spanned_lint(
- BINDINGS_WITH_VARIANT_NAME,
- cx.lint_level,
- p.span,
- BindingsWithVariantName {
- // If this is an irrefutable pattern, and there's > 1 variant,
- // then we can't actually match on this. Applying the below
- // suggestion would produce code that breaks on `check_irrefutable`.
- suggestion: if rf == Refutable || variant_count == 1 {
- Some(p.span)
- } else { None },
- ty_path,
+ if !conflicts_ref.is_empty() {
+ sess.emit_err(BorrowOfMovedValue {
+ binding_span: pat.span,
+ conflicts_ref,
name,
- },
- )
+ ty,
+ suggest_borrowing: Some(pat.span.shrink_to_lo()),
+ });
+ }
+ return;
+ }
+ BindingMode::ByValue => return,
+ BindingMode::ByRef(m) => m.mutability(),
+ };
+
+ // We now have `ref $mut_outer binding @ sub` (semantically).
+ // Recurse into each binding in `sub` and find mutability or move conflicts.
+ let mut conflicts_move = Vec::new();
+ let mut conflicts_mut_mut = Vec::new();
+ let mut conflicts_mut_ref = Vec::new();
+ sub.each_binding(|name, mode, ty, span| {
+ match mode {
+ BindingMode::ByRef(mut_inner) => match (mut_outer, mut_inner.mutability()) {
+ // Both sides are `ref`.
+ (Mutability::Not, Mutability::Not) => {}
+ // 2x `ref mut`.
+ (Mutability::Mut, Mutability::Mut) => {
+ conflicts_mut_mut.push(Conflict::Mut { span, name })
+ }
+ (Mutability::Not, Mutability::Mut) => {
+ conflicts_mut_ref.push(Conflict::Mut { span, name })
+ }
+ (Mutability::Mut, Mutability::Not) => {
+ conflicts_mut_ref.push(Conflict::Ref { span, name })
+ }
+ },
+ BindingMode::ByValue if is_binding_by_move(ty) => {
+ conflicts_move.push(Conflict::Moved { span, name }) // `ref mut?` + by-move conflict.
+ }
+ BindingMode::ByValue => {} // `ref mut?` + by-copy is fine.
}
});
-}
-/// Checks for common cases of "catchall" patterns that may not be intended as such.
-fn pat_is_catchall(pat: &DeconstructedPat<'_, '_>) -> bool {
- use Constructor::*;
- match pat.ctor() {
- Wildcard => true,
- Single => pat.iter_fields().all(|pat| pat_is_catchall(pat)),
- _ => false,
+ let report_mut_mut = !conflicts_mut_mut.is_empty();
+ let report_mut_ref = !conflicts_mut_ref.is_empty();
+ let report_move_conflict = !conflicts_move.is_empty();
+
+ let mut occurrences = match mut_outer {
+ Mutability::Mut => vec![Conflict::Mut { span: pat.span, name }],
+ Mutability::Not => vec![Conflict::Ref { span: pat.span, name }],
+ };
+ occurrences.extend(conflicts_mut_mut);
+ occurrences.extend(conflicts_mut_ref);
+ occurrences.extend(conflicts_move);
+
+ // Report errors if any.
+ if report_mut_mut {
+ // Report mutability conflicts for e.g. `ref mut x @ Some(ref mut y)`.
+ sess.emit_err(MultipleMutBorrows { span: pat.span, occurrences });
+ } else if report_mut_ref {
+ // Report mutability conflicts for e.g. `ref x @ Some(ref mut y)` or the converse.
+ match mut_outer {
+ Mutability::Mut => {
+ sess.emit_err(AlreadyMutBorrowed { span: pat.span, occurrences });
+ }
+ Mutability::Not => {
+ sess.emit_err(AlreadyBorrowed { span: pat.span, occurrences });
+ }
+ };
+ } else if report_move_conflict {
+ // Report by-ref and by-move conflicts, e.g. `ref x @ y`.
+ sess.emit_err(MovedWhileBorrowed { span: pat.span, occurrences });
}
}
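Concretely, the conflicts this check rejects look like the following sketch (each commented-out arm fails to compile; shown for illustration only):

    fn main() {
        let opt = Some(String::from("hi"));
        match opt {
            // Rejected: `x` borrows the whole value while `y` mutably borrows part of it.
            // ref x @ Some(ref mut y) => drop((x, y)),
            // Rejected: `y` would move the inner `String` while `x` still borrows the value.
            // ref x @ Some(y) => drop((x, y)),
            _ => {}
        }
    }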
-fn unreachable_pattern(tcx: TyCtxt<'_>, span: Span, id: HirId, catchall: Option<Span>) {
- tcx.emit_spanned_lint(
- UNREACHABLE_PATTERNS,
- id,
- span,
- UnreachablePattern { span: if catchall.is_some() { Some(span) } else { None }, catchall },
- );
+fn check_for_bindings_named_same_as_variants(
+ cx: &MatchVisitor<'_, '_, '_>,
+ pat: &Pat<'_>,
+ rf: RefutableFlag,
+) {
+ if let PatKind::Binding {
+ name,
+ mode: BindingMode::ByValue,
+ mutability: Mutability::Not,
+ subpattern: None,
+ ty,
+ ..
+ } = pat.kind
+ && let ty::Adt(edef, _) = ty.peel_refs().kind()
+ && edef.is_enum()
+ && edef
+ .variants()
+ .iter()
+ .any(|variant| variant.name == name && variant.ctor_kind() == Some(CtorKind::Const))
+ {
+ let variant_count = edef.variants().len();
+ let ty_path = with_no_trimmed_paths!(cx.tcx.def_path_str(edef.did()));
+ cx.tcx.emit_spanned_lint(
+ BINDINGS_WITH_VARIANT_NAME,
+ cx.lint_level,
+ pat.span,
+ BindingsWithVariantName {
+ // If this is an irrefutable pattern, and there's > 1 variant,
+ // then we can't actually match on this. Applying the below
+ // suggestion would produce code that breaks on `check_binding_is_irrefutable`.
+ suggestion: if rf == Refutable || variant_count == 1 {
+ Some(pat.span)
+ } else {
+ None
+ },
+ ty_path,
+ name,
+ },
+ )
+ }
}
-fn irrefutable_let_patterns(
+fn report_irrefutable_let_patterns(
tcx: TyCtxt<'_>,
id: HirId,
source: LetSource,
@@ -608,7 +719,7 @@ fn irrefutable_let_patterns(
}
match source {
- LetSource::None => bug!(),
+ LetSource::None | LetSource::PlainLet => bug!(),
LetSource::IfLet => emit_diag!(IrrefutableLetPatternsIfLet),
LetSource::IfLetGuard => emit_diag!(IrrefutableLetPatternsIfLetGuard),
LetSource::LetElse => emit_diag!(IrrefutableLetPatternsLetElse),
@@ -616,34 +727,28 @@ fn irrefutable_let_patterns(
}
}
-fn is_let_irrefutable<'p, 'tcx>(
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
- pat_id: HirId,
- pat: &'p DeconstructedPat<'p, 'tcx>,
-) -> bool {
- let arms = [MatchArm { pat, hir_id: pat_id, has_guard: false }];
- let report = compute_match_usefulness(&cx, &arms, pat_id, pat.ty());
-
- // Report if the pattern is unreachable, which can only occur when the type is uninhabited.
- // This also reports unreachable sub-patterns though, so we can't just replace it with an
- // `is_uninhabited` check.
- report_arm_reachability(&cx, &report);
-
- // If the list of witnesses is empty, the match is exhaustive,
- // i.e. the `if let` pattern is irrefutable.
- report.non_exhaustiveness_witnesses.is_empty()
-}
-
/// Report unreachable arms, if any.
fn report_arm_reachability<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
report: &UsefulnessReport<'p, 'tcx>,
) {
+ let report_unreachable_pattern = |span, hir_id, catchall: Option<Span>| {
+ cx.tcx.emit_spanned_lint(
+ UNREACHABLE_PATTERNS,
+ hir_id,
+ span,
+ UnreachablePattern {
+ span: if catchall.is_some() { Some(span) } else { None },
+ catchall,
+ },
+ );
+ };
+
use Reachability::*;
let mut catchall = None;
for (arm, is_useful) in report.arm_usefulness.iter() {
match is_useful {
- Unreachable => unreachable_pattern(cx.tcx, arm.pat.span(), arm.hir_id, catchall),
+ Unreachable => report_unreachable_pattern(arm.pat.span(), arm.hir_id, catchall),
Reachable(unreachables) if unreachables.is_empty() => {}
// The arm is reachable, but contains unreachable subpatterns (from or-patterns).
Reachable(unreachables) => {
@@ -651,7 +756,7 @@ fn report_arm_reachability<'p, 'tcx>(
// Emit lints in the order in which they occur in the file.
unreachables.sort_unstable();
for span in unreachables {
- unreachable_pattern(cx.tcx, span, arm.hir_id, None);
+ report_unreachable_pattern(span, arm.hir_id, None);
}
}
}
@@ -661,24 +766,23 @@ fn report_arm_reachability<'p, 'tcx>(
}
}
-fn collect_non_exhaustive_tys<'p, 'tcx>(
- pat: &DeconstructedPat<'p, 'tcx>,
- non_exhaustive_tys: &mut FxHashSet<Ty<'tcx>>,
-) {
- if matches!(pat.ctor(), Constructor::NonExhaustive) {
- non_exhaustive_tys.insert(pat.ty());
+/// Checks for common cases of "catchall" patterns that may not be intended as such.
+fn pat_is_catchall(pat: &DeconstructedPat<'_, '_>) -> bool {
+ use Constructor::*;
+ match pat.ctor() {
+ Wildcard => true,
+ Single => pat.iter_fields().all(|pat| pat_is_catchall(pat)),
+ _ => false,
}
- pat.iter_fields()
- .for_each(|field_pat| collect_non_exhaustive_tys(field_pat, non_exhaustive_tys))
}
/// Report that a match is not exhaustive.
-fn non_exhaustive_match<'p, 'tcx>(
+fn report_non_exhaustive_match<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
thir: &Thir<'tcx>,
scrut_ty: Ty<'tcx>,
sp: Span,
- witnesses: Vec<DeconstructedPat<'p, 'tcx>>,
+ witnesses: Vec<WitnessPat<'tcx>>,
arms: &[ArmId],
expr_span: Span,
) -> ErrorGuaranteed {
@@ -707,12 +811,19 @@ fn non_exhaustive_match<'p, 'tcx>(
sp,
format!("non-exhaustive patterns: {joined_patterns} not covered"),
);
- err.span_label(sp, pattern_not_covered_label(&witnesses, &joined_patterns));
+ err.span_label(
+ sp,
+ format!(
+ "pattern{} {} not covered",
+ rustc_errors::pluralize!(witnesses.len()),
+ joined_patterns
+ ),
+ );
patterns_len = witnesses.len();
pattern = if witnesses.len() < 4 {
witnesses
.iter()
- .map(|witness| witness.to_pat(cx).to_string())
+ .map(|witness| witness.to_diagnostic_pat(cx).to_string())
.collect::<Vec<String>>()
.join(" | ")
} else {
@@ -720,19 +831,37 @@ fn non_exhaustive_match<'p, 'tcx>(
};
};
- adt_defined_here(cx, &mut err, scrut_ty, &witnesses);
+ // Point at the definition of non-covered `enum` variants.
+ if let Some(AdtDefinedHere { adt_def_span, ty, variants }) =
+ report_adt_defined_here(cx.tcx, scrut_ty, &witnesses, true)
+ {
+ let mut multi_span = MultiSpan::from_span(adt_def_span);
+ multi_span.push_span_label(adt_def_span, "");
+ for Variant { span } in variants {
+ multi_span.push_span_label(span, "not covered");
+ }
+ err.span_note(multi_span, format!("`{ty}` defined here"));
+ }
err.note(format!("the matched value is of type `{}`", scrut_ty));
- if !is_empty_match && witnesses.len() == 1 {
+ if !is_empty_match {
let mut non_exhaustive_tys = FxHashSet::default();
- collect_non_exhaustive_tys(&witnesses[0], &mut non_exhaustive_tys);
+ // Look at the first witness.
+ collect_non_exhaustive_tys(cx.tcx, &witnesses[0], &mut non_exhaustive_tys);
for ty in non_exhaustive_tys {
if ty.is_ptr_sized_integral() {
- err.note(format!(
- "`{ty}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
- exhaustively",
+ if ty == cx.tcx.types.usize {
+ err.note(format!(
+ "`{ty}` does not have a fixed maximum value, so half-open ranges are necessary to match \
+ exhaustively",
+ ));
+ } else if ty == cx.tcx.types.isize {
+ err.note(format!(
+ "`{ty}` does not have fixed minimum and maximum values, so half-open ranges are necessary to match \
+ exhaustively",
));
+ }
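A sketch of what the new `usize` note (and the nightly help just below) points at: with the named feature enabled, half-open ranges make the match exhaustive without a wildcard arm (assumption: nightly toolchain with `precise_pointer_size_matching`).

    #![feature(precise_pointer_size_matching)]

    fn classify(n: usize) -> &'static str {
        match n {
            0 => "zero",
            1.. => "positive", // half-open range covering the rest of `usize`
        }
    }

    fn main() {
        println!("{}", classify(7));
    }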
if cx.tcx.sess.is_nightly_build() {
err.help(format!(
"add `#![feature(precise_pointer_size_matching)]` to the crate attributes to \
@@ -770,8 +899,10 @@ fn non_exhaustive_match<'p, 'tcx>(
}
[only] => {
let only = &thir[*only];
- let (pre_indentation, is_multiline) = if let Some(snippet) = sm.indentation_before(only.span)
- && let Ok(with_trailing) = sm.span_extend_while(only.span, |c| c.is_whitespace() || c == ',')
+ let (pre_indentation, is_multiline) = if let Some(snippet) =
+ sm.indentation_before(only.span)
+ && let Ok(with_trailing) =
+ sm.span_extend_while(only.span, |c| c.is_whitespace() || c == ',')
&& sm.is_multiline(with_trailing)
{
(format!("\n{snippet}"), true)
@@ -852,18 +983,18 @@ fn non_exhaustive_match<'p, 'tcx>(
err.emit()
}
-pub(crate) fn joined_uncovered_patterns<'p, 'tcx>(
+fn joined_uncovered_patterns<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
- witnesses: &[DeconstructedPat<'p, 'tcx>],
+ witnesses: &[WitnessPat<'tcx>],
) -> String {
const LIMIT: usize = 3;
- let pat_to_str = |pat: &DeconstructedPat<'p, 'tcx>| pat.to_pat(cx).to_string();
+ let pat_to_str = |pat: &WitnessPat<'tcx>| pat.to_diagnostic_pat(cx).to_string();
match witnesses {
[] => bug!(),
- [witness] => format!("`{}`", witness.to_pat(cx)),
+ [witness] => format!("`{}`", witness.to_diagnostic_pat(cx)),
[head @ .., tail] if head.len() < LIMIT => {
let head: Vec<_> = head.iter().map(pat_to_str).collect();
- format!("`{}` and `{}`", head.join("`, `"), tail.to_pat(cx))
+ format!("`{}` and `{}`", head.join("`, `"), tail.to_diagnostic_pat(cx))
}
_ => {
let (head, tail) = witnesses.split_at(LIMIT);
@@ -873,59 +1004,64 @@ pub(crate) fn joined_uncovered_patterns<'p, 'tcx>(
}
}
-pub(crate) fn pattern_not_covered_label(
- witnesses: &[DeconstructedPat<'_, '_>],
- joined_patterns: &str,
-) -> String {
- format!("pattern{} {} not covered", rustc_errors::pluralize!(witnesses.len()), joined_patterns)
+fn collect_non_exhaustive_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ pat: &WitnessPat<'tcx>,
+ non_exhaustive_tys: &mut FxHashSet<Ty<'tcx>>,
+) {
+ if matches!(pat.ctor(), Constructor::NonExhaustive) {
+ non_exhaustive_tys.insert(pat.ty());
+ }
+ if let Constructor::IntRange(range) = pat.ctor() {
+ if range.is_beyond_boundaries(pat.ty(), tcx) {
+ // The range denotes the values before `isize::MIN` or the values after `usize::MAX`/`isize::MAX`.
+ non_exhaustive_tys.insert(pat.ty());
+ }
+ }
+ pat.iter_fields()
+ .for_each(|field_pat| collect_non_exhaustive_tys(tcx, field_pat, non_exhaustive_tys))
}
-/// Point at the definition of non-covered `enum` variants.
-fn adt_defined_here<'p, 'tcx>(
- cx: &MatchCheckCtxt<'p, 'tcx>,
- err: &mut Diagnostic,
+fn report_adt_defined_here<'tcx>(
+ tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
- witnesses: &[DeconstructedPat<'p, 'tcx>],
-) {
+ witnesses: &[WitnessPat<'tcx>],
+ point_at_non_local_ty: bool,
+) -> Option<AdtDefinedHere<'tcx>> {
let ty = ty.peel_refs();
- if let ty::Adt(def, _) = ty.kind() {
- let mut spans = vec![];
- if witnesses.len() < 5 {
- for sp in maybe_point_at_variant(cx, *def, witnesses.iter()) {
- spans.push(sp);
- }
- }
- let def_span = cx
- .tcx
- .hir()
- .get_if_local(def.did())
- .and_then(|node| node.ident())
- .map(|ident| ident.span)
- .unwrap_or_else(|| cx.tcx.def_span(def.did()));
- let mut span: MultiSpan =
- if spans.is_empty() { def_span.into() } else { spans.clone().into() };
-
- span.push_span_label(def_span, "");
- for pat in spans {
- span.push_span_label(pat, "not covered");
- }
- err.span_note(span, format!("`{ty}` defined here"));
+ let ty::Adt(def, _) = ty.kind() else {
+ return None;
+ };
+ let adt_def_span =
+ tcx.hir().get_if_local(def.did()).and_then(|node| node.ident()).map(|ident| ident.span);
+ let adt_def_span = if point_at_non_local_ty {
+ adt_def_span.unwrap_or_else(|| tcx.def_span(def.did()))
+ } else {
+ adt_def_span?
+ };
+
+ let mut variants = vec![];
+ for span in maybe_point_at_variant(tcx, *def, witnesses.iter().take(5)) {
+ variants.push(Variant { span });
}
+ Some(AdtDefinedHere { adt_def_span, ty, variants })
}
-fn maybe_point_at_variant<'a, 'p: 'a, 'tcx: 'a>(
- cx: &MatchCheckCtxt<'p, 'tcx>,
+fn maybe_point_at_variant<'a, 'tcx: 'a>(
+ tcx: TyCtxt<'tcx>,
def: AdtDef<'tcx>,
- patterns: impl Iterator<Item = &'a DeconstructedPat<'p, 'tcx>>,
+ patterns: impl Iterator<Item = &'a WitnessPat<'tcx>>,
) -> Vec<Span> {
use Constructor::*;
let mut covered = vec![];
for pattern in patterns {
if let Variant(variant_index) = pattern.ctor() {
- if let ty::Adt(this_def, _) = pattern.ty().kind() && this_def.did() != def.did() {
+ if let ty::Adt(this_def, _) = pattern.ty().kind()
+ && this_def.did() != def.did()
+ {
continue;
}
- let sp = def.variant(*variant_index).ident(cx.tcx).span;
+ let sp = def.variant(*variant_index).ident(tcx).span;
if covered.contains(&sp) {
// Don't point at variants that have already been covered due to other patterns to avoid
// visual clutter.
@@ -933,112 +1069,7 @@ fn maybe_point_at_variant<'a, 'p: 'a, 'tcx: 'a>(
}
covered.push(sp);
}
- covered.extend(maybe_point_at_variant(cx, def, pattern.iter_fields()));
+ covered.extend(maybe_point_at_variant(tcx, def, pattern.iter_fields()));
}
covered
}
-
-/// Check if a by-value binding is by-move. That is, check if the binding's type is not `Copy`.
-/// Check that there are no borrow or move conflicts in `binding @ subpat` patterns.
-///
-/// For example, this would reject:
-/// - `ref x @ Some(ref mut y)`,
-/// - `ref mut x @ Some(ref y)`,
-/// - `ref mut x @ Some(ref mut y)`,
-/// - `ref mut? x @ Some(y)`, and
-/// - `x @ Some(ref mut? y)`.
-///
-/// This analysis is *not* subsumed by NLL.
-fn check_borrow_conflicts_in_at_patterns<'tcx>(cx: &MatchVisitor<'_, '_, 'tcx>, pat: &Pat<'tcx>) {
- // Extract `sub` in `binding @ sub`.
- let PatKind::Binding { name, mode, ty, subpattern: Some(box ref sub), .. } = pat.kind else {
- return;
- };
-
- let is_binding_by_move = |ty: Ty<'tcx>| !ty.is_copy_modulo_regions(cx.tcx, cx.param_env);
-
- let sess = cx.tcx.sess;
-
- // Get the binding move, extract the mutability if by-ref.
- let mut_outer = match mode {
- BindingMode::ByValue if is_binding_by_move(ty) => {
- // We have `x @ pat` where `x` is by-move. Reject all borrows in `pat`.
- let mut conflicts_ref = Vec::new();
- sub.each_binding(|_, mode, _, span| match mode {
- BindingMode::ByValue => {}
- BindingMode::ByRef(_) => conflicts_ref.push(span),
- });
- if !conflicts_ref.is_empty() {
- sess.emit_err(BorrowOfMovedValue {
- binding_span: pat.span,
- conflicts_ref,
- name,
- ty,
- suggest_borrowing: Some(pat.span.shrink_to_lo()),
- });
- }
- return;
- }
- BindingMode::ByValue => return,
- BindingMode::ByRef(m) => m.mutability(),
- };
-
- // We now have `ref $mut_outer binding @ sub` (semantically).
- // Recurse into each binding in `sub` and find mutability or move conflicts.
- let mut conflicts_move = Vec::new();
- let mut conflicts_mut_mut = Vec::new();
- let mut conflicts_mut_ref = Vec::new();
- sub.each_binding(|name, mode, ty, span| {
- match mode {
- BindingMode::ByRef(mut_inner) => match (mut_outer, mut_inner.mutability()) {
- // Both sides are `ref`.
- (Mutability::Not, Mutability::Not) => {}
- // 2x `ref mut`.
- (Mutability::Mut, Mutability::Mut) => {
- conflicts_mut_mut.push(Conflict::Mut { span, name })
- }
- (Mutability::Not, Mutability::Mut) => {
- conflicts_mut_ref.push(Conflict::Mut { span, name })
- }
- (Mutability::Mut, Mutability::Not) => {
- conflicts_mut_ref.push(Conflict::Ref { span, name })
- }
- },
- BindingMode::ByValue if is_binding_by_move(ty) => {
- conflicts_move.push(Conflict::Moved { span, name }) // `ref mut?` + by-move conflict.
- }
- BindingMode::ByValue => {} // `ref mut?` + by-copy is fine.
- }
- });
-
- let report_mut_mut = !conflicts_mut_mut.is_empty();
- let report_mut_ref = !conflicts_mut_ref.is_empty();
- let report_move_conflict = !conflicts_move.is_empty();
-
- let mut occurrences = match mut_outer {
- Mutability::Mut => vec![Conflict::Mut { span: pat.span, name }],
- Mutability::Not => vec![Conflict::Ref { span: pat.span, name }],
- };
- occurrences.extend(conflicts_mut_mut);
- occurrences.extend(conflicts_mut_ref);
- occurrences.extend(conflicts_move);
-
- // Report errors if any.
- if report_mut_mut {
- // Report mutability conflicts for e.g. `ref mut x @ Some(ref mut y)`.
- sess.emit_err(MultipleMutBorrows { span: pat.span, occurrences });
- } else if report_mut_ref {
- // Report mutability conflicts for e.g. `ref x @ Some(ref mut y)` or the converse.
- match mut_outer {
- Mutability::Mut => {
- sess.emit_err(AlreadyMutBorrowed { span: pat.span, occurrences });
- }
- Mutability::Not => {
- sess.emit_err(AlreadyBorrowed { span: pat.span, occurrences });
- }
- };
- } else if report_move_conflict {
- // Report by-ref and by-move conflicts, e.g. `ref x @ y`.
- sess.emit_err(MovedWhileBorrowed { span: pat.span, occurrences });
- }
-}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index ae4424660..48a590f5d 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -7,7 +7,7 @@ use rustc_middle::mir;
use rustc_middle::thir::{FieldPat, Pat, PatKind};
use rustc_middle::ty::{self, Ty, TyCtxt, ValTree};
use rustc_session::lint;
-use rustc_span::Span;
+use rustc_span::{ErrorGuaranteed, Span};
use rustc_target::abi::{FieldIdx, VariantIdx};
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
use rustc_trait_selection::traits::{self, ObligationCause};
@@ -48,7 +48,7 @@ struct ConstToPat<'tcx> {
// This tracks if we emitted some hard error for a given const value, so that
// we will not subsequently issue an irrelevant lint for the same const
// value.
- saw_const_match_error: Cell<bool>,
+ saw_const_match_error: Cell<Option<ErrorGuaranteed>>,
// This tracks if we emitted some diagnostic for a given const value, so that
// we will not subsequently issue an irrelevant lint for the same const
@@ -84,7 +84,7 @@ impl<'tcx> ConstToPat<'tcx> {
span,
infcx,
param_env: pat_ctxt.param_env,
- saw_const_match_error: Cell::new(false),
+ saw_const_match_error: Cell::new(None),
saw_const_match_lint: Cell::new(false),
behind_reference: Cell::new(false),
treat_byte_string_as_slice: pat_ctxt
@@ -123,6 +123,8 @@ impl<'tcx> ConstToPat<'tcx> {
});
debug!(?check_body_for_struct_match_violation, ?mir_structural_match_violation);
+ let have_valtree =
+ matches!(cv, mir::Const::Ty(c) if matches!(c.kind(), ty::ConstKind::Value(_)));
let inlined_const_as_pat = match cv {
mir::Const::Ty(c) => match c.kind() {
ty::ConstKind::Param(_)
@@ -154,7 +156,7 @@ impl<'tcx> ConstToPat<'tcx> {
}),
};
- if !self.saw_const_match_error.get() {
+ if self.saw_const_match_error.get().is_none() {
// If we were able to successfully convert the const to some pat (possibly with some
// lints, but no errors), double-check that all types in the const implement
// `Structural` and `PartialEq`.
@@ -180,36 +182,35 @@ impl<'tcx> ConstToPat<'tcx> {
if let Some(non_sm_ty) = structural {
if !self.type_has_partial_eq_impl(cv.ty()) {
- if let ty::Adt(def, ..) = non_sm_ty.kind() {
+ let e = if let ty::Adt(def, ..) = non_sm_ty.kind() {
if def.is_union() {
let err = UnionPattern { span: self.span };
- self.tcx().sess.emit_err(err);
+ self.tcx().sess.emit_err(err)
} else {
// fatal avoids ICE from resolution of nonexistent method (rare case).
self.tcx()
.sess
- .emit_fatal(TypeNotStructural { span: self.span, non_sm_ty });
+ .emit_fatal(TypeNotStructural { span: self.span, non_sm_ty })
}
} else {
let err = InvalidPattern { span: self.span, non_sm_ty };
- self.tcx().sess.emit_err(err);
- }
+ self.tcx().sess.emit_err(err)
+ };
// All branches above emitted an error. Don't print any more lints.
- // The pattern we return is irrelevant since we errored.
- return Box::new(Pat { span: self.span, ty: cv.ty(), kind: PatKind::Wild });
+ // We errored. Signal that in the pattern, so that follow up errors can be silenced.
+ let kind = PatKind::Error(e);
+ return Box::new(Pat { span: self.span, ty: cv.ty(), kind });
+ } else if let ty::Adt(..) = cv.ty().kind() && matches!(cv, mir::Const::Val(..)) {
+ // This branch is only entered when the current `cv` is `mir::Const::Val`.
+ // This is because `mir::Const::ty` has already been handled by `Self::recur`
+ // and the invalid types may be ignored.
+ let err = TypeNotStructural { span: self.span, non_sm_ty };
+ let e = self.tcx().sess.emit_err(err);
+ let kind = PatKind::Error(e);
+ return Box::new(Pat { span: self.span, ty: cv.ty(), kind });
} else if !self.saw_const_match_lint.get() {
if let Some(mir_structural_match_violation) = mir_structural_match_violation {
match non_sm_ty.kind() {
- ty::RawPtr(pointee)
- if pointee.ty.is_sized(self.tcx(), self.param_env) => {}
- ty::FnPtr(..) | ty::RawPtr(..) => {
- self.tcx().emit_spanned_lint(
- lint::builtin::POINTER_STRUCTURAL_MATCH,
- self.id,
- self.span,
- PointerPattern,
- );
- }
ty::Adt(..) if mir_structural_match_violation => {
self.tcx().emit_spanned_lint(
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
@@ -227,19 +228,15 @@ impl<'tcx> ConstToPat<'tcx> {
}
}
}
- } else if !self.saw_const_match_lint.get() {
- match cv.ty().kind() {
- ty::RawPtr(pointee) if pointee.ty.is_sized(self.tcx(), self.param_env) => {}
- ty::FnPtr(..) | ty::RawPtr(..) => {
- self.tcx().emit_spanned_lint(
- lint::builtin::POINTER_STRUCTURAL_MATCH,
- self.id,
- self.span,
- PointerPattern,
- );
- }
- _ => {}
- }
+ } else if !have_valtree && !self.saw_const_match_lint.get() {
+ // The only way valtree construction can fail without the structural match
+ // checker finding a violation is if there is a pointer somewhere.
+ self.tcx().emit_spanned_lint(
+ lint::builtin::POINTER_STRUCTURAL_MATCH,
+ self.id,
+ self.span,
+ PointerPattern,
+ );
}
// Always check for `PartialEq`, even if we emitted other lints. (But not if there were
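The switch from `PatKind::Wild` to the new `PatKind::Error` throughout this file is what silences follow-up diagnostics; a sketch of code that reaches the `TypeNotStructural` path (this program is expected to error and is shown for illustration only):

    // No `#[derive(PartialEq, Eq)]`, so `S` has no structural equality.
    struct S(i32);

    const C: S = S(1);

    fn main() {
        match S(1) {
            // error: `S` must be annotated with `#[derive(PartialEq, Eq)]` to be used in a
            // pattern; with `PatKind::Error` the wildcard arm below no longer gets a
            // spurious "unreachable pattern" lint on top of that error.
            C => {}
            _ => {}
        }
    }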
@@ -330,7 +327,7 @@ impl<'tcx> ConstToPat<'tcx> {
// Backwards compatibility hack because we can't cause hard errors on these
// types, so we compare them via `PartialEq::eq` at runtime.
ty::Adt(..) if !self.type_marked_structural(ty) && self.behind_reference.get() => {
- if !self.saw_const_match_error.get() && !self.saw_const_match_lint.get() {
+ if self.saw_const_match_error.get().is_none() && !self.saw_const_match_lint.get() {
self.saw_const_match_lint.set(true);
tcx.emit_spanned_lint(
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
@@ -345,18 +342,18 @@ impl<'tcx> ConstToPat<'tcx> {
return Err(FallbackToOpaqueConst);
}
ty::FnDef(..) => {
- self.saw_const_match_error.set(true);
- tcx.sess.emit_err(InvalidPattern { span, non_sm_ty: ty });
- // We errored, so the pattern we generate is irrelevant.
- PatKind::Wild
+ let e = tcx.sess.emit_err(InvalidPattern { span, non_sm_ty: ty });
+ self.saw_const_match_error.set(Some(e));
+ // We errored. Signal that in the pattern, so that follow up errors can be silenced.
+ PatKind::Error(e)
}
ty::Adt(adt_def, _) if !self.type_marked_structural(ty) => {
debug!("adt_def {:?} has !type_marked_structural for cv.ty: {:?}", adt_def, ty,);
- self.saw_const_match_error.set(true);
let err = TypeNotStructural { span, non_sm_ty: ty };
- tcx.sess.emit_err(err);
- // We errored, so the pattern we generate is irrelevant.
- PatKind::Wild
+ let e = tcx.sess.emit_err(err);
+ self.saw_const_match_error.set(Some(e));
+ // We errored. Signal that in the pattern, so that follow up errors can be silenced.
+ PatKind::Error(e)
}
ty::Adt(adt_def, args) if adt_def.is_enum() => {
let (&variant_index, fields) = cv.unwrap_branch().split_first().unwrap();
@@ -380,11 +377,19 @@ impl<'tcx> ConstToPat<'tcx> {
subpatterns: self
.field_pats(cv.unwrap_branch().iter().copied().zip(fields.iter()))?,
},
- ty::Adt(def, args) => PatKind::Leaf {
- subpatterns: self.field_pats(cv.unwrap_branch().iter().copied().zip(
- def.non_enum_variant().fields.iter().map(|field| field.ty(self.tcx(), args)),
- ))?,
- },
+ ty::Adt(def, args) => {
+ assert!(!def.is_union()); // Valtree construction would never succeed for unions.
+ PatKind::Leaf {
+ subpatterns: self.field_pats(
+ cv.unwrap_branch().iter().copied().zip(
+ def.non_enum_variant()
+ .fields
+ .iter()
+ .map(|field| field.ty(self.tcx(), args)),
+ ),
+ )?,
+ }
+ }
ty::Slice(elem_ty) => PatKind::Slice {
prefix: cv
.unwrap_branch()
@@ -416,7 +421,9 @@ impl<'tcx> ConstToPat<'tcx> {
// instead of a hard error.
ty::Adt(_, _) if !self.type_marked_structural(*pointee_ty) => {
if self.behind_reference.get() {
- if !self.saw_const_match_error.get() && !self.saw_const_match_lint.get() {
+ if self.saw_const_match_error.get().is_none()
+ && !self.saw_const_match_lint.get()
+ {
self.saw_const_match_lint.set(true);
tcx.emit_spanned_lint(
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
@@ -427,14 +434,16 @@ impl<'tcx> ConstToPat<'tcx> {
}
return Err(FallbackToOpaqueConst);
} else {
- if !self.saw_const_match_error.get() {
- self.saw_const_match_error.set(true);
+ if let Some(e) = self.saw_const_match_error.get() {
+ // We already errored. Signal that in the pattern, so that follow up errors can be silenced.
+ PatKind::Error(e)
+ } else {
let err = TypeNotStructural { span, non_sm_ty: *pointee_ty };
- tcx.sess.emit_err(err);
+ let e = tcx.sess.emit_err(err);
+ self.saw_const_match_error.set(Some(e));
+ // We errored. Signal that in the pattern, so that follow up errors can be silenced.
+ PatKind::Error(e)
}
- tcx.sess.delay_span_bug(span, "`saw_const_match_error` set but no error?");
- // We errored, so the pattern we generate is irrelevant.
- PatKind::Wild
}
}
// All other references are converted into deref patterns and then recursively
@@ -443,11 +452,9 @@ impl<'tcx> ConstToPat<'tcx> {
_ => {
if !pointee_ty.is_sized(tcx, param_env) && !pointee_ty.is_slice() {
let err = UnsizedPattern { span, non_sm_ty: *pointee_ty };
- tcx.sess.emit_err(err);
-
- // FIXME: introduce PatKind::Error to silence follow up diagnostics due to unreachable patterns.
- // We errored, so the pattern we generate is irrelevant.
- PatKind::Wild
+ let e = tcx.sess.emit_err(err);
+ // We errored. Signal that in the pattern, so that follow up errors can be silenced.
+ PatKind::Error(e)
} else {
let old = self.behind_reference.replace(true);
// `b"foo"` produces a `&[u8; 3]`, but you can't use constants of array type when
@@ -469,20 +476,25 @@ impl<'tcx> ConstToPat<'tcx> {
}
}
},
- ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) => {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::RawPtr(..) => {
+ // The raw pointers we see here have been "vetted" by valtree construction to be
+ // just integers, so we simply allow them.
PatKind::Constant { value: mir::Const::Ty(ty::Const::new_value(tcx, cv, ty)) }
}
- ty::FnPtr(..) | ty::RawPtr(..) => unreachable!(),
+ ty::FnPtr(..) => {
+ // Valtree construction would never succeed for these, so this is unreachable.
+ unreachable!()
+ }
_ => {
- self.saw_const_match_error.set(true);
let err = InvalidPattern { span, non_sm_ty: ty };
- tcx.sess.emit_err(err);
- // We errored, so the pattern we generate is irrelevant.
- PatKind::Wild
+ let e = tcx.sess.emit_err(err);
+ self.saw_const_match_error.set(Some(e));
+ // We errored. Signal that in the pattern, so that follow up errors can be silenced.
+ PatKind::Error(e)
}
};
- if !self.saw_const_match_error.get()
+ if self.saw_const_match_error.get().is_none()
&& !self.saw_const_match_lint.get()
&& mir_structural_match_violation
// FIXME(#73448): Find a way to bring const qualification into parity with
@@ -497,7 +509,7 @@ impl<'tcx> ConstToPat<'tcx> {
lint::builtin::NONTRIVIAL_STRUCTURAL_MATCH,
id,
span,
- NontrivialStructuralMatch {non_sm_ty}
+ NontrivialStructuralMatch { non_sm_ty },
);
}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
index b79beb1c5..0c7c2c6f9 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
@@ -39,35 +39,35 @@
//!
//! Splitting is implemented in the [`Constructor::split`] function. We don't do splitting for
//! or-patterns; instead we just try the alternatives one-by-one. For details on splitting
-//! wildcards, see [`SplitWildcard`]; for integer ranges, see [`SplitIntRange`]; for slices, see
-//! [`SplitVarLenSlice`].
+//! wildcards, see [`Constructor::split`]; for integer ranges, see
+//! [`IntRange::split`]; for slices, see [`Slice::split`].
use std::cell::Cell;
use std::cmp::{self, max, min, Ordering};
use std::fmt;
use std::iter::once;
-use std::ops::RangeInclusive;
use smallvec::{smallvec, SmallVec};
+use rustc_apfloat::ieee::{DoubleS, IeeeFloat, SingleS};
use rustc_data_structures::captures::Captures;
-use rustc_hir::{HirId, RangeEnd};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::RangeEnd;
use rustc_index::Idx;
use rustc_middle::middle::stability::EvalResult;
use rustc_middle::mir;
-use rustc_middle::thir::{FieldPat, Pat, PatKind, PatRange};
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::thir::{FieldPat, Pat, PatKind, PatRange, PatRangeBoundary};
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::{self, Ty, TyCtxt, VariantDef};
-use rustc_session::lint;
use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::{FieldIdx, Integer, Size, VariantIdx, FIRST_VARIANT};
+use rustc_target::abi::{FieldIdx, Integer, VariantIdx, FIRST_VARIANT};
use self::Constructor::*;
+use self::MaybeInfiniteInt::*;
use self::SliceKind::*;
-use super::compare_const_vals;
use super::usefulness::{MatchCheckCtxt, PatCtxt};
-use crate::errors::{Overlap, OverlappingRangeEndpoints};
/// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
fn expand_or_pat<'p, 'tcx>(pat: &'p Pat<'tcx>) -> Vec<&'p Pat<'tcx>> {
@@ -86,324 +86,317 @@ fn expand_or_pat<'p, 'tcx>(pat: &'p Pat<'tcx>) -> Vec<&'p Pat<'tcx>> {
pats
}
-/// An inclusive interval, used for precise integer exhaustiveness checking.
-/// `IntRange`s always store a contiguous range. This means that values are
-/// encoded such that `0` encodes the minimum value for the integer,
-/// regardless of the signedness.
-/// For example, the pattern `-128..=127i8` is encoded as `0..=255`.
-/// This makes comparisons and arithmetic on interval endpoints much more
-/// straightforward. See `signed_bias` for details.
-///
-/// `IntRange` is never used to encode an empty range or a "range" that wraps
-/// around the (offset) space: i.e., `range.lo <= range.hi`.
-#[derive(Clone, PartialEq, Eq)]
-pub(crate) struct IntRange {
- range: RangeInclusive<u128>,
- /// Keeps the bias used for encoding the range. It depends on the type of the range and
- /// possibly the pointer size of the current architecture. The algorithm ensures we never
- /// compare `IntRange`s with different types/architectures.
- bias: u128,
+/// Whether we have seen a constructor in the column or not.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+enum Presence {
+ Unseen,
+ Seen,
}
-impl IntRange {
- #[inline]
- fn is_integral(ty: Ty<'_>) -> bool {
- matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_) | ty::Bool)
- }
-
- fn is_singleton(&self) -> bool {
- self.range.start() == self.range.end()
- }
-
- fn boundaries(&self) -> (u128, u128) {
- (*self.range.start(), *self.range.end())
- }
+/// A possibly infinite integer. Values are encoded such that the ordering on `u128` matches the
+/// natural order on the original type. For example, `-128i8` is encoded as `0` and `127i8` as
+/// `255`. See `signed_bias` for details.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub(crate) enum MaybeInfiniteInt {
+ NegInfinity,
+ /// Encoded value. DO NOT CONSTRUCT BY HAND; use `new_finite`.
+ Finite(u128),
+ /// The integer after `u128::MAX`. We need it to represent `x..=u128::MAX` as an exclusive range.
+ JustAfterMax,
+ PosInfinity,
+}
- #[inline]
- fn integral_size_and_signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Size, u128)> {
+impl MaybeInfiniteInt {
+ // The return value of `signed_bias` should be XORed with a value to encode/decode it.
+ fn signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> u128 {
match *ty.kind() {
- ty::Bool => Some((Size::from_bytes(1), 0)),
- ty::Char => Some((Size::from_bytes(4), 0)),
ty::Int(ity) => {
- let size = Integer::from_int_ty(&tcx, ity).size();
- Some((size, 1u128 << (size.bits() as u128 - 1)))
+ let bits = Integer::from_int_ty(&tcx, ity).size().bits() as u128;
+ 1u128 << (bits - 1)
}
- ty::Uint(uty) => Some((Integer::from_uint_ty(&tcx, uty).size(), 0)),
- _ => None,
+ _ => 0,
}
}
- #[inline]
- fn from_constant<'tcx>(
+ fn new_finite(tcx: TyCtxt<'_>, ty: Ty<'_>, bits: u128) -> Self {
+ let bias = Self::signed_bias(tcx, ty);
+ // Perform a shift if the underlying types are signed, which makes the interval arithmetic
+ // type-independent.
+ let x = bits ^ bias;
+ Finite(x)
+ }
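A quick standalone check of the encoding used by `new_finite` above: signed values are XORed with `1 << (bits - 1)` so that the unsigned order of the encoded bits matches the signed order of the original values. This is an illustrative sketch, not compiler code.

```
fn main() {
    // For an 8-bit signed type the bias is `1 << (8 - 1)`.
    let bias: u8 = 1 << 7;
    // Encoding: XOR the raw two's-complement bits with the bias.
    assert_eq!((-128i8 as u8) ^ bias, 0); // i8::MIN gets the smallest encoding
    assert_eq!((0i8 as u8) ^ bias, 128);
    assert_eq!((127i8 as u8) ^ bias, 255); // i8::MAX gets the largest encoding
    // Unsigned types use a bias of 0, so their encoding is the identity.
    assert_eq!(200u8 ^ 0, 200);
    println!("unsigned order of encoded bits == signed order of original values");
}
```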
+ fn from_pat_range_bdy<'tcx>(
+ bdy: PatRangeBoundary<'tcx>,
+ ty: Ty<'tcx>,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
- value: mir::Const<'tcx>,
- ) -> Option<IntRange> {
- let ty = value.ty();
- let (target_size, bias) = Self::integral_size_and_signed_bias(tcx, ty)?;
- let val = match value {
- mir::Const::Ty(c) if let ty::ConstKind::Value(valtree) = c.kind() => {
- valtree.unwrap_leaf().to_bits(target_size).ok()
- },
- // This is a more general form of the previous case.
- _ => value.try_eval_bits(tcx, param_env),
- }?;
-
- let val = val ^ bias;
- Some(IntRange { range: val..=val, bias })
+ ) -> Self {
+ match bdy {
+ PatRangeBoundary::NegInfinity => NegInfinity,
+ PatRangeBoundary::Finite(value) => {
+ let bits = value.eval_bits(tcx, param_env);
+ Self::new_finite(tcx, ty, bits)
+ }
+ PatRangeBoundary::PosInfinity => PosInfinity,
+ }
}
- #[inline]
- fn from_range<'tcx>(
- tcx: TyCtxt<'tcx>,
- lo: u128,
- hi: u128,
+ /// Used only for diagnostics.
+ /// Note: it is possible to get `isize/usize::MAX+1` here, as explained in the doc for
+ /// [`IntRange::split`]. This cannot be represented as a `Const`, so we represent it with
+ /// `PosInfinity`.
+ fn to_diagnostic_pat_range_bdy<'tcx>(
+ self,
ty: Ty<'tcx>,
- end: &RangeEnd,
- ) -> Option<IntRange> {
- Self::is_integral(ty).then(|| {
- // Perform a shift if the underlying types are signed,
- // which makes the interval arithmetic simpler.
- let bias = IntRange::signed_bias(tcx, ty);
- let (lo, hi) = (lo ^ bias, hi ^ bias);
- let offset = (*end == RangeEnd::Excluded) as u128;
- if lo > hi || (lo == hi && *end == RangeEnd::Excluded) {
- // This should have been caught earlier by E0030.
- bug!("malformed range pattern: {}..={}", lo, (hi - offset));
+ tcx: TyCtxt<'tcx>,
+ ) -> PatRangeBoundary<'tcx> {
+ match self {
+ NegInfinity => PatRangeBoundary::NegInfinity,
+ Finite(x) => {
+ let bias = Self::signed_bias(tcx, ty);
+ let bits = x ^ bias;
+ let size = ty.primitive_size(tcx);
+ match Scalar::try_from_uint(bits, size) {
+ Some(scalar) => {
+ let value = mir::Const::from_scalar(tcx, scalar, ty);
+ PatRangeBoundary::Finite(value)
+ }
+ // The value doesn't fit. Since `x >= 0` and 0 always encodes the minimum value
+ // for a type, the problem isn't that the value is too small. So it must be too
+ // large.
+ None => PatRangeBoundary::PosInfinity,
+ }
}
- IntRange { range: lo..=(hi - offset), bias }
- })
+ JustAfterMax | PosInfinity => PatRangeBoundary::PosInfinity,
+ }
}
- // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
- fn signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> u128 {
- match *ty.kind() {
- ty::Int(ity) => {
- let bits = Integer::from_int_ty(&tcx, ity).size().bits() as u128;
- 1u128 << (bits - 1)
- }
- _ => 0,
+ /// Note: this will not turn a finite value into an infinite one or vice-versa.
+ pub(crate) fn minus_one(self) -> Self {
+ match self {
+ Finite(n) => match n.checked_sub(1) {
+ Some(m) => Finite(m),
+ None => bug!(),
+ },
+ JustAfterMax => Finite(u128::MAX),
+ x => x,
}
}
-
- fn is_subrange(&self, other: &Self) -> bool {
- other.range.start() <= self.range.start() && self.range.end() <= other.range.end()
+ /// Note: this will not turn a finite value into an infinite one or vice-versa.
+ pub(crate) fn plus_one(self) -> Self {
+ match self {
+ Finite(n) => match n.checked_add(1) {
+ Some(m) => Finite(m),
+ None => JustAfterMax,
+ },
+ JustAfterMax => bug!(),
+ x => x,
+ }
}
+}
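For illustration, a stripped-down model of `MaybeInfiniteInt` (without the signed-bias handling) showing why `JustAfterMax` exists: an inclusive upper bound of `u128::MAX` can only be stored as an exclusive bound if there is a value one past it. This is a simplified sketch, not the compiler's type.

```
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[allow(dead_code)]
enum MaybeInfiniteInt {
    NegInfinity,
    Finite(u128),
    JustAfterMax,
    PosInfinity,
}
use MaybeInfiniteInt::*;

impl MaybeInfiniteInt {
    fn plus_one(self) -> Self {
        match self {
            Finite(n) => n.checked_add(1).map(Finite).unwrap_or(JustAfterMax),
            // Other cases simplified here; the real code `bug!()`s on `JustAfterMax`.
            x => x,
        }
    }
}

fn main() {
    // The inclusive range `5..=u128::MAX` is stored exclusively as `Finite(5)..JustAfterMax`.
    let (lo, hi) = (Finite(5), Finite(u128::MAX).plus_one());
    assert_eq!(hi, JustAfterMax);
    // The derived `Ord` keeps `JustAfterMax` above every `Finite` value, so `lo < hi` holds.
    assert!(lo < hi);
    println!("{lo:?}..{hi:?}");
}
```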
- fn intersection(&self, other: &Self) -> Option<Self> {
- let (lo, hi) = self.boundaries();
- let (other_lo, other_hi) = other.boundaries();
- if lo <= other_hi && other_lo <= hi {
- Some(IntRange { range: max(lo, other_lo)..=min(hi, other_hi), bias: self.bias })
- } else {
- None
- }
+/// An exclusive interval, used for precise integer exhaustiveness checking. `IntRange`s always
+/// store a contiguous range.
+///
+/// `IntRange` is never used to encode an empty range or a "range" that wraps around the (offset)
+/// space: i.e., `range.lo < range.hi`.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub(crate) struct IntRange {
+ pub(crate) lo: MaybeInfiniteInt, // Must not be `PosInfinity`.
+ pub(crate) hi: MaybeInfiniteInt, // Must not be `NegInfinity`.
+}
+
+impl IntRange {
+ #[inline]
+ pub(super) fn is_integral(ty: Ty<'_>) -> bool {
+ matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_))
}
- fn suspicious_intersection(&self, other: &Self) -> bool {
- // `false` in the following cases:
- // 1 ---- // 1 ---------- // 1 ---- // 1 ----
- // 2 ---------- // 2 ---- // 2 ---- // 2 ----
- //
- // The following are currently `false`, but could be `true` in the future (#64007):
- // 1 --------- // 1 ---------
- // 2 ---------- // 2 ----------
- //
- // `true` in the following cases:
- // 1 ------- // 1 -------
- // 2 -------- // 2 -------
- let (lo, hi) = self.boundaries();
- let (other_lo, other_hi) = other.boundaries();
- (lo == other_hi || hi == other_lo) && !self.is_singleton() && !other.is_singleton()
+ /// Best effort; will not know that e.g. `255u8..` is a singleton.
+ pub(super) fn is_singleton(&self) -> bool {
+ // Since `lo` and `hi` can't be the same `Infinity` and `plus_one` never changes from finite
+        // to infinite, this correctly only detects ranges that contain exactly one `Finite(x)`.
+ self.lo.plus_one() == self.hi
}
- /// Only used for displaying the range properly.
- fn to_pat<'tcx>(&self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Pat<'tcx> {
- let (lo, hi) = self.boundaries();
+ #[inline]
+ fn from_bits<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, bits: u128) -> IntRange {
+ let x = MaybeInfiniteInt::new_finite(tcx, ty, bits);
+ IntRange { lo: x, hi: x.plus_one() }
+ }
- let bias = self.bias;
- let (lo, hi) = (lo ^ bias, hi ^ bias);
+ #[inline]
+ fn from_range(lo: MaybeInfiniteInt, mut hi: MaybeInfiniteInt, end: RangeEnd) -> IntRange {
+ if end == RangeEnd::Included {
+ hi = hi.plus_one();
+ }
+ if lo >= hi {
+ // This should have been caught earlier by E0030.
+ bug!("malformed range pattern: {lo:?}..{hi:?}");
+ }
+ IntRange { lo, hi }
+ }
- let env = ty::ParamEnv::empty().and(ty);
- let lo_const = mir::Const::from_bits(tcx, lo, env);
- let hi_const = mir::Const::from_bits(tcx, hi, env);
+ fn is_subrange(&self, other: &Self) -> bool {
+ other.lo <= self.lo && self.hi <= other.hi
+ }
- let kind = if lo == hi {
- PatKind::Constant { value: lo_const }
+ fn intersection(&self, other: &Self) -> Option<Self> {
+ if self.lo < other.hi && other.lo < self.hi {
+ Some(IntRange { lo: max(self.lo, other.lo), hi: min(self.hi, other.hi) })
} else {
- PatKind::Range(Box::new(PatRange {
- lo: lo_const,
- hi: hi_const,
- end: RangeEnd::Included,
- }))
- };
-
- Pat { ty, span: DUMMY_SP, kind }
+ None
+ }
}
- /// Lint on likely incorrect range patterns (#63987)
- pub(super) fn lint_overlapping_range_endpoints<'a, 'p: 'a, 'tcx: 'a>(
+ /// Partition a range of integers into disjoint subranges. This does constructor splitting for
+ /// integer ranges as explained at the top of the file.
+ ///
+ /// This returns an output that covers `self`. The output is split so that the only
+ /// intersections between an output range and a column range are inclusions. No output range
+ /// straddles the boundary of one of the inputs.
+ ///
+ /// Additionally, we track for each output range whether it is covered by one of the column ranges or not.
+ ///
+ /// The following input:
+ /// ```text
+ /// (--------------------------) // `self`
+ /// (------) (----------) (-)
+ /// (------) (--------)
+ /// ```
+ /// is first intersected with `self`:
+ /// ```text
+ /// (--------------------------) // `self`
+ /// (----) (----------) (-)
+ /// (------) (--------)
+ /// ```
+ /// and then iterated over as follows:
+ /// ```text
+ /// (-(--)-(-)-(------)-)--(-)-
+ /// ```
+ /// where each sequence of dashes is an output range, and dashes outside parentheses are marked
+ /// as `Presence::Missing`.
+ ///
+ /// ## `isize`/`usize`
+ ///
+ /// Whereas a wildcard of type `i32` stands for the range `i32::MIN..=i32::MAX`, a `usize`
+    /// wildcard stands for `0..PosInfinity` and an `isize` wildcard stands for
+ /// `NegInfinity..PosInfinity`. In other words, as far as `IntRange` is concerned, there are
+ /// values before `isize::MIN` and after `usize::MAX`/`isize::MAX`.
+    /// This is to prevent e.g. `0..(u32::MAX as usize)` from being exhaustive on one architecture and
+ /// not others. See discussions around the `precise_pointer_size_matching` feature for more
+ /// details.
+ ///
+ /// These infinities affect splitting subtly: it is possible to get `NegInfinity..0` and
+ /// `usize::MAX+1..PosInfinity` in the output. Diagnostics must be careful to handle these
+ /// fictitious ranges sensibly.
+ fn split(
&self,
- pcx: &PatCtxt<'_, 'p, 'tcx>,
- pats: impl Iterator<Item = &'a DeconstructedPat<'p, 'tcx>>,
- column_count: usize,
- lint_root: HirId,
- ) {
- if self.is_singleton() {
- return;
- }
-
- if column_count != 1 {
- // FIXME: for now, only check for overlapping ranges on simple range
- // patterns. Otherwise with the current logic the following is detected
- // as overlapping:
- // ```
- // match (0u8, true) {
- // (0 ..= 125, false) => {}
- // (125 ..= 255, true) => {}
- // _ => {}
- // }
- // ```
- return;
- }
-
- let overlap: Vec<_> = pats
- .filter_map(|pat| Some((pat.ctor().as_int_range()?, pat.span())))
- .filter(|(range, _)| self.suspicious_intersection(range))
- .map(|(range, span)| Overlap {
- range: self.intersection(&range).unwrap().to_pat(pcx.cx.tcx, pcx.ty),
- span,
- })
+ column_ranges: impl Iterator<Item = IntRange>,
+ ) -> impl Iterator<Item = (Presence, IntRange)> {
+ // The boundaries of ranges in `column_ranges` intersected with `self`.
+ // We do parenthesis matching for input ranges. A boundary counts as +1 if it starts
+ // a range and -1 if it ends it. When the count is > 0 between two boundaries, we
+ // are within an input range.
+ let mut boundaries: Vec<(MaybeInfiniteInt, isize)> = column_ranges
+ .filter_map(|r| self.intersection(&r))
+ .flat_map(|r| [(r.lo, 1), (r.hi, -1)])
.collect();
+ // We sort by boundary, and for each boundary we sort the "closing parentheses" first. The
+        // order of +1/-1 for the same boundary value is actually irrelevant, because we only look at
+ // the accumulated count between distinct boundary values.
+ boundaries.sort_unstable();
+
+ // Accumulate parenthesis counts.
+ let mut paren_counter = 0isize;
+ // Gather pairs of adjacent boundaries.
+ let mut prev_bdy = self.lo;
+ boundaries
+ .into_iter()
+ // End with the end of the range. The count is ignored.
+ .chain(once((self.hi, 0)))
+ // List pairs of adjacent boundaries and the count between them.
+ .map(move |(bdy, delta)| {
+ // `delta` affects the count as we cross `bdy`, so the relevant count between
+ // `prev_bdy` and `bdy` is untouched by `delta`.
+ let ret = (prev_bdy, paren_counter, bdy);
+ prev_bdy = bdy;
+ paren_counter += delta;
+ ret
+ })
+ // Skip empty ranges.
+ .filter(|&(prev_bdy, _, bdy)| prev_bdy != bdy)
+ // Convert back to ranges.
+ .map(move |(prev_bdy, paren_count, bdy)| {
+ use Presence::*;
+ let presence = if paren_count > 0 { Seen } else { Unseen };
+ let range = IntRange { lo: prev_bdy, hi: bdy };
+ (presence, range)
+ })
+ }
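The sweep above can be checked on plain half-open `u128` ranges; the following self-contained sketch mirrors the parenthesis-counting logic on a hypothetical helper, without the `MaybeInfiniteInt` encoding.

```
// Split `this` against `column`, mirroring `IntRange::split` on half-open `u128` ranges.
fn split(this: (u128, u128), column: &[(u128, u128)]) -> Vec<(bool, (u128, u128))> {
    // Boundaries of intersections with `this`: +1 opens a range, -1 closes it.
    let mut boundaries: Vec<(u128, i32)> = column
        .iter()
        .filter_map(|&(lo, hi)| {
            let (lo, hi) = (lo.max(this.0), hi.min(this.1));
            (lo < hi).then_some([(lo, 1), (hi, -1)])
        })
        .flatten()
        .collect();
    boundaries.sort_unstable();

    let mut out = Vec::new();
    let mut count = 0;
    let mut prev = this.0;
    for (bdy, delta) in boundaries.into_iter().chain(std::iter::once((this.1, 0))) {
        if prev != bdy {
            // `count > 0` means this output range is covered by some column range ("seen").
            out.push((count > 0, (prev, bdy)));
        }
        prev = bdy;
        count += delta;
    }
    out
}

fn main() {
    // Splitting 0..10 against the column ranges 2..4 and 3..7.
    for (seen, r) in split((0, 10), &[(2, 4), (3, 7)]) {
        println!("{:?} seen={}", r, seen);
    }
    // Prints: (0, 2) false, (2, 3) true, (3, 4) true, (4, 7) true, (7, 10) false
}
```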
- if !overlap.is_empty() {
- pcx.cx.tcx.emit_spanned_lint(
- lint::builtin::OVERLAPPING_RANGE_ENDPOINTS,
- lint_root,
- pcx.span,
- OverlappingRangeEndpoints { overlap, range: pcx.span },
- );
+ /// Whether the range denotes the fictitious values before `isize::MIN` or after
+ /// `usize::MAX`/`isize::MAX` (see doc of [`IntRange::split`] for why these exist).
+ pub(crate) fn is_beyond_boundaries<'tcx>(&self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> bool {
+ ty.is_ptr_sized_integral() && !tcx.features().precise_pointer_size_matching && {
+ // The two invalid ranges are `NegInfinity..isize::MIN` (represented as
+ // `NegInfinity..0`), and `{u,i}size::MAX+1..PosInfinity`. `to_diagnostic_pat_range_bdy`
+ // converts `MAX+1` to `PosInfinity`, and we couldn't have `PosInfinity` in `self.lo`
+ // otherwise.
+ let lo = self.lo.to_diagnostic_pat_range_bdy(ty, tcx);
+ matches!(lo, PatRangeBoundary::PosInfinity)
+ || matches!(self.hi, MaybeInfiniteInt::Finite(0))
}
}
-
- /// See `Constructor::is_covered_by`
- fn is_covered_by(&self, other: &Self) -> bool {
- if self.intersection(other).is_some() {
- // Constructor splitting should ensure that all intersections we encounter are actually
- // inclusions.
- assert!(self.is_subrange(other));
- true
+ /// Only used for displaying the range.
+ pub(super) fn to_diagnostic_pat<'tcx>(&self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Pat<'tcx> {
+ let kind = if matches!((self.lo, self.hi), (NegInfinity, PosInfinity)) {
+ PatKind::Wild
+ } else if self.is_singleton() {
+ let lo = self.lo.to_diagnostic_pat_range_bdy(ty, tcx);
+ let value = lo.as_finite().unwrap();
+ PatKind::Constant { value }
} else {
- false
- }
+ // We convert to an inclusive range for diagnostics.
+ let mut end = RangeEnd::Included;
+ let mut lo = self.lo.to_diagnostic_pat_range_bdy(ty, tcx);
+ if matches!(lo, PatRangeBoundary::PosInfinity) {
+ // The only reason to get `PosInfinity` here is the special case where
+ // `to_diagnostic_pat_range_bdy` found `{u,i}size::MAX+1`. So the range denotes the
+ // fictitious values after `{u,i}size::MAX` (see [`IntRange::split`] for why we do
+ // this). We show this to the user as `usize::MAX..` which is slightly incorrect but
+ // probably clear enough.
+ let c = ty.numeric_max_val(tcx).unwrap();
+ let value = mir::Const::from_ty_const(c, tcx);
+ lo = PatRangeBoundary::Finite(value);
+ }
+ let hi = if matches!(self.hi, MaybeInfiniteInt::Finite(0)) {
+ // The range encodes `..ty::MIN`, so we can't convert it to an inclusive range.
+ end = RangeEnd::Excluded;
+ self.hi
+ } else {
+ self.hi.minus_one()
+ };
+ let hi = hi.to_diagnostic_pat_range_bdy(ty, tcx);
+ PatKind::Range(Box::new(PatRange { lo, hi, end, ty }))
+ };
+
+ Pat { ty, span: DUMMY_SP, kind }
}
}
-/// Note: this is often not what we want: e.g. `false` is converted into the range `0..=0` and
-/// would be displayed as such. To render properly, convert to a pattern first.
+/// Note: this will render signed ranges incorrectly. To render properly, convert to a pattern
+/// first.
impl fmt::Debug for IntRange {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let (lo, hi) = self.boundaries();
- let bias = self.bias;
- let (lo, hi) = (lo ^ bias, hi ^ bias);
- write!(f, "{lo}")?;
- write!(f, "{}", RangeEnd::Included)?;
- write!(f, "{hi}")
- }
-}
-
-/// Represents a border between 2 integers. Because the intervals spanning borders must be able to
-/// cover every integer, we need to be able to represent 2^128 + 1 such borders.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
-enum IntBorder {
- JustBefore(u128),
- AfterMax,
-}
-
-/// A range of integers that is partitioned into disjoint subranges. This does constructor
-/// splitting for integer ranges as explained at the top of the file.
-///
-/// This is fed multiple ranges, and returns an output that covers the input, but is split so that
-/// the only intersections between an output range and a seen range are inclusions. No output range
-/// straddles the boundary of one of the inputs.
-///
-/// The following input:
-/// ```text
-/// |-------------------------| // `self`
-/// |------| |----------| |----|
-/// |-------| |-------|
-/// ```
-/// would be iterated over as follows:
-/// ```text
-/// ||---|--||-|---|---|---|--|
-/// ```
-#[derive(Debug, Clone)]
-struct SplitIntRange {
- /// The range we are splitting
- range: IntRange,
- /// The borders of ranges we have seen. They are all contained within `range`. This is kept
- /// sorted.
- borders: Vec<IntBorder>,
-}
-
-impl SplitIntRange {
- fn new(range: IntRange) -> Self {
- SplitIntRange { range, borders: Vec::new() }
- }
-
- /// Internal use
- fn to_borders(r: IntRange) -> [IntBorder; 2] {
- use IntBorder::*;
- let (lo, hi) = r.boundaries();
- let lo = JustBefore(lo);
- let hi = match hi.checked_add(1) {
- Some(m) => JustBefore(m),
- None => AfterMax,
- };
- [lo, hi]
- }
-
- /// Add ranges relative to which we split.
- fn split(&mut self, ranges: impl Iterator<Item = IntRange>) {
- let this_range = &self.range;
- let included_ranges = ranges.filter_map(|r| this_range.intersection(&r));
- let included_borders = included_ranges.flat_map(|r| {
- let borders = Self::to_borders(r);
- once(borders[0]).chain(once(borders[1]))
- });
- self.borders.extend(included_borders);
- self.borders.sort_unstable();
- }
-
- /// Iterate over the contained ranges.
- fn iter(&self) -> impl Iterator<Item = IntRange> + Captures<'_> {
- use IntBorder::*;
-
- let self_range = Self::to_borders(self.range.clone());
- // Start with the start of the range.
- let mut prev_border = self_range[0];
- self.borders
- .iter()
- .copied()
- // End with the end of the range.
- .chain(once(self_range[1]))
- // List pairs of adjacent borders.
- .map(move |border| {
- let ret = (prev_border, border);
- prev_border = border;
- ret
- })
- // Skip duplicates.
- .filter(|(prev_border, border)| prev_border != border)
- // Finally, convert to ranges.
- .map(move |(prev_border, border)| {
- let range = match (prev_border, border) {
- (JustBefore(n), JustBefore(m)) if n < m => n..=(m - 1),
- (JustBefore(n), AfterMax) => n..=u128::MAX,
- _ => unreachable!(), // Ruled out by the sorting and filtering we did
- };
- IntRange { range, bias: self.range.bias }
- })
+ if let Finite(lo) = self.lo {
+ write!(f, "{lo}")?;
+ }
+ write!(f, "{}", RangeEnd::Excluded)?;
+ if let Finite(hi) = self.hi {
+ write!(f, "{hi}")?;
+ }
+ Ok(())
}
}
@@ -463,142 +456,164 @@ impl Slice {
fn is_covered_by(self, other: Self) -> bool {
other.kind.covers_length(self.arity())
}
-}
-/// This computes constructor splitting for variable-length slices, as explained at the top of the
-/// file.
-///
-/// A slice pattern `[x, .., y]` behaves like the infinite or-pattern `[x, y] | [x, _, y] | [x, _,
-/// _, y] | ...`. The corresponding value constructors are fixed-length array constructors above a
-/// given minimum length. We obviously can't list this infinitude of constructors. Thankfully,
-/// it turns out that for each finite set of slice patterns, all sufficiently large array lengths
-/// are equivalent.
-///
-/// Let's look at an example, where we are trying to split the last pattern:
-/// ```
-/// # fn foo(x: &[bool]) {
-/// match x {
-/// [true, true, ..] => {}
-/// [.., false, false] => {}
-/// [..] => {}
-/// }
-/// # }
-/// ```
-/// Here are the results of specialization for the first few lengths:
-/// ```
-/// # fn foo(x: &[bool]) { match x {
-/// // length 0
-/// [] => {}
-/// // length 1
-/// [_] => {}
-/// // length 2
-/// [true, true] => {}
-/// [false, false] => {}
-/// [_, _] => {}
-/// // length 3
-/// [true, true, _ ] => {}
-/// [_, false, false] => {}
-/// [_, _, _ ] => {}
-/// // length 4
-/// [true, true, _, _ ] => {}
-/// [_, _, false, false] => {}
-/// [_, _, _, _ ] => {}
-/// // length 5
-/// [true, true, _, _, _ ] => {}
-/// [_, _, _, false, false] => {}
-/// [_, _, _, _, _ ] => {}
-/// # _ => {}
-/// # }}
-/// ```
-///
-/// If we went above length 5, we would simply be inserting more columns full of wildcards in the
-/// middle. This means that the set of witnesses for length `l >= 5` if equivalent to the set for
-/// any other `l' >= 5`: simply add or remove wildcards in the middle to convert between them.
-///
-/// This applies to any set of slice patterns: there will be a length `L` above which all lengths
-/// behave the same. This is exactly what we need for constructor splitting. Therefore a
-/// variable-length slice can be split into a variable-length slice of minimal length `L`, and many
-/// fixed-length slices of lengths `< L`.
-///
-/// For each variable-length pattern `p` with a prefix of length `plâ‚š` and suffix of length `slâ‚š`,
-/// only the first `plâ‚š` and the last `slâ‚š` elements are examined. Therefore, as long as `L` is
-/// positive (to avoid concerns about empty types), all elements after the maximum prefix length
-/// and before the maximum suffix length are not examined by any variable-length pattern, and
-/// therefore can be added/removed without affecting them - creating equivalent patterns from any
-/// sufficiently-large length.
-///
-/// Of course, if fixed-length patterns exist, we must be sure that our length is large enough to
-/// miss them all, so we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
-///
-/// `max_slice` below will be made to have arity `L`.
-#[derive(Debug)]
-struct SplitVarLenSlice {
- /// If the type is an array, this is its size.
- array_len: Option<usize>,
- /// The arity of the input slice.
- arity: usize,
- /// The smallest slice bigger than any slice seen. `max_slice.arity()` is the length `L`
- /// described above.
- max_slice: SliceKind,
-}
-
-impl SplitVarLenSlice {
- fn new(prefix: usize, suffix: usize, array_len: Option<usize>) -> Self {
- SplitVarLenSlice { array_len, arity: prefix + suffix, max_slice: VarLen(prefix, suffix) }
- }
-
- /// Pass a set of slices relative to which to split this one.
- fn split(&mut self, slices: impl Iterator<Item = SliceKind>) {
- let VarLen(max_prefix_len, max_suffix_len) = &mut self.max_slice else {
- // No need to split
- return;
- };
- // We grow `self.max_slice` to be larger than all slices encountered, as described above.
- // For diagnostics, we keep the prefix and suffix lengths separate, but grow them so that
- // `L = max_prefix_len + max_suffix_len`.
- let mut max_fixed_len = 0;
- for slice in slices {
- match slice {
- FixedLen(len) => {
- max_fixed_len = cmp::max(max_fixed_len, len);
+ /// This computes constructor splitting for variable-length slices, as explained at the top of
+ /// the file.
+ ///
+ /// A slice pattern `[x, .., y]` behaves like the infinite or-pattern `[x, y] | [x, _, y] | [x,
+ /// _, _, y] | etc`. The corresponding value constructors are fixed-length array constructors of
+ /// corresponding lengths. We obviously can't list this infinitude of constructors.
+ /// Thankfully, it turns out that for each finite set of slice patterns, all sufficiently large
+ /// array lengths are equivalent.
+ ///
+ /// Let's look at an example, where we are trying to split the last pattern:
+ /// ```
+ /// # fn foo(x: &[bool]) {
+ /// match x {
+ /// [true, true, ..] => {}
+ /// [.., false, false] => {}
+ /// [..] => {}
+ /// }
+ /// # }
+ /// ```
+ /// Here are the results of specialization for the first few lengths:
+ /// ```
+ /// # fn foo(x: &[bool]) { match x {
+ /// // length 0
+ /// [] => {}
+ /// // length 1
+ /// [_] => {}
+ /// // length 2
+ /// [true, true] => {}
+ /// [false, false] => {}
+ /// [_, _] => {}
+ /// // length 3
+ /// [true, true, _ ] => {}
+ /// [_, false, false] => {}
+ /// [_, _, _ ] => {}
+ /// // length 4
+ /// [true, true, _, _ ] => {}
+ /// [_, _, false, false] => {}
+ /// [_, _, _, _ ] => {}
+ /// // length 5
+ /// [true, true, _, _, _ ] => {}
+ /// [_, _, _, false, false] => {}
+ /// [_, _, _, _, _ ] => {}
+ /// # _ => {}
+ /// # }}
+ /// ```
+ ///
+ /// We see that above length 4, we are simply inserting columns full of wildcards in the middle.
+ /// This means that specialization and witness computation with slices of length `l >= 4` will
+ /// give equivalent results regardless of `l`. This applies to any set of slice patterns: there
+ /// will be a length `L` above which all lengths behave the same. This is exactly what we need
+ /// for constructor splitting.
+ ///
+ /// A variable-length slice pattern covers all lengths from its arity up to infinity. As we just
+ /// saw, we can split this in two: lengths below `L` are treated individually with a
+ /// fixed-length slice each; lengths above `L` are grouped into a single variable-length slice
+ /// constructor.
+ ///
+ /// For each variable-length slice pattern `p` with a prefix of length `plâ‚š` and suffix of
+ /// length `slâ‚š`, only the first `plâ‚š` and the last `slâ‚š` elements are examined. Therefore, as
+ /// long as `L` is positive (to avoid concerns about empty types), all elements after the
+ /// maximum prefix length and before the maximum suffix length are not examined by any
+ /// variable-length pattern, and therefore can be ignored. This gives us a way to compute `L`.
+ ///
+ /// Additionally, if fixed-length patterns exist, we must pick an `L` large enough to miss them,
+ /// so we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`.
+ /// `max_slice` below will be made to have this arity `L`.
+ ///
+ /// If `self` is fixed-length, it is returned as-is.
+ ///
+ /// Additionally, we track for each output slice whether it is covered by one of the column slices or not.
+ fn split(
+ self,
+ column_slices: impl Iterator<Item = Slice>,
+ ) -> impl Iterator<Item = (Presence, Slice)> {
+ // Range of lengths below `L`.
+ let smaller_lengths;
+ let arity = self.arity();
+ let mut max_slice = self.kind;
+ // Tracks the smallest variable-length slice we've seen. Any slice arity above it is
+ // therefore `Presence::Seen` in the column.
+ let mut min_var_len = usize::MAX;
+ // Tracks the fixed-length slices we've seen, to mark them as `Presence::Seen`.
+ let mut seen_fixed_lens = FxHashSet::default();
+ match &mut max_slice {
+ VarLen(max_prefix_len, max_suffix_len) => {
+ // We grow `max_slice` to be larger than all slices encountered, as described above.
+ // For diagnostics, we keep the prefix and suffix lengths separate, but grow them so that
+ // `L = max_prefix_len + max_suffix_len`.
+ let mut max_fixed_len = 0;
+ for slice in column_slices {
+ match slice.kind {
+ FixedLen(len) => {
+ max_fixed_len = cmp::max(max_fixed_len, len);
+ if arity <= len {
+ seen_fixed_lens.insert(len);
+ }
+ }
+ VarLen(prefix, suffix) => {
+ *max_prefix_len = cmp::max(*max_prefix_len, prefix);
+ *max_suffix_len = cmp::max(*max_suffix_len, suffix);
+ min_var_len = cmp::min(min_var_len, prefix + suffix);
+ }
+ }
}
- VarLen(prefix, suffix) => {
- *max_prefix_len = cmp::max(*max_prefix_len, prefix);
- *max_suffix_len = cmp::max(*max_suffix_len, suffix);
+ // We want `L = max(L, max_fixed_len + 1)`, modulo the fact that we keep prefix and
+ // suffix separate.
+ if max_fixed_len + 1 >= *max_prefix_len + *max_suffix_len {
+ // The subtraction can't overflow thanks to the above check.
+ // The new `max_prefix_len` is larger than its previous value.
+ *max_prefix_len = max_fixed_len + 1 - *max_suffix_len;
}
- }
- }
- // We want `L = max(L, max_fixed_len + 1)`, modulo the fact that we keep prefix and
- // suffix separate.
- if max_fixed_len + 1 >= *max_prefix_len + *max_suffix_len {
- // The subtraction can't overflow thanks to the above check.
- // The new `max_prefix_len` is larger than its previous value.
- *max_prefix_len = max_fixed_len + 1 - *max_suffix_len;
- }
- // We cap the arity of `max_slice` at the array size.
- match self.array_len {
- Some(len) if self.max_slice.arity() >= len => self.max_slice = FixedLen(len),
- _ => {}
- }
- }
+ // We cap the arity of `max_slice` at the array size.
+ match self.array_len {
+ Some(len) if max_slice.arity() >= len => max_slice = FixedLen(len),
+ _ => {}
+ }
- /// Iterate over the partition of this slice.
- fn iter(&self) -> impl Iterator<Item = Slice> + Captures<'_> {
- let smaller_lengths = match self.array_len {
- // The only admissible fixed-length slice is one of the array size. Whether `max_slice`
- // is fixed-length or variable-length, it will be the only relevant slice to output
- // here.
- Some(_) => 0..0, // empty range
- // We cover all arities in the range `(self.arity..infinity)`. We split that range into
- // two: lengths smaller than `max_slice.arity()` are treated independently as
- // fixed-lengths slices, and lengths above are captured by `max_slice`.
- None => self.arity..self.max_slice.arity(),
+ smaller_lengths = match self.array_len {
+ // The only admissible fixed-length slice is one of the array size. Whether `max_slice`
+ // is fixed-length or variable-length, it will be the only relevant slice to output
+ // here.
+ Some(_) => 0..0, // empty range
+ // We need to cover all arities in the range `(arity..infinity)`. We split that
+ // range into two: lengths smaller than `max_slice.arity()` are treated
+                    // independently as fixed-length slices, and lengths above are captured by
+ // `max_slice`.
+ None => self.arity()..max_slice.arity(),
+ };
+ }
+ FixedLen(_) => {
+ // No need to split here. We only track presence.
+ for slice in column_slices {
+ match slice.kind {
+ FixedLen(len) => {
+ if len == arity {
+ seen_fixed_lens.insert(len);
+ }
+ }
+ VarLen(prefix, suffix) => {
+ min_var_len = cmp::min(min_var_len, prefix + suffix);
+ }
+ }
+ }
+ smaller_lengths = 0..0;
+ }
};
- smaller_lengths
- .map(FixedLen)
- .chain(once(self.max_slice))
- .map(move |kind| Slice::new(self.array_len, kind))
+
+ smaller_lengths.map(FixedLen).chain(once(max_slice)).map(move |kind| {
+ let arity = kind.arity();
+ let seen = if min_var_len <= arity || seen_fixed_lens.contains(&arity) {
+ Presence::Seen
+ } else {
+ Presence::Unseen
+ };
+ (seen, Slice::new(self.array_len, kind))
+ })
}
}
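As a quick sanity check of the formula for `L`, here is a hypothetical standalone helper that computes it for the example patterns in the doc comment above (`[true, true, ..]` and `[.., false, false]`); it is a sketch of the length computation only, not of the full split.

```
// Compute `L = max(max(FIXED_LEN) + 1, max(PREFIX_LEN) + max(SUFFIX_LEN))` for a column
// of fixed-length slice patterns and (prefix, suffix) variable-length slice patterns.
fn slice_split_length(fixed_lens: &[usize], var_len_pats: &[(usize, usize)]) -> usize {
    let max_fixed = fixed_lens.iter().copied().max().unwrap_or(0);
    let max_prefix = var_len_pats.iter().map(|&(p, _)| p).max().unwrap_or(0);
    let max_suffix = var_len_pats.iter().map(|&(_, s)| s).max().unwrap_or(0);
    std::cmp::max(max_fixed + 1, max_prefix + max_suffix)
}

fn main() {
    // `[true, true, ..]` has prefix 2, `[.., false, false]` has suffix 2; no fixed lengths.
    assert_eq!(slice_split_length(&[], &[(2, 0), (0, 2)]), 4);
    // Adding a fixed-length pattern of length 5 pushes `L` up to 6.
    assert_eq!(slice_split_length(&[5], &[(2, 0), (0, 2)]), 6);
    println!("ok");
}
```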
@@ -616,10 +631,13 @@ pub(super) enum Constructor<'tcx> {
Single,
/// Enum variants.
Variant(VariantIdx),
+ /// Booleans
+ Bool(bool),
/// Ranges of integer literal values (`2`, `2..=5` or `2..5`).
IntRange(IntRange),
/// Ranges of floating-point literal values (`2.0..=5.2`).
- FloatRange(mir::Const<'tcx>, mir::Const<'tcx>, RangeEnd),
+ F32Range(IeeeFloat<SingleS>, IeeeFloat<SingleS>, RangeEnd),
+ F64Range(IeeeFloat<DoubleS>, IeeeFloat<DoubleS>, RangeEnd),
/// String literals. Strings are not quite the same as `&[u8]` so we treat them separately.
Str(mir::Const<'tcx>),
/// Array and slice patterns.
@@ -628,66 +646,50 @@ pub(super) enum Constructor<'tcx> {
/// boxes for the purposes of exhaustiveness: we must not inspect them, and they
/// don't count towards making a match exhaustive.
Opaque,
+ /// Or-pattern.
+ Or,
+ /// Wildcard pattern.
+ Wildcard,
/// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used
/// for those types for which we cannot list constructors explicitly, like `f64` and `str`.
NonExhaustive,
- /// Stands for constructors that are not seen in the matrix, as explained in the documentation
- /// for [`SplitWildcard`]. The carried `bool` is used for the `non_exhaustive_omitted_patterns`
- /// lint.
- Missing { nonexhaustive_enum_missing_real_variants: bool },
- /// Wildcard pattern.
- Wildcard,
- /// Or-pattern.
- Or,
+ /// Fake extra constructor for variants that should not be mentioned in diagnostics.
+ /// We use this for variants behind an unstable gate as well as
+ /// `#[doc(hidden)]` ones.
+ Hidden,
+ /// Fake extra constructor for constructors that are not seen in the matrix, as explained in the
+ /// code for [`Constructor::split`].
+ Missing,
}
impl<'tcx> Constructor<'tcx> {
- pub(super) fn is_wildcard(&self) -> bool {
- matches!(self, Wildcard)
- }
-
pub(super) fn is_non_exhaustive(&self) -> bool {
matches!(self, NonExhaustive)
}
- fn as_int_range(&self) -> Option<&IntRange> {
+ pub(super) fn as_variant(&self) -> Option<VariantIdx> {
match self {
- IntRange(range) => Some(range),
+ Variant(i) => Some(*i),
_ => None,
}
}
-
- fn as_slice(&self) -> Option<Slice> {
+ fn as_bool(&self) -> Option<bool> {
match self {
- Slice(slice) => Some(*slice),
+ Bool(b) => Some(*b),
_ => None,
}
}
-
- /// Checks if the `Constructor` is a variant and `TyCtxt::eval_stability` returns
- /// `EvalResult::Deny { .. }`.
- ///
- /// This means that the variant has a stdlib unstable feature marking it.
- pub(super) fn is_unstable_variant(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> bool {
- if let Constructor::Variant(idx) = self && let ty::Adt(adt, _) = pcx.ty.kind() {
- let variant_def_id = adt.variant(*idx).def_id;
- // Filter variants that depend on a disabled unstable feature.
- return matches!(
- pcx.cx.tcx.eval_stability(variant_def_id, None, DUMMY_SP, None),
- EvalResult::Deny { .. }
- );
+ pub(super) fn as_int_range(&self) -> Option<&IntRange> {
+ match self {
+ IntRange(range) => Some(range),
+ _ => None,
}
- false
}
-
- /// Checks if the `Constructor` is a `Constructor::Variant` with a `#[doc(hidden)]`
- /// attribute from a type not local to the current crate.
- pub(super) fn is_doc_hidden_variant(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> bool {
- if let Constructor::Variant(idx) = self && let ty::Adt(adt, _) = pcx.ty.kind() {
- let variant_def_id = adt.variants()[*idx].def_id;
- return pcx.cx.tcx.is_doc_hidden(variant_def_id) && !variant_def_id.is_local();
+ fn as_slice(&self) -> Option<Slice> {
+ match self {
+ Slice(slice) => Some(*slice),
+ _ => None,
}
- false
}
fn variant_index_for_adt(&self, adt: ty::AdtDef<'tcx>) -> VariantIdx {
@@ -721,30 +723,33 @@ impl<'tcx> Constructor<'tcx> {
_ => bug!("Unexpected type for `Single` constructor: {:?}", pcx.ty),
},
Slice(slice) => slice.arity(),
- Str(..)
- | FloatRange(..)
+ Bool(..)
| IntRange(..)
- | NonExhaustive
+ | F32Range(..)
+ | F64Range(..)
+ | Str(..)
| Opaque
+ | NonExhaustive
+ | Hidden
| Missing { .. }
| Wildcard => 0,
Or => bug!("The `Or` constructor doesn't have a fixed arity"),
}
}
- /// Some constructors (namely `Wildcard`, `IntRange` and `Slice`) actually stand for a set of actual
- /// constructors (like variants, integers or fixed-sized slices). When specializing for these
- /// constructors, we want to be specialising for the actual underlying constructors.
+ /// Some constructors (namely `Wildcard`, `IntRange` and `Slice`) actually stand for a set of
+ /// actual constructors (like variants, integers or fixed-sized slices). When specializing for
+    /// these constructors, we want to be specializing for the actual underlying constructors.
/// Naively, we would simply return the list of constructors they correspond to. We instead are
- /// more clever: if there are constructors that we know will behave the same wrt the current
- /// matrix, we keep them grouped. For example, all slices of a sufficiently large length
- /// will either be all useful or all non-useful with a given matrix.
+ /// more clever: if there are constructors that we know will behave the same w.r.t. the current
+ /// matrix, we keep them grouped. For example, all slices of a sufficiently large length will
+ /// either be all useful or all non-useful with a given matrix.
///
/// See the branches for details on how the splitting is done.
///
- /// This function may discard some irrelevant constructors if this preserves behavior and
- /// diagnostics. Eg. for the `_` case, we ignore the constructors already present in the
- /// matrix, unless all of them are.
+    /// This function may discard some irrelevant constructors if this preserves behavior. E.g. for
+ /// the `_` case, we ignore the constructors already present in the column, unless all of them
+ /// are.
pub(super) fn split<'a>(
&self,
pcx: &PatCtxt<'_, '_, 'tcx>,
@@ -755,23 +760,68 @@ impl<'tcx> Constructor<'tcx> {
{
match self {
Wildcard => {
- let mut split_wildcard = SplitWildcard::new(pcx);
- split_wildcard.split(pcx, ctors);
- split_wildcard.into_ctors(pcx)
+ let split_set = ConstructorSet::for_ty(pcx.cx, pcx.ty).split(pcx, ctors);
+ if !split_set.missing.is_empty() {
+ // We are splitting a wildcard in order to compute its usefulness. Some constructors are
+ // not present in the column. The first thing we note is that specializing with any of
+ // the missing constructors would select exactly the rows with wildcards. Moreover, they
+ // would all return equivalent results. We can therefore group them all into a
+ // fictitious `Missing` constructor.
+ //
+ // As an important optimization, this function will skip all the present constructors.
+ // This is correct because specializing with any of the present constructors would
+ // select a strict superset of the wildcard rows, and thus would only find witnesses
+ // already found with the `Missing` constructor.
+ // This does mean that diagnostics are incomplete: in
+ // ```
+ // match x {
+ // Some(true) => {}
+ // }
+ // ```
+ // we report `None` as missing but not `Some(false)`.
+ //
+ // When all the constructors are missing we can equivalently return the `Wildcard`
+ // constructor on its own. The difference between `Wildcard` and `Missing` will then
+ // only be in diagnostics.
+
+ // If some constructors are missing, we typically want to report those constructors,
+ // e.g.:
+ // ```
+ // enum Direction { N, S, E, W }
+ // let Direction::N = ...;
+ // ```
+ // we can report 3 witnesses: `S`, `E`, and `W`.
+ //
+ // However, if the user didn't actually specify a constructor
+ // in this arm, e.g., in
+ // ```
+ // let x: (Direction, Direction, bool) = ...;
+ // let (_, _, false) = x;
+ // ```
+ // we don't want to show all 16 possible witnesses `(<direction-1>, <direction-2>,
+ // true)` - we are satisfied with `(_, _, true)`. So if all constructors are missing we
+ // prefer to report just a wildcard `_`.
+ //
+ // The exception is: if we are at the top-level, for example in an empty match, we
+ // usually prefer to report the full list of constructors.
+ let all_missing = split_set.present.is_empty();
+ let report_when_all_missing =
+ pcx.is_top_level && !IntRange::is_integral(pcx.ty);
+ let ctor =
+ if all_missing && !report_when_all_missing { Wildcard } else { Missing };
+ smallvec![ctor]
+ } else {
+ split_set.present
+ }
}
- // Fast-track if the range is trivial. In particular, we don't do the overlapping
- // ranges check.
- IntRange(ctor_range) if !ctor_range.is_singleton() => {
- let mut split_range = SplitIntRange::new(ctor_range.clone());
- let int_ranges = ctors.filter_map(|ctor| ctor.as_int_range());
- split_range.split(int_ranges.cloned());
- split_range.iter().map(IntRange).collect()
+ // Fast-track if the range is trivial.
+ IntRange(this_range) if !this_range.is_singleton() => {
+ let column_ranges = ctors.filter_map(|ctor| ctor.as_int_range()).cloned();
+ this_range.split(column_ranges).map(|(_, range)| IntRange(range)).collect()
}
- &Slice(Slice { kind: VarLen(self_prefix, self_suffix), array_len }) => {
- let mut split_self = SplitVarLenSlice::new(self_prefix, self_suffix, array_len);
- let slices = ctors.filter_map(|c| c.as_slice()).map(|s| s.kind);
- split_self.split(slices);
- split_self.iter().map(Slice).collect()
+ Slice(this_slice @ Slice { kind: VarLen(..), .. }) => {
+ let column_slices = ctors.filter_map(|c| c.as_slice());
+ this_slice.split(column_slices).map(|(_, slice)| Slice(slice)).collect()
}
// Any other constructor can be used unchanged.
_ => smallvec![self.clone()],
@@ -788,28 +838,29 @@ impl<'tcx> Constructor<'tcx> {
match (self, other) {
// Wildcards cover anything
(_, Wildcard) => true,
- // The missing ctors are not covered by anything in the matrix except wildcards.
- (Missing { .. } | Wildcard, _) => false,
+ // Only a wildcard pattern can match these special constructors.
+ (Wildcard | Missing { .. } | NonExhaustive | Hidden, _) => false,
(Single, Single) => true,
(Variant(self_id), Variant(other_id)) => self_id == other_id,
-
- (IntRange(self_range), IntRange(other_range)) => self_range.is_covered_by(other_range),
- (
- FloatRange(self_from, self_to, self_end),
- FloatRange(other_from, other_to, other_end),
- ) => {
- match (
- compare_const_vals(pcx.cx.tcx, *self_to, *other_to, pcx.cx.param_env),
- compare_const_vals(pcx.cx.tcx, *self_from, *other_from, pcx.cx.param_env),
- ) {
- (Some(to), Some(from)) => {
- (from == Ordering::Greater || from == Ordering::Equal)
- && (to == Ordering::Less
- || (other_end == self_end && to == Ordering::Equal))
+ (Bool(self_b), Bool(other_b)) => self_b == other_b,
+
+ (IntRange(self_range), IntRange(other_range)) => self_range.is_subrange(other_range),
+ (F32Range(self_from, self_to, self_end), F32Range(other_from, other_to, other_end)) => {
+ self_from.ge(other_from)
+ && match self_to.partial_cmp(other_to) {
+ Some(Ordering::Less) => true,
+ Some(Ordering::Equal) => other_end == self_end,
+ _ => false,
+ }
+ }
+ (F64Range(self_from, self_to, self_end), F64Range(other_from, other_to, other_end)) => {
+ self_from.ge(other_from)
+ && match self_to.partial_cmp(other_to) {
+ Some(Ordering::Less) => true,
+ Some(Ordering::Equal) => other_end == self_end,
+ _ => false,
}
- _ => false,
- }
}
(Str(self_val), Str(other_val)) => {
// FIXME Once valtrees are available we can directly use the bytes
@@ -820,8 +871,6 @@ impl<'tcx> Constructor<'tcx> {
// We are trying to inspect an opaque constant. Thus we skip the row.
(Opaque, _) | (_, Opaque) => false,
- // Only a wildcard pattern can match the special extra constructor.
- (NonExhaustive, _) => false,
_ => span_bug!(
pcx.span,
@@ -831,96 +880,131 @@ impl<'tcx> Constructor<'tcx> {
),
}
}
+}
- /// Faster version of `is_covered_by` when applied to many constructors. `used_ctors` is
- /// assumed to be built from `matrix.head_ctors()` with wildcards and opaques filtered out,
- /// and `self` is assumed to have been split from a wildcard.
- fn is_covered_by_any<'p>(
- &self,
- pcx: &PatCtxt<'_, 'p, 'tcx>,
- used_ctors: &[Constructor<'tcx>],
- ) -> bool {
- if used_ctors.is_empty() {
- return false;
- }
-
- // This must be kept in sync with `is_covered_by`.
- match self {
- // If `self` is `Single`, `used_ctors` cannot contain anything else than `Single`s.
- Single => !used_ctors.is_empty(),
- Variant(vid) => used_ctors.iter().any(|c| matches!(c, Variant(i) if i == vid)),
- IntRange(range) => used_ctors
- .iter()
- .filter_map(|c| c.as_int_range())
- .any(|other| range.is_covered_by(other)),
- Slice(slice) => used_ctors
- .iter()
- .filter_map(|c| c.as_slice())
- .any(|other| slice.is_covered_by(other)),
- // This constructor is never covered by anything else
- NonExhaustive => false,
- Str(..) | FloatRange(..) | Opaque | Missing { .. } | Wildcard | Or => {
- span_bug!(pcx.span, "found unexpected ctor in all_ctors: {:?}", self)
- }
- }
- }
+/// Describes the set of all constructors for a type.
+#[derive(Debug)]
+pub(super) enum ConstructorSet {
+ /// The type has a single constructor, e.g. `&T` or a struct.
+ Single,
+ /// This type has the following list of constructors.
+ /// Some variants are hidden, which means they won't be mentioned in diagnostics unless the user
+ /// mentioned them first. We use this for variants behind an unstable gate as well as
+ /// `#[doc(hidden)]` ones.
+ Variants {
+ visible_variants: Vec<VariantIdx>,
+ hidden_variants: Vec<VariantIdx>,
+ non_exhaustive: bool,
+ },
+ /// Booleans.
+ Bool,
+ /// The type is spanned by integer values. The range or ranges give the set of allowed values.
+ /// The second range is only useful for `char`.
+ Integers { range_1: IntRange, range_2: Option<IntRange> },
+ /// The type is matched by slices. The usize is the compile-time length of the array, if known.
+ Slice(Option<usize>),
+ /// The type is matched by slices whose elements are uninhabited.
+ SliceOfEmpty,
+ /// The constructors cannot be listed, and the type cannot be matched exhaustively. E.g. `str`,
+ /// floats.
+ Unlistable,
+ /// The type has no inhabitants.
+ Uninhabited,
}
-/// A wildcard constructor that we split relative to the constructors in the matrix, as explained
-/// at the top of the file.
+/// Describes the result of analyzing the constructors in a column of a match.
///
-/// A constructor that is not present in the matrix rows will only be covered by the rows that have
-/// wildcards. Thus we can group all of those constructors together; we call them "missing
-/// constructors". Splitting a wildcard would therefore list all present constructors individually
-/// (or grouped if they are integers or slices), and then all missing constructors together as a
-/// group.
+/// `present` is morally the set of constructors present in the column, and `missing` is the set of
+/// constructors that exist in the type but are not present in the column.
///
-/// However we can go further: since any constructor will match the wildcard rows, and having more
-/// rows can only reduce the amount of usefulness witnesses, we can skip the present constructors
-/// and only try the missing ones.
-/// This will not preserve the whole list of witnesses, but will preserve whether the list is empty
-/// or not. In fact this is quite natural from the point of view of diagnostics too. This is done
-/// in `to_ctors`: in some cases we only return `Missing`.
+/// More formally, they respect the following constraints:
+/// - the union of `present` and `missing` covers the whole type
+/// - `present` and `missing` are disjoint
+/// - neither contains wildcards
+/// - each constructor in `present` is covered by some non-wildcard constructor in the column
+/// - together, the constructors in `present` cover all the non-wildcard constructors in the column
+/// - non-wildcards in the column do not cover anything in `missing`
+/// - constructors in `present` and `missing` are split for the column; in other words, they are
+/// either fully included in or disjoint from each constructor in the column. This avoids
+/// non-trivial intersections like between `0..10` and `5..15`.
#[derive(Debug)]
-pub(super) struct SplitWildcard<'tcx> {
- /// Constructors (other than wildcards and opaques) seen in the matrix.
- matrix_ctors: Vec<Constructor<'tcx>>,
- /// All the constructors for this type
- all_ctors: SmallVec<[Constructor<'tcx>; 1]>,
+pub(super) struct SplitConstructorSet<'tcx> {
+ pub(super) present: SmallVec<[Constructor<'tcx>; 1]>,
+ pub(super) missing: Vec<Constructor<'tcx>>,
}
-impl<'tcx> SplitWildcard<'tcx> {
- pub(super) fn new<'p>(pcx: &PatCtxt<'_, 'p, 'tcx>) -> Self {
- debug!("SplitWildcard::new({:?})", pcx.ty);
- let cx = pcx.cx;
+impl ConstructorSet {
+ #[instrument(level = "debug", skip(cx), ret)]
+ pub(super) fn for_ty<'p, 'tcx>(cx: &MatchCheckCtxt<'p, 'tcx>, ty: Ty<'tcx>) -> Self {
let make_range = |start, end| {
- IntRange(
- // `unwrap()` is ok because we know the type is an integer.
- IntRange::from_range(cx.tcx, start, end, pcx.ty, &RangeEnd::Included).unwrap(),
+ IntRange::from_range(
+ MaybeInfiniteInt::new_finite(cx.tcx, ty, start),
+ MaybeInfiniteInt::new_finite(cx.tcx, ty, end),
+ RangeEnd::Included,
)
};
- // This determines the set of all possible constructors for the type `pcx.ty`. For numbers,
+ // This determines the set of all possible constructors for the type `ty`. For numbers,
// arrays and slices we use ranges and variable-length slices when appropriate.
//
// If the `exhaustive_patterns` feature is enabled, we make sure to omit constructors that
// are statically impossible. E.g., for `Option<!>`, we do not include `Some(_)` in the
// returned list of constructors.
- // Invariant: this is empty if and only if the type is uninhabited (as determined by
+ // Invariant: this is `Uninhabited` if and only if the type is uninhabited (as determined by
// `cx.is_uninhabited()`).
- let all_ctors = match pcx.ty.kind() {
- ty::Bool => smallvec![make_range(0, 1)],
+ match ty.kind() {
+ ty::Bool => Self::Bool,
+ ty::Char => {
+ // The valid Unicode Scalar Value ranges.
+ Self::Integers {
+ range_1: make_range('\u{0000}' as u128, '\u{D7FF}' as u128),
+ range_2: Some(make_range('\u{E000}' as u128, '\u{10FFFF}' as u128)),
+ }
+ }
+ &ty::Int(ity) => {
+ let range = if ty.is_ptr_sized_integral()
+ && !cx.tcx.features().precise_pointer_size_matching
+ {
+ // The min/max values of `isize` are not allowed to be observed unless the
+ // `precise_pointer_size_matching` feature is enabled.
+ IntRange { lo: NegInfinity, hi: PosInfinity }
+ } else {
+ let bits = Integer::from_int_ty(&cx.tcx, ity).size().bits() as u128;
+ let min = 1u128 << (bits - 1);
+ let max = min - 1;
+ make_range(min, max)
+ };
+ Self::Integers { range_1: range, range_2: None }
+ }
+ &ty::Uint(uty) => {
+ let range = if ty.is_ptr_sized_integral()
+ && !cx.tcx.features().precise_pointer_size_matching
+ {
+ // The max value of `usize` is not allowed to be observed unless the
+ // `precise_pointer_size_matching` feature is enabled.
+ let lo = MaybeInfiniteInt::new_finite(cx.tcx, ty, 0);
+ IntRange { lo, hi: PosInfinity }
+ } else {
+ let size = Integer::from_uint_ty(&cx.tcx, uty).size();
+ let max = size.truncate(u128::MAX);
+ make_range(0, max)
+ };
+ Self::Integers { range_1: range, range_2: None }
+ }
ty::Array(sub_ty, len) if len.try_eval_target_usize(cx.tcx, cx.param_env).is_some() => {
let len = len.eval_target_usize(cx.tcx, cx.param_env) as usize;
if len != 0 && cx.is_uninhabited(*sub_ty) {
- smallvec![]
+ Self::Uninhabited
} else {
- smallvec![Slice(Slice::new(Some(len), VarLen(0, 0)))]
+ Self::Slice(Some(len))
}
}
// Treat arrays of a constant but unknown length like slices.
ty::Array(sub_ty, _) | ty::Slice(sub_ty) => {
- let kind = if cx.is_uninhabited(*sub_ty) { FixedLen(0) } else { VarLen(0, 0) };
- smallvec![Slice(Slice::new(None, kind))]
+ if cx.is_uninhabited(*sub_ty) {
+ Self::SliceOfEmpty
+ } else {
+ Self::Slice(None)
+ }
}
ty::Adt(def, args) if def.is_enum() => {
// If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
@@ -939,19 +1023,14 @@ impl<'tcx> SplitWildcard<'tcx> {
//
// we don't want to show every possible IO error, but instead have only `_` as the
// witness.
- let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(pcx.ty);
-
- let is_exhaustive_pat_feature = cx.tcx.features().exhaustive_patterns;
-
- // If `exhaustive_patterns` is disabled and our scrutinee is an empty enum, we treat it
- // as though it had an "unknown" constructor to avoid exposing its emptiness. The
- // exception is if the pattern is at the top level, because we want empty matches to be
- // considered exhaustive.
- let is_secretly_empty =
- def.variants().is_empty() && !is_exhaustive_pat_feature && !pcx.is_top_level;
+ let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(ty);
- let mut ctors: SmallVec<[_; 1]> =
- def.variants()
+ if def.variants().is_empty() && !is_declared_nonexhaustive {
+ Self::Uninhabited
+ } else {
+ let is_exhaustive_pat_feature = cx.tcx.features().exhaustive_patterns;
+ let (hidden_variants, visible_variants) = def
+ .variants()
.iter_enumerated()
.filter(|(_, v)| {
// If `exhaustive_patterns` is enabled, we exclude variants known to be
@@ -961,135 +1040,188 @@ impl<'tcx> SplitWildcard<'tcx> {
.instantiate(cx.tcx, args)
.apply(cx.tcx, cx.param_env, cx.module)
})
- .map(|(idx, _)| Variant(idx))
- .collect();
+ .map(|(idx, _)| idx)
+ .partition(|idx| {
+ let variant_def_id = def.variant(*idx).def_id;
+ // Filter variants that depend on a disabled unstable feature.
+ let is_unstable = matches!(
+ cx.tcx.eval_stability(variant_def_id, None, DUMMY_SP, None),
+ EvalResult::Deny { .. }
+ );
+ // Filter foreign `#[doc(hidden)]` variants.
+ let is_doc_hidden =
+ cx.tcx.is_doc_hidden(variant_def_id) && !variant_def_id.is_local();
+ is_unstable || is_doc_hidden
+ });
+
+ Self::Variants {
+ visible_variants,
+ hidden_variants,
+ non_exhaustive: is_declared_nonexhaustive,
+ }
+ }
+ }
+ ty::Never => Self::Uninhabited,
+ _ if cx.is_uninhabited(ty) => Self::Uninhabited,
+ ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => Self::Single,
+ // This type is one for which we cannot list constructors, like `str` or `f64`.
+ _ => Self::Unlistable,
+ }
+ }
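The classification above is internal, but its effect shows up in which matches the compiler accepts as exhaustive. A surface-level illustration in plain stable Rust (the comments restate the cases handled above; treat exact diagnostics as version-dependent):

```
// `char` is modelled as two integer ranges, so covering both Unicode scalar value
// ranges is accepted as exhaustive without a wildcard arm.
fn plane(c: char) -> &'static str {
    match c {
        '\u{0000}'..='\u{D7FF}' => "below surrogates",
        '\u{E000}'..='\u{10FFFF}' => "above surrogates",
    }
}

// `usize` is treated like a `#[non_exhaustive]` type unless `precise_pointer_size_matching`
// is enabled, so a wildcard arm is required even when every value looks covered.
fn size_class(n: usize) -> &'static str {
    match n {
        0 => "zero",
        _ => "non-zero",
    }
}

// `f64` is `Unlistable`: its constructors cannot be enumerated, so a wildcard is always needed.
fn sign(x: f64) -> &'static str {
    match x {
        y if y < 0.0 => "negative",
        _ => "non-negative",
    }
}

fn main() {
    assert_eq!(plane('a'), "below surrogates");
    assert_eq!(size_class(0), "zero");
    assert_eq!(sign(-1.0), "negative");
}
```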
- if is_secretly_empty || is_declared_nonexhaustive {
- ctors.push(NonExhaustive);
+    /// This is the core logical operation of exhaustiveness checking. It analyzes a column of
+    /// constructors to 1/ determine which constructors of the type (if any) are missing; 2/ split
+ /// constructors to handle non-trivial intersections e.g. on ranges or slices.
+ #[instrument(level = "debug", skip(self, pcx, ctors), ret)]
+ pub(super) fn split<'a, 'tcx>(
+ &self,
+ pcx: &PatCtxt<'_, '_, 'tcx>,
+ ctors: impl Iterator<Item = &'a Constructor<'tcx>> + Clone,
+ ) -> SplitConstructorSet<'tcx>
+ where
+ 'tcx: 'a,
+ {
+ let mut present: SmallVec<[_; 1]> = SmallVec::new();
+ let mut missing = Vec::new();
+        // Constructors in `ctors`, except wildcards and opaques.
+ let mut seen = ctors.filter(|c| !(matches!(c, Opaque | Wildcard)));
+ match self {
+ ConstructorSet::Single => {
+ if seen.next().is_none() {
+ missing.push(Single);
+ } else {
+ present.push(Single);
}
- ctors
}
- ty::Char => {
- smallvec![
- // The valid Unicode Scalar Value ranges.
- make_range('\u{0000}' as u128, '\u{D7FF}' as u128),
- make_range('\u{E000}' as u128, '\u{10FFFF}' as u128),
- ]
+ ConstructorSet::Variants { visible_variants, hidden_variants, non_exhaustive } => {
+ let seen_set: FxHashSet<_> = seen.map(|c| c.as_variant().unwrap()).collect();
+ let mut skipped_a_hidden_variant = false;
+
+ for variant in visible_variants {
+ let ctor = Variant(*variant);
+ if seen_set.contains(&variant) {
+ present.push(ctor);
+ } else {
+ missing.push(ctor);
+ }
+ }
+
+ for variant in hidden_variants {
+ let ctor = Variant(*variant);
+ if seen_set.contains(&variant) {
+ present.push(ctor);
+ } else {
+ skipped_a_hidden_variant = true;
+ }
+ }
+ if skipped_a_hidden_variant {
+ missing.push(Hidden);
+ }
+
+ if *non_exhaustive {
+ missing.push(NonExhaustive);
+ }
}
- ty::Int(_) | ty::Uint(_)
- if pcx.ty.is_ptr_sized_integral()
- && !cx.tcx.features().precise_pointer_size_matching =>
- {
- // `usize`/`isize` are not allowed to be matched exhaustively unless the
- // `precise_pointer_size_matching` feature is enabled. So we treat those types like
- // `#[non_exhaustive]` enums by returning a special unmatchable constructor.
- smallvec![NonExhaustive]
+ ConstructorSet::Bool => {
+ let mut seen_false = false;
+ let mut seen_true = false;
+ for b in seen.map(|ctor| ctor.as_bool().unwrap()) {
+ if b {
+ seen_true = true;
+ } else {
+ seen_false = true;
+ }
+ }
+ if seen_false {
+ present.push(Bool(false));
+ } else {
+ missing.push(Bool(false));
+ }
+ if seen_true {
+ present.push(Bool(true));
+ } else {
+ missing.push(Bool(true));
+ }
}
- &ty::Int(ity) => {
- let bits = Integer::from_int_ty(&cx.tcx, ity).size().bits() as u128;
- let min = 1u128 << (bits - 1);
- let max = min - 1;
- smallvec![make_range(min, max)]
+ ConstructorSet::Integers { range_1, range_2 } => {
+ let seen_ranges: Vec<_> =
+ seen.map(|ctor| ctor.as_int_range().unwrap().clone()).collect();
+ for (seen, splitted_range) in range_1.split(seen_ranges.iter().cloned()) {
+ match seen {
+ Presence::Unseen => missing.push(IntRange(splitted_range)),
+ Presence::Seen => present.push(IntRange(splitted_range)),
+ }
+ }
+ if let Some(range_2) = range_2 {
+ for (seen, splitted_range) in range_2.split(seen_ranges.into_iter()) {
+ match seen {
+ Presence::Unseen => missing.push(IntRange(splitted_range)),
+ Presence::Seen => present.push(IntRange(splitted_range)),
+ }
+ }
+ }
}
- &ty::Uint(uty) => {
- let size = Integer::from_uint_ty(&cx.tcx, uty).size();
- let max = size.truncate(u128::MAX);
- smallvec![make_range(0, max)]
+ &ConstructorSet::Slice(array_len) => {
+ let seen_slices = seen.map(|c| c.as_slice().unwrap());
+ let base_slice = Slice::new(array_len, VarLen(0, 0));
+ for (seen, splitted_slice) in base_slice.split(seen_slices) {
+ let ctor = Slice(splitted_slice);
+ match seen {
+ Presence::Unseen => missing.push(ctor),
+ Presence::Seen => present.push(ctor),
+ }
+ }
+ }
+ ConstructorSet::SliceOfEmpty => {
+ // This one is tricky because even though there's only one possible value of this
+                // type (namely `[]`), slice patterns of all lengths are allowed; they're just
+                // unreachable if the length is not 0.
+ // We still gather the seen constructors in `present`, but the only slice that can
+ // go in `missing` is `[]`.
+ let seen_slices = seen.map(|c| c.as_slice().unwrap());
+ let base_slice = Slice::new(None, VarLen(0, 0));
+ for (seen, splitted_slice) in base_slice.split(seen_slices) {
+ let ctor = Slice(splitted_slice);
+ match seen {
+ Presence::Seen => present.push(ctor),
+ Presence::Unseen if splitted_slice.arity() == 0 => {
+ missing.push(Slice(Slice::new(None, FixedLen(0))))
+ }
+ Presence::Unseen => {}
+ }
+ }
}
- // If `exhaustive_patterns` is disabled and our scrutinee is the never type, we cannot
+ ConstructorSet::Unlistable => {
+ // Since we can't list constructors, we take the ones in the column. This might list
+ // some constructors several times but there's not much we can do.
+ present.extend(seen.cloned());
+ missing.push(NonExhaustive);
+ }
+ // If `exhaustive_patterns` is disabled and our scrutinee is an empty type, we cannot
// expose its emptiness. The exception is if the pattern is at the top level, because we
// want empty matches to be considered exhaustive.
- ty::Never if !cx.tcx.features().exhaustive_patterns && !pcx.is_top_level => {
- smallvec![NonExhaustive]
+ ConstructorSet::Uninhabited
+ if !pcx.cx.tcx.features().exhaustive_patterns && !pcx.is_top_level =>
+ {
+ missing.push(NonExhaustive);
}
- ty::Never => smallvec![],
- _ if cx.is_uninhabited(pcx.ty) => smallvec![],
- ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => smallvec![Single],
- // This type is one for which we cannot list constructors, like `str` or `f64`.
- _ => smallvec![NonExhaustive],
- };
+ ConstructorSet::Uninhabited => {}
+ }
- SplitWildcard { matrix_ctors: Vec::new(), all_ctors }
+ SplitConstructorSet { present, missing }
}
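For integers, the invariant that `present` and `missing` are "split for the column" means the type's range is cut at the boundaries of the ranges seen in the column. A hypothetical standalone sketch of that cutting step (not rustc's `IntRange::split`; it assumes the full range does not end at the integer type's maximum, so `hi + 1` never overflows):

```
// Hypothetical sketch of range splitting over inclusive `(lo, hi)` pairs. It only
// illustrates why overlapping ranges like `0..=10` and `5..=15` get cut into
// non-overlapping fragments.
fn split_range(full: (u64, u64), seen: &[(u64, u64)]) -> Vec<((u64, u64), bool)> {
    // Work with exclusive upper bounds internally to avoid off-by-one handling.
    let mut cuts: Vec<u64> = vec![full.0, full.1 + 1];
    for &(lo, hi) in seen {
        cuts.push(lo.max(full.0));
        cuts.push((hi + 1).min(full.1 + 1));
    }
    cuts.sort_unstable();
    cuts.dedup();
    let mut fragments = Vec::new();
    for pair in cuts.windows(2) {
        let (lo, hi) = (pair[0], pair[1] - 1);
        // Each fragment is either fully inside some seen range or disjoint from all of them.
        let covered = seen.iter().any(|&(slo, shi)| slo <= lo && hi <= shi);
        fragments.push(((lo, hi), covered));
    }
    fragments
}

fn main() {
    // Column contains `0..=10` and `5..=15`; the type is `u8`, i.e. `0..=255`.
    let fragments = split_range((0, 255), &[(0, 10), (5, 15)]);
    assert_eq!(
        fragments,
        vec![((0, 4), true), ((5, 10), true), ((11, 15), true), ((16, 255), false)]
    );
}
```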
- /// Pass a set of constructors relative to which to split this one. Don't call twice, it won't
- /// do what you want.
- pub(super) fn split<'a>(
- &mut self,
+ /// Compute the set of constructors missing from this column.
+ /// This is only used for reporting to the user.
+ pub(super) fn compute_missing<'a, 'tcx>(
+ &self,
pcx: &PatCtxt<'_, '_, 'tcx>,
ctors: impl Iterator<Item = &'a Constructor<'tcx>> + Clone,
- ) where
+ ) -> Vec<Constructor<'tcx>>
+ where
'tcx: 'a,
{
- // Since `all_ctors` never contains wildcards, this won't recurse further.
- self.all_ctors =
- self.all_ctors.iter().flat_map(|ctor| ctor.split(pcx, ctors.clone())).collect();
- self.matrix_ctors = ctors.filter(|c| !matches!(c, Wildcard | Opaque)).cloned().collect();
- }
-
- /// Whether there are any value constructors for this type that are not present in the matrix.
- fn any_missing(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> bool {
- self.iter_missing(pcx).next().is_some()
- }
-
- /// Iterate over the constructors for this type that are not present in the matrix.
- pub(super) fn iter_missing<'a, 'p>(
- &'a self,
- pcx: &'a PatCtxt<'a, 'p, 'tcx>,
- ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> {
- self.all_ctors.iter().filter(move |ctor| !ctor.is_covered_by_any(pcx, &self.matrix_ctors))
- }
-
- /// Return the set of constructors resulting from splitting the wildcard. As explained at the
- /// top of the file, if any constructors are missing we can ignore the present ones.
- fn into_ctors(self, pcx: &PatCtxt<'_, '_, 'tcx>) -> SmallVec<[Constructor<'tcx>; 1]> {
- if self.any_missing(pcx) {
- // Some constructors are missing, thus we can specialize with the special `Missing`
- // constructor, which stands for those constructors that are not seen in the matrix,
- // and matches the same rows as any of them (namely the wildcard rows). See the top of
- // the file for details.
- // However, when all constructors are missing we can also specialize with the full
- // `Wildcard` constructor. The difference will depend on what we want in diagnostics.
-
- // If some constructors are missing, we typically want to report those constructors,
- // e.g.:
- // ```
- // enum Direction { N, S, E, W }
- // let Direction::N = ...;
- // ```
- // we can report 3 witnesses: `S`, `E`, and `W`.
- //
- // However, if the user didn't actually specify a constructor
- // in this arm, e.g., in
- // ```
- // let x: (Direction, Direction, bool) = ...;
- // let (_, _, false) = x;
- // ```
- // we don't want to show all 16 possible witnesses `(<direction-1>, <direction-2>,
- // true)` - we are satisfied with `(_, _, true)`. So if all constructors are missing we
- // prefer to report just a wildcard `_`.
- //
- // The exception is: if we are at the top-level, for example in an empty match, we
- // sometimes prefer reporting the list of constructors instead of just `_`.
- let report_when_all_missing = pcx.is_top_level && !IntRange::is_integral(pcx.ty);
- let ctor = if !self.matrix_ctors.is_empty() || report_when_all_missing {
- if pcx.is_non_exhaustive {
- Missing {
- nonexhaustive_enum_missing_real_variants: self
- .iter_missing(pcx)
- .any(|c| !(c.is_non_exhaustive() || c.is_unstable_variant(pcx))),
- }
- } else {
- Missing { nonexhaustive_enum_missing_real_variants: false }
- }
- } else {
- Wildcard
- };
- return smallvec![ctor];
- }
-
- // All the constructors are present in the matrix, so we just go through them all.
- self.all_ctors
+ self.split(pcx, ctors).missing
}
}
@@ -1202,11 +1334,14 @@ impl<'p, 'tcx> Fields<'p, 'tcx> {
}
_ => bug!("bad slice pattern {:?} {:?}", constructor, pcx),
},
- Str(..)
- | FloatRange(..)
+ Bool(..)
| IntRange(..)
- | NonExhaustive
+ | F32Range(..)
+ | F64Range(..)
+ | Str(..)
| Opaque
+ | NonExhaustive
+ | Hidden
| Missing { .. }
| Wildcard => Fields::empty(),
Or => {
@@ -1227,9 +1362,10 @@ impl<'p, 'tcx> Fields<'p, 'tcx> {
/// Values and patterns can be represented as a constructor applied to some fields. This represents
/// a pattern in this form.
-/// This also keeps track of whether the pattern has been found reachable during analysis. For this
-/// reason we should be careful not to clone patterns for which we care about that. Use
-/// `clone_and_forget_reachability` if you're sure.
+/// This also uses interior mutability to keep track of whether the pattern has been found reachable
+/// during analysis. For this reason they cannot be cloned.
+/// A `DeconstructedPat` will almost always come from user input; the only exceptions are some
+/// `Wildcard`s introduced during specialization.
pub(crate) struct DeconstructedPat<'p, 'tcx> {
ctor: Constructor<'tcx>,
fields: Fields<'p, 'tcx>,
@@ -1252,26 +1388,13 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
DeconstructedPat { ctor, fields, ty, span, reachable: Cell::new(false) }
}
- /// Construct a pattern that matches everything that starts with this constructor.
- /// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern
- /// `Some(_)`.
- pub(super) fn wild_from_ctor(pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: Constructor<'tcx>) -> Self {
- let fields = Fields::wildcards(pcx, &ctor);
- DeconstructedPat::new(ctor, fields, pcx.ty, pcx.span)
- }
-
- /// Clone this value. This method emphasizes that cloning loses reachability information and
- /// should be done carefully.
- pub(super) fn clone_and_forget_reachability(&self) -> Self {
- DeconstructedPat::new(self.ctor.clone(), self.fields, self.ty, self.span)
- }
-
pub(crate) fn from_pat(cx: &MatchCheckCtxt<'p, 'tcx>, pat: &Pat<'tcx>) -> Self {
let mkpat = |pat| DeconstructedPat::from_pat(cx, pat);
let ctor;
let fields;
match &pat.kind {
- PatKind::AscribeUserType { subpattern, .. } => return mkpat(subpattern),
+ PatKind::AscribeUserType { subpattern, .. }
+ | PatKind::InlineConstant { subpattern, .. } => return mkpat(subpattern),
PatKind::Binding { subpattern: Some(subpat), .. } => return mkpat(subpat),
PatKind::Binding { subpattern: None, .. } | PatKind::Wild => {
ctor = Wildcard;
@@ -1343,50 +1466,95 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
}
}
PatKind::Constant { value } => {
- if let Some(int_range) = IntRange::from_constant(cx.tcx, cx.param_env, *value) {
- ctor = IntRange(int_range);
- fields = Fields::empty();
- } else {
- match pat.ty.kind() {
- ty::Float(_) => {
- ctor = FloatRange(*value, *value, RangeEnd::Included);
- fields = Fields::empty();
- }
- ty::Ref(_, t, _) if t.is_str() => {
- // We want a `&str` constant to behave like a `Deref` pattern, to be compatible
- // with other `Deref` patterns. This could have been done in `const_to_pat`,
- // but that causes issues with the rest of the matching code.
- // So here, the constructor for a `"foo"` pattern is `&` (represented by
- // `Single`), and has one field. That field has constructor `Str(value)` and no
- // fields.
- // Note: `t` is `str`, not `&str`.
- let subpattern =
- DeconstructedPat::new(Str(*value), Fields::empty(), *t, pat.span);
- ctor = Single;
- fields = Fields::singleton(cx, subpattern)
- }
- // All constants that can be structurally matched have already been expanded
- // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
- // opaque.
- _ => {
- ctor = Opaque;
- fields = Fields::empty();
- }
+ match pat.ty.kind() {
+ ty::Bool => {
+ ctor = match value.try_eval_bool(cx.tcx, cx.param_env) {
+ Some(b) => Bool(b),
+ None => Opaque,
+ };
+ fields = Fields::empty();
+ }
+ ty::Char | ty::Int(_) | ty::Uint(_) => {
+ ctor = match value.try_eval_bits(cx.tcx, cx.param_env) {
+ Some(bits) => IntRange(IntRange::from_bits(cx.tcx, pat.ty, bits)),
+ None => Opaque,
+ };
+ fields = Fields::empty();
+ }
+ ty::Float(ty::FloatTy::F32) => {
+ ctor = match value.try_eval_bits(cx.tcx, cx.param_env) {
+ Some(bits) => {
+ use rustc_apfloat::Float;
+ let value = rustc_apfloat::ieee::Single::from_bits(bits);
+ F32Range(value, value, RangeEnd::Included)
+ }
+ None => Opaque,
+ };
+ fields = Fields::empty();
+ }
+ ty::Float(ty::FloatTy::F64) => {
+ ctor = match value.try_eval_bits(cx.tcx, cx.param_env) {
+ Some(bits) => {
+ use rustc_apfloat::Float;
+ let value = rustc_apfloat::ieee::Double::from_bits(bits);
+ F64Range(value, value, RangeEnd::Included)
+ }
+ None => Opaque,
+ };
+ fields = Fields::empty();
+ }
+ ty::Ref(_, t, _) if t.is_str() => {
+ // We want a `&str` constant to behave like a `Deref` pattern, to be compatible
+ // with other `Deref` patterns. This could have been done in `const_to_pat`,
+ // but that causes issues with the rest of the matching code.
+ // So here, the constructor for a `"foo"` pattern is `&` (represented by
+ // `Single`), and has one field. That field has constructor `Str(value)` and no
+ // fields.
+ // Note: `t` is `str`, not `&str`.
+ let subpattern =
+ DeconstructedPat::new(Str(*value), Fields::empty(), *t, pat.span);
+ ctor = Single;
+ fields = Fields::singleton(cx, subpattern)
+ }
+ // All constants that can be structurally matched have already been expanded
+ // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
+ // opaque.
+ _ => {
+ ctor = Opaque;
+ fields = Fields::empty();
}
}
}
- &PatKind::Range(box PatRange { lo, hi, end }) => {
- let ty = lo.ty();
- ctor = if let Some(int_range) = IntRange::from_range(
- cx.tcx,
- lo.eval_bits(cx.tcx, cx.param_env),
- hi.eval_bits(cx.tcx, cx.param_env),
- ty,
- &end,
- ) {
- IntRange(int_range)
- } else {
- FloatRange(lo, hi, end)
+ PatKind::Range(box PatRange { lo, hi, end, .. }) => {
+ let ty = pat.ty;
+ ctor = match ty.kind() {
+ ty::Char | ty::Int(_) | ty::Uint(_) => {
+ let lo =
+ MaybeInfiniteInt::from_pat_range_bdy(*lo, ty, cx.tcx, cx.param_env);
+ let hi =
+ MaybeInfiniteInt::from_pat_range_bdy(*hi, ty, cx.tcx, cx.param_env);
+ IntRange(IntRange::from_range(lo, hi, *end))
+ }
+ ty::Float(fty) => {
+ use rustc_apfloat::Float;
+ let lo = lo.as_finite().map(|c| c.eval_bits(cx.tcx, cx.param_env));
+ let hi = hi.as_finite().map(|c| c.eval_bits(cx.tcx, cx.param_env));
+ match fty {
+ ty::FloatTy::F32 => {
+ use rustc_apfloat::ieee::Single;
+ let lo = lo.map(Single::from_bits).unwrap_or(-Single::INFINITY);
+ let hi = hi.map(Single::from_bits).unwrap_or(Single::INFINITY);
+ F32Range(lo, hi, *end)
+ }
+ ty::FloatTy::F64 => {
+ use rustc_apfloat::ieee::Double;
+ let lo = lo.map(Double::from_bits).unwrap_or(-Double::INFINITY);
+ let hi = hi.map(Double::from_bits).unwrap_or(Double::INFINITY);
+ F64Range(lo, hi, *end)
+ }
+ }
+ }
+ _ => bug!("invalid type for range pattern: {}", ty),
};
fields = Fields::empty();
}
@@ -1412,103 +1580,24 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
let pats = expand_or_pat(pat);
fields = Fields::from_iter(cx, pats.into_iter().map(mkpat));
}
+ PatKind::Error(_) => {
+ ctor = Opaque;
+ fields = Fields::empty();
+ }
}
DeconstructedPat::new(ctor, fields, pat.ty, pat.span)
}
- pub(crate) fn to_pat(&self, cx: &MatchCheckCtxt<'p, 'tcx>) -> Pat<'tcx> {
- let is_wildcard = |pat: &Pat<'_>| {
- matches!(pat.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
- };
- let mut subpatterns = self.iter_fields().map(|p| Box::new(p.to_pat(cx)));
- let kind = match &self.ctor {
- Single | Variant(_) => match self.ty.kind() {
- ty::Tuple(..) => PatKind::Leaf {
- subpatterns: subpatterns
- .enumerate()
- .map(|(i, pattern)| FieldPat { field: FieldIdx::new(i), pattern })
- .collect(),
- },
- ty::Adt(adt_def, _) if adt_def.is_box() => {
- // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside
- // of `std`). So this branch is only reachable when the feature is enabled and
- // the pattern is a box pattern.
- PatKind::Deref { subpattern: subpatterns.next().unwrap() }
- }
- ty::Adt(adt_def, args) => {
- let variant_index = self.ctor.variant_index_for_adt(*adt_def);
- let variant = &adt_def.variant(variant_index);
- let subpatterns = Fields::list_variant_nonhidden_fields(cx, self.ty, variant)
- .zip(subpatterns)
- .map(|((field, _ty), pattern)| FieldPat { field, pattern })
- .collect();
-
- if adt_def.is_enum() {
- PatKind::Variant { adt_def: *adt_def, args, variant_index, subpatterns }
- } else {
- PatKind::Leaf { subpatterns }
- }
- }
- // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
- // be careful to reconstruct the correct constant pattern here. However a string
- // literal pattern will never be reported as a non-exhaustiveness witness, so we
- // ignore this issue.
- ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() },
- _ => bug!("unexpected ctor for type {:?} {:?}", self.ctor, self.ty),
- },
- Slice(slice) => {
- match slice.kind {
- FixedLen(_) => PatKind::Slice {
- prefix: subpatterns.collect(),
- slice: None,
- suffix: Box::new([]),
- },
- VarLen(prefix, _) => {
- let mut subpatterns = subpatterns.peekable();
- let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix).collect();
- if slice.array_len.is_some() {
- // Improves diagnostics a bit: if the type is a known-size array, instead
- // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`.
- // This is incorrect if the size is not known, since `[_, ..]` captures
- // arrays of lengths `>= 1` whereas `[..]` captures any length.
- while !prefix.is_empty() && is_wildcard(prefix.last().unwrap()) {
- prefix.pop();
- }
- while subpatterns.peek().is_some()
- && is_wildcard(subpatterns.peek().unwrap())
- {
- subpatterns.next();
- }
- }
- let suffix: Box<[_]> = subpatterns.collect();
- let wild = Pat::wildcard_from_ty(self.ty);
- PatKind::Slice {
- prefix: prefix.into_boxed_slice(),
- slice: Some(Box::new(wild)),
- suffix,
- }
- }
- }
- }
- &Str(value) => PatKind::Constant { value },
- &FloatRange(lo, hi, end) => PatKind::Range(Box::new(PatRange { lo, hi, end })),
- IntRange(range) => return range.to_pat(cx.tcx, self.ty),
- Wildcard | NonExhaustive => PatKind::Wild,
- Missing { .. } => bug!(
- "trying to convert a `Missing` constructor into a `Pat`; this is probably a bug,
- `Missing` should have been processed in `apply_constructors`"
- ),
- Opaque | Or => {
- bug!("can't convert to pattern: {:?}", self)
- }
- };
-
- Pat { ty: self.ty, span: DUMMY_SP, kind }
- }
-
pub(super) fn is_or_pat(&self) -> bool {
matches!(self.ctor, Or)
}
+ pub(super) fn flatten_or_pat(&'p self) -> SmallVec<[&'p Self; 1]> {
+ if self.is_or_pat() {
+ self.iter_fields().flat_map(|p| p.flatten_or_pat()).collect()
+ } else {
+ smallvec![self]
+ }
+ }
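A standalone sketch of what `flatten_or_pat` does (with a hypothetical `Pat` type rather than rustc's `DeconstructedPat`): nested or-patterns expand into the flat list of their leaves.

```
#[derive(Debug, PartialEq)]
enum Pat {
    Lit(u32),
    Or(Vec<Pat>),
}

// Recursively expand or-patterns; anything else is a leaf.
fn flatten_or_pat(pat: &Pat) -> Vec<&Pat> {
    match pat {
        Pat::Or(subs) => subs.iter().flat_map(|p| flatten_or_pat(p)).collect(),
        _ => vec![pat],
    }
}

fn main() {
    // Corresponds roughly to the pattern `1 | (2 | 3)`.
    let pat = Pat::Or(vec![Pat::Lit(1), Pat::Or(vec![Pat::Lit(2), Pat::Lit(3)])]);
    assert_eq!(flatten_or_pat(&pat), vec![&Pat::Lit(1), &Pat::Lit(2), &Pat::Lit(3)]);
}
```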
pub(super) fn ctor(&self) -> &Constructor<'tcx> {
&self.ctor
@@ -1673,21 +1762,151 @@ impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> {
}
write!(f, "]")
}
- &FloatRange(lo, hi, end) => {
- write!(f, "{lo}")?;
- write!(f, "{end}")?;
- write!(f, "{hi}")
- }
- IntRange(range) => write!(f, "{range:?}"), // Best-effort, will render e.g. `false` as `0..=0`
- Wildcard | Missing { .. } | NonExhaustive => write!(f, "_ : {:?}", self.ty),
+ Bool(b) => write!(f, "{b}"),
+ // Best-effort, will render signed ranges incorrectly
+ IntRange(range) => write!(f, "{range:?}"),
+ F32Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"),
+ F64Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"),
+ Str(value) => write!(f, "{value}"),
+ Opaque => write!(f, "<constant pattern>"),
Or => {
for pat in self.iter_fields() {
write!(f, "{}{:?}", start_or_continue(" | "), pat)?;
}
Ok(())
}
- Str(value) => write!(f, "{value}"),
- Opaque => write!(f, "<constant pattern>"),
+ Wildcard | Missing { .. } | NonExhaustive | Hidden => write!(f, "_ : {:?}", self.ty),
}
}
}
+
+/// Same idea as `DeconstructedPat`, except this is a fictitious pattern built up for diagnostic
+/// purposes. As such it doesn't use interning and can be cloned.
+#[derive(Debug, Clone)]
+pub(crate) struct WitnessPat<'tcx> {
+ ctor: Constructor<'tcx>,
+ pub(crate) fields: Vec<WitnessPat<'tcx>>,
+ ty: Ty<'tcx>,
+}
+
+impl<'tcx> WitnessPat<'tcx> {
+ pub(super) fn new(ctor: Constructor<'tcx>, fields: Vec<Self>, ty: Ty<'tcx>) -> Self {
+ Self { ctor, fields, ty }
+ }
+ pub(super) fn wildcard(ty: Ty<'tcx>) -> Self {
+ Self::new(Wildcard, Vec::new(), ty)
+ }
+
+ /// Construct a pattern that matches everything that starts with this constructor.
+ /// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern
+ /// `Some(_)`.
+ pub(super) fn wild_from_ctor(pcx: &PatCtxt<'_, '_, 'tcx>, ctor: Constructor<'tcx>) -> Self {
+ // Reuse `Fields::wildcards` to get the types.
+ let fields = Fields::wildcards(pcx, &ctor)
+ .iter_patterns()
+ .map(|deco_pat| Self::wildcard(deco_pat.ty()))
+ .collect();
+ Self::new(ctor, fields, pcx.ty)
+ }
+
+ pub(super) fn ctor(&self) -> &Constructor<'tcx> {
+ &self.ctor
+ }
+ pub(super) fn ty(&self) -> Ty<'tcx> {
+ self.ty
+ }
+
+ /// Convert back to a `thir::Pat` for diagnostic purposes. This panics for patterns that don't
+ /// appear in diagnostics, like float ranges.
+ pub(crate) fn to_diagnostic_pat(&self, cx: &MatchCheckCtxt<'_, 'tcx>) -> Pat<'tcx> {
+ let is_wildcard = |pat: &Pat<'_>| matches!(pat.kind, PatKind::Wild);
+ let mut subpatterns = self.iter_fields().map(|p| Box::new(p.to_diagnostic_pat(cx)));
+ let kind = match &self.ctor {
+ Bool(b) => PatKind::Constant { value: mir::Const::from_bool(cx.tcx, *b) },
+ IntRange(range) => return range.to_diagnostic_pat(self.ty, cx.tcx),
+ Single | Variant(_) => match self.ty.kind() {
+ ty::Tuple(..) => PatKind::Leaf {
+ subpatterns: subpatterns
+ .enumerate()
+ .map(|(i, pattern)| FieldPat { field: FieldIdx::new(i), pattern })
+ .collect(),
+ },
+ ty::Adt(adt_def, _) if adt_def.is_box() => {
+ // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside
+ // of `std`). So this branch is only reachable when the feature is enabled and
+ // the pattern is a box pattern.
+ PatKind::Deref { subpattern: subpatterns.next().unwrap() }
+ }
+ ty::Adt(adt_def, args) => {
+ let variant_index = self.ctor.variant_index_for_adt(*adt_def);
+ let variant = &adt_def.variant(variant_index);
+ let subpatterns = Fields::list_variant_nonhidden_fields(cx, self.ty, variant)
+ .zip(subpatterns)
+ .map(|((field, _ty), pattern)| FieldPat { field, pattern })
+ .collect();
+
+ if adt_def.is_enum() {
+ PatKind::Variant { adt_def: *adt_def, args, variant_index, subpatterns }
+ } else {
+ PatKind::Leaf { subpatterns }
+ }
+ }
+ // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
+ // be careful to reconstruct the correct constant pattern here. However a string
+ // literal pattern will never be reported as a non-exhaustiveness witness, so we
+ // ignore this issue.
+ ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() },
+ _ => bug!("unexpected ctor for type {:?} {:?}", self.ctor, self.ty),
+ },
+ Slice(slice) => {
+ match slice.kind {
+ FixedLen(_) => PatKind::Slice {
+ prefix: subpatterns.collect(),
+ slice: None,
+ suffix: Box::new([]),
+ },
+ VarLen(prefix, _) => {
+ let mut subpatterns = subpatterns.peekable();
+ let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix).collect();
+ if slice.array_len.is_some() {
+ // Improves diagnostics a bit: if the type is a known-size array, instead
+ // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`.
+ // This is incorrect if the size is not known, since `[_, ..]` captures
+ // arrays of lengths `>= 1` whereas `[..]` captures any length.
+ while !prefix.is_empty() && is_wildcard(prefix.last().unwrap()) {
+ prefix.pop();
+ }
+ while subpatterns.peek().is_some()
+ && is_wildcard(subpatterns.peek().unwrap())
+ {
+ subpatterns.next();
+ }
+ }
+ let suffix: Box<[_]> = subpatterns.collect();
+ let wild = Pat::wildcard_from_ty(self.ty);
+ PatKind::Slice {
+ prefix: prefix.into_boxed_slice(),
+ slice: Some(Box::new(wild)),
+ suffix,
+ }
+ }
+ }
+ }
+ &Str(value) => PatKind::Constant { value },
+ Wildcard | NonExhaustive | Hidden => PatKind::Wild,
+ Missing { .. } => bug!(
+ "trying to convert a `Missing` constructor into a `Pat`; this is probably a bug,
+ `Missing` should have been processed in `apply_constructors`"
+ ),
+ F32Range(..) | F64Range(..) | Opaque | Or => {
+ bug!("can't convert to pattern: {:?}", self)
+ }
+ };
+
+ Pat { ty: self.ty, span: DUMMY_SP, kind }
+ }
+
+ pub(super) fn iter_fields<'a>(&'a self) -> impl Iterator<Item = &'a WitnessPat<'tcx>> {
+ self.fields.iter()
+ }
+}
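The `WitnessPat` machinery is only observable through diagnostics. A small compiling example of the kind of witness it ends up describing (the comment notes what the compiler reports if an arm is removed):

```
fn describe(x: Option<bool>) -> &'static str {
    // If the `Some(false)` arm were removed, the match would be rejected as
    // non-exhaustive with the reconstructed witness pattern `Some(false)`, which is
    // what `to_diagnostic_pat` turns into a user-facing pattern.
    match x {
        None => "none",
        Some(true) => "true",
        Some(false) => "false",
    }
}

fn main() {
    assert_eq!(describe(Some(false)), "false");
}
```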
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index fe47a1cd7..0811ab6a0 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -17,18 +17,19 @@ use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::pat_util::EnumerateAndAdjustIterator;
use rustc_hir::RangeEnd;
use rustc_index::Idx;
-use rustc_middle::mir::interpret::{
- ErrorHandled, GlobalId, LitToConstError, LitToConstInput, Scalar,
+use rustc_middle::mir::interpret::{ErrorHandled, GlobalId, LitToConstError, LitToConstInput};
+use rustc_middle::mir::{self, BorrowKind, Const, Mutability, UserTypeProjection};
+use rustc_middle::thir::{
+ Ascription, BindingMode, FieldPat, LocalVarId, Pat, PatKind, PatRange, PatRangeBoundary,
};
-use rustc_middle::mir::{self, Const, UserTypeProjection};
-use rustc_middle::mir::{BorrowKind, Mutability};
-use rustc_middle::thir::{Ascription, BindingMode, FieldPat, LocalVarId, Pat, PatKind, PatRange};
-use rustc_middle::ty::CanonicalUserTypeAnnotation;
-use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, AdtDef, Region, Ty, TyCtxt, UserType};
-use rustc_middle::ty::{GenericArg, GenericArgsRef};
-use rustc_span::{Span, Symbol};
-use rustc_target::abi::FieldIdx;
+use rustc_middle::ty::layout::IntegerExt;
+use rustc_middle::ty::{
+ self, AdtDef, CanonicalUserTypeAnnotation, GenericArg, GenericArgsRef, Region, Ty, TyCtxt,
+ TypeVisitableExt, UserType,
+};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::{ErrorGuaranteed, Span, Symbol};
+use rustc_target::abi::{FieldIdx, Integer};
use std::cmp::Ordering;
@@ -85,127 +86,164 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
)
}
- fn lower_range_expr(
+ fn lower_pattern_range_endpoint(
&mut self,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> (PatKind<'tcx>, Option<Ascription<'tcx>>) {
- match self.lower_lit(expr) {
- PatKind::AscribeUserType { ascription, subpattern: box Pat { kind, .. } } => {
- (kind, Some(ascription))
+ expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Result<
+ (Option<PatRangeBoundary<'tcx>>, Option<Ascription<'tcx>>, Option<LocalDefId>),
+ ErrorGuaranteed,
+ > {
+ match expr {
+ None => Ok((None, None, None)),
+ Some(expr) => {
+ let (kind, ascr, inline_const) = match self.lower_lit(expr) {
+ PatKind::InlineConstant { subpattern, def } => {
+ (subpattern.kind, None, Some(def))
+ }
+ PatKind::AscribeUserType { ascription, subpattern: box Pat { kind, .. } } => {
+ (kind, Some(ascription), None)
+ }
+ kind => (kind, None, None),
+ };
+ let value = if let PatKind::Constant { value } = kind {
+ value
+ } else {
+ let msg = format!(
+ "found bad range pattern endpoint `{expr:?}` outside of error recovery"
+ );
+ return Err(self.tcx.sess.delay_span_bug(expr.span, msg));
+ };
+ Ok((Some(PatRangeBoundary::Finite(value)), ascr, inline_const))
}
- kind => (kind, None),
}
}
+ /// Overflowing literals are linted against in a late pass. This is mostly fine, except when we
+ /// encounter a range pattern like `-130i8..2`: if we believe `eval_bits`, this looks like a
+ /// range where the endpoints are in the wrong order. To avoid a confusing error message, we
+    /// check for overflow in that case.
+ /// This is only called when the range is already known to be malformed.
+ fn error_on_literal_overflow(
+ &self,
+ expr: Option<&'tcx hir::Expr<'tcx>>,
+ ty: Ty<'tcx>,
+ ) -> Result<(), ErrorGuaranteed> {
+ use hir::{ExprKind, UnOp};
+ use rustc_ast::ast::LitKind;
+
+ let Some(mut expr) = expr else {
+ return Ok(());
+ };
+ let span = expr.span;
+
+ // We need to inspect the original expression, because if we only inspect the output of
+ // `eval_bits`, an overflowed value has already been wrapped around.
+ // We mostly copy the logic from the `rustc_lint::OVERFLOWING_LITERALS` lint.
+ let mut negated = false;
+ if let ExprKind::Unary(UnOp::Neg, sub_expr) = expr.kind {
+ negated = true;
+ expr = sub_expr;
+ }
+ let ExprKind::Lit(lit) = expr.kind else {
+ return Ok(());
+ };
+ let LitKind::Int(lit_val, _) = lit.node else {
+ return Ok(());
+ };
+ let (min, max): (i128, u128) = match ty.kind() {
+ ty::Int(ity) => {
+ let size = Integer::from_int_ty(&self.tcx, *ity).size();
+ (size.signed_int_min(), size.signed_int_max() as u128)
+ }
+ ty::Uint(uty) => {
+ let size = Integer::from_uint_ty(&self.tcx, *uty).size();
+ (0, size.unsigned_int_max())
+ }
+ _ => {
+ return Ok(());
+ }
+ };
+ // Detect literal value out of range `[min, max]` inclusive, avoiding use of `-min` to
+ // prevent overflow/panic.
+ if (negated && lit_val > max + 1) || (!negated && lit_val > max) {
+ return Err(self.tcx.sess.emit_err(LiteralOutOfRange { span, ty, min, max }));
+ }
+ Ok(())
+ }
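A hypothetical standalone sketch of the magnitude test at the end of this function (it assumes a two's-complement signed type, where the minimum value is `-(max + 1)`):

```
// `lit_val` is the literal's absolute value, `negated` says whether it was written with a
// leading `-`, and `max` is the type's maximum. Assumes two's complement: min == -(max + 1).
fn literal_out_of_range(lit_val: u128, negated: bool, max: u128) -> bool {
    (negated && lit_val > max + 1) || (!negated && lit_val > max)
}

fn main() {
    // `i8`: max = 127, min = -128. `-130` and `128` overflow; `-128` and `127` do not.
    assert!(literal_out_of_range(130, true, 127));
    assert!(!literal_out_of_range(128, true, 127));
    assert!(!literal_out_of_range(127, false, 127));
    assert!(literal_out_of_range(128, false, 127));
}
```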
+
fn lower_pattern_range(
&mut self,
- ty: Ty<'tcx>,
- lo: mir::Const<'tcx>,
- hi: mir::Const<'tcx>,
+ lo_expr: Option<&'tcx hir::Expr<'tcx>>,
+ hi_expr: Option<&'tcx hir::Expr<'tcx>>,
end: RangeEnd,
+ ty: Ty<'tcx>,
span: Span,
- lo_expr: Option<&hir::Expr<'tcx>>,
- hi_expr: Option<&hir::Expr<'tcx>>,
- ) -> PatKind<'tcx> {
- assert_eq!(lo.ty(), ty);
- assert_eq!(hi.ty(), ty);
- let cmp = compare_const_vals(self.tcx, lo, hi, self.param_env);
- let max = || {
- self.tcx
- .layout_of(self.param_env.with_reveal_all_normalized(self.tcx).and(ty))
- .ok()
- .unwrap()
- .size
- .unsigned_int_max()
- };
+ ) -> Result<PatKind<'tcx>, ErrorGuaranteed> {
+ if lo_expr.is_none() && hi_expr.is_none() {
+ let msg = format!("found twice-open range pattern (`..`) outside of error recovery");
+ return Err(self.tcx.sess.delay_span_bug(span, msg));
+ }
+
+ let (lo, lo_ascr, lo_inline) = self.lower_pattern_range_endpoint(lo_expr)?;
+ let (hi, hi_ascr, hi_inline) = self.lower_pattern_range_endpoint(hi_expr)?;
+
+ let lo = lo.unwrap_or(PatRangeBoundary::NegInfinity);
+ let hi = hi.unwrap_or(PatRangeBoundary::PosInfinity);
+
+ let cmp = lo.compare_with(hi, ty, self.tcx, self.param_env);
+ let mut kind = PatKind::Range(Box::new(PatRange { lo, hi, end, ty }));
match (end, cmp) {
// `x..y` where `x < y`.
- // Non-empty because the range includes at least `x`.
- (RangeEnd::Excluded, Some(Ordering::Less)) => {
- PatKind::Range(Box::new(PatRange { lo, hi, end }))
- }
- // `x..y` where `x >= y`. The range is empty => error.
- (RangeEnd::Excluded, _) => {
- let mut lower_overflow = false;
- let mut higher_overflow = false;
- if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = lo_expr
- && let rustc_ast::ast::LitKind::Int(val, _) = lit.node
- {
- if lo.eval_bits(self.tcx, self.param_env) != val {
- lower_overflow = true;
- self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
- }
- }
- if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = hi_expr
- && let rustc_ast::ast::LitKind::Int(val, _) = lit.node
- {
- if hi.eval_bits(self.tcx, self.param_env) != val {
- higher_overflow = true;
- self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
- }
- }
- if !lower_overflow && !higher_overflow {
- self.tcx.sess.emit_err(LowerRangeBoundMustBeLessThanUpper { span });
- }
- PatKind::Wild
- }
- // `x..=y` where `x == y`.
- (RangeEnd::Included, Some(Ordering::Equal)) => PatKind::Constant { value: lo },
+ (RangeEnd::Excluded, Some(Ordering::Less)) => {}
// `x..=y` where `x < y`.
- (RangeEnd::Included, Some(Ordering::Less)) => {
- PatKind::Range(Box::new(PatRange { lo, hi, end }))
- }
- // `x..=y` where `x > y` hence the range is empty => error.
- (RangeEnd::Included, _) => {
- let mut lower_overflow = false;
- let mut higher_overflow = false;
- if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = lo_expr
- && let rustc_ast::ast::LitKind::Int(val, _) = lit.node
- {
- if lo.eval_bits(self.tcx, self.param_env) != val {
- lower_overflow = true;
- self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
+ (RangeEnd::Included, Some(Ordering::Less)) => {}
+ // `x..=y` where `x == y` and `x` and `y` are finite.
+ (RangeEnd::Included, Some(Ordering::Equal)) if lo.is_finite() && hi.is_finite() => {
+ kind = PatKind::Constant { value: lo.as_finite().unwrap() };
+ }
+ // `..=x` where `x == ty::MIN`.
+ (RangeEnd::Included, Some(Ordering::Equal)) if !lo.is_finite() => {}
+ // `x..` where `x == ty::MAX` (yes, `x..` gives `RangeEnd::Included` since it is meant
+ // to include `ty::MAX`).
+ (RangeEnd::Included, Some(Ordering::Equal)) if !hi.is_finite() => {}
+ // `x..y` where `x >= y`, or `x..=y` where `x > y`. The range is empty => error.
+ _ => {
+ // Emit a more appropriate message if there was overflow.
+ self.error_on_literal_overflow(lo_expr, ty)?;
+ self.error_on_literal_overflow(hi_expr, ty)?;
+ let e = match end {
+ RangeEnd::Included => {
+ self.tcx.sess.emit_err(LowerRangeBoundMustBeLessThanOrEqualToUpper {
+ span,
+ teach: self.tcx.sess.teach(&error_code!(E0030)).then_some(()),
+ })
}
- }
- if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = hi_expr
- && let rustc_ast::ast::LitKind::Int(val, _) = lit.node
- {
- if hi.eval_bits(self.tcx, self.param_env) != val {
- higher_overflow = true;
- self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
+ RangeEnd::Excluded => {
+ self.tcx.sess.emit_err(LowerRangeBoundMustBeLessThanUpper { span })
}
- }
- if !lower_overflow && !higher_overflow {
- self.tcx.sess.emit_err(LowerRangeBoundMustBeLessThanOrEqualToUpper {
- span,
- teach: self.tcx.sess.teach(&error_code!(E0030)).then_some(()),
- });
- }
- PatKind::Wild
+ };
+ return Err(e);
}
}
- }
- fn normalize_range_pattern_ends(
- &self,
- ty: Ty<'tcx>,
- lo: Option<&PatKind<'tcx>>,
- hi: Option<&PatKind<'tcx>>,
- ) -> Option<(mir::Const<'tcx>, mir::Const<'tcx>)> {
- match (lo, hi) {
- (Some(PatKind::Constant { value: lo }), Some(PatKind::Constant { value: hi })) => {
- Some((*lo, *hi))
- }
- (Some(PatKind::Constant { value: lo }), None) => {
- let hi = ty.numeric_max_val(self.tcx)?;
- Some((*lo, mir::Const::from_ty_const(hi, self.tcx)))
+ // If we are handling a range with associated constants (e.g.
+ // `Foo::<'a>::A..=Foo::B`), we need to put the ascriptions for the associated
+ // constants somewhere. Have them on the range pattern.
+ for ascr in [lo_ascr, hi_ascr] {
+ if let Some(ascription) = ascr {
+ kind = PatKind::AscribeUserType {
+ ascription,
+ subpattern: Box::new(Pat { span, ty, kind }),
+ };
}
- (None, Some(PatKind::Constant { value: hi })) => {
- let lo = ty.numeric_min_val(self.tcx)?;
- Some((mir::Const::from_ty_const(lo, self.tcx), *hi))
+ }
+ for inline_const in [lo_inline, hi_inline] {
+ if let Some(def) = inline_const {
+ kind =
+ PatKind::InlineConstant { def, subpattern: Box::new(Pat { span, ty, kind }) };
}
- _ => None,
}
+ Ok(kind)
}
#[instrument(skip(self), level = "debug")]
@@ -220,37 +258,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
hir::PatKind::Range(ref lo_expr, ref hi_expr, end) => {
let (lo_expr, hi_expr) = (lo_expr.as_deref(), hi_expr.as_deref());
- let lo_span = lo_expr.map_or(pat.span, |e| e.span);
- let lo = lo_expr.map(|e| self.lower_range_expr(e));
- let hi = hi_expr.map(|e| self.lower_range_expr(e));
-
- let (lp, hp) = (lo.as_ref().map(|(x, _)| x), hi.as_ref().map(|(x, _)| x));
- let mut kind = match self.normalize_range_pattern_ends(ty, lp, hp) {
- Some((lc, hc)) => {
- self.lower_pattern_range(ty, lc, hc, end, lo_span, lo_expr, hi_expr)
- }
- None => {
- let msg = format!(
- "found bad range pattern `{:?}` outside of error recovery",
- (&lo, &hi),
- );
- self.tcx.sess.delay_span_bug(pat.span, msg);
- PatKind::Wild
- }
- };
-
- // If we are handling a range with associated constants (e.g.
- // `Foo::<'a>::A..=Foo::B`), we need to put the ascriptions for the associated
- // constants somewhere. Have them on the range pattern.
- for end in &[lo, hi] {
- if let Some((_, Some(ascription))) = end {
- let subpattern = Box::new(Pat { span: pat.span, ty, kind });
- kind =
- PatKind::AscribeUserType { ascription: ascription.clone(), subpattern };
- }
- }
-
- kind
+ self.lower_pattern_range(lo_expr, hi_expr, end, ty, span)
+ .unwrap_or_else(PatKind::Error)
}
hir::PatKind::Path(ref qpath) => {
@@ -418,9 +427,9 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
if adt_def.is_enum() {
let args = match ty.kind() {
ty::Adt(_, args) | ty::FnDef(_, args) => args,
- ty::Error(_) => {
+ ty::Error(e) => {
// Avoid ICE (#50585)
- return PatKind::Wild;
+ return PatKind::Error(*e);
}
_ => bug!("inappropriate type for def: {:?}", ty),
};
@@ -447,7 +456,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
| Res::SelfTyAlias { .. }
| Res::SelfCtor(..) => PatKind::Leaf { subpatterns },
_ => {
- match res {
+ let e = match res {
Res::Def(DefKind::ConstParam, _) => {
self.tcx.sess.emit_err(ConstParamInPattern { span })
}
@@ -456,7 +465,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
}
_ => self.tcx.sess.emit_err(NonConstPath { span }),
};
- PatKind::Wild
+ PatKind::Error(e)
}
};
@@ -508,14 +517,13 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// It should be assoc consts if there's no error but we cannot resolve it.
debug_assert!(is_associated_const);
- self.tcx.sess.emit_err(AssocConstInPattern { span });
-
- return pat_from_kind(PatKind::Wild);
+ let e = self.tcx.sess.emit_err(AssocConstInPattern { span });
+ return pat_from_kind(PatKind::Error(e));
}
Err(_) => {
- self.tcx.sess.emit_err(CouldNotEvalConstPattern { span });
- return pat_from_kind(PatKind::Wild);
+ let e = self.tcx.sess.emit_err(CouldNotEvalConstPattern { span });
+ return pat_from_kind(PatKind::Error(e));
}
};
@@ -569,12 +577,12 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
Err(ErrorHandled::TooGeneric(_)) => {
// While `Reported | Linted` cases will have diagnostics emitted already
// it is not true for TooGeneric case, so we need to give user more information.
- self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span });
- pat_from_kind(PatKind::Wild)
+ let e = self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span });
+ pat_from_kind(PatKind::Error(e))
}
Err(_) => {
- self.tcx.sess.emit_err(CouldNotEvalConstPattern { span });
- pat_from_kind(PatKind::Wild)
+ let e = self.tcx.sess.emit_err(CouldNotEvalConstPattern { span });
+ pat_from_kind(PatKind::Error(e))
}
}
}
@@ -597,11 +605,9 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// const eval path below.
// FIXME: investigate the performance impact of removing this.
let lit_input = match expr.kind {
- hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
- hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
- hir::ExprKind::Lit(ref lit) => {
- Some(LitToConstInput { lit: &lit.node, ty, neg: true })
- }
+ hir::ExprKind::Lit(lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
+ hir::ExprKind::Unary(hir::UnOp::Neg, expr) => match expr.kind {
+ hir::ExprKind::Lit(lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: true }),
_ => None,
},
_ => None,
@@ -624,30 +630,30 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let uneval = mir::UnevaluatedConst { def: def_id.to_def_id(), args, promoted: None };
debug_assert!(!args.has_free_regions());
- let ct = ty::UnevaluatedConst { def: def_id.to_def_id(), args: args };
+ let ct = ty::UnevaluatedConst { def: def_id.to_def_id(), args };
// First try using a valtree in order to destructure the constant into a pattern.
// FIXME: replace "try to do a thing, then fall back to another thing"
// but something more principled, like a trait query checking whether this can be turned into a valtree.
if let Ok(Some(valtree)) =
self.tcx.const_eval_resolve_for_typeck(self.param_env, ct, Some(span))
{
- self.const_to_pat(
+ let subpattern = self.const_to_pat(
Const::Ty(ty::Const::new_value(self.tcx, valtree, ty)),
id,
span,
None,
- )
- .kind
+ );
+ PatKind::InlineConstant { subpattern, def: def_id }
} else {
// If that fails, convert it to an opaque constant pattern.
match tcx.const_eval_resolve(self.param_env, uneval, Some(span)) {
Ok(val) => self.const_to_pat(mir::Const::Val(val, ty), id, span, None).kind,
Err(ErrorHandled::TooGeneric(_)) => {
// If we land here it means the const can't be evaluated because it's `TooGeneric`.
- self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span });
- PatKind::Wild
+ let e = self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span });
+ PatKind::Error(e)
}
- Err(ErrorHandled::Reported(..)) => PatKind::Wild,
+ Err(ErrorHandled::Reported(err, ..)) => PatKind::Error(err.into()),
}
}
}
@@ -680,7 +686,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
Ok(constant) => {
self.const_to_pat(Const::Ty(constant), expr.hir_id, lit.span, None).kind
}
- Err(LitToConstError::Reported(_)) => PatKind::Wild,
+ Err(LitToConstError::Reported(e)) => PatKind::Error(e),
Err(LitToConstError::TypeError) => bug!("lower_lit: had type error"),
}
}
@@ -786,6 +792,7 @@ impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
match *self {
PatKind::Wild => PatKind::Wild,
+ PatKind::Error(e) => PatKind::Error(e),
PatKind::AscribeUserType {
ref subpattern,
ascription: Ascription { ref annotation, variance },
@@ -819,6 +826,9 @@ impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
PatKind::Deref { subpattern: subpattern.fold_with(folder) }
}
PatKind::Constant { value } => PatKind::Constant { value },
+ PatKind::InlineConstant { def, subpattern: ref pattern } => {
+ PatKind::InlineConstant { def, subpattern: pattern.fold_with(folder) }
+ }
PatKind::Range(ref range) => PatKind::Range(range.clone()),
PatKind::Slice { ref prefix, ref slice, ref suffix } => PatKind::Slice {
prefix: prefix.fold_with(folder),
@@ -834,59 +844,3 @@ impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
}
}
}
-
-#[instrument(skip(tcx), level = "debug")]
-pub(crate) fn compare_const_vals<'tcx>(
- tcx: TyCtxt<'tcx>,
- a: mir::Const<'tcx>,
- b: mir::Const<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
-) -> Option<Ordering> {
- assert_eq!(a.ty(), b.ty());
-
- let ty = a.ty();
-
- // This code is hot when compiling matches with many ranges. So we
- // special-case extraction of evaluated scalars for speed, for types where
- // raw data comparisons are appropriate. E.g. `unicode-normalization` has
- // many ranges such as '\u{037A}'..='\u{037F}', and chars can be compared
- // in this way.
- match ty.kind() {
- ty::Float(_) | ty::Int(_) => {} // require special handling, see below
- _ => match (a, b) {
- (
- mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(a)), _a_ty),
- mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(b)), _b_ty),
- ) => return Some(a.cmp(&b)),
- (mir::Const::Ty(a), mir::Const::Ty(b)) => {
- return Some(a.kind().cmp(&b.kind()));
- }
- _ => {}
- },
- }
-
- let a = a.eval_bits(tcx, param_env);
- let b = b.eval_bits(tcx, param_env);
-
- use rustc_apfloat::Float;
- match *ty.kind() {
- ty::Float(ty::FloatTy::F32) => {
- let a = rustc_apfloat::ieee::Single::from_bits(a);
- let b = rustc_apfloat::ieee::Single::from_bits(b);
- a.partial_cmp(&b)
- }
- ty::Float(ty::FloatTy::F64) => {
- let a = rustc_apfloat::ieee::Double::from_bits(a);
- let b = rustc_apfloat::ieee::Double::from_bits(b);
- a.partial_cmp(&b)
- }
- ty::Int(ity) => {
- use rustc_middle::ty::layout::IntegerExt;
- let size = rustc_target::abi::Integer::from_int_ty(&tcx, ity).size();
- let a = size.sign_extend(a);
- let b = size.sign_extend(b);
- Some((a as i128).cmp(&(b as i128)))
- }
- _ => Some(a.cmp(&b)),
- }
-}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
index 21031e8ba..da7b6587a 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -213,7 +213,7 @@
//! or-patterns in the first column are expanded before being stored in the matrix. Specialization
//! for a single patstack is done from a combination of [`Constructor::is_covered_by`] and
//! [`PatStack::pop_head_constructor`]. The internals of how it's done mostly live in the
-//! [`Fields`] struct.
+//! [`super::deconstruct_pat::Fields`] struct.
//!
//!
//! # Computing usefulness
@@ -307,8 +307,14 @@
use self::ArmType::*;
use self::Usefulness::*;
-use super::deconstruct_pat::{Constructor, DeconstructedPat, Fields, SplitWildcard};
-use crate::errors::{NonExhaustiveOmittedPattern, Uncovered};
+use super::deconstruct_pat::{
+ Constructor, ConstructorSet, DeconstructedPat, IntRange, MaybeInfiniteInt, SplitConstructorSet,
+ WitnessPat,
+};
+use crate::errors::{
+ NonExhaustiveOmittedPattern, NonExhaustiveOmittedPatternLintOnArm, Overlap,
+ OverlappingRangeEndpoints, Uncovered,
+};
use rustc_data_structures::captures::Captures;
@@ -317,12 +323,12 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::def_id::DefId;
use rustc_hir::HirId;
use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::lint;
use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS;
use rustc_span::{Span, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
use std::fmt;
-use std::iter::once;
pub(crate) struct MatchCheckCtxt<'p, 'tcx> {
pub(crate) tcx: TyCtxt<'tcx>,
@@ -334,6 +340,8 @@ pub(crate) struct MatchCheckCtxt<'p, 'tcx> {
pub(crate) module: DefId,
pub(crate) param_env: ty::ParamEnv<'tcx>,
pub(crate) pattern_arena: &'p TypedArena<DeconstructedPat<'p, 'tcx>>,
+ /// The span of the whole match, if applicable.
+ pub(crate) match_span: Option<Span>,
/// Only produce `NON_EXHAUSTIVE_OMITTED_PATTERNS` lint on refutable patterns.
pub(crate) refutable: bool,
}
@@ -368,8 +376,6 @@ pub(super) struct PatCtxt<'a, 'p, 'tcx> {
/// Whether the current pattern is the whole pattern as found in a match arm, or if it's a
/// subpattern.
pub(super) is_top_level: bool,
- /// Whether the current pattern is from a `non_exhaustive` enum.
- pub(super) is_non_exhaustive: bool,
}
impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> {
@@ -476,11 +482,6 @@ impl<'p, 'tcx> Matrix<'p, 'tcx> {
Matrix { patterns: vec![] }
}
- /// Number of columns of this matrix. `None` is the matrix is empty.
- pub(super) fn column_count(&self) -> Option<usize> {
- self.patterns.get(0).map(|r| r.len())
- }
-
/// Pushes a new row to the matrix. If the row starts with an or-pattern, this recursively
/// expands it.
fn push(&mut self, row: PatStack<'p, 'tcx>) {
@@ -557,20 +558,20 @@ impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> {
/// exhaustiveness of a whole match, we use the `WithWitnesses` variant, which carries a list of
/// witnesses of non-exhaustiveness when there are any.
/// Which variant to use is dictated by `ArmType`.
-#[derive(Debug)]
-enum Usefulness<'p, 'tcx> {
+#[derive(Debug, Clone)]
+enum Usefulness<'tcx> {
/// If we don't care about witnesses, simply remember if the pattern was useful.
NoWitnesses { useful: bool },
/// Carries a list of witnesses of non-exhaustiveness. If empty, indicates that the whole
/// pattern is unreachable.
- WithWitnesses(Vec<Witness<'p, 'tcx>>),
+ WithWitnesses(Vec<WitnessStack<'tcx>>),
}
-impl<'p, 'tcx> Usefulness<'p, 'tcx> {
+impl<'tcx> Usefulness<'tcx> {
fn new_useful(preference: ArmType) -> Self {
match preference {
// A single (empty) witness of reachability.
- FakeExtraWildcard => WithWitnesses(vec![Witness(vec![])]),
+ FakeExtraWildcard => WithWitnesses(vec![WitnessStack(vec![])]),
RealArm => NoWitnesses { useful: true },
}
}
@@ -607,8 +608,8 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
/// with the results of specializing with the other constructors.
fn apply_constructor(
self,
- pcx: &PatCtxt<'_, 'p, 'tcx>,
- matrix: &Matrix<'p, 'tcx>, // used to compute missing ctors
+ pcx: &PatCtxt<'_, '_, 'tcx>,
+ matrix: &Matrix<'_, 'tcx>, // used to compute missing ctors
ctor: &Constructor<'tcx>,
) -> Self {
match self {
@@ -616,62 +617,34 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
WithWitnesses(ref witnesses) if witnesses.is_empty() => self,
WithWitnesses(witnesses) => {
let new_witnesses = if let Constructor::Missing { .. } = ctor {
- // We got the special `Missing` constructor, so each of the missing constructors
- // gives a new pattern that is not caught by the match. We list those patterns.
- if pcx.is_non_exhaustive {
- witnesses
- .into_iter()
- // Here we don't want the user to try to list all variants, we want them to add
- // a wildcard, so we only suggest that.
- .map(|witness| {
- witness.apply_constructor(pcx, &Constructor::NonExhaustive)
- })
- .collect()
- } else {
- let mut split_wildcard = SplitWildcard::new(pcx);
- split_wildcard.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
-
- // This lets us know if we skipped any variants because they are marked
- // `doc(hidden)` or they are unstable feature gate (only stdlib types).
- let mut hide_variant_show_wild = false;
- // Construct for each missing constructor a "wild" version of this
- // constructor, that matches everything that can be built with
- // it. For example, if `ctor` is a `Constructor::Variant` for
- // `Option::Some`, we get the pattern `Some(_)`.
- let mut new_patterns: Vec<DeconstructedPat<'_, '_>> = split_wildcard
- .iter_missing(pcx)
- .filter_map(|missing_ctor| {
- // Check if this variant is marked `doc(hidden)`
- if missing_ctor.is_doc_hidden_variant(pcx)
- || missing_ctor.is_unstable_variant(pcx)
- {
- hide_variant_show_wild = true;
- return None;
- }
- Some(DeconstructedPat::wild_from_ctor(pcx, missing_ctor.clone()))
- })
- .collect();
+ let mut missing = ConstructorSet::for_ty(pcx.cx, pcx.ty)
+ .compute_missing(pcx, matrix.heads().map(DeconstructedPat::ctor));
+ if missing.iter().any(|c| c.is_non_exhaustive()) {
+ // We only report `_` here; listing other constructors would be redundant.
+ missing = vec![Constructor::NonExhaustive];
+ }
- if hide_variant_show_wild {
- new_patterns.push(DeconstructedPat::wildcard(pcx.ty, pcx.span));
- }
+ // We got the special `Missing` constructor, so each of the missing constructors
+ // gives a new pattern that is not caught by the match.
+ // We construct for each missing constructor a version of this constructor with
+ // wildcards for fields, i.e. that matches everything that can be built with it.
+ // For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get
+ // the pattern `Some(_)`.
+ let new_patterns: Vec<WitnessPat<'_>> = missing
+ .into_iter()
+ .map(|missing_ctor| WitnessPat::wild_from_ctor(pcx, missing_ctor.clone()))
+ .collect();
- witnesses
- .into_iter()
- .flat_map(|witness| {
- new_patterns.iter().map(move |pat| {
- Witness(
- witness
- .0
- .iter()
- .chain(once(pat))
- .map(DeconstructedPat::clone_and_forget_reachability)
- .collect(),
- )
- })
+ witnesses
+ .into_iter()
+ .flat_map(|witness| {
+ new_patterns.iter().map(move |pat| {
+ let mut stack = witness.clone();
+ stack.0.push(pat.clone());
+ stack
})
- .collect()
- }
+ })
+ .collect()
} else {
witnesses
.into_iter()
@@ -690,15 +663,17 @@ enum ArmType {
RealArm,
}
-/// A witness of non-exhaustiveness for error reporting, represented
-/// as a list of patterns (in reverse order of construction) with
-/// wildcards inside to represent elements that can take any inhabitant
-/// of the type as a value.
+/// A witness-tuple of non-exhaustiveness for error reporting, represented as a list of patterns (in
+/// reverse order of construction) with wildcards inside to represent elements that can take any
+/// inhabitant of the type as a value.
///
-/// A witness against a list of patterns should have the same types
-/// and length as the pattern matched against. Because Rust `match`
-/// is always against a single pattern, at the end the witness will
-/// have length 1, but in the middle of the algorithm, it can contain
+/// This mirrors `PatStack`: they function similarly, except `PatStack` contains user patterns we
+/// are inspecting, and `WitnessStack` contains witnesses we are constructing.
+/// FIXME(Nadrieril): use the same order of patterns for both
+///
+/// A `WitnessStack` should have the same types and length as the `PatStacks` we are inspecting
+/// (except we store the patterns in reverse order). Because Rust `match` is always against a single
+/// pattern, at the end the stack will have length 1. In the middle of the algorithm, it can contain
/// multiple patterns.
///
/// For example, if we are constructing a witness for the match against
@@ -713,23 +688,37 @@ enum ArmType {
/// # }
/// ```
///
-/// We'll perform the following steps:
-/// 1. Start with an empty witness
-/// `Witness(vec![])`
-/// 2. Push a witness `true` against the `false`
-/// `Witness(vec![true])`
-/// 3. Push a witness `Some(_)` against the `None`
-/// `Witness(vec![true, Some(_)])`
-/// 4. Apply the `Pair` constructor to the witnesses
-/// `Witness(vec![Pair(Some(_), true)])`
+/// We'll perform the following steps (among others):
+/// - Start with a matrix representing the match
+/// `PatStack(vec![Pair(None, _)])`
+/// `PatStack(vec![Pair(_, false)])`
+/// - Specialize with `Pair`
+/// `PatStack(vec![None, _])`
+/// `PatStack(vec![_, false])`
+/// - Specialize with `Some`
+/// `PatStack(vec![_, false])`
+/// - Specialize with `_`
+/// `PatStack(vec![false])`
+/// - Specialize with `true`
+/// // no patstacks left
+/// - This is a non-exhaustive match: we have the empty witness stack as a witness.
+/// `WitnessStack(vec![])`
+/// - Apply `true`
+/// `WitnessStack(vec![true])`
+/// - Apply `_`
+/// `WitnessStack(vec![true, _])`
+/// - Apply `Some`
+/// `WitnessStack(vec![true, Some(_)])`
+/// - Apply `Pair`
+/// `WitnessStack(vec![Pair(Some(_), true)])`
///
/// The final `Pair(Some(_), true)` is then the resulting witness.
-#[derive(Debug)]
-pub(crate) struct Witness<'p, 'tcx>(Vec<DeconstructedPat<'p, 'tcx>>);
+#[derive(Debug, Clone)]
+pub(crate) struct WitnessStack<'tcx>(Vec<WitnessPat<'tcx>>);
-impl<'p, 'tcx> Witness<'p, 'tcx> {
+impl<'tcx> WitnessStack<'tcx> {
/// Asserts that the witness contains a single pattern, and returns it.
- fn single_pattern(self) -> DeconstructedPat<'p, 'tcx> {
+ fn single_pattern(self) -> WitnessPat<'tcx> {
assert_eq!(self.0.len(), 1);
self.0.into_iter().next().unwrap()
}
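
As a concrete, stand-alone illustration of the witness construction traced in the doc comment above: the snippet below deliberately fails to compile, and the reported witness is exactly the `Pair(Some(_), true)` built by the stack operations listed there. The `Pair` definition sits in unchanged context outside this hunk, so the field types used here are an assumption.

    // Hypothetical definition compatible with the doc comment's example.
    struct Pair(Option<u32>, bool);

    fn foo(p: Pair) {
        // error[E0004]: non-exhaustive patterns: `Pair(Some(_), true)` not covered
        match p {
            Pair(None, _) => {}
            Pair(_, false) => {}
        }
    }
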
@@ -747,13 +736,12 @@ impl<'p, 'tcx> Witness<'p, 'tcx> {
///
/// left_ty: struct X { a: (bool, &'static str), b: usize}
/// pats: [(false, "foo"), 42] => X { a: (false, "foo"), b: 42 }
- fn apply_constructor(mut self, pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Self {
+ fn apply_constructor(mut self, pcx: &PatCtxt<'_, '_, 'tcx>, ctor: &Constructor<'tcx>) -> Self {
let pat = {
let len = self.0.len();
let arity = ctor.arity(pcx);
- let pats = self.0.drain((len - arity)..).rev();
- let fields = Fields::from_iter(pcx.cx, pats);
- DeconstructedPat::new(ctor.clone(), fields, pcx.ty, pcx.span)
+ let fields = self.0.drain((len - arity)..).rev().collect();
+ WitnessPat::new(ctor.clone(), fields, pcx.ty)
};
self.0.push(pat);
@@ -793,7 +781,7 @@ fn is_useful<'p, 'tcx>(
lint_root: HirId,
is_under_guard: bool,
is_top_level: bool,
-) -> Usefulness<'p, 'tcx> {
+) -> Usefulness<'tcx> {
debug!(?matrix, ?v);
let Matrix { patterns: rows, .. } = matrix;
@@ -844,24 +832,13 @@ fn is_useful<'p, 'tcx>(
ty = row.head().ty();
}
}
- let is_non_exhaustive = cx.is_foreign_non_exhaustive_enum(ty);
debug!("v.head: {:?}, v.span: {:?}", v.head(), v.head().span());
- let pcx = &PatCtxt { cx, ty, span: v.head().span(), is_top_level, is_non_exhaustive };
+ let pcx = &PatCtxt { cx, ty, span: v.head().span(), is_top_level };
let v_ctor = v.head().ctor();
debug!(?v_ctor);
- if let Constructor::IntRange(ctor_range) = &v_ctor {
- // Lint on likely incorrect range patterns (#63987)
- ctor_range.lint_overlapping_range_endpoints(
- pcx,
- matrix.heads(),
- matrix.column_count().unwrap_or(0),
- lint_root,
- )
- }
// We split the head constructor of `v`.
let split_ctors = v_ctor.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
- let is_non_exhaustive_and_wild = is_non_exhaustive && v_ctor.is_wildcard();
// For each constructor, we compute whether there's a value that starts with it that would
// witness the usefulness of `v`.
let start_matrix = &matrix;
@@ -882,56 +859,6 @@ fn is_useful<'p, 'tcx>(
)
});
let usefulness = usefulness.apply_constructor(pcx, start_matrix, &ctor);
-
- // When all the conditions are met we have a match with a `non_exhaustive` enum
- // that has the potential to trigger the `non_exhaustive_omitted_patterns` lint.
- // To understand the workings checkout `Constructor::split` and `SplitWildcard::new/into_ctors`
- if is_non_exhaustive_and_wild
- // Only emit a lint on refutable patterns.
- && cx.refutable
- // We check that the match has a wildcard pattern and that wildcard is useful,
- // meaning there are variants that are covered by the wildcard. Without the check
- // for `witness_preference` the lint would trigger on `if let NonExhaustiveEnum::A = foo {}`
- && usefulness.is_useful() && matches!(witness_preference, RealArm)
- && matches!(
- &ctor,
- Constructor::Missing { nonexhaustive_enum_missing_real_variants: true }
- )
- {
- let patterns = {
- let mut split_wildcard = SplitWildcard::new(pcx);
- split_wildcard.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
- // Construct for each missing constructor a "wild" version of this
- // constructor, that matches everything that can be built with
- // it. For example, if `ctor` is a `Constructor::Variant` for
- // `Option::Some`, we get the pattern `Some(_)`.
- split_wildcard
- .iter_missing(pcx)
- // Filter out the `NonExhaustive` because we want to list only real
- // variants. Also remove any unstable feature gated variants.
- // Because of how we computed `nonexhaustive_enum_missing_real_variants`,
- // this will not return an empty `Vec`.
- .filter(|c| !(c.is_non_exhaustive() || c.is_unstable_variant(pcx)))
- .cloned()
- .map(|missing_ctor| DeconstructedPat::wild_from_ctor(pcx, missing_ctor))
- .collect::<Vec<_>>()
- };
-
- // Report that a match of a `non_exhaustive` enum marked with `non_exhaustive_omitted_patterns`
- // is not exhaustive enough.
- //
- // NB: The partner lint for structs lives in `compiler/rustc_hir_analysis/src/check/pat.rs`.
- cx.tcx.emit_spanned_lint(
- NON_EXHAUSTIVE_OMITTED_PATTERNS,
- lint_root,
- pcx.span,
- NonExhaustiveOmittedPattern {
- scrut_ty: pcx.ty,
- uncovered: Uncovered::new(pcx.span, pcx.cx, patterns),
- },
- );
- }
-
ret.extend(usefulness);
}
}
@@ -943,6 +870,214 @@ fn is_useful<'p, 'tcx>(
ret
}
+/// A column of patterns in the matrix, where a column is the intuitive notion of "subpatterns that
+/// inspect the same subvalue".
+/// This is used to traverse patterns column-by-column for lints. Despite similarities with
+/// `is_useful`, this is a different traversal. Notably this is linear in the depth of patterns,
+/// whereas `is_useful` is worst-case exponential (exhaustiveness checking is NP-hard).
+#[derive(Debug)]
+struct PatternColumn<'p, 'tcx> {
+ patterns: Vec<&'p DeconstructedPat<'p, 'tcx>>,
+}
+
+impl<'p, 'tcx> PatternColumn<'p, 'tcx> {
+ fn new(patterns: Vec<&'p DeconstructedPat<'p, 'tcx>>) -> Self {
+ Self { patterns }
+ }
+
+ fn is_empty(&self) -> bool {
+ self.patterns.is_empty()
+ }
+ fn head_ty(&self) -> Option<Ty<'tcx>> {
+ if self.patterns.len() == 0 {
+ return None;
+ }
+ // If the type is opaque and it is revealed anywhere in the column, we take the revealed
+ // version. Otherwise we could encounter constructors for the revealed type and crash.
+ let is_opaque = |ty: Ty<'tcx>| matches!(ty.kind(), ty::Alias(ty::Opaque, ..));
+ let first_ty = self.patterns[0].ty();
+ if is_opaque(first_ty) {
+ for pat in &self.patterns {
+ let ty = pat.ty();
+ if !is_opaque(ty) {
+ return Some(ty);
+ }
+ }
+ }
+ Some(first_ty)
+ }
+
+ fn analyze_ctors(&self, pcx: &PatCtxt<'_, 'p, 'tcx>) -> SplitConstructorSet<'tcx> {
+ let column_ctors = self.patterns.iter().map(|p| p.ctor());
+ ConstructorSet::for_ty(pcx.cx, pcx.ty).split(pcx, column_ctors)
+ }
+ fn iter<'a>(&'a self) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> {
+ self.patterns.iter().copied()
+ }
+
+ /// Does specialization: given a constructor, this takes the patterns from the column that match
+ /// the constructor, and outputs their fields.
+ /// This returns one column per field of the constructor. They normally all have the same length
+ /// (the number of patterns in `self` that matched `ctor`), except that we expand or-patterns
+ /// which may change the lengths.
+ fn specialize(&self, pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Vec<Self> {
+ let arity = ctor.arity(pcx);
+ if arity == 0 {
+ return Vec::new();
+ }
+
+ // We specialize the column by `ctor`. This gives us `arity`-many columns of patterns. These
+ // columns may have different lengths in the presence of or-patterns (this is why we can't
+ // reuse `Matrix`).
+ let mut specialized_columns: Vec<_> =
+ (0..arity).map(|_| Self { patterns: Vec::new() }).collect();
+ let relevant_patterns =
+ self.patterns.iter().filter(|pat| ctor.is_covered_by(pcx, pat.ctor()));
+ for pat in relevant_patterns {
+ let specialized = pat.specialize(pcx, &ctor);
+ for (subpat, column) in specialized.iter().zip(&mut specialized_columns) {
+ if subpat.is_or_pat() {
+ column.patterns.extend(subpat.flatten_or_pat())
+ } else {
+ column.patterns.push(subpat)
+ }
+ }
+ }
+
+ assert!(
+ !specialized_columns[0].is_empty(),
+ "ctor {ctor:?} was listed as present but isn't;
+ there is an inconsistency between `Constructor::is_covered_by` and `ConstructorSet::split`"
+ );
+ specialized_columns
+ }
+}
+
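
To make the column specialization implemented above concrete, here is a self-contained toy sketch. It uses its own simplified pattern enum and a hypothetical `specialize_by_some` helper rather than the compiler's `DeconstructedPat`; the point is only the shape of the operation: patterns covered by the constructor contribute their sub-patterns (wildcards contribute wildcards), and the rest are dropped.

    #[derive(Clone, Debug, PartialEq)]
    enum Pat {
        Wild,
        Some(Box<Pat>), // a one-field constructor, like `Option::Some`
        None,
    }

    // Specialize a column of `Option`-like patterns by the `Some` constructor.
    fn specialize_by_some(column: &[Pat]) -> Vec<Pat> {
        column
            .iter()
            .filter_map(|p| match p {
                Pat::Some(sub) => Some((**sub).clone()), // keep the field pattern
                Pat::Wild => Some(Pat::Wild),            // `_` specializes to `_`
                Pat::None => None,                       // not covered by `Some`: dropped
            })
            .collect()
    }

    fn main() {
        let column = vec![Pat::Some(Box::new(Pat::None)), Pat::Wild, Pat::None];
        // `Some` has arity 1, so we get a single sub-column.
        assert_eq!(specialize_by_some(&column), vec![Pat::None, Pat::Wild]);
    }
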
+/// Traverse the patterns to collect any variants of a non_exhaustive enum that fail to be mentioned
+/// in a given column.
+#[instrument(level = "debug", skip(cx), ret)]
+fn collect_nonexhaustive_missing_variants<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ column: &PatternColumn<'p, 'tcx>,
+) -> Vec<WitnessPat<'tcx>> {
+ let Some(ty) = column.head_ty() else {
+ return Vec::new();
+ };
+ let pcx = &PatCtxt { cx, ty, span: DUMMY_SP, is_top_level: false };
+
+ let set = column.analyze_ctors(pcx);
+ if set.present.is_empty() {
+ // We can't consistently handle the case where no constructors are present (since this would
+ // require digging deep through any type in case there's a non_exhaustive enum somewhere),
+ // so for consistency we refuse to handle the top-level case, where we could handle it.
+ return vec![];
+ }
+
+ let mut witnesses = Vec::new();
+ if cx.is_foreign_non_exhaustive_enum(ty) {
+ witnesses.extend(
+ set.missing
+ .into_iter()
+ // This will list missing visible variants.
+ .filter(|c| !matches!(c, Constructor::Hidden | Constructor::NonExhaustive))
+ .map(|missing_ctor| WitnessPat::wild_from_ctor(pcx, missing_ctor)),
+ )
+ }
+
+ // Recurse into the fields.
+ for ctor in set.present {
+ let specialized_columns = column.specialize(pcx, &ctor);
+ let wild_pat = WitnessPat::wild_from_ctor(pcx, ctor);
+ for (i, col_i) in specialized_columns.iter().enumerate() {
+ // Compute witnesses for each column.
+ let wits_for_col_i = collect_nonexhaustive_missing_variants(cx, col_i);
+ // For each witness, we build a new pattern in the shape of `ctor(_, _, wit, _, _)`,
+ // adding enough wildcards to match `arity`.
+ for wit in wits_for_col_i {
+ let mut pat = wild_pat.clone();
+ pat.fields[i] = wit;
+ witnesses.push(pat);
+ }
+ }
+ }
+ witnesses
+}
+
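
For context on what these collected witnesses are used for, here is a hedged, nightly-only sketch (the lint is feature-gated at this point): matching a foreign `#[non_exhaustive]` enum such as `std::io::ErrorKind` with a wildcard arm compiles, but with the lint enabled the compiler also lists the variants that `_` covers implicitly.

    #![feature(non_exhaustive_omitted_patterns_lint)] // nightly-only

    use std::io::ErrorKind;

    #[warn(non_exhaustive_omitted_patterns)]
    fn describe(kind: ErrorKind) -> &'static str {
        match kind {
            ErrorKind::NotFound => "not found",
            ErrorKind::PermissionDenied => "permission denied",
            // warning: some variants are not matched explicitly
            // (the diagnostic lists the omitted `ErrorKind` variants)
            _ => "other",
        }
    }

    fn main() {
        println!("{}", describe(ErrorKind::NotFound));
    }
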
+/// Traverse the patterns to warn the user about ranges that overlap on their endpoints.
+#[instrument(level = "debug", skip(cx, lint_root))]
+fn lint_overlapping_range_endpoints<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ column: &PatternColumn<'p, 'tcx>,
+ lint_root: HirId,
+) {
+ let Some(ty) = column.head_ty() else {
+ return;
+ };
+ let pcx = &PatCtxt { cx, ty, span: DUMMY_SP, is_top_level: false };
+
+ let set = column.analyze_ctors(pcx);
+
+ if IntRange::is_integral(ty) {
+ let emit_lint = |overlap: &IntRange, this_span: Span, overlapped_spans: &[Span]| {
+ let overlap_as_pat = overlap.to_diagnostic_pat(ty, cx.tcx);
+ let overlaps: Vec<_> = overlapped_spans
+ .iter()
+ .copied()
+ .map(|span| Overlap { range: overlap_as_pat.clone(), span })
+ .collect();
+ cx.tcx.emit_spanned_lint(
+ lint::builtin::OVERLAPPING_RANGE_ENDPOINTS,
+ lint_root,
+ this_span,
+ OverlappingRangeEndpoints { overlap: overlaps, range: this_span },
+ );
+ };
+
+ // If two ranges overlapped, the split set will contain their intersection as a singleton.
+ let split_int_ranges = set.present.iter().filter_map(|c| c.as_int_range());
+ for overlap_range in split_int_ranges.clone() {
+ if overlap_range.is_singleton() {
+ let overlap: MaybeInfiniteInt = overlap_range.lo;
+ // Ranges that look like `lo..=overlap`.
+ let mut prefixes: SmallVec<[_; 1]> = Default::default();
+ // Ranges that look like `overlap..=hi`.
+ let mut suffixes: SmallVec<[_; 1]> = Default::default();
+ // Iterate on patterns that contained `overlap`.
+ for pat in column.iter() {
+ let this_span = pat.span();
+ let Constructor::IntRange(this_range) = pat.ctor() else { continue };
+ if this_range.is_singleton() {
+ // Don't lint when one of the ranges is a singleton.
+ continue;
+ }
+ if this_range.lo == overlap {
+ // `this_range` looks like `overlap..=this_range.hi`; it overlaps with any
+ // ranges that look like `lo..=overlap`.
+ if !prefixes.is_empty() {
+ emit_lint(overlap_range, this_span, &prefixes);
+ }
+ suffixes.push(this_span)
+ } else if this_range.hi == overlap.plus_one() {
+ // `this_range` looks like `this_range.lo..=overlap`; it overlaps with any
+ // ranges that look like `overlap..=hi`.
+ if !suffixes.is_empty() {
+ emit_lint(overlap_range, this_span, &suffixes);
+ }
+ prefixes.push(this_span)
+ }
+ }
+ }
+ }
+ } else {
+ // Recurse into the fields.
+ for ctor in set.present {
+ for col in column.specialize(pcx, &ctor) {
+ lint_overlapping_range_endpoints(cx, &col, lint_root);
+ }
+ }
+ }
+}
+
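
A minimal example of what this traversal warns about (the `overlapping_range_endpoints` lint is warn-by-default): two range patterns that share an endpoint both cover the boundary value, which is usually an off-by-one mistake.

    fn bucket(n: u8) -> &'static str {
        match n {
            // warning: multiple patterns overlap on their endpoints
            0..=5 => "low",  // `5` is covered here...
            5..=10 => "mid", // ...and here; `6..=10` was probably intended
            _ => "high",
        }
    }

    fn main() {
        assert_eq!(bucket(5), "low"); // the first matching arm wins
    }
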
/// The arm of a match expression.
#[derive(Clone, Copy, Debug)]
pub(crate) struct MatchArm<'p, 'tcx> {
@@ -969,7 +1104,7 @@ pub(crate) struct UsefulnessReport<'p, 'tcx> {
pub(crate) arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Reachability)>,
/// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of
/// exhaustiveness.
- pub(crate) non_exhaustiveness_witnesses: Vec<DeconstructedPat<'p, 'tcx>>,
+ pub(crate) non_exhaustiveness_witnesses: Vec<WitnessPat<'tcx>>,
}
/// The entrypoint for the usefulness algorithm. Computes whether a match is exhaustive and which
@@ -983,6 +1118,7 @@ pub(crate) fn compute_match_usefulness<'p, 'tcx>(
arms: &[MatchArm<'p, 'tcx>],
lint_root: HirId,
scrut_ty: Ty<'tcx>,
+ scrut_span: Span,
) -> UsefulnessReport<'p, 'tcx> {
let mut matrix = Matrix::empty();
let arm_usefulness: Vec<_> = arms
@@ -1007,9 +1143,63 @@ pub(crate) fn compute_match_usefulness<'p, 'tcx>(
let wild_pattern = cx.pattern_arena.alloc(DeconstructedPat::wildcard(scrut_ty, DUMMY_SP));
let v = PatStack::from_pattern(wild_pattern);
let usefulness = is_useful(cx, &matrix, &v, FakeExtraWildcard, lint_root, false, true);
- let non_exhaustiveness_witnesses = match usefulness {
+ let non_exhaustiveness_witnesses: Vec<_> = match usefulness {
WithWitnesses(pats) => pats.into_iter().map(|w| w.single_pattern()).collect(),
NoWitnesses { .. } => bug!(),
};
+
+ let pat_column = arms.iter().flat_map(|arm| arm.pat.flatten_or_pat()).collect::<Vec<_>>();
+ let pat_column = PatternColumn::new(pat_column);
+ lint_overlapping_range_endpoints(cx, &pat_column, lint_root);
+
+ // Run the non_exhaustive_omitted_patterns lint. Only run on refutable patterns to avoid hitting
+ // `if let`s. Only run if the match is exhaustive; otherwise the error is redundant.
+ if cx.refutable && non_exhaustiveness_witnesses.is_empty() {
+ if !matches!(
+ cx.tcx.lint_level_at_node(NON_EXHAUSTIVE_OMITTED_PATTERNS, lint_root).0,
+ rustc_session::lint::Level::Allow
+ ) {
+ let witnesses = collect_nonexhaustive_missing_variants(cx, &pat_column);
+
+ if !witnesses.is_empty() {
+ // Report that a match of a `non_exhaustive` enum marked with `non_exhaustive_omitted_patterns`
+ // is not exhaustive enough.
+ //
+ // NB: The partner lint for structs lives in `compiler/rustc_hir_analysis/src/check/pat.rs`.
+ cx.tcx.emit_spanned_lint(
+ NON_EXHAUSTIVE_OMITTED_PATTERNS,
+ lint_root,
+ scrut_span,
+ NonExhaustiveOmittedPattern {
+ scrut_ty,
+ uncovered: Uncovered::new(scrut_span, cx, witnesses),
+ },
+ );
+ }
+ } else {
+ // We used to allow putting the `#[allow(non_exhaustive_omitted_patterns)]` on a match
+ // arm. This no longer makes sense so we warn users, to avoid silently breaking their
+ // usage of the lint.
+ for arm in arms {
+ let (lint_level, lint_level_source) =
+ cx.tcx.lint_level_at_node(NON_EXHAUSTIVE_OMITTED_PATTERNS, arm.hir_id);
+ if !matches!(lint_level, rustc_session::lint::Level::Allow) {
+ let decorator = NonExhaustiveOmittedPatternLintOnArm {
+ lint_span: lint_level_source.span(),
+ suggest_lint_on_match: cx.match_span.map(|span| span.shrink_to_lo()),
+ lint_level: lint_level.as_str(),
+ lint_name: "non_exhaustive_omitted_patterns",
+ };
+
+ use rustc_errors::DecorateLint;
+ let mut err = cx.tcx.sess.struct_span_warn(arm.pat.span(), "");
+ err.set_primary_message(decorator.msg());
+ decorator.decorate_lint(&mut err);
+ err.emit();
+ }
+ }
+ }
+ }
+
UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses }
}
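
To illustrate the arm-level fallback handled at the end of `compute_match_usefulness` above: setting the lint on an individual arm used to be accepted but no longer does anything, so the compiler now points at the arm and suggests moving the attribute onto the whole `match`. A hedged, nightly-only sketch follows (the diagnostic wording in the comment is paraphrased):

    #![feature(non_exhaustive_omitted_patterns_lint)]

    use std::io::ErrorKind;

    fn handle(kind: ErrorKind) {
        match kind {
            ErrorKind::NotFound => {}
            // warning: setting `non_exhaustive_omitted_patterns` on an arm no longer
            // has any effect; set it on the whole `match` instead
            #[warn(non_exhaustive_omitted_patterns)]
            _ => {}
        }
    }

    fn main() {
        handle(ErrorKind::NotFound);
    }
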
diff --git a/compiler/rustc_mir_build/src/thir/print.rs b/compiler/rustc_mir_build/src/thir/print.rs
index 3b6276cfe..c3b2309b7 100644
--- a/compiler/rustc_mir_build/src/thir/print.rs
+++ b/compiler/rustc_mir_build/src/thir/print.rs
@@ -692,7 +692,7 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
}
PatKind::Deref { subpattern } => {
print_indented!(self, "Deref { ", depth_lvl + 1);
- print_indented!(self, "subpattern: ", depth_lvl + 2);
+ print_indented!(self, "subpattern:", depth_lvl + 2);
self.print_pat(subpattern, depth_lvl + 2);
print_indented!(self, "}", depth_lvl + 1);
}
@@ -701,6 +701,13 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
print_indented!(self, format!("value: {:?}", value), depth_lvl + 2);
print_indented!(self, "}", depth_lvl + 1);
}
+ PatKind::InlineConstant { def, subpattern } => {
+ print_indented!(self, "InlineConstant {", depth_lvl + 1);
+ print_indented!(self, format!("def: {:?}", def), depth_lvl + 2);
+ print_indented!(self, "subpattern:", depth_lvl + 2);
+ self.print_pat(subpattern, depth_lvl + 2);
+ print_indented!(self, "}", depth_lvl + 1);
+ }
PatKind::Range(pat_range) => {
print_indented!(self, format!("Range ( {:?} )", pat_range), depth_lvl + 1);
}
@@ -757,6 +764,9 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> {
print_indented!(self, "]", depth_lvl + 2);
print_indented!(self, "}", depth_lvl + 1);
}
+ PatKind::Error(_) => {
+ print_indented!(self, "Error", depth_lvl + 1);
+ }
}
print_indented!(self, "}", depth_lvl);