From 17d40c6057c88f4c432b0d7bac88e1b84cb7e67f Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:03:36 +0200
Subject: Adding upstream version 1.65.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 compiler/rustc_parse/src/lexer/mod.rs           |   2 -
 .../src/lexer/unescape_error_reporting.rs       |   8 +-
 compiler/rustc_parse/src/lib.rs                 |   6 +-
 compiler/rustc_parse/src/parser/attr.rs         |  31 +-
 compiler/rustc_parse/src/parser/attr_wrapper.rs |  78 +-
 compiler/rustc_parse/src/parser/diagnostics.rs  | 476 +++++++++++-
 compiler/rustc_parse/src/parser/expr.rs         | 837 +++++++++------
 compiler/rustc_parse/src/parser/generics.rs     |  15 +-
 compiler/rustc_parse/src/parser/item.rs         | 192 +++--
 compiler/rustc_parse/src/parser/mod.rs          |  45 +-
 compiler/rustc_parse/src/parser/nonterminal.rs  |  24 +-
 compiler/rustc_parse/src/parser/pat.rs          |  59 +-
 compiler/rustc_parse/src/parser/path.rs         |  22 +-
 compiler/rustc_parse/src/parser/stmt.rs         |  70 +-
 compiler/rustc_parse/src/parser/ty.rs           |  39 +-
 15 files changed, 1151 insertions(+), 753 deletions(-)
(limited to 'compiler/rustc_parse/src')

diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
index 848e142e5..63819a2f9 100644
--- a/compiler/rustc_parse/src/lexer/mod.rs
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -14,8 +14,6 @@ use rustc_session::parse::ParseSess;
 use rustc_span::symbol::{sym, Symbol};
 use rustc_span::{edition::Edition, BytePos, Pos, Span};
 
-use tracing::debug;
-
 mod tokentrees;
 mod unescape_error_reporting;
 mod unicode_chars;
diff --git a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
index 273827864..77c4fadab 100644
--- a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
+++ b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
@@ -20,13 +20,9 @@ pub(crate) fn emit_unescape_error(
     range: Range<usize>,
     error: EscapeError,
 ) {
-    tracing::debug!(
+    debug!(
         "emit_unescape_error: {:?}, {:?}, {:?}, {:?}, {:?}",
-        lit,
-        span_with_quotes,
-        mode,
-        range,
-        error
+        lit, span_with_quotes, mode, range, error
     );
     let last_char = || {
         let c = lit[range.clone()].chars().rev().next().unwrap();
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 8c087c65c..a37327f42 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -4,7 +4,7 @@
 #![feature(box_patterns)]
 #![feature(if_let_guard)]
 #![feature(let_chains)]
-#![feature(let_else)]
+#![cfg_attr(bootstrap, feature(let_else))]
 #![feature(never_type)]
 #![feature(rustc_attrs)]
 #![recursion_limit = "256"]
@@ -63,7 +63,7 @@ pub fn parse_crate_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'
 pub fn parse_crate_attrs_from_file<'a>(
     input: &Path,
     sess: &'a ParseSess,
-) -> PResult<'a, Vec<ast::Attribute>> {
+) -> PResult<'a, ast::AttrVec> {
     let mut parser = new_parser_from_file(sess, input, None);
     parser.parse_inner_attributes()
 }
@@ -80,7 +80,7 @@ pub fn parse_crate_attrs_from_source_str(
     name: FileName,
     source: String,
     sess: &ParseSess,
-) -> PResult<'_, Vec<ast::Attribute>> {
+) -> PResult<'_, ast::AttrVec> {
     new_parser_from_source_str(sess, name, source).parse_inner_attributes()
 }
 
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index acdbddf40..5fd69b15e 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -7,8 +7,6 @@ use rustc_errors::{error_code, Diagnostic, PResult};
 use rustc_span::{sym, BytePos, Span};
 use std::convert::TryInto;
 
-use tracing::debug;
-
 // Public for rustfmt usage
 #[derive(Debug)]
 pub enum InnerAttrPolicy<'a> {
@@ -34,7 +32,7 @@ enum OuterAttributeType {
 impl<'a> Parser<'a> {
     /// Parses attributes that appear before an item.
     pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
-        let mut outer_attrs: Vec<ast::Attribute> = Vec::new();
+        let mut outer_attrs = ast::AttrVec::new();
         let mut just_parsed_doc_comment = false;
         let start_pos = self.token_cursor.num_next_calls;
         loop {
@@ -89,6 +87,7 @@ impl<'a> Parser<'a> {
                 // Always make an outer attribute - this allows us to recover from a misplaced
                 // inner attribute.
                 Some(attr::mk_doc_comment(
+                    &self.sess.attr_id_generator,
                     comment_kind,
                     ast::AttrStyle::Outer,
                     data,
@@ -106,7 +105,7 @@ impl<'a> Parser<'a> {
                 break;
             }
         }
-        Ok(AttrWrapper::new(outer_attrs.into(), start_pos))
+        Ok(AttrWrapper::new(outer_attrs, start_pos))
     }
 
     /// Matches `attribute = # ! [ meta_item ]`.
@@ -140,7 +139,13 @@ impl<'a> Parser<'a> {
                     this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
                 }
 
-                Ok(attr::mk_attr_from_item(item, None, style, attr_sp))
+                Ok(attr::mk_attr_from_item(
+                    &self.sess.attr_id_generator,
+                    item,
+                    None,
+                    style,
+                    attr_sp,
+                ))
             } else {
                 let token_str = pprust::token_to_string(&this.token);
                 let msg = &format!("expected `#`, found `{token_str}`");
@@ -283,8 +288,8 @@ impl<'a> Parser<'a> {
     /// terminated by a semicolon.
     ///
     /// Matches `inner_attrs*`.
-    pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
-        let mut attrs: Vec<ast::Attribute> = vec![];
+    pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, ast::AttrVec> {
+        let mut attrs = ast::AttrVec::new();
         loop {
             let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
             // Only try to parse if it is an inner attribute (has `!`).
@@ -293,7 +298,13 @@ impl<'a> Parser<'a> {
             } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                 if attr_style == ast::AttrStyle::Inner {
                     self.bump();
-                    Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
+                    Some(attr::mk_doc_comment(
+                        &self.sess.attr_id_generator,
+                        comment_kind,
+                        attr_style,
+                        data,
+                        self.prev_token.span,
+                    ))
                 } else {
                     None
                 }
@@ -303,9 +314,9 @@ impl<'a> Parser<'a> {
             if let Some(attr) = attr {
                 let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
                 // If we are currently capturing tokens, mark the location of this inner attribute.
-                // If capturing ends up creating a `LazyTokenStream`, we will include
+                // If capturing ends up creating a `LazyAttrTokenStream`, we will include
                 // this replace range with it, removing the inner attribute from the final
-                // `AttrAnnotatedTokenStream`. Inner attributes are stored in the parsed AST note.
+                // `AttrTokenStream`. Inner attributes are stored in the parsed AST note.
                 // During macro expansion, they are selectively inserted back into the
                 // token stream (the first inner attribute is removed each time we invoke the
                 // corresponding macro).
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs index 6c750ff42..5fdafd187 100644 --- a/compiler/rustc_parse/src/parser/attr_wrapper.rs +++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs @@ -1,7 +1,7 @@ use super::{Capturing, FlatToken, ForceCollect, Parser, ReplaceRange, TokenCursor, TrailingToken}; use rustc_ast::token::{self, Delimiter, Token, TokenKind}; -use rustc_ast::tokenstream::{AttrAnnotatedTokenStream, AttributesData, CreateTokenStream}; -use rustc_ast::tokenstream::{AttrAnnotatedTokenTree, DelimSpan, LazyTokenStream, Spacing}; +use rustc_ast::tokenstream::{AttrTokenStream, AttributesData, ToAttrTokenStream}; +use rustc_ast::tokenstream::{AttrTokenTree, DelimSpan, LazyAttrTokenStream, Spacing}; use rustc_ast::{self as ast}; use rustc_ast::{AttrVec, Attribute, HasAttrs, HasTokens}; use rustc_errors::PResult; @@ -15,11 +15,11 @@ use std::ops::Range; /// for the attribute target. This allows us to perform cfg-expansion on /// a token stream before we invoke a derive proc-macro. /// -/// This wrapper prevents direct access to the underlying `Vec`. +/// This wrapper prevents direct access to the underlying `ast::AttrVec>`. /// Parsing code can only get access to the underlying attributes /// by passing an `AttrWrapper` to `collect_tokens_trailing_tokens`. /// This makes it difficult to accidentally construct an AST node -/// (which stores a `Vec`) without first collecting tokens. +/// (which stores an `ast::AttrVec`) without first collecting tokens. /// /// This struct has its own module, to ensure that the parser code /// cannot directly access the `attrs` field @@ -49,9 +49,10 @@ impl AttrWrapper { self.attrs } + // Prepend `self.attrs` to `attrs`. // FIXME: require passing an NT to prevent misuse of this method - pub(crate) fn prepend_to_nt_inner(self, attrs: &mut Vec) { - let mut self_attrs: Vec<_> = self.attrs.into(); + pub(crate) fn prepend_to_nt_inner(self, attrs: &mut AttrVec) { + let mut self_attrs = self.attrs; std::mem::swap(attrs, &mut self_attrs); attrs.extend(self_attrs); } @@ -87,7 +88,7 @@ fn has_cfg_or_cfg_attr(attrs: &[Attribute]) -> bool { // This also makes `Parser` very cheap to clone, since // there is no intermediate collection buffer to clone. #[derive(Clone)] -struct LazyTokenStreamImpl { +struct LazyAttrTokenStreamImpl { start_token: (Token, Spacing), cursor_snapshot: TokenCursor, num_calls: usize, @@ -96,10 +97,10 @@ struct LazyTokenStreamImpl { } #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -rustc_data_structures::static_assert_size!(LazyTokenStreamImpl, 144); +rustc_data_structures::static_assert_size!(LazyAttrTokenStreamImpl, 144); -impl CreateTokenStream for LazyTokenStreamImpl { - fn create_token_stream(&self) -> AttrAnnotatedTokenStream { +impl ToAttrTokenStream for LazyAttrTokenStreamImpl { + fn to_attr_token_stream(&self) -> AttrTokenStream { // The token produced by the final call to `{,inlined_}next` was not // actually consumed by the callback. 
The combination of chaining the // initial token and using `take` produces the desired result - we @@ -116,7 +117,7 @@ impl CreateTokenStream for LazyTokenStreamImpl { if !self.replace_ranges.is_empty() { let mut tokens: Vec<_> = tokens.collect(); - let mut replace_ranges = self.replace_ranges.clone(); + let mut replace_ranges = self.replace_ranges.to_vec(); replace_ranges.sort_by_key(|(range, _)| range.start); #[cfg(debug_assertions)] @@ -146,7 +147,7 @@ impl CreateTokenStream for LazyTokenStreamImpl { // start position, we ensure that any replace range which encloses // another replace range will capture the *replaced* tokens for the inner // range, not the original tokens. - for (range, new_tokens) in replace_ranges.iter().rev() { + for (range, new_tokens) in replace_ranges.into_iter().rev() { assert!(!range.is_empty(), "Cannot replace an empty range: {:?}", range); // Replace ranges are only allowed to decrease the number of tokens. assert!( @@ -165,7 +166,7 @@ impl CreateTokenStream for LazyTokenStreamImpl { tokens.splice( (range.start as usize)..(range.end as usize), - new_tokens.clone().into_iter().chain(filler), + new_tokens.into_iter().chain(filler), ); } make_token_stream(tokens.into_iter(), self.break_last_token) @@ -178,7 +179,7 @@ impl CreateTokenStream for LazyTokenStreamImpl { impl<'a> Parser<'a> { /// Records all tokens consumed by the provided callback, /// including the current token. These tokens are collected - /// into a `LazyTokenStream`, and returned along with the result + /// into a `LazyAttrTokenStream`, and returned along with the result /// of the callback. /// /// Note: If your callback consumes an opening delimiter @@ -196,7 +197,7 @@ impl<'a> Parser<'a> { &mut self, attrs: AttrWrapper, force_collect: ForceCollect, - f: impl FnOnce(&mut Self, Vec) -> PResult<'a, (R, TrailingToken)>, + f: impl FnOnce(&mut Self, ast::AttrVec) -> PResult<'a, (R, TrailingToken)>, ) -> PResult<'a, R> { // We only bail out when nothing could possibly observe the collected tokens: // 1. We cannot be force collecting tokens (since force-collecting requires tokens @@ -212,7 +213,7 @@ impl<'a> Parser<'a> { // or `#[cfg_attr]` attributes. && !self.capture_cfg { - return Ok(f(self, attrs.attrs.into())?.0); + return Ok(f(self, attrs.attrs)?.0); } let start_token = (self.token.clone(), self.token_spacing); @@ -222,7 +223,7 @@ impl<'a> Parser<'a> { let prev_capturing = std::mem::replace(&mut self.capture_state.capturing, Capturing::Yes); let replace_ranges_start = self.capture_state.replace_ranges.len(); - let ret = f(self, attrs.attrs.into()); + let ret = f(self, attrs.attrs); self.capture_state.capturing = prev_capturing; @@ -296,8 +297,8 @@ impl<'a> Parser<'a> { // If we 'broke' the last token (e.g. breaking a '>>' token to two '>' tokens), // then extend the range of captured tokens to include it, since the parser - // was not actually bumped past it. When the `LazyTokenStream` gets converted - // into an `AttrAnnotatedTokenStream`, we will create the proper token. + // was not actually bumped past it. When the `LazyAttrTokenStream` gets converted + // into an `AttrTokenStream`, we will create the proper token. if self.token_cursor.break_last_token { assert_eq!( trailing, @@ -315,20 +316,20 @@ impl<'a> Parser<'a> { Box::new([]) } else { // Grab any replace ranges that occur *inside* the current AST node. 
- // We will perform the actual replacement when we convert the `LazyTokenStream` - // to an `AttrAnnotatedTokenStream` + // We will perform the actual replacement when we convert the `LazyAttrTokenStream` + // to an `AttrTokenStream`. let start_calls: u32 = cursor_snapshot_next_calls.try_into().unwrap(); self.capture_state.replace_ranges[replace_ranges_start..replace_ranges_end] .iter() .cloned() - .chain(inner_attr_replace_ranges.clone().into_iter()) + .chain(inner_attr_replace_ranges.iter().cloned()) .map(|(range, tokens)| { ((range.start - start_calls)..(range.end - start_calls), tokens) }) .collect() }; - let tokens = LazyTokenStream::new(LazyTokenStreamImpl { + let tokens = LazyAttrTokenStream::new(LazyAttrTokenStreamImpl { start_token, num_calls, cursor_snapshot, @@ -352,9 +353,9 @@ impl<'a> Parser<'a> { // on the captured token stream. if self.capture_cfg && matches!(self.capture_state.capturing, Capturing::Yes) - && has_cfg_or_cfg_attr(&final_attrs) + && has_cfg_or_cfg_attr(final_attrs) { - let attr_data = AttributesData { attrs: final_attrs.to_vec().into(), tokens }; + let attr_data = AttributesData { attrs: final_attrs.iter().cloned().collect(), tokens }; // Replace the entire AST node that we just parsed, including attributes, // with a `FlatToken::AttrTarget`. If this AST node is inside an item @@ -391,12 +392,12 @@ impl<'a> Parser<'a> { fn make_token_stream( mut iter: impl Iterator, break_last_token: bool, -) -> AttrAnnotatedTokenStream { +) -> AttrTokenStream { #[derive(Debug)] struct FrameData { // This is `None` for the first frame, `Some` for all others. open_delim_sp: Option<(Delimiter, Span)>, - inner: Vec<(AttrAnnotatedTokenTree, Spacing)>, + inner: Vec, } let mut stack = vec![FrameData { open_delim_sp: None, inner: vec![] }]; let mut token_and_spacing = iter.next(); @@ -417,48 +418,47 @@ fn make_token_stream( open_delim, span ); let dspan = DelimSpan::from_pair(open_sp, span); - let stream = AttrAnnotatedTokenStream::new(frame_data.inner); - let delimited = AttrAnnotatedTokenTree::Delimited(dspan, delim, stream); + let stream = AttrTokenStream::new(frame_data.inner); + let delimited = AttrTokenTree::Delimited(dspan, delim, stream); stack .last_mut() .unwrap_or_else(|| { panic!("Bottom token frame is missing for token: {:?}", token) }) .inner - .push((delimited, Spacing::Alone)); + .push(delimited); } FlatToken::Token(token) => stack .last_mut() .expect("Bottom token frame is missing!") .inner - .push((AttrAnnotatedTokenTree::Token(token), spacing)), + .push(AttrTokenTree::Token(token, spacing)), FlatToken::AttrTarget(data) => stack .last_mut() .expect("Bottom token frame is missing!") .inner - .push((AttrAnnotatedTokenTree::Attributes(data), spacing)), + .push(AttrTokenTree::Attributes(data)), FlatToken::Empty => {} } token_and_spacing = iter.next(); } let mut final_buf = stack.pop().expect("Missing final buf!"); if break_last_token { - let (last_token, spacing) = final_buf.inner.pop().unwrap(); - if let AttrAnnotatedTokenTree::Token(last_token) = last_token { + let last_token = final_buf.inner.pop().unwrap(); + if let AttrTokenTree::Token(last_token, spacing) = last_token { let unglued_first = last_token.kind.break_two_token_op().unwrap().0; // An 'unglued' token is always two ASCII characters let mut first_span = last_token.span.shrink_to_lo(); first_span = first_span.with_hi(first_span.lo() + rustc_span::BytePos(1)); - final_buf.inner.push(( - AttrAnnotatedTokenTree::Token(Token::new(unglued_first, first_span)), - spacing, - )); + final_buf + .inner + 
.push(AttrTokenTree::Token(Token::new(unglued_first, first_span), spacing)); } else { panic!("Unexpected last token {:?}", last_token) } } assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack); - AttrAnnotatedTokenStream::new(final_buf.inner) + AttrTokenStream::new(final_buf.inner) } diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index a2155ac1d..be524db78 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -10,26 +10,25 @@ use rustc_ast::ptr::P; use rustc_ast::token::{self, Delimiter, Lit, LitKind, TokenKind}; use rustc_ast::util::parser::AssocOp; use rustc_ast::{ - AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingMode, Block, - BlockCheckMode, Expr, ExprKind, GenericArg, Generics, Item, ItemKind, Mutability, Param, Pat, - PatKind, Path, PathSegment, QSelf, Ty, TyKind, + AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingAnnotation, Block, + BlockCheckMode, Expr, ExprKind, GenericArg, Generics, Item, ItemKind, Param, Pat, PatKind, + Path, PathSegment, QSelf, Ty, TyKind, }; use rustc_ast_pretty::pprust; use rustc_data_structures::fx::FxHashSet; use rustc_errors::{ fluent, Applicability, DiagnosticBuilder, DiagnosticMessage, Handler, MultiSpan, PResult, }; -use rustc_errors::{pluralize, struct_span_err, Diagnostic, EmissionGuarantee, ErrorGuaranteed}; +use rustc_errors::{pluralize, struct_span_err, Diagnostic, ErrorGuaranteed}; use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic}; use rustc_span::source_map::Spanned; -use rustc_span::symbol::{kw, Ident}; +use rustc_span::symbol::{kw, sym, Ident}; use rustc_span::{Span, SpanSnippetError, DUMMY_SP}; use std::ops::{Deref, DerefMut}; use std::mem::take; use crate::parser; -use tracing::{debug, trace}; const TURBOFISH_SUGGESTION_STR: &str = "use `::<...>` instead of `<...>` to specify lifetime, type, or const arguments"; @@ -38,7 +37,7 @@ const TURBOFISH_SUGGESTION_STR: &str = pub(super) fn dummy_arg(ident: Ident) -> Param { let pat = P(Pat { id: ast::DUMMY_NODE_ID, - kind: PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None), + kind: PatKind::Ident(BindingAnnotation::NONE, ident, None), span: ident.span, tokens: None, }); @@ -228,13 +227,13 @@ struct MultiSugg { } impl MultiSugg { - fn emit(self, err: &mut DiagnosticBuilder<'_, G>) { + fn emit(self, err: &mut Diagnostic) { err.multipart_suggestion(&self.msg, self.patches, self.applicability); } /// Overrides individual messages and applicabilities. 
- fn emit_many( - err: &mut DiagnosticBuilder<'_, G>, + fn emit_many( + err: &mut Diagnostic, msg: &str, applicability: Applicability, suggestions: impl Iterator, @@ -244,7 +243,7 @@ impl MultiSugg { } #[derive(SessionDiagnostic)] -#[error(parser::maybe_report_ambiguous_plus)] +#[diag(parser::maybe_report_ambiguous_plus)] struct AmbiguousPlus { pub sum_ty: String, #[primary_span] @@ -253,7 +252,7 @@ struct AmbiguousPlus { } #[derive(SessionDiagnostic)] -#[error(parser::maybe_recover_from_bad_type_plus, code = "E0178")] +#[diag(parser::maybe_recover_from_bad_type_plus, code = "E0178")] struct BadTypePlus { pub ty: String, #[primary_span] @@ -287,7 +286,7 @@ pub enum BadTypePlusSub { } #[derive(SessionDiagnostic)] -#[error(parser::maybe_recover_from_bad_qpath_stage_2)] +#[diag(parser::maybe_recover_from_bad_qpath_stage_2)] struct BadQPathStage2 { #[primary_span] #[suggestion(applicability = "maybe-incorrect")] @@ -296,7 +295,7 @@ struct BadQPathStage2 { } #[derive(SessionDiagnostic)] -#[error(parser::incorrect_semicolon)] +#[diag(parser::incorrect_semicolon)] struct IncorrectSemicolon<'a> { #[primary_span] #[suggestion_short(applicability = "machine-applicable")] @@ -307,7 +306,7 @@ struct IncorrectSemicolon<'a> { } #[derive(SessionDiagnostic)] -#[error(parser::incorrect_use_of_await)] +#[diag(parser::incorrect_use_of_await)] struct IncorrectUseOfAwait { #[primary_span] #[suggestion(parser::parentheses_suggestion, applicability = "machine-applicable")] @@ -315,7 +314,7 @@ struct IncorrectUseOfAwait { } #[derive(SessionDiagnostic)] -#[error(parser::incorrect_use_of_await)] +#[diag(parser::incorrect_use_of_await)] struct IncorrectAwait { #[primary_span] span: Span, @@ -326,7 +325,7 @@ struct IncorrectAwait { } #[derive(SessionDiagnostic)] -#[error(parser::in_in_typo)] +#[diag(parser::in_in_typo)] struct InInTypo { #[primary_span] span: Span, @@ -334,6 +333,394 @@ struct InInTypo { sugg_span: Span, } +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_variable_declaration)] +pub struct InvalidVariableDeclaration { + #[primary_span] + pub span: Span, + #[subdiagnostic] + pub sub: InvalidVariableDeclarationSub, +} + +#[derive(SessionSubdiagnostic)] +pub enum InvalidVariableDeclarationSub { + #[suggestion( + parser::switch_mut_let_order, + applicability = "maybe-incorrect", + code = "let mut" + )] + SwitchMutLetOrder(#[primary_span] Span), + #[suggestion( + parser::missing_let_before_mut, + applicability = "machine-applicable", + code = "let mut" + )] + MissingLet(#[primary_span] Span), + #[suggestion(parser::use_let_not_auto, applicability = "machine-applicable", code = "let")] + UseLetNotAuto(#[primary_span] Span), + #[suggestion(parser::use_let_not_var, applicability = "machine-applicable", code = "let")] + UseLetNotVar(#[primary_span] Span), +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_comparison_operator)] +pub(crate) struct InvalidComparisonOperator { + #[primary_span] + pub span: Span, + pub invalid: String, + #[subdiagnostic] + pub sub: InvalidComparisonOperatorSub, +} + +#[derive(SessionSubdiagnostic)] +pub(crate) enum InvalidComparisonOperatorSub { + #[suggestion_short( + parser::use_instead, + applicability = "machine-applicable", + code = "{correct}" + )] + Correctable { + #[primary_span] + span: Span, + invalid: String, + correct: String, + }, + #[label(parser::spaceship_operator_invalid)] + Spaceship(#[primary_span] Span), +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_logical_operator)] +#[note] +pub(crate) struct InvalidLogicalOperator { + 
#[primary_span] + pub span: Span, + pub incorrect: String, + #[subdiagnostic] + pub sub: InvalidLogicalOperatorSub, +} + +#[derive(SessionSubdiagnostic)] +pub(crate) enum InvalidLogicalOperatorSub { + #[suggestion_short( + parser::use_amp_amp_for_conjunction, + applicability = "machine-applicable", + code = "&&" + )] + Conjunction(#[primary_span] Span), + #[suggestion_short( + parser::use_pipe_pipe_for_disjunction, + applicability = "machine-applicable", + code = "||" + )] + Disjunction(#[primary_span] Span), +} + +#[derive(SessionDiagnostic)] +#[diag(parser::tilde_is_not_unary_operator)] +pub(crate) struct TildeAsUnaryOperator( + #[primary_span] + #[suggestion_short(applicability = "machine-applicable", code = "!")] + pub Span, +); + +#[derive(SessionDiagnostic)] +#[diag(parser::unexpected_token_after_not)] +pub(crate) struct NotAsNegationOperator { + #[primary_span] + pub negated: Span, + pub negated_desc: String, + #[suggestion_short(applicability = "machine-applicable", code = "!")] + pub not: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::malformed_loop_label)] +pub(crate) struct MalformedLoopLabel { + #[primary_span] + #[suggestion(applicability = "machine-applicable", code = "{correct_label}")] + pub span: Span, + pub correct_label: Ident, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::lifetime_in_borrow_expression)] +pub(crate) struct LifetimeInBorrowExpression { + #[primary_span] + pub span: Span, + #[suggestion(applicability = "machine-applicable", code = "")] + #[label] + pub lifetime_span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::field_expression_with_generic)] +pub(crate) struct FieldExpressionWithGeneric(#[primary_span] pub Span); + +#[derive(SessionDiagnostic)] +#[diag(parser::macro_invocation_with_qualified_path)] +pub(crate) struct MacroInvocationWithQualifiedPath(#[primary_span] pub Span); + +#[derive(SessionDiagnostic)] +#[diag(parser::unexpected_token_after_label)] +pub(crate) struct UnexpectedTokenAfterLabel( + #[primary_span] + #[label(parser::unexpected_token_after_label)] + pub Span, +); + +#[derive(SessionDiagnostic)] +#[diag(parser::require_colon_after_labeled_expression)] +#[note] +pub(crate) struct RequireColonAfterLabeledExpression { + #[primary_span] + pub span: Span, + #[label] + pub label: Span, + #[suggestion_short(applicability = "machine-applicable", code = ": ")] + pub label_end: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::do_catch_syntax_removed)] +#[note] +pub(crate) struct DoCatchSyntaxRemoved { + #[primary_span] + #[suggestion(applicability = "machine-applicable", code = "try")] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::float_literal_requires_integer_part)] +pub(crate) struct FloatLiteralRequiresIntegerPart { + #[primary_span] + #[suggestion(applicability = "machine-applicable", code = "{correct}")] + pub span: Span, + pub correct: String, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_int_literal_width)] +#[help] +pub(crate) struct InvalidIntLiteralWidth { + #[primary_span] + pub span: Span, + pub width: String, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_num_literal_base_prefix)] +#[note] +pub(crate) struct InvalidNumLiteralBasePrefix { + #[primary_span] + #[suggestion(applicability = "maybe-incorrect", code = "{fixed}")] + pub span: Span, + pub fixed: String, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_num_literal_suffix)] +#[help] +pub(crate) struct InvalidNumLiteralSuffix { + #[primary_span] + #[label] + pub span: Span, + pub suffix: 
String, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_float_literal_width)] +#[help] +pub(crate) struct InvalidFloatLiteralWidth { + #[primary_span] + pub span: Span, + pub width: String, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_float_literal_suffix)] +#[help] +pub(crate) struct InvalidFloatLiteralSuffix { + #[primary_span] + #[label] + pub span: Span, + pub suffix: String, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::int_literal_too_large)] +pub(crate) struct IntLiteralTooLarge { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::missing_semicolon_before_array)] +pub(crate) struct MissingSemicolonBeforeArray { + #[primary_span] + pub open_delim: Span, + #[suggestion_verbose(applicability = "maybe-incorrect", code = ";")] + pub semicolon: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::invalid_block_macro_segment)] +pub(crate) struct InvalidBlockMacroSegment { + #[primary_span] + pub span: Span, + #[label] + pub context: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::if_expression_missing_then_block)] +pub(crate) struct IfExpressionMissingThenBlock { + #[primary_span] + pub if_span: Span, + #[subdiagnostic] + pub sub: IfExpressionMissingThenBlockSub, +} + +#[derive(SessionSubdiagnostic)] +pub(crate) enum IfExpressionMissingThenBlockSub { + #[help(parser::condition_possibly_unfinished)] + UnfinishedCondition(#[primary_span] Span), + #[help(parser::add_then_block)] + AddThenBlock(#[primary_span] Span), +} + +#[derive(SessionDiagnostic)] +#[diag(parser::if_expression_missing_condition)] +pub(crate) struct IfExpressionMissingCondition { + #[primary_span] + #[label(parser::condition_label)] + pub if_span: Span, + #[label(parser::block_label)] + pub block_span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::expected_expression_found_let)] +pub(crate) struct ExpectedExpressionFoundLet { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::expected_else_block)] +pub(crate) struct ExpectedElseBlock { + #[primary_span] + pub first_tok_span: Span, + pub first_tok: String, + #[label] + pub else_span: Span, + #[suggestion(applicability = "maybe-incorrect", code = "if ")] + pub condition_start: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::outer_attribute_not_allowed_on_if_else)] +pub(crate) struct OuterAttributeNotAllowedOnIfElse { + #[primary_span] + pub last: Span, + + #[label(parser::branch_label)] + pub branch_span: Span, + + #[label(parser::ctx_label)] + pub ctx_span: Span, + pub ctx: String, + + #[suggestion(applicability = "machine-applicable", code = "")] + pub attributes: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::missing_in_in_for_loop)] +pub(crate) struct MissingInInForLoop { + #[primary_span] + pub span: Span, + #[subdiagnostic] + pub sub: MissingInInForLoopSub, +} + +#[derive(SessionSubdiagnostic)] +pub(crate) enum MissingInInForLoopSub { + // Has been misleading, at least in the past (closed Issue #48492), thus maybe-incorrect + #[suggestion_short(parser::use_in_not_of, applicability = "maybe-incorrect", code = "in")] + InNotOf(#[primary_span] Span), + #[suggestion_short(parser::add_in, applicability = "maybe-incorrect", code = " in ")] + AddIn(#[primary_span] Span), +} + +#[derive(SessionDiagnostic)] +#[diag(parser::missing_comma_after_match_arm)] +pub(crate) struct MissingCommaAfterMatchArm { + #[primary_span] + #[suggestion(applicability = "machine-applicable", code = ",")] + pub span: Span, +} + +#[derive(SessionDiagnostic)] 
+#[diag(parser::catch_after_try)] +#[help] +pub(crate) struct CatchAfterTry { + #[primary_span] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::comma_after_base_struct)] +#[note] +pub(crate) struct CommaAfterBaseStruct { + #[primary_span] + pub span: Span, + #[suggestion_short(applicability = "machine-applicable", code = "")] + pub comma: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::eq_field_init)] +pub(crate) struct EqFieldInit { + #[primary_span] + pub span: Span, + #[suggestion(applicability = "machine-applicable", code = ":")] + pub eq: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::dotdotdot)] +pub(crate) struct DotDotDot { + #[primary_span] + #[suggestion(parser::suggest_exclusive_range, applicability = "maybe-incorrect", code = "..")] + #[suggestion(parser::suggest_inclusive_range, applicability = "maybe-incorrect", code = "..=")] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::left_arrow_operator)] +pub(crate) struct LeftArrowOperator { + #[primary_span] + #[suggestion(applicability = "maybe-incorrect", code = "< -")] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::remove_let)] +pub(crate) struct RemoveLet { + #[primary_span] + #[suggestion(applicability = "machine-applicable", code = "")] + pub span: Span, +} + +#[derive(SessionDiagnostic)] +#[diag(parser::use_eq_instead)] +pub(crate) struct UseEqInstead { + #[primary_span] + #[suggestion_short(applicability = "machine-applicable", code = "=")] + pub span: Span, +} + // SnapshotParser is used to create a snapshot of the parser // without causing duplicate errors being emitted when the `Parser` // is dropped. @@ -387,7 +774,7 @@ impl<'a> Parser<'a> { /// This is to avoid losing unclosed delims errors `create_snapshot_for_diagnostic` clears. pub(super) fn restore_snapshot(&mut self, snapshot: SnapshotParser<'a>) { *self = snapshot.parser; - self.unclosed_delims.extend(snapshot.unclosed_delims.clone()); + self.unclosed_delims.extend(snapshot.unclosed_delims); } pub fn unclosed_delims(&self) -> &[UnmatchedBrace] { @@ -555,10 +942,12 @@ impl<'a> Parser<'a> { return Ok(true); } else if self.look_ahead(0, |t| { t == &token::CloseDelim(Delimiter::Brace) - || (t.can_begin_expr() && t != &token::Semi && t != &token::Pound) + || ((t.can_begin_expr() || t.can_begin_item()) + && t != &token::Semi + && t != &token::Pound) // Avoid triggering with too many trailing `#` in raw string. 
|| (sm.is_multiline( - self.prev_token.span.shrink_to_hi().until(self.token.span.shrink_to_lo()) + self.prev_token.span.shrink_to_hi().until(self.token.span.shrink_to_lo()), ) && t == &token::Pound) }) && !expected.contains(&TokenType::Token(token::Comma)) { @@ -576,6 +965,14 @@ impl<'a> Parser<'a> { } } + if self.token.kind == TokenKind::EqEq + && self.prev_token.is_ident() + && expected.iter().any(|tok| matches!(tok, TokenType::Token(TokenKind::Eq))) + { + // Likely typo: `=` → `==` in let expr or enum item + return Err(self.sess.create_err(UseEqInstead { span: self.token.span })); + } + let expect = tokens_to_string(&expected); let actual = super::token_descr(&self.token); let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 { @@ -590,7 +987,7 @@ impl<'a> Parser<'a> { ) } else if expected.is_empty() { ( - format!("unexpected token: {}", actual), + format!("unexpected token: {actual}"), (self.prev_token.span, "unexpected token after this".to_string()), ) } else { @@ -603,16 +1000,29 @@ impl<'a> Parser<'a> { let mut err = self.struct_span_err(self.token.span, &msg_exp); if let TokenKind::Ident(symbol, _) = &self.prev_token.kind { - if symbol.as_str() == "public" { + if ["def", "fun", "func", "function"].contains(&symbol.as_str()) { err.span_suggestion_short( self.prev_token.span, - "write `pub` instead of `public` to make the item public", - "pub", + &format!("write `fn` instead of `{symbol}` to declare a function"), + "fn", appl, ); } } + // `pub` may be used for an item or `pub(crate)` + if self.prev_token.is_ident_named(sym::public) + && (self.token.can_begin_item() + || self.token.kind == TokenKind::OpenDelim(Delimiter::Parenthesis)) + { + err.span_suggestion_short( + self.prev_token.span, + "write `pub` instead of `public` to make the item public", + "pub", + appl, + ); + } + // Add suggestion for a missing closing angle bracket if '>' is included in expected_tokens // there are unclosed angle brackets if self.unmatched_angle_bracket_count > 0 @@ -734,7 +1144,7 @@ impl<'a> Parser<'a> { let mut snapshot = self.create_snapshot_for_diagnostic(); let path = Path { segments: vec![], span: self.prev_token.span.shrink_to_lo(), tokens: None }; - let struct_expr = snapshot.parse_struct_expr(None, path, AttrVec::new(), false); + let struct_expr = snapshot.parse_struct_expr(None, path, false); let block_tail = self.parse_block_tail(lo, s, AttemptLocalParseRecovery::No); return Some(match (struct_expr, block_tail) { (Ok(expr), Err(mut err)) => { @@ -1188,8 +1598,7 @@ impl<'a> Parser<'a> { outer_op.node, ); - let mk_err_expr = - |this: &Self, span| Ok(Some(this.mk_expr(span, ExprKind::Err, AttrVec::new()))); + let mk_err_expr = |this: &Self, span| Ok(Some(this.mk_expr(span, ExprKind::Err))); match inner_op.kind { ExprKind::Binary(op, ref l1, ref r1) if op.node.is_comparison() => { @@ -1497,7 +1906,7 @@ impl<'a> Parser<'a> { MultiSugg { msg: format!("use `{}= 1` instead", kind.op.chr()), patches: vec![ - (pre_span, format!("{{ let {} = ", tmp_var)), + (pre_span, format!("{{ let {tmp_var} = ")), (post_span, format!("; {} {}= 1; {} }}", base_src, kind.op.chr(), tmp_var)), ], applicability: Applicability::HasPlaceholders, @@ -1647,7 +2056,6 @@ impl<'a> Parser<'a> { &mut self, lo: Span, await_sp: Span, - attrs: AttrVec, ) -> PResult<'a, P> { let (hi, expr, is_question) = if self.token == token::Not { // Handle `await!()`. 
@@ -1662,7 +2070,7 @@ impl<'a> Parser<'a> { ExprKind::Try(_) => ExprKind::Err, _ => ExprKind::Await(expr), }; - let expr = self.mk_expr(lo.to(sp), kind, attrs); + let expr = self.mk_expr(lo.to(sp), kind); self.maybe_recover_from_bad_qpath(expr) } @@ -1680,7 +2088,7 @@ impl<'a> Parser<'a> { // Handle `await { }`. // This needs to be handled separately from the next arm to avoid // interpreting `await { }?` as `?.await`. - self.parse_block_expr(None, self.token.span, BlockCheckMode::Default, AttrVec::new()) + self.parse_block_expr(None, self.token.span, BlockCheckMode::Default) } else { self.parse_expr() } @@ -1823,7 +2231,7 @@ impl<'a> Parser<'a> { err.emit(); // Recover from parse error, callers expect the closing delim to be consumed. self.consume_block(delim, ConsumeClosingDelim::Yes); - self.mk_expr(lo.to(self.prev_token.span), ExprKind::Err, AttrVec::new()) + self.mk_expr(lo.to(self.prev_token.span), ExprKind::Err) } } } @@ -2334,7 +2742,7 @@ impl<'a> Parser<'a> { fn recover_const_param_decl(&mut self, ty_generics: Option<&Generics>) -> Option { let snapshot = self.create_snapshot_for_diagnostic(); - let param = match self.parse_const_param(vec![]) { + let param = match self.parse_const_param(AttrVec::new()) { Ok(param) => param, Err(err) => { err.cancel(); @@ -2577,7 +2985,7 @@ impl<'a> Parser<'a> { } _ => {} }, - PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None) => { + PatKind::Ident(BindingAnnotation::NONE, ident, None) => { match &first_pat.kind { PatKind::Ident(_, old_ident, _) => { let path = PatKind::Path( diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 0719a0ef0..725768c1f 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -1,4 +1,14 @@ -use super::diagnostics::SnapshotParser; +use super::diagnostics::{ + CatchAfterTry, CommaAfterBaseStruct, DoCatchSyntaxRemoved, DotDotDot, EqFieldInit, + ExpectedElseBlock, ExpectedExpressionFoundLet, FieldExpressionWithGeneric, + FloatLiteralRequiresIntegerPart, IfExpressionMissingCondition, IfExpressionMissingThenBlock, + IfExpressionMissingThenBlockSub, InvalidBlockMacroSegment, InvalidComparisonOperator, + InvalidComparisonOperatorSub, InvalidLogicalOperator, InvalidLogicalOperatorSub, + LeftArrowOperator, LifetimeInBorrowExpression, MacroInvocationWithQualifiedPath, + MalformedLoopLabel, MissingInInForLoop, MissingInInForLoopSub, MissingSemicolonBeforeArray, + NotAsNegationOperator, OuterAttributeNotAllowedOnIfElse, RequireColonAfterLabeledExpression, + SnapshotParser, TildeAsUnaryOperator, UnexpectedTokenAfterLabel, +}; use super::pat::{CommaRecoveryMode, RecoverColon, RecoverComma, PARAM_EXPECTED}; use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign}; use super::{ @@ -6,6 +16,11 @@ use super::{ SemiColonMode, SeqSep, TokenExpectType, TokenType, TrailingToken, }; use crate::maybe_recover_from_interpolated_ty_qpath; +use crate::parser::diagnostics::{ + IntLiteralTooLarge, InvalidFloatLiteralSuffix, InvalidFloatLiteralWidth, + InvalidIntLiteralWidth, InvalidNumLiteralBasePrefix, InvalidNumLiteralSuffix, + MissingCommaAfterMatchArm, +}; use core::mem; use rustc_ast::ptr::P; @@ -20,10 +35,10 @@ use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits}; use rustc_ast::{ClosureBinder, StmtKind}; use rustc_ast_pretty::pprust; -use rustc_data_structures::thin_vec::ThinVec; -use rustc_errors::{Applicability, 
Diagnostic, DiagnosticBuilder, ErrorGuaranteed, PResult}; +use rustc_errors::{Applicability, Diagnostic, PResult}; use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP; use rustc_session::lint::BuiltinLintDiagnostics; +use rustc_session::SessionDiagnostic; use rustc_span::source_map::{self, Span, Spanned}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{BytePos, Pos}; @@ -45,20 +60,12 @@ macro_rules! maybe_whole_expr { token::NtPath(path) => { let path = (**path).clone(); $p.bump(); - return Ok($p.mk_expr( - $p.prev_token.span, - ExprKind::Path(None, path), - AttrVec::new(), - )); + return Ok($p.mk_expr($p.prev_token.span, ExprKind::Path(None, path))); } token::NtBlock(block) => { let block = block.clone(); $p.bump(); - return Ok($p.mk_expr( - $p.prev_token.span, - ExprKind::Block(block, None), - AttrVec::new(), - )); + return Ok($p.mk_expr($p.prev_token.span, ExprKind::Block(block, None))); } _ => {} }; @@ -120,7 +127,7 @@ impl<'a> Parser<'a> { // Special-case handling of `foo(_, _, _)` err.emit(); self.bump(); - Ok(self.mk_expr(self.prev_token.span, ExprKind::Err, AttrVec::new())) + Ok(self.mk_expr(self.prev_token.span, ExprKind::Err)) } _ => Err(err), }, @@ -225,15 +232,18 @@ impl<'a> Parser<'a> { AssocOp::Equal => "==", AssocOp::NotEqual => "!=", _ => unreachable!(), - }; - self.struct_span_err(sp, &format!("invalid comparison operator `{sugg}=`")) - .span_suggestion_short( - sp, - &format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg), - sugg, - Applicability::MachineApplicable, - ) - .emit(); + } + .into(); + let invalid = format!("{}=", &sugg); + self.sess.emit_err(InvalidComparisonOperator { + span: sp, + invalid: invalid.clone(), + sub: InvalidComparisonOperatorSub::Correctable { + span: sp, + invalid, + correct: sugg, + }, + }); self.bump(); } @@ -243,14 +253,15 @@ impl<'a> Parser<'a> { && self.prev_token.span.hi() == self.token.span.lo() { let sp = op.span.to(self.token.span); - self.struct_span_err(sp, "invalid comparison operator `<>`") - .span_suggestion_short( - sp, - "`<>` is not a valid comparison operator, use `!=`", - "!=", - Applicability::MachineApplicable, - ) - .emit(); + self.sess.emit_err(InvalidComparisonOperator { + span: sp, + invalid: "<>".into(), + sub: InvalidComparisonOperatorSub::Correctable { + span: sp, + invalid: "<>".into(), + correct: "!=".into(), + }, + }); self.bump(); } @@ -260,12 +271,11 @@ impl<'a> Parser<'a> { && self.prev_token.span.hi() == self.token.span.lo() { let sp = op.span.to(self.token.span); - self.struct_span_err(sp, "invalid comparison operator `<=>`") - .span_label( - sp, - "`<=>` is not a valid comparison operator, use `std::cmp::Ordering`", - ) - .emit(); + self.sess.emit_err(InvalidComparisonOperator { + span: sp, + invalid: "<=>".into(), + sub: InvalidComparisonOperatorSub::Spaceship(sp), + }); self.bump(); } @@ -329,11 +339,9 @@ impl<'a> Parser<'a> { | AssocOp::GreaterEqual => { let ast_op = op.to_ast_binop().unwrap(); let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); - self.mk_expr(span, binary, AttrVec::new()) - } - AssocOp::Assign => { - self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span), AttrVec::new()) + self.mk_expr(span, binary) } + AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span)), AssocOp::AssignOp(k) => { let aop = match k { token::Plus => BinOpKind::Add, @@ -348,7 +356,7 @@ impl<'a> Parser<'a> { token::Shr => BinOpKind::Shr, }; let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); 
- self.mk_expr(span, aopexpr, AttrVec::new()) + self.mk_expr(span, aopexpr) } AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => { self.span_bug(span, "AssocOp should have been handled by special case") @@ -441,11 +449,19 @@ impl<'a> Parser<'a> { } (Some(op), _) => (op, self.token.span), (None, Some((Ident { name: sym::and, span }, false))) => { - self.error_bad_logical_op("and", "&&", "conjunction"); + self.sess.emit_err(InvalidLogicalOperator { + span: self.token.span, + incorrect: "and".into(), + sub: InvalidLogicalOperatorSub::Conjunction(self.token.span), + }); (AssocOp::LAnd, span) } (None, Some((Ident { name: sym::or, span }, false))) => { - self.error_bad_logical_op("or", "||", "disjunction"); + self.sess.emit_err(InvalidLogicalOperator { + span: self.token.span, + incorrect: "or".into(), + sub: InvalidLogicalOperatorSub::Disjunction(self.token.span), + }); (AssocOp::LOr, span) } _ => return None, @@ -453,19 +469,6 @@ impl<'a> Parser<'a> { Some(source_map::respan(span, op)) } - /// Error on `and` and `or` suggesting `&&` and `||` respectively. - fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) { - self.struct_span_err(self.token.span, &format!("`{bad}` is not a logical operator")) - .span_suggestion_short( - self.token.span, - &format!("use `{good}` to perform logical {english}"), - good, - Applicability::MachineApplicable, - ) - .note("unlike in e.g., python and PHP, `&&` and `||` are used for logical operators") - .emit(); - } - /// Checks if this expression is a successfully parsed statement. fn expr_is_complete(&self, e: &Expr) -> bool { self.restrictions.contains(Restrictions::STMT_EXPR) @@ -491,7 +494,7 @@ impl<'a> Parser<'a> { let limits = if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let range = self.mk_range(Some(lhs), rhs, limits); - Ok(self.mk_expr(span, range, AttrVec::new())) + Ok(self.mk_expr(span, range)) } fn is_at_start_of_range_notation_rhs(&self) -> bool { @@ -540,7 +543,7 @@ impl<'a> Parser<'a> { (lo, None) }; let range = this.mk_range(None, opt_end, limits); - Ok(this.mk_expr(span, range, attrs.into())) + Ok(this.mk_expr_with_attrs(span, range, attrs)) }) } @@ -553,7 +556,7 @@ impl<'a> Parser<'a> { ($this:ident, $attrs:expr, |this, _| $body:expr) => { $this.collect_tokens_for_expr($attrs, |$this, attrs| { let (hi, ex) = $body?; - Ok($this.mk_expr(lo.to(hi), ex, attrs.into())) + Ok($this.mk_expr_with_attrs(lo.to(hi), ex, attrs)) }) }; } @@ -630,14 +633,7 @@ impl<'a> Parser<'a> { // Recover on `!` suggesting for bitwise negation instead. fn recover_tilde_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> { - self.struct_span_err(lo, "`~` cannot be used as a unary operator") - .span_suggestion_short( - lo, - "use `!` to perform bitwise not", - "!", - Applicability::MachineApplicable, - ) - .emit(); + self.sess.emit_err(TildeAsUnaryOperator(lo)); self.parse_unary_expr(lo, UnOp::Not) } @@ -663,20 +659,14 @@ impl<'a> Parser<'a> { /// Recover on `not expr` in favor of `!expr`. fn recover_not_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> { // Emit the error... 
- let not_token = self.look_ahead(1, |t| t.clone()); - self.struct_span_err( - not_token.span, - &format!("unexpected {} after identifier", super::token_descr(¬_token)), - ) - .span_suggestion_short( + let negated_token = self.look_ahead(1, |t| t.clone()); + self.sess.emit_err(NotAsNegationOperator { + negated: negated_token.span, + negated_desc: super::token_descr(&negated_token), // Span the `not` plus trailing whitespace to avoid // trailing whitespace after the `!` in our suggestion - self.sess.source_map().span_until_non_whitespace(lo.to(not_token.span)), - "use `!` to perform logical negation", - "!", - Applicability::MachineApplicable, - ) - .emit(); + not: self.sess.source_map().span_until_non_whitespace(lo.to(negated_token.span)), + }); // ...and recover! self.parse_unary_expr(lo, UnOp::Not) @@ -705,11 +695,7 @@ impl<'a> Parser<'a> { expr_kind: fn(P, P) -> ExprKind, ) -> PResult<'a, P> { let mk_expr = |this: &mut Self, lhs: P, rhs: P| { - this.mk_expr( - this.mk_expr_sp(&lhs, lhs_span, rhs.span), - expr_kind(lhs, rhs), - AttrVec::new(), - ) + this.mk_expr(this.mk_expr_sp(&lhs, lhs_span, rhs.span), expr_kind(lhs, rhs)) }; // Save the state of the parser before parsing type normally, in case there is a @@ -737,17 +723,13 @@ impl<'a> Parser<'a> { segments[0].ident.span, ), }; - match self.parse_labeled_expr(label, AttrVec::new(), false) { + match self.parse_labeled_expr(label, false) { Ok(expr) => { type_err.cancel(); - self.struct_span_err(label.ident.span, "malformed loop label") - .span_suggestion( - label.ident.span, - "use the correct loop label format", - label.ident, - Applicability::MachineApplicable, - ) - .emit(); + self.sess.emit_err(MalformedLoopLabel { + span: label.ident.span, + correct_label: label.ident, + }); return Ok(expr); } Err(err) => { @@ -859,7 +841,7 @@ impl<'a> Parser<'a> { ); let mut err = self.struct_span_err(span, &msg); - let suggest_parens = |err: &mut DiagnosticBuilder<'_, _>| { + let suggest_parens = |err: &mut Diagnostic| { let suggestions = vec![ (span.shrink_to_lo(), "(".to_string()), (span.shrink_to_hi(), ")".to_string()), @@ -925,15 +907,7 @@ impl<'a> Parser<'a> { } fn error_remove_borrow_lifetime(&self, span: Span, lt_span: Span) { - self.struct_span_err(span, "borrow expressions cannot be annotated with lifetimes") - .span_label(lt_span, "annotated with lifetime here") - .span_suggestion( - lt_span, - "remove the lifetime annotation", - "", - Applicability::MachineApplicable, - ) - .emit(); + self.sess.emit_err(LifetimeInBorrowExpression { span, lifetime_span: lt_span }); } /// Parse `mut?` or `raw [ const | mut ]`. @@ -965,18 +939,23 @@ impl<'a> Parser<'a> { &mut self, e0: P, lo: Span, - mut attrs: Vec, + mut attrs: ast::AttrVec, ) -> PResult<'a, P> { // Stitch the list of outer attributes onto the return value. 
// A little bit ugly, but the best way given the current code // structure - self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| { - expr.map(|mut expr| { - attrs.extend::>(expr.attrs.into()); - expr.attrs = attrs.into(); - expr + let res = self.parse_dot_or_call_expr_with_(e0, lo); + if attrs.is_empty() { + res + } else { + res.map(|expr| { + expr.map(|mut expr| { + attrs.extend(expr.attrs); + expr.attrs = attrs; + expr + }) }) - }) + } } fn parse_dot_or_call_expr_with_(&mut self, mut e: P, lo: Span) -> PResult<'a, P> { @@ -990,7 +969,7 @@ impl<'a> Parser<'a> { }; if has_question { // `expr?` - e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e), AttrVec::new()); + e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e)); continue; } let has_dot = if self.prev_token.kind == TokenKind::Ident(kw::Return, false) { @@ -1168,7 +1147,7 @@ impl<'a> Parser<'a> { let span = self.prev_token.span; let field = ExprKind::Field(base, Ident::new(field, span)); self.expect_no_suffix(span, "a tuple index", suffix); - self.mk_expr(lo.to(span), field, AttrVec::new()) + self.mk_expr(lo.to(span), field) } /// Parse a function call expression, `expr(...)`. @@ -1182,9 +1161,9 @@ impl<'a> Parser<'a> { }; let open_paren = self.token.span; - let mut seq = self.parse_paren_expr_seq().map(|args| { - self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args), AttrVec::new()) - }); + let mut seq = self + .parse_paren_expr_seq() + .map(|args| self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args))); if let Some(expr) = self.maybe_recover_struct_lit_bad_delims(lo, open_paren, &mut seq, snapshot) { @@ -1258,10 +1237,13 @@ impl<'a> Parser<'a> { /// Parse an indexing expression `expr[...]`. fn parse_index_expr(&mut self, lo: Span, base: P) -> PResult<'a, P> { + let prev_span = self.prev_token.span; + let open_delim_span = self.token.span; self.bump(); // `[` let index = self.parse_expr()?; + self.suggest_missing_semicolon_before_array(prev_span, open_delim_span)?; self.expect(&token::CloseDelim(Delimiter::Bracket))?; - Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index), AttrVec::new())) + Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index))) } /// Assuming we have just parsed `.`, continue parsing into an expression. @@ -1282,19 +1264,15 @@ impl<'a> Parser<'a> { let fn_span = fn_span_lo.to(self.prev_token.span); let span = lo.to(self.prev_token.span); - Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span), AttrVec::new())) + Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span))) } else { // Field access `expr.f` if let Some(args) = segment.args { - self.struct_span_err( - args.span(), - "field expressions cannot have generic arguments", - ) - .emit(); + self.sess.emit_err(FieldExpressionWithGeneric(args.span())); } let span = lo.to(self.prev_token.span); - Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), AttrVec::new())) + Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident))) } } @@ -1309,10 +1287,6 @@ impl<'a> Parser<'a> { // Outer attributes are already parsed and will be // added to the return value after the fact. - // - // Therefore, prevent sub-parser from parsing - // attributes by giving them an empty "already-parsed" list. - let attrs = AttrVec::new(); // Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`. 
let lo = self.token.span; @@ -1320,13 +1294,13 @@ impl<'a> Parser<'a> { // This match arm is a special-case of the `_` match arm below and // could be removed without changing functionality, but it's faster // to have it here, especially for programs with large constants. - self.parse_lit_expr(attrs) + self.parse_lit_expr() } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) { - self.parse_tuple_parens_expr(attrs) + self.parse_tuple_parens_expr() } else if self.check(&token::OpenDelim(Delimiter::Brace)) { - self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs) + self.parse_block_expr(None, lo, BlockCheckMode::Default) } else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) { - self.parse_closure_expr(attrs).map_err(|mut err| { + self.parse_closure_expr().map_err(|mut err| { // If the input is something like `if a { 1 } else { 2 } | if a { 3 } else { 4 }` // then suggest parens around the lhs. if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&lo) { @@ -1335,65 +1309,66 @@ impl<'a> Parser<'a> { err }) } else if self.check(&token::OpenDelim(Delimiter::Bracket)) { - self.parse_array_or_repeat_expr(attrs, Delimiter::Bracket) + self.parse_array_or_repeat_expr(Delimiter::Bracket) } else if self.check_path() { - self.parse_path_start_expr(attrs) + self.parse_path_start_expr() } else if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) { - self.parse_closure_expr(attrs) + self.parse_closure_expr() } else if self.eat_keyword(kw::If) { - self.parse_if_expr(attrs) + self.parse_if_expr() } else if self.check_keyword(kw::For) { if self.choose_generics_over_qpath(1) { - self.parse_closure_expr(attrs) + self.parse_closure_expr() } else { assert!(self.eat_keyword(kw::For)); - self.parse_for_expr(None, self.prev_token.span, attrs) + self.parse_for_expr(None, self.prev_token.span) } } else if self.eat_keyword(kw::While) { - self.parse_while_expr(None, self.prev_token.span, attrs) + self.parse_while_expr(None, self.prev_token.span) } else if let Some(label) = self.eat_label() { - self.parse_labeled_expr(label, attrs, true) + self.parse_labeled_expr(label, true) } else if self.eat_keyword(kw::Loop) { let sp = self.prev_token.span; - self.parse_loop_expr(None, self.prev_token.span, attrs).map_err(|mut err| { + self.parse_loop_expr(None, self.prev_token.span).map_err(|mut err| { err.span_label(sp, "while parsing this `loop` expression"); err }) } else if self.eat_keyword(kw::Continue) { let kind = ExprKind::Continue(self.eat_label()); - Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs)) + Ok(self.mk_expr(lo.to(self.prev_token.span), kind)) } else if self.eat_keyword(kw::Match) { let match_sp = self.prev_token.span; - self.parse_match_expr(attrs).map_err(|mut err| { + self.parse_match_expr().map_err(|mut err| { err.span_label(match_sp, "while parsing this `match` expression"); err }) } else if self.eat_keyword(kw::Unsafe) { let sp = self.prev_token.span; - self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs) - .map_err(|mut err| { + self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided)).map_err( + |mut err| { err.span_label(sp, "while parsing this `unsafe` expression"); err - }) + }, + ) } else if self.check_inline_const(0) { self.parse_const_block(lo.to(self.token.span), false) } else if self.is_do_catch_block() { - self.recover_do_catch(attrs) + self.recover_do_catch() } else if self.is_try_block() { self.expect_keyword(kw::Try)?; - self.parse_try_block(lo, attrs) + 
self.parse_try_block(lo) } else if self.eat_keyword(kw::Return) { - self.parse_return_expr(attrs) + self.parse_return_expr() } else if self.eat_keyword(kw::Break) { - self.parse_break_expr(attrs) + self.parse_break_expr() } else if self.eat_keyword(kw::Yield) { - self.parse_yield_expr(attrs) + self.parse_yield_expr() } else if self.is_do_yeet() { - self.parse_yeet_expr(attrs) + self.parse_yeet_expr() } else if self.check_keyword(kw::Let) { - self.parse_let_expr(attrs) + self.parse_let_expr() } else if self.eat_keyword(kw::Underscore) { - Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs)) + Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore)) } else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) { // Don't complain about bare semicolons after unclosed braces // recovery in order to keep the error count down. Fixing the @@ -1412,32 +1387,32 @@ impl<'a> Parser<'a> { if self.check_keyword(kw::Async) { if self.is_async_block() { // Check for `async {` and `async move {`. - self.parse_async_block(attrs) + self.parse_async_block() } else { - self.parse_closure_expr(attrs) + self.parse_closure_expr() } } else if self.eat_keyword(kw::Await) { - self.recover_incorrect_await_syntax(lo, self.prev_token.span, attrs) + self.recover_incorrect_await_syntax(lo, self.prev_token.span) } else { - self.parse_lit_expr(attrs) + self.parse_lit_expr() } } else { - self.parse_lit_expr(attrs) + self.parse_lit_expr() } } - fn parse_lit_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_lit_expr(&mut self) -> PResult<'a, P> { let lo = self.token.span; match self.parse_opt_lit() { Some(literal) => { - let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs); + let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal)); self.maybe_recover_from_bad_qpath(expr) } None => self.try_macro_suggestion(), } } - fn parse_tuple_parens_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_tuple_parens_expr(&mut self) -> PResult<'a, P> { let lo = self.token.span; self.expect(&token::OpenDelim(Delimiter::Parenthesis))?; let (es, trailing_comma) = match self.parse_seq_to_end( @@ -1457,15 +1432,11 @@ impl<'a> Parser<'a> { // `(e,)` is a tuple with only one field, `e`. ExprKind::Tup(es) }; - let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs); + let expr = self.mk_expr(lo.to(self.prev_token.span), kind); self.maybe_recover_from_bad_qpath(expr) } - fn parse_array_or_repeat_expr( - &mut self, - attrs: AttrVec, - close_delim: Delimiter, - ) -> PResult<'a, P> { + fn parse_array_or_repeat_expr(&mut self, close_delim: Delimiter) -> PResult<'a, P> { let lo = self.token.span; self.bump(); // `[` or other open delim @@ -1494,45 +1465,42 @@ impl<'a> Parser<'a> { ExprKind::Array(vec![first_expr]) } }; - let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs); + let expr = self.mk_expr(lo.to(self.prev_token.span), kind); self.maybe_recover_from_bad_qpath(expr) } - fn parse_path_start_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_path_start_expr(&mut self) -> PResult<'a, P> { let (qself, path) = if self.eat_lt() { let (qself, path) = self.parse_qpath(PathStyle::Expr)?; (Some(qself), path) } else { (None, self.parse_path(PathStyle::Expr)?) }; - let lo = path.span; // `!`, as an operator, is prefix, so we know this isn't that. 
- let (hi, kind) = if self.eat(&token::Not) { + let (span, kind) = if self.eat(&token::Not) { // MACRO INVOCATION expression if qself.is_some() { - self.struct_span_err(path.span, "macros cannot use qualified paths").emit(); + self.sess.emit_err(MacroInvocationWithQualifiedPath(path.span)); } - let mac = MacCall { + let lo = path.span; + let mac = P(MacCall { path, args: self.parse_mac_args()?, prior_type_ascription: self.last_type_ascription, - }; - (self.prev_token.span, ExprKind::MacCall(mac)) - } else if self.check(&token::OpenDelim(Delimiter::Brace)) { - if let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path, &attrs) { + }); + (lo.to(self.prev_token.span), ExprKind::MacCall(mac)) + } else if self.check(&token::OpenDelim(Delimiter::Brace)) && + let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path) { if qself.is_some() { self.sess.gated_spans.gate(sym::more_qualified_paths, path.span); } return expr; - } else { - (path.span, ExprKind::Path(qself, path)) - } } else { (path.span, ExprKind::Path(qself, path)) }; - let expr = self.mk_expr(lo.to(hi), kind, attrs); + let expr = self.mk_expr(span, kind); self.maybe_recover_from_bad_qpath(expr) } @@ -1540,31 +1508,30 @@ impl<'a> Parser<'a> { fn parse_labeled_expr( &mut self, label: Label, - attrs: AttrVec, mut consume_colon: bool, ) -> PResult<'a, P> { let lo = label.ident.span; let label = Some(label); let ate_colon = self.eat(&token::Colon); let expr = if self.eat_keyword(kw::While) { - self.parse_while_expr(label, lo, attrs) + self.parse_while_expr(label, lo) } else if self.eat_keyword(kw::For) { - self.parse_for_expr(label, lo, attrs) + self.parse_for_expr(label, lo) } else if self.eat_keyword(kw::Loop) { - self.parse_loop_expr(label, lo, attrs) + self.parse_loop_expr(label, lo) } else if self.check_noexpect(&token::OpenDelim(Delimiter::Brace)) || self.token.is_whole_block() { - self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs) + self.parse_block_expr(label, lo, BlockCheckMode::Default) } else if !ate_colon && (self.check_noexpect(&TokenKind::Comma) || self.check_noexpect(&TokenKind::Gt)) { // We're probably inside of a `Path<'a>` that needs a turbofish - let msg = "expected `while`, `for`, `loop` or `{` after a label"; - self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit(); + self.sess.emit_err(UnexpectedTokenAfterLabel(self.token.span)); consume_colon = false; Ok(self.mk_expr_err(lo)) } else { + // FIXME: use UnexpectedTokenAfterLabel, needs multipart suggestions let msg = "expected `while`, `for`, `loop` or `{` after a label"; let mut err = self.struct_span_err(self.token.span, msg); @@ -1618,10 +1585,10 @@ impl<'a> Parser<'a> { Applicability::MachineApplicable, ); - // Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to supress future errors about `break 'label`. + // Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to suppress future errors about `break 'label`. 
let stmt = self.mk_stmt(span, StmtKind::Expr(expr)); let blk = self.mk_block(vec![stmt], BlockCheckMode::Default, span); - self.mk_expr(span, ExprKind::Block(blk, label), ThinVec::new()) + self.mk_expr(span, ExprKind::Block(blk, label)) }); err.emit(); @@ -1629,44 +1596,27 @@ impl<'a> Parser<'a> { }?; if !ate_colon && consume_colon { - self.error_labeled_expr_must_be_followed_by_colon(lo, expr.span); + self.sess.emit_err(RequireColonAfterLabeledExpression { + span: expr.span, + label: lo, + label_end: lo.shrink_to_hi(), + }); } Ok(expr) } - fn error_labeled_expr_must_be_followed_by_colon(&self, lo: Span, span: Span) { - self.struct_span_err(span, "labeled expression must be followed by `:`") - .span_label(lo, "the label") - .span_suggestion_short( - lo.shrink_to_hi(), - "add `:` after the label", - ": ", - Applicability::MachineApplicable, - ) - .note("labels are used before loops and blocks, allowing e.g., `break 'label` to them") - .emit(); - } - /// Recover on the syntax `do catch { ... }` suggesting `try { ... }` instead. - fn recover_do_catch(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn recover_do_catch(&mut self) -> PResult<'a, P> { let lo = self.token.span; self.bump(); // `do` self.bump(); // `catch` - let span_dc = lo.to(self.prev_token.span); - self.struct_span_err(span_dc, "found removed `do catch` syntax") - .span_suggestion( - span_dc, - "replace with the new syntax", - "try", - Applicability::MachineApplicable, - ) - .note("following RFC #2388, the new non-placeholder syntax is `try`") - .emit(); + let span = lo.to(self.prev_token.span); + self.sess.emit_err(DoCatchSyntaxRemoved { span }); - self.parse_try_block(lo, attrs) + self.parse_try_block(lo) } /// Parse an expression if the token can begin one. @@ -1675,15 +1625,15 @@ impl<'a> Parser<'a> { } /// Parse `"return" expr?`. - fn parse_return_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_return_expr(&mut self) -> PResult<'a, P> { let lo = self.prev_token.span; let kind = ExprKind::Ret(self.parse_expr_opt()?); - let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs); + let expr = self.mk_expr(lo.to(self.prev_token.span), kind); self.maybe_recover_from_bad_qpath(expr) } /// Parse `"do" "yeet" expr?`. - fn parse_yeet_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_yeet_expr(&mut self) -> PResult<'a, P> { let lo = self.token.span; self.bump(); // `do` @@ -1693,7 +1643,7 @@ impl<'a> Parser<'a> { let span = lo.to(self.prev_token.span); self.sess.gated_spans.gate(sym::yeet_expr, span); - let expr = self.mk_expr(span, kind, attrs); + let expr = self.mk_expr(span, kind); self.maybe_recover_from_bad_qpath(expr) } @@ -1705,13 +1655,13 @@ impl<'a> Parser<'a> { /// `break 'lbl: loop {}`); a labeled break with an unlabeled loop as its value /// expression only gets a warning for compatibility reasons; and a labeled break /// with a labeled loop does not even get a warning because there is no ambiguity. 
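// Illustrative sketch, not part of the upstream patch, of the three situations the
// doc comment above distinguishes (diagnostic wording paraphrased):
//
//     loop { break 'l: loop { break 'l 42; }; }
//         // ambiguous: `'l` could label the break or the value loop (issue #86948);
//         // the parser requires parentheses: `break ('l: loop { break 'l 42; })`
//
//     'l: loop { break 'l loop { break 42; }; }
//         // labeled break whose value is an unlabeled loop: accepted, but only
//         // warned about, for compatibility reasons
//
//     'l: loop { break 'l 'inner: loop { break 'inner 42; }; }
//         // labeled break whose value is a labeled loop: unambiguous, no warning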
- fn parse_break_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_break_expr(&mut self) -> PResult<'a, P> { let lo = self.prev_token.span; let mut label = self.eat_label(); let kind = if label.is_some() && self.token == token::Colon { // The value expression can be a labeled loop, see issue #86948, e.g.: // `loop { break 'label: loop { break 'label 42; }; }` - let lexpr = self.parse_labeled_expr(label.take().unwrap(), AttrVec::new(), true)?; + let lexpr = self.parse_labeled_expr(label.take().unwrap(), true)?; self.struct_span_err( lexpr.span, "parentheses are required around this expression to avoid confusion with a labeled break expression", @@ -1753,17 +1703,17 @@ impl<'a> Parser<'a> { } else { None }; - let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind), attrs); + let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind)); self.maybe_recover_from_bad_qpath(expr) } /// Parse `"yield" expr?`. - fn parse_yield_expr(&mut self, attrs: AttrVec) -> PResult<'a, P> { + fn parse_yield_expr(&mut self) -> PResult<'a, P> { let lo = self.prev_token.span; let kind = ExprKind::Yield(self.parse_expr_opt()?); let span = lo.to(self.prev_token.span); self.sess.gated_spans.gate(sym::generators, span); - let expr = self.mk_expr(span, kind, attrs); + let expr = self.mk_expr(span, kind); self.maybe_recover_from_bad_qpath(expr) } @@ -1775,8 +1725,8 @@ impl<'a> Parser<'a> { Some(lit) => match lit.kind { ast::LitKind::Str(symbol_unescaped, style) => Ok(ast::StrLit { style, - symbol: lit.token.symbol, - suffix: lit.token.suffix, + symbol: lit.token_lit.symbol, + suffix: lit.token_lit.suffix, span: lit.span, symbol_unescaped, }), @@ -1853,20 +1803,16 @@ impl<'a> Parser<'a> { let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None); let symbol = Symbol::intern(&suffixless_lit.to_string()); let lit = token::Lit::new(token::Err, symbol, lit.suffix); - Some(Lit::from_lit_token(lit, span).unwrap_or_else(|_| unreachable!())) + Some(Lit::from_token_lit(lit, span).unwrap_or_else(|_| unreachable!())) } } } fn error_float_lits_must_have_int_part(&self, token: &Token) { - self.struct_span_err(token.span, "float literals must have an integer part") - .span_suggestion( - token.span, - "must have an integer part", - pprust::token_to_string(token), - Applicability::MachineApplicable, - ) - .emit(); + self.sess.emit_err(FloatLiteralRequiresIntegerPart { + span: token.span, + correct: pprust::token_to_string(token).into_owned(), + }); } fn report_lit_error(&self, err: LitError, lit: token::Lit, span: Span) { @@ -1908,28 +1854,11 @@ impl<'a> Parser<'a> { let suf = suf.as_str(); if looks_like_width_suffix(&['i', 'u'], &suf) { // If it looks like a width, try to be helpful. 
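// Illustrative inputs, not part of the upstream patch, that should exercise the
// literal-suffix diagnostics being converted to diagnostic structs below (the exact
// messages are the ones in the deleted `struct_span_err` calls):
//
//     let a = 1u7;     // width-like integer suffix -> "invalid width" help (8, 16, 32, 64, 128)
//     let b = 0XFF;    // uppercase base prefix     -> suggestion to write `0xff`
//     let c = 1.0f16;  // width-like float suffix   -> "invalid width" help (32, 64)
//     let d = 1.0moo;  // any other float suffix    -> "invalid suffix" error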
- let msg = format!("invalid width `{}` for integer literal", &suf[1..]); - self.struct_span_err(span, &msg) - .help("valid widths are 8, 16, 32, 64 and 128") - .emit(); + self.sess.emit_err(InvalidIntLiteralWidth { span, width: suf[1..].into() }); } else if let Some(fixed) = fix_base_capitalisation(suf) { - let msg = "invalid base prefix for number literal"; - - self.struct_span_err(span, msg) - .note("base prefixes (`0xff`, `0b1010`, `0o755`) are lowercase") - .span_suggestion( - span, - "try making the prefix lowercase", - fixed, - Applicability::MaybeIncorrect, - ) - .emit(); + self.sess.emit_err(InvalidNumLiteralBasePrefix { span, fixed }); } else { - let msg = format!("invalid suffix `{suf}` for number literal"); - self.struct_span_err(span, &msg) - .span_label(span, format!("invalid suffix `{suf}`")) - .help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)") - .emit(); + self.sess.emit_err(InvalidNumLiteralSuffix { span, suffix: suf.to_string() }); } } LitError::InvalidFloatSuffix => { @@ -1937,14 +1866,10 @@ impl<'a> Parser<'a> { let suf = suf.as_str(); if looks_like_width_suffix(&['f'], suf) { // If it looks like a width, try to be helpful. - let msg = format!("invalid width `{}` for float literal", &suf[1..]); - self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit(); + self.sess + .emit_err(InvalidFloatLiteralWidth { span, width: suf[1..].to_string() }); } else { - let msg = format!("invalid suffix `{suf}` for float literal"); - self.struct_span_err(span, &msg) - .span_label(span, format!("invalid suffix `{suf}`")) - .help("valid suffixes are `f32` and `f64`") - .emit(); + self.sess.emit_err(InvalidFloatLiteralSuffix { span, suffix: suf.to_string() }); } } LitError::NonDecimalFloat(base) => { @@ -1959,7 +1884,7 @@ impl<'a> Parser<'a> { .emit(); } LitError::IntTooLarge => { - self.struct_span_err(span, "integer literal is too large").emit(); + self.sess.emit_err(IntLiteralTooLarge { span }); } } } @@ -2007,14 +1932,10 @@ impl<'a> Parser<'a> { let lo = self.token.span; let minus_present = self.eat(&token::BinOp(token::Minus)); let lit = self.parse_lit()?; - let expr = self.mk_expr(lit.span, ExprKind::Lit(lit), AttrVec::new()); + let expr = self.mk_expr(lit.span, ExprKind::Lit(lit)); if minus_present { - Ok(self.mk_expr( - lo.to(self.prev_token.span), - self.mk_unary(UnOp::Neg, expr), - AttrVec::new(), - )) + Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_unary(UnOp::Neg, expr))) } else { Ok(expr) } @@ -2029,13 +1950,9 @@ impl<'a> Parser<'a> { /// Emits a suggestion if it looks like the user meant an array but /// accidentally used braces, causing the code to be interpreted as a block /// expression. 
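// Illustrative sketch, not part of the upstream patch: the recovery documented above
// fires on input such as
//
//     let not_an_array = {1, 2, 3};
//
// which parses as a block expression. The parser re-parses the braced body as an
// array on a snapshot and, if that succeeds, emits "this is a block expression,
// not an array" together with a suggestion to write
//
//     let not_an_array = [1, 2, 3];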
- fn maybe_suggest_brackets_instead_of_braces( - &mut self, - lo: Span, - attrs: AttrVec, - ) -> Option> { + fn maybe_suggest_brackets_instead_of_braces(&mut self, lo: Span) -> Option> { let mut snapshot = self.create_snapshot_for_diagnostic(); - match snapshot.parse_array_or_repeat_expr(attrs, Delimiter::Brace) { + match snapshot.parse_array_or_repeat_expr(Delimiter::Brace) { Ok(arr) => { let hi = snapshot.prev_token.span; self.struct_span_err(arr.span, "this is a block expression, not an array") @@ -2056,43 +1973,76 @@ impl<'a> Parser<'a> { } } + fn suggest_missing_semicolon_before_array( + &self, + prev_span: Span, + open_delim_span: Span, + ) -> PResult<'a, ()> { + if self.token.kind == token::Comma { + if !self.sess.source_map().is_multiline(prev_span.until(self.token.span)) { + return Ok(()); + } + let mut snapshot = self.create_snapshot_for_diagnostic(); + snapshot.bump(); + match snapshot.parse_seq_to_before_end( + &token::CloseDelim(Delimiter::Bracket), + SeqSep::trailing_allowed(token::Comma), + |p| p.parse_expr(), + ) { + Ok(_) + // When the close delim is `)`, `token.kind` is expected to be `token::CloseDelim(Delimiter::Parenthesis)`, + // but the actual `token.kind` is `token::CloseDelim(Delimiter::Bracket)`. + // This is because the `token.kind` of the close delim is treated as the same as + // that of the open delim in `TokenTreesReader::parse_token_tree`, even if the delimiters of them are different. + // Therefore, `token.kind` should not be compared here. + if snapshot + .span_to_snippet(snapshot.token.span) + .map_or(false, |snippet| snippet == "]") => + { + return Err(MissingSemicolonBeforeArray { + open_delim: open_delim_span, + semicolon: prev_span.shrink_to_hi(), + }.into_diagnostic(&self.sess.span_diagnostic)); + } + Ok(_) => (), + Err(err) => err.cancel(), + } + } + Ok(()) + } + /// Parses a block or unsafe block. pub(super) fn parse_block_expr( &mut self, opt_label: Option