From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 14:02:58 +0200 Subject: Adding upstream version 1.64.0+dfsg1. Signed-off-by: Daniel Baumann --- compiler/rustc_ast_lowering/Cargo.toml | 23 + compiler/rustc_ast_lowering/src/asm.rs | 485 ++++ compiler/rustc_ast_lowering/src/block.rs | 122 + compiler/rustc_ast_lowering/src/expr.rs | 1914 +++++++++++++++ compiler/rustc_ast_lowering/src/index.rs | 346 +++ compiler/rustc_ast_lowering/src/item.rs | 1513 ++++++++++++ compiler/rustc_ast_lowering/src/lib.rs | 2501 ++++++++++++++++++++ .../rustc_ast_lowering/src/lifetime_collector.rs | 115 + compiler/rustc_ast_lowering/src/pat.rs | 350 +++ compiler/rustc_ast_lowering/src/path.rs | 406 ++++ 10 files changed, 7775 insertions(+) create mode 100644 compiler/rustc_ast_lowering/Cargo.toml create mode 100644 compiler/rustc_ast_lowering/src/asm.rs create mode 100644 compiler/rustc_ast_lowering/src/block.rs create mode 100644 compiler/rustc_ast_lowering/src/expr.rs create mode 100644 compiler/rustc_ast_lowering/src/index.rs create mode 100644 compiler/rustc_ast_lowering/src/item.rs create mode 100644 compiler/rustc_ast_lowering/src/lib.rs create mode 100644 compiler/rustc_ast_lowering/src/lifetime_collector.rs create mode 100644 compiler/rustc_ast_lowering/src/pat.rs create mode 100644 compiler/rustc_ast_lowering/src/path.rs (limited to 'compiler/rustc_ast_lowering') diff --git a/compiler/rustc_ast_lowering/Cargo.toml b/compiler/rustc_ast_lowering/Cargo.toml new file mode 100644 index 000000000..39ba62ef2 --- /dev/null +++ b/compiler/rustc_ast_lowering/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "rustc_ast_lowering" +version = "0.0.0" +edition = "2021" + +[lib] +doctest = false + +[dependencies] +rustc_arena = { path = "../rustc_arena" } +tracing = "0.1" +rustc_ast_pretty = { path = "../rustc_ast_pretty" } +rustc_hir = { path = "../rustc_hir" } +rustc_target = { path = "../rustc_target" } +rustc_data_structures = { path = "../rustc_data_structures" } +rustc_index = { path = "../rustc_index" } +rustc_middle = { path = "../rustc_middle" } +rustc_query_system = { path = "../rustc_query_system" } +rustc_span = { path = "../rustc_span" } +rustc_errors = { path = "../rustc_errors" } +rustc_session = { path = "../rustc_session" } +rustc_ast = { path = "../rustc_ast" } +smallvec = { version = "1.8.1", features = ["union", "may_dangle"] } diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs new file mode 100644 index 000000000..4166b4fc2 --- /dev/null +++ b/compiler/rustc_ast_lowering/src/asm.rs @@ -0,0 +1,485 @@ +use crate::{ImplTraitContext, ImplTraitPosition, ParamMode, ResolverAstLoweringExt}; + +use super::LoweringContext; + +use rustc_ast::ptr::P; +use rustc_ast::*; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_hir::def::{DefKind, Res}; +use rustc_hir::definitions::DefPathData; +use rustc_session::parse::feature_err; +use rustc_span::{sym, Span}; +use rustc_target::asm; +use std::collections::hash_map::Entry; +use std::fmt::Write; + +impl<'a, 'hir> LoweringContext<'a, 'hir> { + pub(crate) fn lower_inline_asm( + &mut self, + sp: Span, + asm: &InlineAsm, + ) -> &'hir hir::InlineAsm<'hir> { + // Rustdoc needs to support asm! from foreign architectures: don't try + // lowering the register constraints in this case. 
+ let asm_arch = + if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch }; + if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc { + struct_span_err!( + self.tcx.sess, + sp, + E0472, + "inline assembly is unsupported on this target" + ) + .emit(); + } + if let Some(asm_arch) = asm_arch { + // Inline assembly is currently only stable for these architectures. + let is_stable = matches!( + asm_arch, + asm::InlineAsmArch::X86 + | asm::InlineAsmArch::X86_64 + | asm::InlineAsmArch::Arm + | asm::InlineAsmArch::AArch64 + | asm::InlineAsmArch::RiscV32 + | asm::InlineAsmArch::RiscV64 + ); + if !is_stable && !self.tcx.features().asm_experimental_arch { + feature_err( + &self.tcx.sess.parse_sess, + sym::asm_experimental_arch, + sp, + "inline assembly is not stable yet on this architecture", + ) + .emit(); + } + } + if asm.options.contains(InlineAsmOptions::ATT_SYNTAX) + && !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64)) + && !self.tcx.sess.opts.actually_rustdoc + { + self.tcx + .sess + .struct_span_err(sp, "the `att_syntax` option is only supported on x86") + .emit(); + } + if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind { + feature_err( + &self.tcx.sess.parse_sess, + sym::asm_unwind, + sp, + "the `may_unwind` option is unstable", + ) + .emit(); + } + + let mut clobber_abis = FxHashMap::default(); + if let Some(asm_arch) = asm_arch { + for (abi_name, abi_span) in &asm.clobber_abis { + match asm::InlineAsmClobberAbi::parse(asm_arch, &self.tcx.sess.target, *abi_name) { + Ok(abi) => { + // If the abi was already in the list, emit an error + match clobber_abis.get(&abi) { + Some((prev_name, prev_sp)) => { + let mut err = self.tcx.sess.struct_span_err( + *abi_span, + &format!("`{}` ABI specified multiple times", prev_name), + ); + err.span_label(*prev_sp, "previously specified here"); + + // Multiple different abi names may actually be the same ABI + // If the specified ABIs are not the same name, alert the user that they resolve to the same ABI + let source_map = self.tcx.sess.source_map(); + if source_map.span_to_snippet(*prev_sp) + != source_map.span_to_snippet(*abi_span) + { + err.note("these ABIs are equivalent on the current target"); + } + + err.emit(); + } + None => { + clobber_abis.insert(abi, (abi_name, *abi_span)); + } + } + } + Err(&[]) => { + self.tcx + .sess + .struct_span_err( + *abi_span, + "`clobber_abi` is not supported on this target", + ) + .emit(); + } + Err(supported_abis) => { + let mut err = self + .tcx + .sess + .struct_span_err(*abi_span, "invalid ABI for `clobber_abi`"); + let mut abis = format!("`{}`", supported_abis[0]); + for m in &supported_abis[1..] { + let _ = write!(abis, ", `{}`", m); + } + err.note(&format!( + "the following ABIs are supported on this target: {}", + abis + )); + err.emit(); + } + } + } + } + + // Lower operands to HIR. We use dummy register classes if an error + // occurs during lowering because we still need to be able to produce a + // valid HIR. 
+ let sess = self.tcx.sess; + let mut operands: Vec<_> = asm + .operands + .iter() + .map(|(op, op_sp)| { + let lower_reg = |reg| match reg { + InlineAsmRegOrRegClass::Reg(s) => { + asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch { + asm::InlineAsmReg::parse(asm_arch, s).unwrap_or_else(|e| { + let msg = format!("invalid register `{}`: {}", s, e); + sess.struct_span_err(*op_sp, &msg).emit(); + asm::InlineAsmReg::Err + }) + } else { + asm::InlineAsmReg::Err + }) + } + InlineAsmRegOrRegClass::RegClass(s) => { + asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch { + asm::InlineAsmRegClass::parse(asm_arch, s).unwrap_or_else(|e| { + let msg = format!("invalid register class `{}`: {}", s, e); + sess.struct_span_err(*op_sp, &msg).emit(); + asm::InlineAsmRegClass::Err + }) + } else { + asm::InlineAsmRegClass::Err + }) + } + }; + + let op = match *op { + InlineAsmOperand::In { reg, ref expr } => hir::InlineAsmOperand::In { + reg: lower_reg(reg), + expr: self.lower_expr_mut(expr), + }, + InlineAsmOperand::Out { reg, late, ref expr } => hir::InlineAsmOperand::Out { + reg: lower_reg(reg), + late, + expr: expr.as_ref().map(|expr| self.lower_expr_mut(expr)), + }, + InlineAsmOperand::InOut { reg, late, ref expr } => { + hir::InlineAsmOperand::InOut { + reg: lower_reg(reg), + late, + expr: self.lower_expr_mut(expr), + } + } + InlineAsmOperand::SplitInOut { reg, late, ref in_expr, ref out_expr } => { + hir::InlineAsmOperand::SplitInOut { + reg: lower_reg(reg), + late, + in_expr: self.lower_expr_mut(in_expr), + out_expr: out_expr.as_ref().map(|expr| self.lower_expr_mut(expr)), + } + } + InlineAsmOperand::Const { ref anon_const } => { + if !self.tcx.features().asm_const { + feature_err( + &sess.parse_sess, + sym::asm_const, + *op_sp, + "const operands for inline assembly are unstable", + ) + .emit(); + } + hir::InlineAsmOperand::Const { + anon_const: self.lower_anon_const(anon_const), + } + } + InlineAsmOperand::Sym { ref sym } => { + if !self.tcx.features().asm_sym { + feature_err( + &sess.parse_sess, + sym::asm_sym, + *op_sp, + "sym operands for inline assembly are unstable", + ) + .emit(); + } + + let static_def_id = self + .resolver + .get_partial_res(sym.id) + .filter(|res| res.unresolved_segments() == 0) + .and_then(|res| { + if let Res::Def(DefKind::Static(_), def_id) = res.base_res() { + Some(def_id) + } else { + None + } + }); + + if let Some(def_id) = static_def_id { + let path = self.lower_qpath( + sym.id, + &sym.qself, + &sym.path, + ParamMode::Optional, + ImplTraitContext::Disallowed(ImplTraitPosition::Path), + ); + hir::InlineAsmOperand::SymStatic { path, def_id } + } else { + // Replace the InlineAsmSym AST node with an + // Expr using the name node id. + let expr = Expr { + id: sym.id, + kind: ExprKind::Path(sym.qself.clone(), sym.path.clone()), + span: *op_sp, + attrs: AttrVec::new(), + tokens: None, + }; + + // Wrap the expression in an AnonConst. 
+ let parent_def_id = self.current_hir_id_owner; + let node_id = self.next_node_id(); + self.create_def(parent_def_id, node_id, DefPathData::AnonConst); + let anon_const = AnonConst { id: node_id, value: P(expr) }; + hir::InlineAsmOperand::SymFn { + anon_const: self.lower_anon_const(&anon_const), + } + } + } + }; + (op, self.lower_span(*op_sp)) + }) + .collect(); + + // Validate template modifiers against the register classes for the operands + for p in &asm.template { + if let InlineAsmTemplatePiece::Placeholder { + operand_idx, + modifier: Some(modifier), + span: placeholder_span, + } = *p + { + let op_sp = asm.operands[operand_idx].1; + match &operands[operand_idx].0 { + hir::InlineAsmOperand::In { reg, .. } + | hir::InlineAsmOperand::Out { reg, .. } + | hir::InlineAsmOperand::InOut { reg, .. } + | hir::InlineAsmOperand::SplitInOut { reg, .. } => { + let class = reg.reg_class(); + if class == asm::InlineAsmRegClass::Err { + continue; + } + let valid_modifiers = class.valid_modifiers(asm_arch.unwrap()); + if !valid_modifiers.contains(&modifier) { + let mut err = sess.struct_span_err( + placeholder_span, + "invalid asm template modifier for this register class", + ); + err.span_label(placeholder_span, "template modifier"); + err.span_label(op_sp, "argument"); + if !valid_modifiers.is_empty() { + let mut mods = format!("`{}`", valid_modifiers[0]); + for m in &valid_modifiers[1..] { + let _ = write!(mods, ", `{}`", m); + } + err.note(&format!( + "the `{}` register class supports \ + the following template modifiers: {}", + class.name(), + mods + )); + } else { + err.note(&format!( + "the `{}` register class does not support template modifiers", + class.name() + )); + } + err.emit(); + } + } + hir::InlineAsmOperand::Const { .. } => { + let mut err = sess.struct_span_err( + placeholder_span, + "asm template modifiers are not allowed for `const` arguments", + ); + err.span_label(placeholder_span, "template modifier"); + err.span_label(op_sp, "argument"); + err.emit(); + } + hir::InlineAsmOperand::SymFn { .. } + | hir::InlineAsmOperand::SymStatic { .. } => { + let mut err = sess.struct_span_err( + placeholder_span, + "asm template modifiers are not allowed for `sym` arguments", + ); + err.span_label(placeholder_span, "template modifier"); + err.span_label(op_sp, "argument"); + err.emit(); + } + } + } + } + + let mut used_input_regs = FxHashMap::default(); + let mut used_output_regs = FxHashMap::default(); + + for (idx, &(ref op, op_sp)) in operands.iter().enumerate() { + if let Some(reg) = op.reg() { + let reg_class = reg.reg_class(); + if reg_class == asm::InlineAsmRegClass::Err { + continue; + } + + // Some register classes can only be used as clobbers. This + // means that we disallow passing a value in/out of the asm and + // require that the operand name an explicit register, not a + // register class. + if reg_class.is_clobber_only(asm_arch.unwrap()) && !op.is_clobber() { + let msg = format!( + "register class `{}` can only be used as a clobber, \ + not as an input or output", + reg_class.name() + ); + sess.struct_span_err(op_sp, &msg).emit(); + continue; + } + + // Check for conflicts between explicit register operands. + if let asm::InlineAsmRegOrRegClass::Reg(reg) = reg { + let (input, output) = match op { + hir::InlineAsmOperand::In { .. } => (true, false), + + // Late output do not conflict with inputs, but normal outputs do + hir::InlineAsmOperand::Out { late, .. } => (!late, true), + + hir::InlineAsmOperand::InOut { .. } + | hir::InlineAsmOperand::SplitInOut { .. 
} => (true, true), + + hir::InlineAsmOperand::Const { .. } + | hir::InlineAsmOperand::SymFn { .. } + | hir::InlineAsmOperand::SymStatic { .. } => { + unreachable!() + } + }; + + // Flag to output the error only once per operand + let mut skip = false; + reg.overlapping_regs(|r| { + let mut check = |used_regs: &mut FxHashMap, + input| { + match used_regs.entry(r) { + Entry::Occupied(o) => { + if skip { + return; + } + skip = true; + + let idx2 = *o.get(); + let &(ref op2, op_sp2) = &operands[idx2]; + let Some(asm::InlineAsmRegOrRegClass::Reg(reg2)) = op2.reg() else { + unreachable!(); + }; + + let msg = format!( + "register `{}` conflicts with register `{}`", + reg.name(), + reg2.name() + ); + let mut err = sess.struct_span_err(op_sp, &msg); + err.span_label(op_sp, &format!("register `{}`", reg.name())); + err.span_label(op_sp2, &format!("register `{}`", reg2.name())); + + match (op, op2) { + ( + hir::InlineAsmOperand::In { .. }, + hir::InlineAsmOperand::Out { late, .. }, + ) + | ( + hir::InlineAsmOperand::Out { late, .. }, + hir::InlineAsmOperand::In { .. }, + ) => { + assert!(!*late); + let out_op_sp = if input { op_sp2 } else { op_sp }; + let msg = "use `lateout` instead of \ + `out` to avoid conflict"; + err.span_help(out_op_sp, msg); + } + _ => {} + } + + err.emit(); + } + Entry::Vacant(v) => { + if r == reg { + v.insert(idx); + } + } + } + }; + if input { + check(&mut used_input_regs, true); + } + if output { + check(&mut used_output_regs, false); + } + }); + } + } + } + + // If a clobber_abi is specified, add the necessary clobbers to the + // operands list. + let mut clobbered = FxHashSet::default(); + for (abi, (_, abi_span)) in clobber_abis { + for &clobber in abi.clobbered_regs() { + // Don't emit a clobber for a register already clobbered + if clobbered.contains(&clobber) { + continue; + } + + let mut output_used = false; + clobber.overlapping_regs(|reg| { + if used_output_regs.contains_key(®) { + output_used = true; + } + }); + + if !output_used { + operands.push(( + hir::InlineAsmOperand::Out { + reg: asm::InlineAsmRegOrRegClass::Reg(clobber), + late: true, + expr: None, + }, + self.lower_span(abi_span), + )); + clobbered.insert(clobber); + } + } + } + + let operands = self.arena.alloc_from_iter(operands); + let template = self.arena.alloc_from_iter(asm.template.iter().cloned()); + let template_strs = self.arena.alloc_from_iter( + asm.template_strs + .iter() + .map(|(sym, snippet, span)| (*sym, *snippet, self.lower_span(*span))), + ); + let line_spans = + self.arena.alloc_from_iter(asm.line_spans.iter().map(|span| self.lower_span(*span))); + let hir_asm = + hir::InlineAsm { template, template_strs, operands, options: asm.options, line_spans }; + self.arena.alloc(hir_asm) + } +} diff --git a/compiler/rustc_ast_lowering/src/block.rs b/compiler/rustc_ast_lowering/src/block.rs new file mode 100644 index 000000000..7cbfe143b --- /dev/null +++ b/compiler/rustc_ast_lowering/src/block.rs @@ -0,0 +1,122 @@ +use crate::{ImplTraitContext, ImplTraitPosition, LoweringContext}; +use rustc_ast::{Block, BlockCheckMode, Local, LocalKind, Stmt, StmtKind}; +use rustc_hir as hir; +use rustc_session::parse::feature_err; +use rustc_span::sym; + +use smallvec::SmallVec; + +impl<'a, 'hir> LoweringContext<'a, 'hir> { + pub(super) fn lower_block( + &mut self, + b: &Block, + targeted_by_break: bool, + ) -> &'hir hir::Block<'hir> { + self.arena.alloc(self.lower_block_noalloc(b, targeted_by_break)) + } + + pub(super) fn lower_block_noalloc( + &mut self, + b: &Block, + targeted_by_break: bool, + ) -> 
hir::Block<'hir> { + let (stmts, expr) = self.lower_stmts(&b.stmts); + let rules = self.lower_block_check_mode(&b.rules); + let hir_id = self.lower_node_id(b.id); + hir::Block { hir_id, stmts, expr, rules, span: self.lower_span(b.span), targeted_by_break } + } + + fn lower_stmts( + &mut self, + mut ast_stmts: &[Stmt], + ) -> (&'hir [hir::Stmt<'hir>], Option<&'hir hir::Expr<'hir>>) { + let mut stmts = SmallVec::<[hir::Stmt<'hir>; 8]>::new(); + let mut expr = None; + while let [s, tail @ ..] = ast_stmts { + match s.kind { + StmtKind::Local(ref local) => { + let hir_id = self.lower_node_id(s.id); + let local = self.lower_local(local); + self.alias_attrs(hir_id, local.hir_id); + let kind = hir::StmtKind::Local(local); + let span = self.lower_span(s.span); + stmts.push(hir::Stmt { hir_id, kind, span }); + } + StmtKind::Item(ref it) => { + stmts.extend(self.lower_item_ref(it).into_iter().enumerate().map( + |(i, item_id)| { + let hir_id = match i { + 0 => self.lower_node_id(s.id), + _ => self.next_id(), + }; + let kind = hir::StmtKind::Item(item_id); + let span = self.lower_span(s.span); + hir::Stmt { hir_id, kind, span } + }, + )); + } + StmtKind::Expr(ref e) => { + let e = self.lower_expr(e); + if tail.is_empty() { + expr = Some(e); + } else { + let hir_id = self.lower_node_id(s.id); + self.alias_attrs(hir_id, e.hir_id); + let kind = hir::StmtKind::Expr(e); + let span = self.lower_span(s.span); + stmts.push(hir::Stmt { hir_id, kind, span }); + } + } + StmtKind::Semi(ref e) => { + let e = self.lower_expr(e); + let hir_id = self.lower_node_id(s.id); + self.alias_attrs(hir_id, e.hir_id); + let kind = hir::StmtKind::Semi(e); + let span = self.lower_span(s.span); + stmts.push(hir::Stmt { hir_id, kind, span }); + } + StmtKind::Empty => {} + StmtKind::MacCall(..) 
=> panic!("shouldn't exist here"), + } + ast_stmts = &ast_stmts[1..]; + } + (self.arena.alloc_from_iter(stmts), expr) + } + + fn lower_local(&mut self, l: &Local) -> &'hir hir::Local<'hir> { + let ty = l + .ty + .as_ref() + .map(|t| self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Variable))); + let init = l.kind.init().map(|init| self.lower_expr(init)); + let hir_id = self.lower_node_id(l.id); + let pat = self.lower_pat(&l.pat); + let els = if let LocalKind::InitElse(_, els) = &l.kind { + if !self.tcx.features().let_else { + feature_err( + &self.tcx.sess.parse_sess, + sym::let_else, + l.span, + "`let...else` statements are unstable", + ) + .emit(); + } + Some(self.lower_block(els, false)) + } else { + None + }; + let span = self.lower_span(l.span); + let source = hir::LocalSource::Normal; + self.lower_attrs(hir_id, &l.attrs); + self.arena.alloc(hir::Local { hir_id, ty, pat, init, els, span, source }) + } + + fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode { + match *b { + BlockCheckMode::Default => hir::BlockCheckMode::DefaultBlock, + BlockCheckMode::Unsafe(u) => { + hir::BlockCheckMode::UnsafeBlock(self.lower_unsafe_source(u)) + } + } + } +} diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs new file mode 100644 index 000000000..fb6715ff1 --- /dev/null +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -0,0 +1,1914 @@ +use super::ResolverAstLoweringExt; +use super::{ImplTraitContext, LoweringContext, ParamMode, ParenthesizedGenericArgs}; +use crate::{FnDeclKind, ImplTraitPosition}; + +use rustc_ast::attr; +use rustc_ast::ptr::P as AstP; +use rustc_ast::*; +use rustc_data_structures::stack::ensure_sufficient_stack; +use rustc_data_structures::thin_vec::ThinVec; +use rustc_errors::struct_span_err; +use rustc_hir as hir; +use rustc_hir::def::Res; +use rustc_hir::definitions::DefPathData; +use rustc_span::source_map::{respan, DesugaringKind, Span, Spanned}; +use rustc_span::symbol::{sym, Ident}; +use rustc_span::DUMMY_SP; + +impl<'hir> LoweringContext<'_, 'hir> { + fn lower_exprs(&mut self, exprs: &[AstP]) -> &'hir [hir::Expr<'hir>] { + self.arena.alloc_from_iter(exprs.iter().map(|x| self.lower_expr_mut(x))) + } + + pub(super) fn lower_expr(&mut self, e: &Expr) -> &'hir hir::Expr<'hir> { + self.arena.alloc(self.lower_expr_mut(e)) + } + + pub(super) fn lower_expr_mut(&mut self, e: &Expr) -> hir::Expr<'hir> { + ensure_sufficient_stack(|| { + let kind = match e.kind { + ExprKind::Box(ref inner) => hir::ExprKind::Box(self.lower_expr(inner)), + ExprKind::Array(ref exprs) => hir::ExprKind::Array(self.lower_exprs(exprs)), + ExprKind::ConstBlock(ref anon_const) => { + let anon_const = self.lower_anon_const(anon_const); + hir::ExprKind::ConstBlock(anon_const) + } + ExprKind::Repeat(ref expr, ref count) => { + let expr = self.lower_expr(expr); + let count = self.lower_array_length(count); + hir::ExprKind::Repeat(expr, count) + } + ExprKind::Tup(ref elts) => hir::ExprKind::Tup(self.lower_exprs(elts)), + ExprKind::Call(ref f, ref args) => { + if e.attrs.get(0).map_or(false, |a| a.has_name(sym::rustc_box)) { + if let [inner] = &args[..] 
&& e.attrs.len() == 1 { + let kind = hir::ExprKind::Box(self.lower_expr(&inner)); + let hir_id = self.lower_node_id(e.id); + return hir::Expr { hir_id, kind, span: self.lower_span(e.span) }; + } else { + self.tcx.sess + .struct_span_err( + e.span, + "#[rustc_box] requires precisely one argument \ + and no other attributes are allowed", + ) + .emit(); + hir::ExprKind::Err + } + } else if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) { + self.lower_legacy_const_generics((**f).clone(), args.clone(), &legacy_args) + } else { + let f = self.lower_expr(f); + hir::ExprKind::Call(f, self.lower_exprs(args)) + } + } + ExprKind::MethodCall(ref seg, ref args, span) => { + let hir_seg = self.arena.alloc(self.lower_path_segment( + e.span, + seg, + ParamMode::Optional, + ParenthesizedGenericArgs::Err, + ImplTraitContext::Disallowed(ImplTraitPosition::Path), + )); + let args = self.lower_exprs(args); + hir::ExprKind::MethodCall(hir_seg, args, self.lower_span(span)) + } + ExprKind::Binary(binop, ref lhs, ref rhs) => { + let binop = self.lower_binop(binop); + let lhs = self.lower_expr(lhs); + let rhs = self.lower_expr(rhs); + hir::ExprKind::Binary(binop, lhs, rhs) + } + ExprKind::Unary(op, ref ohs) => { + let op = self.lower_unop(op); + let ohs = self.lower_expr(ohs); + hir::ExprKind::Unary(op, ohs) + } + ExprKind::Lit(ref l) => { + hir::ExprKind::Lit(respan(self.lower_span(l.span), l.kind.clone())) + } + ExprKind::Cast(ref expr, ref ty) => { + let expr = self.lower_expr(expr); + let ty = + self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)); + hir::ExprKind::Cast(expr, ty) + } + ExprKind::Type(ref expr, ref ty) => { + let expr = self.lower_expr(expr); + let ty = + self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)); + hir::ExprKind::Type(expr, ty) + } + ExprKind::AddrOf(k, m, ref ohs) => { + let ohs = self.lower_expr(ohs); + hir::ExprKind::AddrOf(k, m, ohs) + } + ExprKind::Let(ref pat, ref scrutinee, span) => { + hir::ExprKind::Let(self.arena.alloc(hir::Let { + hir_id: self.next_id(), + span: self.lower_span(span), + pat: self.lower_pat(pat), + ty: None, + init: self.lower_expr(scrutinee), + })) + } + ExprKind::If(ref cond, ref then, ref else_opt) => { + self.lower_expr_if(cond, then, else_opt.as_deref()) + } + ExprKind::While(ref cond, ref body, opt_label) => { + self.with_loop_scope(e.id, |this| { + let span = + this.mark_span_with_reason(DesugaringKind::WhileLoop, e.span, None); + this.lower_expr_while_in_loop_scope(span, cond, body, opt_label) + }) + } + ExprKind::Loop(ref body, opt_label) => self.with_loop_scope(e.id, |this| { + hir::ExprKind::Loop( + this.lower_block(body, false), + this.lower_label(opt_label), + hir::LoopSource::Loop, + DUMMY_SP, + ) + }), + ExprKind::TryBlock(ref body) => self.lower_expr_try_block(body), + ExprKind::Match(ref expr, ref arms) => hir::ExprKind::Match( + self.lower_expr(expr), + self.arena.alloc_from_iter(arms.iter().map(|x| self.lower_arm(x))), + hir::MatchSource::Normal, + ), + ExprKind::Async(capture_clause, closure_node_id, ref block) => self + .make_async_expr( + capture_clause, + closure_node_id, + None, + block.span, + hir::AsyncGeneratorKind::Block, + |this| this.with_new_scopes(|this| this.lower_block_expr(block)), + ), + ExprKind::Await(ref expr) => { + let span = if expr.span.hi() < e.span.hi() { + expr.span.shrink_to_hi().with_hi(e.span.hi()) + } else { + // this is a recovered `await expr` + e.span + }; + self.lower_expr_await(span, expr) + } + ExprKind::Closure( + ref binder, + 
capture_clause, + asyncness, + movability, + ref decl, + ref body, + fn_decl_span, + ) => { + if let Async::Yes { closure_id, .. } = asyncness { + self.lower_expr_async_closure( + binder, + capture_clause, + e.id, + closure_id, + decl, + body, + fn_decl_span, + ) + } else { + self.lower_expr_closure( + binder, + capture_clause, + e.id, + movability, + decl, + body, + fn_decl_span, + ) + } + } + ExprKind::Block(ref blk, opt_label) => { + let opt_label = self.lower_label(opt_label); + hir::ExprKind::Block(self.lower_block(blk, opt_label.is_some()), opt_label) + } + ExprKind::Assign(ref el, ref er, span) => { + self.lower_expr_assign(el, er, span, e.span) + } + ExprKind::AssignOp(op, ref el, ref er) => hir::ExprKind::AssignOp( + self.lower_binop(op), + self.lower_expr(el), + self.lower_expr(er), + ), + ExprKind::Field(ref el, ident) => { + hir::ExprKind::Field(self.lower_expr(el), self.lower_ident(ident)) + } + ExprKind::Index(ref el, ref er) => { + hir::ExprKind::Index(self.lower_expr(el), self.lower_expr(er)) + } + ExprKind::Range(Some(ref e1), Some(ref e2), RangeLimits::Closed) => { + self.lower_expr_range_closed(e.span, e1, e2) + } + ExprKind::Range(ref e1, ref e2, lims) => { + self.lower_expr_range(e.span, e1.as_deref(), e2.as_deref(), lims) + } + ExprKind::Underscore => { + self.tcx + .sess.struct_span_err( + e.span, + "in expressions, `_` can only be used on the left-hand side of an assignment", + ) + .span_label(e.span, "`_` not allowed here") + .emit(); + hir::ExprKind::Err + } + ExprKind::Path(ref qself, ref path) => { + let qpath = self.lower_qpath( + e.id, + qself, + path, + ParamMode::Optional, + ImplTraitContext::Disallowed(ImplTraitPosition::Path), + ); + hir::ExprKind::Path(qpath) + } + ExprKind::Break(opt_label, ref opt_expr) => { + let opt_expr = opt_expr.as_ref().map(|x| self.lower_expr(x)); + hir::ExprKind::Break(self.lower_jump_destination(e.id, opt_label), opt_expr) + } + ExprKind::Continue(opt_label) => { + hir::ExprKind::Continue(self.lower_jump_destination(e.id, opt_label)) + } + ExprKind::Ret(ref e) => { + let e = e.as_ref().map(|x| self.lower_expr(x)); + hir::ExprKind::Ret(e) + } + ExprKind::Yeet(ref sub_expr) => self.lower_expr_yeet(e.span, sub_expr.as_deref()), + ExprKind::InlineAsm(ref asm) => { + hir::ExprKind::InlineAsm(self.lower_inline_asm(e.span, asm)) + } + ExprKind::Struct(ref se) => { + let rest = match &se.rest { + StructRest::Base(e) => Some(self.lower_expr(e)), + StructRest::Rest(sp) => { + self.tcx + .sess + .struct_span_err(*sp, "base expression required after `..`") + .span_label(*sp, "add a base expression here") + .emit(); + Some(&*self.arena.alloc(self.expr_err(*sp))) + } + StructRest::None => None, + }; + hir::ExprKind::Struct( + self.arena.alloc(self.lower_qpath( + e.id, + &se.qself, + &se.path, + ParamMode::Optional, + ImplTraitContext::Disallowed(ImplTraitPosition::Path), + )), + self.arena + .alloc_from_iter(se.fields.iter().map(|x| self.lower_expr_field(x))), + rest, + ) + } + ExprKind::Yield(ref opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()), + ExprKind::Err => hir::ExprKind::Err, + ExprKind::Try(ref sub_expr) => self.lower_expr_try(e.span, sub_expr), + ExprKind::Paren(ref ex) => { + let mut ex = self.lower_expr_mut(ex); + // Include parens in span, but only if it is a super-span. + if e.span.contains(ex.span) { + ex.span = self.lower_span(e.span); + } + // Merge attributes into the inner expression. 
+                    if !e.attrs.is_empty() {
+                        let old_attrs =
+                            self.attrs.get(&ex.hir_id.local_id).map(|la| *la).unwrap_or(&[]);
+                        self.attrs.insert(
+                            ex.hir_id.local_id,
+                            &*self.arena.alloc_from_iter(
+                                e.attrs
+                                    .iter()
+                                    .map(|a| self.lower_attr(a))
+                                    .chain(old_attrs.iter().cloned()),
+                            ),
+                        );
+                    }
+                    return ex;
+                }
+
+                // Desugar `ExprForLoop`
+                // from: `[opt_ident]: for <pat> in <head> <body>`
+                ExprKind::ForLoop(ref pat, ref head, ref body, opt_label) => {
+                    return self.lower_expr_for(e, pat, head, body, opt_label);
+                }
+                ExprKind::MacCall(_) => panic!("{:?} shouldn't exist here", e.span),
+            };
+
+            let hir_id = self.lower_node_id(e.id);
+            self.lower_attrs(hir_id, &e.attrs);
+            hir::Expr { hir_id, kind, span: self.lower_span(e.span) }
+        })
+    }
+
+    fn lower_unop(&mut self, u: UnOp) -> hir::UnOp {
+        match u {
+            UnOp::Deref => hir::UnOp::Deref,
+            UnOp::Not => hir::UnOp::Not,
+            UnOp::Neg => hir::UnOp::Neg,
+        }
+    }
+
+    fn lower_binop(&mut self, b: BinOp) -> hir::BinOp {
+        Spanned {
+            node: match b.node {
+                BinOpKind::Add => hir::BinOpKind::Add,
+                BinOpKind::Sub => hir::BinOpKind::Sub,
+                BinOpKind::Mul => hir::BinOpKind::Mul,
+                BinOpKind::Div => hir::BinOpKind::Div,
+                BinOpKind::Rem => hir::BinOpKind::Rem,
+                BinOpKind::And => hir::BinOpKind::And,
+                BinOpKind::Or => hir::BinOpKind::Or,
+                BinOpKind::BitXor => hir::BinOpKind::BitXor,
+                BinOpKind::BitAnd => hir::BinOpKind::BitAnd,
+                BinOpKind::BitOr => hir::BinOpKind::BitOr,
+                BinOpKind::Shl => hir::BinOpKind::Shl,
+                BinOpKind::Shr => hir::BinOpKind::Shr,
+                BinOpKind::Eq => hir::BinOpKind::Eq,
+                BinOpKind::Lt => hir::BinOpKind::Lt,
+                BinOpKind::Le => hir::BinOpKind::Le,
+                BinOpKind::Ne => hir::BinOpKind::Ne,
+                BinOpKind::Ge => hir::BinOpKind::Ge,
+                BinOpKind::Gt => hir::BinOpKind::Gt,
+            },
+            span: self.lower_span(b.span),
+        }
+    }
+
+    fn lower_legacy_const_generics(
+        &mut self,
+        mut f: Expr,
+        args: Vec<AstP<Expr>>,
+        legacy_args_idx: &[usize],
+    ) -> hir::ExprKind<'hir> {
+        let ExprKind::Path(None, ref mut path) = f.kind else {
+            unreachable!();
+        };
+
+        // Split the arguments into const generics and normal arguments
+        let mut real_args = vec![];
+        let mut generic_args = vec![];
+        for (idx, arg) in args.into_iter().enumerate() {
+            if legacy_args_idx.contains(&idx) {
+                let parent_def_id = self.current_hir_id_owner;
+                let node_id = self.next_node_id();
+
+                // Add a definition for the in-band const def.
+                self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+
+                let anon_const = AnonConst { id: node_id, value: arg };
+                generic_args.push(AngleBracketedArg::Arg(GenericArg::Const(anon_const)));
+            } else {
+                real_args.push(arg);
+            }
+        }
+
+        // Add generic args to the last element of the path.
+        let last_segment = path.segments.last_mut().unwrap();
+        assert!(last_segment.args.is_none());
+        last_segment.args = Some(AstP(GenericArgs::AngleBracketed(AngleBracketedArgs {
+            span: DUMMY_SP,
+            args: generic_args,
+        })));
+
+        // Now lower everything as normal.
+        let f = self.lower_expr(&f);
+        hir::ExprKind::Call(f, self.lower_exprs(&real_args))
+    }
+
+    fn lower_expr_if(
+        &mut self,
+        cond: &Expr,
+        then: &Block,
+        else_opt: Option<&Expr>,
+    ) -> hir::ExprKind<'hir> {
+        let lowered_cond = self.lower_expr(cond);
+        let new_cond = self.manage_let_cond(lowered_cond);
+        let then_expr = self.lower_block_expr(then);
+        if let Some(rslt) = else_opt {
+            hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), Some(self.lower_expr(rslt)))
+        } else {
+            hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), None)
+        }
+    }
+
+    // If `cond` kind is `let`, returns `let`. Otherwise, wraps and returns `cond`
+    // in a temporary block.
+    fn manage_let_cond(&mut self, cond: &'hir hir::Expr<'hir>) -> &'hir hir::Expr<'hir> {
+        fn has_let_expr<'hir>(expr: &'hir hir::Expr<'hir>) -> bool {
+            match expr.kind {
+                hir::ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
+                hir::ExprKind::Let(..) => true,
+                _ => false,
+            }
+        }
+        if has_let_expr(cond) {
+            cond
+        } else {
+            let reason = DesugaringKind::CondTemporary;
+            let span_block = self.mark_span_with_reason(reason, cond.span, None);
+            self.expr_drop_temps(span_block, cond, AttrVec::new())
+        }
+    }
+
+    // We desugar: `'label: while $cond $body` into:
+    //
+    // ```
+    // 'label: loop {
+    //     if { let _t = $cond; _t } {
+    //         $body
+    //     }
+    //     else {
+    //         break;
+    //     }
+    // }
+    // ```
+    //
+    // Wrap in a construct equivalent to `{ let _t = $cond; _t }`
+    // to preserve drop semantics since `while $cond { ... }` does not
+    // let temporaries live outside of `cond`.
+    fn lower_expr_while_in_loop_scope(
+        &mut self,
+        span: Span,
+        cond: &Expr,
+        body: &Block,
+        opt_label: Option<Label>,