author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_ast_lowering/src
parent    Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_ast_lowering/src')
-rw-r--r--  compiler/rustc_ast_lowering/src/asm.rs                  485
-rw-r--r--  compiler/rustc_ast_lowering/src/block.rs                 122
-rw-r--r--  compiler/rustc_ast_lowering/src/expr.rs                 1914
-rw-r--r--  compiler/rustc_ast_lowering/src/index.rs                 346
-rw-r--r--  compiler/rustc_ast_lowering/src/item.rs                 1513
-rw-r--r--  compiler/rustc_ast_lowering/src/lib.rs                  2501
-rw-r--r--  compiler/rustc_ast_lowering/src/lifetime_collector.rs    115
-rw-r--r--  compiler/rustc_ast_lowering/src/pat.rs                   350
-rw-r--r--  compiler/rustc_ast_lowering/src/path.rs                  406
9 files changed, 7752 insertions, 0 deletions
diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs
new file mode 100644
index 000000000..4166b4fc2
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/asm.rs
@@ -0,0 +1,485 @@
+use crate::{ImplTraitContext, ImplTraitPosition, ParamMode, ResolverAstLoweringExt};
+
+use super::LoweringContext;
+
+use rustc_ast::ptr::P;
+use rustc_ast::*;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::definitions::DefPathData;
+use rustc_session::parse::feature_err;
+use rustc_span::{sym, Span};
+use rustc_target::asm;
+use std::collections::hash_map::Entry;
+use std::fmt::Write;
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ pub(crate) fn lower_inline_asm(
+ &mut self,
+ sp: Span,
+ asm: &InlineAsm,
+ ) -> &'hir hir::InlineAsm<'hir> {
+ // Rustdoc needs to support asm! from foreign architectures: don't try
+ // lowering the register constraints in this case.
+ let asm_arch =
+ if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch };
+ if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc {
+ struct_span_err!(
+ self.tcx.sess,
+ sp,
+ E0472,
+ "inline assembly is unsupported on this target"
+ )
+ .emit();
+ }
+ if let Some(asm_arch) = asm_arch {
+ // Inline assembly is currently only stable for these architectures.
+ let is_stable = matches!(
+ asm_arch,
+ asm::InlineAsmArch::X86
+ | asm::InlineAsmArch::X86_64
+ | asm::InlineAsmArch::Arm
+ | asm::InlineAsmArch::AArch64
+ | asm::InlineAsmArch::RiscV32
+ | asm::InlineAsmArch::RiscV64
+ );
+ if !is_stable && !self.tcx.features().asm_experimental_arch {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::asm_experimental_arch,
+ sp,
+ "inline assembly is not stable yet on this architecture",
+ )
+ .emit();
+ }
+ }
+ if asm.options.contains(InlineAsmOptions::ATT_SYNTAX)
+ && !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
+ && !self.tcx.sess.opts.actually_rustdoc
+ {
+ self.tcx
+ .sess
+ .struct_span_err(sp, "the `att_syntax` option is only supported on x86")
+ .emit();
+ }
+ if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::asm_unwind,
+ sp,
+ "the `may_unwind` option is unstable",
+ )
+ .emit();
+ }
+
+ let mut clobber_abis = FxHashMap::default();
+ if let Some(asm_arch) = asm_arch {
+ for (abi_name, abi_span) in &asm.clobber_abis {
+ match asm::InlineAsmClobberAbi::parse(asm_arch, &self.tcx.sess.target, *abi_name) {
+ Ok(abi) => {
+ // If the ABI was already in the list, emit an error
+ match clobber_abis.get(&abi) {
+ Some((prev_name, prev_sp)) => {
+ let mut err = self.tcx.sess.struct_span_err(
+ *abi_span,
+ &format!("`{}` ABI specified multiple times", prev_name),
+ );
+ err.span_label(*prev_sp, "previously specified here");
+
+ // Multiple different ABI names may actually resolve to the same ABI.
+ // If the specified names differ, alert the user that they are equivalent.
+ let source_map = self.tcx.sess.source_map();
+ if source_map.span_to_snippet(*prev_sp)
+ != source_map.span_to_snippet(*abi_span)
+ {
+ err.note("these ABIs are equivalent on the current target");
+ }
+
+ err.emit();
+ }
+ None => {
+ clobber_abis.insert(abi, (abi_name, *abi_span));
+ }
+ }
+ }
+ Err(&[]) => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ *abi_span,
+ "`clobber_abi` is not supported on this target",
+ )
+ .emit();
+ }
+ Err(supported_abis) => {
+ let mut err = self
+ .tcx
+ .sess
+ .struct_span_err(*abi_span, "invalid ABI for `clobber_abi`");
+ let mut abis = format!("`{}`", supported_abis[0]);
+ for m in &supported_abis[1..] {
+ let _ = write!(abis, ", `{}`", m);
+ }
+ err.note(&format!(
+ "the following ABIs are supported on this target: {}",
+ abis
+ ));
+ err.emit();
+ }
+ }
+ }
+ }
+
+ // Lower operands to HIR. We use dummy register classes if an error
+ // occurs during lowering because we still need to be able to produce a
+ // valid HIR.
+ let sess = self.tcx.sess;
+ let mut operands: Vec<_> = asm
+ .operands
+ .iter()
+ .map(|(op, op_sp)| {
+ let lower_reg = |reg| match reg {
+ InlineAsmRegOrRegClass::Reg(s) => {
+ asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
+ asm::InlineAsmReg::parse(asm_arch, s).unwrap_or_else(|e| {
+ let msg = format!("invalid register `{}`: {}", s, e);
+ sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmReg::Err
+ })
+ } else {
+ asm::InlineAsmReg::Err
+ })
+ }
+ InlineAsmRegOrRegClass::RegClass(s) => {
+ asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
+ asm::InlineAsmRegClass::parse(asm_arch, s).unwrap_or_else(|e| {
+ let msg = format!("invalid register class `{}`: {}", s, e);
+ sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmRegClass::Err
+ })
+ } else {
+ asm::InlineAsmRegClass::Err
+ })
+ }
+ };
+
+ let op = match *op {
+ InlineAsmOperand::In { reg, ref expr } => hir::InlineAsmOperand::In {
+ reg: lower_reg(reg),
+ expr: self.lower_expr_mut(expr),
+ },
+ InlineAsmOperand::Out { reg, late, ref expr } => hir::InlineAsmOperand::Out {
+ reg: lower_reg(reg),
+ late,
+ expr: expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
+ },
+ InlineAsmOperand::InOut { reg, late, ref expr } => {
+ hir::InlineAsmOperand::InOut {
+ reg: lower_reg(reg),
+ late,
+ expr: self.lower_expr_mut(expr),
+ }
+ }
+ InlineAsmOperand::SplitInOut { reg, late, ref in_expr, ref out_expr } => {
+ hir::InlineAsmOperand::SplitInOut {
+ reg: lower_reg(reg),
+ late,
+ in_expr: self.lower_expr_mut(in_expr),
+ out_expr: out_expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
+ }
+ }
+ InlineAsmOperand::Const { ref anon_const } => {
+ if !self.tcx.features().asm_const {
+ feature_err(
+ &sess.parse_sess,
+ sym::asm_const,
+ *op_sp,
+ "const operands for inline assembly are unstable",
+ )
+ .emit();
+ }
+ hir::InlineAsmOperand::Const {
+ anon_const: self.lower_anon_const(anon_const),
+ }
+ }
+ InlineAsmOperand::Sym { ref sym } => {
+ if !self.tcx.features().asm_sym {
+ feature_err(
+ &sess.parse_sess,
+ sym::asm_sym,
+ *op_sp,
+ "sym operands for inline assembly are unstable",
+ )
+ .emit();
+ }
+
+ let static_def_id = self
+ .resolver
+ .get_partial_res(sym.id)
+ .filter(|res| res.unresolved_segments() == 0)
+ .and_then(|res| {
+ if let Res::Def(DefKind::Static(_), def_id) = res.base_res() {
+ Some(def_id)
+ } else {
+ None
+ }
+ });
+
+ if let Some(def_id) = static_def_id {
+ let path = self.lower_qpath(
+ sym.id,
+ &sym.qself,
+ &sym.path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ hir::InlineAsmOperand::SymStatic { path, def_id }
+ } else {
+ // Replace the InlineAsmSym AST node with an
+ // Expr using the name node id.
+ let expr = Expr {
+ id: sym.id,
+ kind: ExprKind::Path(sym.qself.clone(), sym.path.clone()),
+ span: *op_sp,
+ attrs: AttrVec::new(),
+ tokens: None,
+ };
+
+ // Wrap the expression in an AnonConst.
+ let parent_def_id = self.current_hir_id_owner;
+ let node_id = self.next_node_id();
+ self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+ let anon_const = AnonConst { id: node_id, value: P(expr) };
+ hir::InlineAsmOperand::SymFn {
+ anon_const: self.lower_anon_const(&anon_const),
+ }
+ }
+ }
+ };
+ (op, self.lower_span(*op_sp))
+ })
+ .collect();
+
+ // Validate template modifiers against the register classes for the operands
+ for p in &asm.template {
+ if let InlineAsmTemplatePiece::Placeholder {
+ operand_idx,
+ modifier: Some(modifier),
+ span: placeholder_span,
+ } = *p
+ {
+ let op_sp = asm.operands[operand_idx].1;
+ match &operands[operand_idx].0 {
+ hir::InlineAsmOperand::In { reg, .. }
+ | hir::InlineAsmOperand::Out { reg, .. }
+ | hir::InlineAsmOperand::InOut { reg, .. }
+ | hir::InlineAsmOperand::SplitInOut { reg, .. } => {
+ let class = reg.reg_class();
+ if class == asm::InlineAsmRegClass::Err {
+ continue;
+ }
+ let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
+ if !valid_modifiers.contains(&modifier) {
+ let mut err = sess.struct_span_err(
+ placeholder_span,
+ "invalid asm template modifier for this register class",
+ );
+ err.span_label(placeholder_span, "template modifier");
+ err.span_label(op_sp, "argument");
+ if !valid_modifiers.is_empty() {
+ let mut mods = format!("`{}`", valid_modifiers[0]);
+ for m in &valid_modifiers[1..] {
+ let _ = write!(mods, ", `{}`", m);
+ }
+ err.note(&format!(
+ "the `{}` register class supports \
+ the following template modifiers: {}",
+ class.name(),
+ mods
+ ));
+ } else {
+ err.note(&format!(
+ "the `{}` register class does not support template modifiers",
+ class.name()
+ ));
+ }
+ err.emit();
+ }
+ }
+ hir::InlineAsmOperand::Const { .. } => {
+ let mut err = sess.struct_span_err(
+ placeholder_span,
+ "asm template modifiers are not allowed for `const` arguments",
+ );
+ err.span_label(placeholder_span, "template modifier");
+ err.span_label(op_sp, "argument");
+ err.emit();
+ }
+ hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {
+ let mut err = sess.struct_span_err(
+ placeholder_span,
+ "asm template modifiers are not allowed for `sym` arguments",
+ );
+ err.span_label(placeholder_span, "template modifier");
+ err.span_label(op_sp, "argument");
+ err.emit();
+ }
+ }
+ }
+ }
+
+ let mut used_input_regs = FxHashMap::default();
+ let mut used_output_regs = FxHashMap::default();
+
+ for (idx, &(ref op, op_sp)) in operands.iter().enumerate() {
+ if let Some(reg) = op.reg() {
+ let reg_class = reg.reg_class();
+ if reg_class == asm::InlineAsmRegClass::Err {
+ continue;
+ }
+
+ // Some register classes can only be used as clobbers. This
+ // means that we disallow passing a value in/out of the asm and
+ // require that the operand name an explicit register, not a
+ // register class.
+ if reg_class.is_clobber_only(asm_arch.unwrap()) && !op.is_clobber() {
+ let msg = format!(
+ "register class `{}` can only be used as a clobber, \
+ not as an input or output",
+ reg_class.name()
+ );
+ sess.struct_span_err(op_sp, &msg).emit();
+ continue;
+ }
+
+ // Check for conflicts between explicit register operands.
+ if let asm::InlineAsmRegOrRegClass::Reg(reg) = reg {
+ let (input, output) = match op {
+ hir::InlineAsmOperand::In { .. } => (true, false),
+
+ // Late outputs do not conflict with inputs, but normal outputs do
+ hir::InlineAsmOperand::Out { late, .. } => (!late, true),
+
+ hir::InlineAsmOperand::InOut { .. }
+ | hir::InlineAsmOperand::SplitInOut { .. } => (true, true),
+
+ hir::InlineAsmOperand::Const { .. }
+ | hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {
+ unreachable!()
+ }
+ };
+
+ // Flag to output the error only once per operand
+ let mut skip = false;
+ reg.overlapping_regs(|r| {
+ let mut check = |used_regs: &mut FxHashMap<asm::InlineAsmReg, usize>,
+ input| {
+ match used_regs.entry(r) {
+ Entry::Occupied(o) => {
+ if skip {
+ return;
+ }
+ skip = true;
+
+ let idx2 = *o.get();
+ let &(ref op2, op_sp2) = &operands[idx2];
+ let Some(asm::InlineAsmRegOrRegClass::Reg(reg2)) = op2.reg() else {
+ unreachable!();
+ };
+
+ let msg = format!(
+ "register `{}` conflicts with register `{}`",
+ reg.name(),
+ reg2.name()
+ );
+ let mut err = sess.struct_span_err(op_sp, &msg);
+ err.span_label(op_sp, &format!("register `{}`", reg.name()));
+ err.span_label(op_sp2, &format!("register `{}`", reg2.name()));
+
+ match (op, op2) {
+ (
+ hir::InlineAsmOperand::In { .. },
+ hir::InlineAsmOperand::Out { late, .. },
+ )
+ | (
+ hir::InlineAsmOperand::Out { late, .. },
+ hir::InlineAsmOperand::In { .. },
+ ) => {
+ assert!(!*late);
+ let out_op_sp = if input { op_sp2 } else { op_sp };
+ let msg = "use `lateout` instead of \
+ `out` to avoid conflict";
+ err.span_help(out_op_sp, msg);
+ }
+ _ => {}
+ }
+
+ err.emit();
+ }
+ Entry::Vacant(v) => {
+ if r == reg {
+ v.insert(idx);
+ }
+ }
+ }
+ };
+ if input {
+ check(&mut used_input_regs, true);
+ }
+ if output {
+ check(&mut used_output_regs, false);
+ }
+ });
+ }
+ }
+ }
+
+ // If a clobber_abi is specified, add the necessary clobbers to the
+ // operands list.
+ let mut clobbered = FxHashSet::default();
+ for (abi, (_, abi_span)) in clobber_abis {
+ for &clobber in abi.clobbered_regs() {
+ // Don't emit a clobber for a register already clobbered
+ if clobbered.contains(&clobber) {
+ continue;
+ }
+
+ let mut output_used = false;
+ clobber.overlapping_regs(|reg| {
+ if used_output_regs.contains_key(&reg) {
+ output_used = true;
+ }
+ });
+
+ if !output_used {
+ operands.push((
+ hir::InlineAsmOperand::Out {
+ reg: asm::InlineAsmRegOrRegClass::Reg(clobber),
+ late: true,
+ expr: None,
+ },
+ self.lower_span(abi_span),
+ ));
+ clobbered.insert(clobber);
+ }
+ }
+ }
+
+ let operands = self.arena.alloc_from_iter(operands);
+ let template = self.arena.alloc_from_iter(asm.template.iter().cloned());
+ let template_strs = self.arena.alloc_from_iter(
+ asm.template_strs
+ .iter()
+ .map(|(sym, snippet, span)| (*sym, *snippet, self.lower_span(*span))),
+ );
+ let line_spans =
+ self.arena.alloc_from_iter(asm.line_spans.iter().map(|span| self.lower_span(*span)));
+ let hir_asm =
+ hir::InlineAsm { template, template_strs, operands, options: asm.options, line_spans };
+ self.arena.alloc(hir_asm)
+ }
+}
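
As a concrete illustration of the checks above (not part of this commit), here is a hypothetical snippet with two `asm!` calls that this validation would reject, assuming an x86_64 Linux target:

    use std::arch::asm;

    unsafe fn rejected(x: u64) {
        // An `in` and a non-late `out` may not share an explicit register;
        // the conflict check above reports this and suggests `lateout`.
        asm!("nop", in("rax") x, out("rax") _);

        // On x86_64 Linux, "C" and "system" resolve to the same ABI, so the
        // duplicate-clobber_abi check fires, noting that they are equivalent.
        asm!("nop", clobber_abi("C", "system"));
    }
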
diff --git a/compiler/rustc_ast_lowering/src/block.rs b/compiler/rustc_ast_lowering/src/block.rs
new file mode 100644
index 000000000..7cbfe143b
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/block.rs
@@ -0,0 +1,122 @@
+use crate::{ImplTraitContext, ImplTraitPosition, LoweringContext};
+use rustc_ast::{Block, BlockCheckMode, Local, LocalKind, Stmt, StmtKind};
+use rustc_hir as hir;
+use rustc_session::parse::feature_err;
+use rustc_span::sym;
+
+use smallvec::SmallVec;
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ pub(super) fn lower_block(
+ &mut self,
+ b: &Block,
+ targeted_by_break: bool,
+ ) -> &'hir hir::Block<'hir> {
+ self.arena.alloc(self.lower_block_noalloc(b, targeted_by_break))
+ }
+
+ pub(super) fn lower_block_noalloc(
+ &mut self,
+ b: &Block,
+ targeted_by_break: bool,
+ ) -> hir::Block<'hir> {
+ let (stmts, expr) = self.lower_stmts(&b.stmts);
+ let rules = self.lower_block_check_mode(&b.rules);
+ let hir_id = self.lower_node_id(b.id);
+ hir::Block { hir_id, stmts, expr, rules, span: self.lower_span(b.span), targeted_by_break }
+ }
+
+ fn lower_stmts(
+ &mut self,
+ mut ast_stmts: &[Stmt],
+ ) -> (&'hir [hir::Stmt<'hir>], Option<&'hir hir::Expr<'hir>>) {
+ let mut stmts = SmallVec::<[hir::Stmt<'hir>; 8]>::new();
+ let mut expr = None;
+ while let [s, tail @ ..] = ast_stmts {
+ match s.kind {
+ StmtKind::Local(ref local) => {
+ let hir_id = self.lower_node_id(s.id);
+ let local = self.lower_local(local);
+ self.alias_attrs(hir_id, local.hir_id);
+ let kind = hir::StmtKind::Local(local);
+ let span = self.lower_span(s.span);
+ stmts.push(hir::Stmt { hir_id, kind, span });
+ }
+ StmtKind::Item(ref it) => {
+ stmts.extend(self.lower_item_ref(it).into_iter().enumerate().map(
+ |(i, item_id)| {
+ let hir_id = match i {
+ 0 => self.lower_node_id(s.id),
+ _ => self.next_id(),
+ };
+ let kind = hir::StmtKind::Item(item_id);
+ let span = self.lower_span(s.span);
+ hir::Stmt { hir_id, kind, span }
+ },
+ ));
+ }
+ StmtKind::Expr(ref e) => {
+ let e = self.lower_expr(e);
+ if tail.is_empty() {
+ expr = Some(e);
+ } else {
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, e.hir_id);
+ let kind = hir::StmtKind::Expr(e);
+ let span = self.lower_span(s.span);
+ stmts.push(hir::Stmt { hir_id, kind, span });
+ }
+ }
+ StmtKind::Semi(ref e) => {
+ let e = self.lower_expr(e);
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, e.hir_id);
+ let kind = hir::StmtKind::Semi(e);
+ let span = self.lower_span(s.span);
+ stmts.push(hir::Stmt { hir_id, kind, span });
+ }
+ StmtKind::Empty => {}
+ StmtKind::MacCall(..) => panic!("shouldn't exist here"),
+ }
+ ast_stmts = &ast_stmts[1..];
+ }
+ (self.arena.alloc_from_iter(stmts), expr)
+ }
+
+ fn lower_local(&mut self, l: &Local) -> &'hir hir::Local<'hir> {
+ let ty = l
+ .ty
+ .as_ref()
+ .map(|t| self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Variable)));
+ let init = l.kind.init().map(|init| self.lower_expr(init));
+ let hir_id = self.lower_node_id(l.id);
+ let pat = self.lower_pat(&l.pat);
+ let els = if let LocalKind::InitElse(_, els) = &l.kind {
+ if !self.tcx.features().let_else {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::let_else,
+ l.span,
+ "`let...else` statements are unstable",
+ )
+ .emit();
+ }
+ Some(self.lower_block(els, false))
+ } else {
+ None
+ };
+ let span = self.lower_span(l.span);
+ let source = hir::LocalSource::Normal;
+ self.lower_attrs(hir_id, &l.attrs);
+ self.arena.alloc(hir::Local { hir_id, ty, pat, init, els, span, source })
+ }
+
+ fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode {
+ match *b {
+ BlockCheckMode::Default => hir::BlockCheckMode::DefaultBlock,
+ BlockCheckMode::Unsafe(u) => {
+ hir::BlockCheckMode::UnsafeBlock(self.lower_unsafe_source(u))
+ }
+ }
+ }
+}
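
For reference (not part of this commit), the `LocalKind::InitElse` arm of `lower_local` above is what handles `let ... else`; at this version (1.64) the syntax was still behind the `let_else` feature gate, so stable code would hit the `feature_err` emitted there. A minimal nightly example:

    #![feature(let_else)] // stabilized later, in Rust 1.65

    fn first_byte(s: &str) -> u8 {
        // LocalKind::InitElse: the `else` block must diverge.
        let Some(&b) = s.as_bytes().first() else {
            return 0;
        };
        b
    }
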
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
new file mode 100644
index 000000000..fb6715ff1
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -0,0 +1,1914 @@
+use super::ResolverAstLoweringExt;
+use super::{ImplTraitContext, LoweringContext, ParamMode, ParenthesizedGenericArgs};
+use crate::{FnDeclKind, ImplTraitPosition};
+
+use rustc_ast::attr;
+use rustc_ast::ptr::P as AstP;
+use rustc_ast::*;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::definitions::DefPathData;
+use rustc_span::source_map::{respan, DesugaringKind, Span, Spanned};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::DUMMY_SP;
+
+impl<'hir> LoweringContext<'_, 'hir> {
+ fn lower_exprs(&mut self, exprs: &[AstP<Expr>]) -> &'hir [hir::Expr<'hir>] {
+ self.arena.alloc_from_iter(exprs.iter().map(|x| self.lower_expr_mut(x)))
+ }
+
+ pub(super) fn lower_expr(&mut self, e: &Expr) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.lower_expr_mut(e))
+ }
+
+ pub(super) fn lower_expr_mut(&mut self, e: &Expr) -> hir::Expr<'hir> {
+ ensure_sufficient_stack(|| {
+ let kind = match e.kind {
+ ExprKind::Box(ref inner) => hir::ExprKind::Box(self.lower_expr(inner)),
+ ExprKind::Array(ref exprs) => hir::ExprKind::Array(self.lower_exprs(exprs)),
+ ExprKind::ConstBlock(ref anon_const) => {
+ let anon_const = self.lower_anon_const(anon_const);
+ hir::ExprKind::ConstBlock(anon_const)
+ }
+ ExprKind::Repeat(ref expr, ref count) => {
+ let expr = self.lower_expr(expr);
+ let count = self.lower_array_length(count);
+ hir::ExprKind::Repeat(expr, count)
+ }
+ ExprKind::Tup(ref elts) => hir::ExprKind::Tup(self.lower_exprs(elts)),
+ ExprKind::Call(ref f, ref args) => {
+ if e.attrs.get(0).map_or(false, |a| a.has_name(sym::rustc_box)) {
+ if let [inner] = &args[..] && e.attrs.len() == 1 {
+ let kind = hir::ExprKind::Box(self.lower_expr(&inner));
+ let hir_id = self.lower_node_id(e.id);
+ return hir::Expr { hir_id, kind, span: self.lower_span(e.span) };
+ } else {
+ self.tcx.sess
+ .struct_span_err(
+ e.span,
+ "#[rustc_box] requires precisely one argument \
+ and no other attributes are allowed",
+ )
+ .emit();
+ hir::ExprKind::Err
+ }
+ } else if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) {
+ self.lower_legacy_const_generics((**f).clone(), args.clone(), &legacy_args)
+ } else {
+ let f = self.lower_expr(f);
+ hir::ExprKind::Call(f, self.lower_exprs(args))
+ }
+ }
+ ExprKind::MethodCall(ref seg, ref args, span) => {
+ let hir_seg = self.arena.alloc(self.lower_path_segment(
+ e.span,
+ seg,
+ ParamMode::Optional,
+ ParenthesizedGenericArgs::Err,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ ));
+ let args = self.lower_exprs(args);
+ hir::ExprKind::MethodCall(hir_seg, args, self.lower_span(span))
+ }
+ ExprKind::Binary(binop, ref lhs, ref rhs) => {
+ let binop = self.lower_binop(binop);
+ let lhs = self.lower_expr(lhs);
+ let rhs = self.lower_expr(rhs);
+ hir::ExprKind::Binary(binop, lhs, rhs)
+ }
+ ExprKind::Unary(op, ref ohs) => {
+ let op = self.lower_unop(op);
+ let ohs = self.lower_expr(ohs);
+ hir::ExprKind::Unary(op, ohs)
+ }
+ ExprKind::Lit(ref l) => {
+ hir::ExprKind::Lit(respan(self.lower_span(l.span), l.kind.clone()))
+ }
+ ExprKind::Cast(ref expr, ref ty) => {
+ let expr = self.lower_expr(expr);
+ let ty =
+ self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ hir::ExprKind::Cast(expr, ty)
+ }
+ ExprKind::Type(ref expr, ref ty) => {
+ let expr = self.lower_expr(expr);
+ let ty =
+ self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ hir::ExprKind::Type(expr, ty)
+ }
+ ExprKind::AddrOf(k, m, ref ohs) => {
+ let ohs = self.lower_expr(ohs);
+ hir::ExprKind::AddrOf(k, m, ohs)
+ }
+ ExprKind::Let(ref pat, ref scrutinee, span) => {
+ hir::ExprKind::Let(self.arena.alloc(hir::Let {
+ hir_id: self.next_id(),
+ span: self.lower_span(span),
+ pat: self.lower_pat(pat),
+ ty: None,
+ init: self.lower_expr(scrutinee),
+ }))
+ }
+ ExprKind::If(ref cond, ref then, ref else_opt) => {
+ self.lower_expr_if(cond, then, else_opt.as_deref())
+ }
+ ExprKind::While(ref cond, ref body, opt_label) => {
+ self.with_loop_scope(e.id, |this| {
+ let span =
+ this.mark_span_with_reason(DesugaringKind::WhileLoop, e.span, None);
+ this.lower_expr_while_in_loop_scope(span, cond, body, opt_label)
+ })
+ }
+ ExprKind::Loop(ref body, opt_label) => self.with_loop_scope(e.id, |this| {
+ hir::ExprKind::Loop(
+ this.lower_block(body, false),
+ this.lower_label(opt_label),
+ hir::LoopSource::Loop,
+ DUMMY_SP,
+ )
+ }),
+ ExprKind::TryBlock(ref body) => self.lower_expr_try_block(body),
+ ExprKind::Match(ref expr, ref arms) => hir::ExprKind::Match(
+ self.lower_expr(expr),
+ self.arena.alloc_from_iter(arms.iter().map(|x| self.lower_arm(x))),
+ hir::MatchSource::Normal,
+ ),
+ ExprKind::Async(capture_clause, closure_node_id, ref block) => self
+ .make_async_expr(
+ capture_clause,
+ closure_node_id,
+ None,
+ block.span,
+ hir::AsyncGeneratorKind::Block,
+ |this| this.with_new_scopes(|this| this.lower_block_expr(block)),
+ ),
+ ExprKind::Await(ref expr) => {
+ let span = if expr.span.hi() < e.span.hi() {
+ expr.span.shrink_to_hi().with_hi(e.span.hi())
+ } else {
+ // this is a recovered `await expr`
+ e.span
+ };
+ self.lower_expr_await(span, expr)
+ }
+ ExprKind::Closure(
+ ref binder,
+ capture_clause,
+ asyncness,
+ movability,
+ ref decl,
+ ref body,
+ fn_decl_span,
+ ) => {
+ if let Async::Yes { closure_id, .. } = asyncness {
+ self.lower_expr_async_closure(
+ binder,
+ capture_clause,
+ e.id,
+ closure_id,
+ decl,
+ body,
+ fn_decl_span,
+ )
+ } else {
+ self.lower_expr_closure(
+ binder,
+ capture_clause,
+ e.id,
+ movability,
+ decl,
+ body,
+ fn_decl_span,
+ )
+ }
+ }
+ ExprKind::Block(ref blk, opt_label) => {
+ let opt_label = self.lower_label(opt_label);
+ hir::ExprKind::Block(self.lower_block(blk, opt_label.is_some()), opt_label)
+ }
+ ExprKind::Assign(ref el, ref er, span) => {
+ self.lower_expr_assign(el, er, span, e.span)
+ }
+ ExprKind::AssignOp(op, ref el, ref er) => hir::ExprKind::AssignOp(
+ self.lower_binop(op),
+ self.lower_expr(el),
+ self.lower_expr(er),
+ ),
+ ExprKind::Field(ref el, ident) => {
+ hir::ExprKind::Field(self.lower_expr(el), self.lower_ident(ident))
+ }
+ ExprKind::Index(ref el, ref er) => {
+ hir::ExprKind::Index(self.lower_expr(el), self.lower_expr(er))
+ }
+ ExprKind::Range(Some(ref e1), Some(ref e2), RangeLimits::Closed) => {
+ self.lower_expr_range_closed(e.span, e1, e2)
+ }
+ ExprKind::Range(ref e1, ref e2, lims) => {
+ self.lower_expr_range(e.span, e1.as_deref(), e2.as_deref(), lims)
+ }
+ ExprKind::Underscore => {
+ self.tcx
+ .sess.struct_span_err(
+ e.span,
+ "in expressions, `_` can only be used on the left-hand side of an assignment",
+ )
+ .span_label(e.span, "`_` not allowed here")
+ .emit();
+ hir::ExprKind::Err
+ }
+ ExprKind::Path(ref qself, ref path) => {
+ let qpath = self.lower_qpath(
+ e.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ hir::ExprKind::Path(qpath)
+ }
+ ExprKind::Break(opt_label, ref opt_expr) => {
+ let opt_expr = opt_expr.as_ref().map(|x| self.lower_expr(x));
+ hir::ExprKind::Break(self.lower_jump_destination(e.id, opt_label), opt_expr)
+ }
+ ExprKind::Continue(opt_label) => {
+ hir::ExprKind::Continue(self.lower_jump_destination(e.id, opt_label))
+ }
+ ExprKind::Ret(ref e) => {
+ let e = e.as_ref().map(|x| self.lower_expr(x));
+ hir::ExprKind::Ret(e)
+ }
+ ExprKind::Yeet(ref sub_expr) => self.lower_expr_yeet(e.span, sub_expr.as_deref()),
+ ExprKind::InlineAsm(ref asm) => {
+ hir::ExprKind::InlineAsm(self.lower_inline_asm(e.span, asm))
+ }
+ ExprKind::Struct(ref se) => {
+ let rest = match &se.rest {
+ StructRest::Base(e) => Some(self.lower_expr(e)),
+ StructRest::Rest(sp) => {
+ self.tcx
+ .sess
+ .struct_span_err(*sp, "base expression required after `..`")
+ .span_label(*sp, "add a base expression here")
+ .emit();
+ Some(&*self.arena.alloc(self.expr_err(*sp)))
+ }
+ StructRest::None => None,
+ };
+ hir::ExprKind::Struct(
+ self.arena.alloc(self.lower_qpath(
+ e.id,
+ &se.qself,
+ &se.path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ )),
+ self.arena
+ .alloc_from_iter(se.fields.iter().map(|x| self.lower_expr_field(x))),
+ rest,
+ )
+ }
+ ExprKind::Yield(ref opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()),
+ ExprKind::Err => hir::ExprKind::Err,
+ ExprKind::Try(ref sub_expr) => self.lower_expr_try(e.span, sub_expr),
+ ExprKind::Paren(ref ex) => {
+ let mut ex = self.lower_expr_mut(ex);
+ // Include parens in span, but only if it is a super-span.
+ if e.span.contains(ex.span) {
+ ex.span = self.lower_span(e.span);
+ }
+ // Merge attributes into the inner expression.
+ if !e.attrs.is_empty() {
+ let old_attrs =
+ self.attrs.get(&ex.hir_id.local_id).map(|la| *la).unwrap_or(&[]);
+ self.attrs.insert(
+ ex.hir_id.local_id,
+ &*self.arena.alloc_from_iter(
+ e.attrs
+ .iter()
+ .map(|a| self.lower_attr(a))
+ .chain(old_attrs.iter().cloned()),
+ ),
+ );
+ }
+ return ex;
+ }
+
+ // Desugar `ExprForLoop`
+ // from: `[opt_ident]: for <pat> in <head> <body>`
+ ExprKind::ForLoop(ref pat, ref head, ref body, opt_label) => {
+ return self.lower_expr_for(e, pat, head, body, opt_label);
+ }
+ ExprKind::MacCall(_) => panic!("{:?} shouldn't exist here", e.span),
+ };
+
+ let hir_id = self.lower_node_id(e.id);
+ self.lower_attrs(hir_id, &e.attrs);
+ hir::Expr { hir_id, kind, span: self.lower_span(e.span) }
+ })
+ }
+
+ fn lower_unop(&mut self, u: UnOp) -> hir::UnOp {
+ match u {
+ UnOp::Deref => hir::UnOp::Deref,
+ UnOp::Not => hir::UnOp::Not,
+ UnOp::Neg => hir::UnOp::Neg,
+ }
+ }
+
+ fn lower_binop(&mut self, b: BinOp) -> hir::BinOp {
+ Spanned {
+ node: match b.node {
+ BinOpKind::Add => hir::BinOpKind::Add,
+ BinOpKind::Sub => hir::BinOpKind::Sub,
+ BinOpKind::Mul => hir::BinOpKind::Mul,
+ BinOpKind::Div => hir::BinOpKind::Div,
+ BinOpKind::Rem => hir::BinOpKind::Rem,
+ BinOpKind::And => hir::BinOpKind::And,
+ BinOpKind::Or => hir::BinOpKind::Or,
+ BinOpKind::BitXor => hir::BinOpKind::BitXor,
+ BinOpKind::BitAnd => hir::BinOpKind::BitAnd,
+ BinOpKind::BitOr => hir::BinOpKind::BitOr,
+ BinOpKind::Shl => hir::BinOpKind::Shl,
+ BinOpKind::Shr => hir::BinOpKind::Shr,
+ BinOpKind::Eq => hir::BinOpKind::Eq,
+ BinOpKind::Lt => hir::BinOpKind::Lt,
+ BinOpKind::Le => hir::BinOpKind::Le,
+ BinOpKind::Ne => hir::BinOpKind::Ne,
+ BinOpKind::Ge => hir::BinOpKind::Ge,
+ BinOpKind::Gt => hir::BinOpKind::Gt,
+ },
+ span: self.lower_span(b.span),
+ }
+ }
+
+ fn lower_legacy_const_generics(
+ &mut self,
+ mut f: Expr,
+ args: Vec<AstP<Expr>>,
+ legacy_args_idx: &[usize],
+ ) -> hir::ExprKind<'hir> {
+ let ExprKind::Path(None, ref mut path) = f.kind else {
+ unreachable!();
+ };
+
+ // Split the arguments into const generics and normal arguments
+ let mut real_args = vec![];
+ let mut generic_args = vec![];
+ for (idx, arg) in args.into_iter().enumerate() {
+ if legacy_args_idx.contains(&idx) {
+ let parent_def_id = self.current_hir_id_owner;
+ let node_id = self.next_node_id();
+
+ // Add a definition for the in-band const def.
+ self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+
+ let anon_const = AnonConst { id: node_id, value: arg };
+ generic_args.push(AngleBracketedArg::Arg(GenericArg::Const(anon_const)));
+ } else {
+ real_args.push(arg);
+ }
+ }
+
+ // Add generic args to the last element of the path.
+ let last_segment = path.segments.last_mut().unwrap();
+ assert!(last_segment.args.is_none());
+ last_segment.args = Some(AstP(GenericArgs::AngleBracketed(AngleBracketedArgs {
+ span: DUMMY_SP,
+ args: generic_args,
+ })));
+
+ // Now lower everything as normal.
+ let f = self.lower_expr(&f);
+ hir::ExprKind::Call(f, self.lower_exprs(&real_args))
+ }
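
For orientation (not part of this commit): `legacy_const_generic_args` is driven by the internal `#[rustc_legacy_const_generics(...)]` attribute used by some `core::arch` intrinsics, which lets callers keep writing a const generic as a trailing value argument. A sketch with a hypothetical intrinsic `blend`:

    // Hypothetical declaration: call-argument index 2 is really a const generic.
    //     #[rustc_legacy_const_generics(2)]
    //     fn blend<const IMM: i32>(a: f32, b: f32) -> f32;
    //
    // A call written in the legacy style...
    //     blend(a, b, 7)
    // ...is lowered by the function above as if it had been written
    //     blend::<7>(a, b)
    // The `7` becomes an AnonConst inside an angle-bracketed generic-args
    // list appended to the last path segment.
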
+
+ fn lower_expr_if(
+ &mut self,
+ cond: &Expr,
+ then: &Block,
+ else_opt: Option<&Expr>,
+ ) -> hir::ExprKind<'hir> {
+ let lowered_cond = self.lower_expr(cond);
+ let new_cond = self.manage_let_cond(lowered_cond);
+ let then_expr = self.lower_block_expr(then);
+ if let Some(rslt) = else_opt {
+ hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), Some(self.lower_expr(rslt)))
+ } else {
+ hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), None)
+ }
+ }
+
+ // If `cond` contains a `let` expression, returns it unchanged. Otherwise,
+ // wraps `cond` so that its temporaries are dropped at the end of the condition.
+ fn manage_let_cond(&mut self, cond: &'hir hir::Expr<'hir>) -> &'hir hir::Expr<'hir> {
+ fn has_let_expr<'hir>(expr: &'hir hir::Expr<'hir>) -> bool {
+ match expr.kind {
+ hir::ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
+ hir::ExprKind::Let(..) => true,
+ _ => false,
+ }
+ }
+ if has_let_expr(cond) {
+ cond
+ } else {
+ let reason = DesugaringKind::CondTemporary;
+ let span_block = self.mark_span_with_reason(reason, cond.span, None);
+ self.expr_drop_temps(span_block, cond, AttrVec::new())
+ }
+ }
+
+ // We desugar `'label: while $cond $body` into:
+ //
+ // ```
+ // 'label: loop {
+ // if { let _t = $cond; _t } {
+ // $body
+ // }
+ // else {
+ // break;
+ // }
+ // }
+ // ```
+ //
+ // Wrap in a construct equivalent to `{ let _t = $cond; _t }`
+ // to preserve drop semantics since `while $cond { ... }` does not
+ // let temporaries live outside of `cond`.
+ fn lower_expr_while_in_loop_scope(
+ &mut self,
+ span: Span,
+ cond: &Expr,
+ body: &Block,
+ opt_label: Option<Label>,
+ ) -> hir::ExprKind<'hir> {
+ let lowered_cond = self.with_loop_condition_scope(|t| t.lower_expr(cond));
+ let new_cond = self.manage_let_cond(lowered_cond);
+ let then = self.lower_block_expr(body);
+ let expr_break = self.expr_break(span, ThinVec::new());
+ let stmt_break = self.stmt_expr(span, expr_break);
+ let else_blk = self.block_all(span, arena_vec![self; stmt_break], None);
+ let else_expr = self.arena.alloc(self.expr_block(else_blk, ThinVec::new()));
+ let if_kind = hir::ExprKind::If(new_cond, self.arena.alloc(then), Some(else_expr));
+ let if_expr = self.expr(span, if_kind, ThinVec::new());
+ let block = self.block_expr(self.arena.alloc(if_expr));
+ let span = self.lower_span(span.with_hi(cond.span.hi()));
+ let opt_label = self.lower_label(opt_label);
+ hir::ExprKind::Loop(block, opt_label, hir::LoopSource::While, span)
+ }
+
+ /// Desugar `try { <stmts>; <expr> }` into `{ <stmts>; ::std::ops::Try::from_output(<expr>) }`,
+ /// `try { <stmts>; }` into `{ <stmts>; ::std::ops::Try::from_output(()) }`
+ /// and save the block id to use it as a break target for desugaring of the `?` operator.
+ fn lower_expr_try_block(&mut self, body: &Block) -> hir::ExprKind<'hir> {
+ self.with_catch_scope(body.id, |this| {
+ let mut block = this.lower_block_noalloc(body, true);
+
+ // Final expression of the block (if present) or `()` with span at the end of block
+ let (try_span, tail_expr) = if let Some(expr) = block.expr.take() {
+ (
+ this.mark_span_with_reason(
+ DesugaringKind::TryBlock,
+ expr.span,
+ this.allow_try_trait.clone(),
+ ),
+ expr,
+ )
+ } else {
+ let try_span = this.mark_span_with_reason(
+ DesugaringKind::TryBlock,
+ this.tcx.sess.source_map().end_point(body.span),
+ this.allow_try_trait.clone(),
+ );
+
+ (try_span, this.expr_unit(try_span))
+ };
+
+ let ok_wrapped_span =
+ this.mark_span_with_reason(DesugaringKind::TryBlock, tail_expr.span, None);
+
+ // `::std::ops::Try::from_output($tail_expr)`
+ block.expr = Some(this.wrap_in_try_constructor(
+ hir::LangItem::TryTraitFromOutput,
+ try_span,
+ tail_expr,
+ ok_wrapped_span,
+ ));
+
+ hir::ExprKind::Block(this.arena.alloc(block), None)
+ })
+ }
+
+ fn wrap_in_try_constructor(
+ &mut self,
+ lang_item: hir::LangItem,
+ method_span: Span,
+ expr: &'hir hir::Expr<'hir>,
+ overall_span: Span,
+ ) -> &'hir hir::Expr<'hir> {
+ let constructor = self.arena.alloc(self.expr_lang_item_path(
+ method_span,
+ lang_item,
+ ThinVec::new(),
+ None,
+ ));
+ self.expr_call(overall_span, constructor, std::slice::from_ref(expr))
+ }
+
+ fn lower_arm(&mut self, arm: &Arm) -> hir::Arm<'hir> {
+ let pat = self.lower_pat(&arm.pat);
+ let guard = arm.guard.as_ref().map(|cond| {
+ if let ExprKind::Let(ref pat, ref scrutinee, span) = cond.kind {
+ hir::Guard::IfLet(self.arena.alloc(hir::Let {
+ hir_id: self.next_id(),
+ span: self.lower_span(span),
+ pat: self.lower_pat(pat),
+ ty: None,
+ init: self.lower_expr(scrutinee),
+ }))
+ } else {
+ hir::Guard::If(self.lower_expr(cond))
+ }
+ });
+ let hir_id = self.next_id();
+ self.lower_attrs(hir_id, &arm.attrs);
+ hir::Arm {
+ hir_id,
+ pat,
+ guard,
+ body: self.lower_expr(&arm.body),
+ span: self.lower_span(arm.span),
+ }
+ }
+
+ /// Lower an `async` construct to a generator that is then wrapped so it implements `Future`.
+ ///
+ /// This results in:
+ ///
+ /// ```text
+ /// std::future::from_generator(static move? |_task_context| -> <ret_ty> {
+ /// <body>
+ /// })
+ /// ```
+ pub(super) fn make_async_expr(
+ &mut self,
+ capture_clause: CaptureBy,
+ closure_node_id: NodeId,
+ ret_ty: Option<AstP<Ty>>,
+ span: Span,
+ async_gen_kind: hir::AsyncGeneratorKind,
+ body: impl FnOnce(&mut Self) -> hir::Expr<'hir>,
+ ) -> hir::ExprKind<'hir> {
+ let output = match ret_ty {
+ Some(ty) => hir::FnRetTy::Return(
+ self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::AsyncBlock)),
+ ),
+ None => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
+ };
+
+ // Resume argument type. We let the compiler infer this to simplify the lowering. It is
+ // fully constrained by `future::from_generator`.
+ let input_ty = hir::Ty {
+ hir_id: self.next_id(),
+ kind: hir::TyKind::Infer,
+ span: self.lower_span(span),
+ };
+
+ // The closure/generator `FnDecl` takes a single (resume) argument of type `input_ty`.
+ let fn_decl = self.arena.alloc(hir::FnDecl {
+ inputs: arena_vec![self; input_ty],
+ output,
+ c_variadic: false,
+ implicit_self: hir::ImplicitSelfKind::None,
+ });
+
+ // Lower the argument pattern/ident. The ident is used again in the `.await` lowering.
+ let (pat, task_context_hid) = self.pat_ident_binding_mode(
+ span,
+ Ident::with_dummy_span(sym::_task_context),
+ hir::BindingAnnotation::Mutable,
+ );
+ let param = hir::Param {
+ hir_id: self.next_id(),
+ pat,
+ ty_span: self.lower_span(span),
+ span: self.lower_span(span),
+ };
+ let params = arena_vec![self; param];
+
+ let body = self.lower_body(move |this| {
+ this.generator_kind = Some(hir::GeneratorKind::Async(async_gen_kind));
+
+ let old_ctx = this.task_context;
+ this.task_context = Some(task_context_hid);
+ let res = body(this);
+ this.task_context = old_ctx;
+ (params, res)
+ });
+
+ // `static |_task_context| -> <ret_ty> { body }`:
+ let generator_kind = {
+ let c = self.arena.alloc(hir::Closure {
+ binder: hir::ClosureBinder::Default,
+ capture_clause,
+ bound_generic_params: &[],
+ fn_decl,
+ body,
+ fn_decl_span: self.lower_span(span),
+ movability: Some(hir::Movability::Static),
+ });
+
+ hir::ExprKind::Closure(c)
+ };
+ let generator = hir::Expr {
+ hir_id: self.lower_node_id(closure_node_id),
+ kind: generator_kind,
+ span: self.lower_span(span),
+ };
+
+ // `future::from_generator`:
+ let unstable_span =
+ self.mark_span_with_reason(DesugaringKind::Async, span, self.allow_gen_future.clone());
+ let gen_future = self.expr_lang_item_path(
+ unstable_span,
+ hir::LangItem::FromGenerator,
+ ThinVec::new(),
+ None,
+ );
+
+ // `future::from_generator(generator)`:
+ hir::ExprKind::Call(self.arena.alloc(gen_future), arena_vec![self; generator])
+ }
+
+ /// Desugar `<expr>.await` into:
+ /// ```ignore (pseudo-rust)
+ /// match ::std::future::IntoFuture::into_future(<expr>) {
+ /// mut __awaitee => loop {
+ /// match unsafe { ::std::future::Future::poll(
+ /// <::std::pin::Pin>::new_unchecked(&mut __awaitee),
+ /// ::std::future::get_context(task_context),
+ /// ) } {
+ /// ::std::task::Poll::Ready(result) => break result,
+ /// ::std::task::Poll::Pending => {}
+ /// }
+ /// task_context = yield ();
+ /// }
+ /// }
+ /// ```
+ fn lower_expr_await(&mut self, dot_await_span: Span, expr: &Expr) -> hir::ExprKind<'hir> {
+ let full_span = expr.span.to(dot_await_span);
+ match self.generator_kind {
+ Some(hir::GeneratorKind::Async(_)) => {}
+ Some(hir::GeneratorKind::Gen) | None => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ dot_await_span,
+ E0728,
+ "`await` is only allowed inside `async` functions and blocks"
+ );
+ err.span_label(dot_await_span, "only allowed inside `async` functions and blocks");
+ if let Some(item_sp) = self.current_item {
+ err.span_label(item_sp, "this is not `async`");
+ }
+ err.emit();
+ }
+ }
+ let span = self.mark_span_with_reason(DesugaringKind::Await, dot_await_span, None);
+ let gen_future_span = self.mark_span_with_reason(
+ DesugaringKind::Await,
+ full_span,
+ self.allow_gen_future.clone(),
+ );
+ let expr = self.lower_expr_mut(expr);
+ let expr_hir_id = expr.hir_id;
+
+ // Note that the name of this binding must not be changed to something else because
+ // debuggers and debugger extensions expect it to be called `__awaitee`. They use
+ // this name to identify what is being awaited by a suspended async function.
+ let awaitee_ident = Ident::with_dummy_span(sym::__awaitee);
+ let (awaitee_pat, awaitee_pat_hid) =
+ self.pat_ident_binding_mode(span, awaitee_ident, hir::BindingAnnotation::Mutable);
+
+ let task_context_ident = Ident::with_dummy_span(sym::_task_context);
+
+ // unsafe {
+ // ::std::future::Future::poll(
+ // ::std::pin::Pin::new_unchecked(&mut __awaitee),
+ // ::std::future::get_context(task_context),
+ // )
+ // }
+ let poll_expr = {
+ let awaitee = self.expr_ident(span, awaitee_ident, awaitee_pat_hid);
+ let ref_mut_awaitee = self.expr_mut_addr_of(span, awaitee);
+ let task_context = if let Some(task_context_hid) = self.task_context {
+ self.expr_ident_mut(span, task_context_ident, task_context_hid)
+ } else {
+ // Use of `await` outside of an async context; we cannot use `task_context` here.
+ self.expr_err(span)
+ };
+ let new_unchecked = self.expr_call_lang_item_fn_mut(
+ span,
+ hir::LangItem::PinNewUnchecked,
+ arena_vec![self; ref_mut_awaitee],
+ Some(expr_hir_id),
+ );
+ let get_context = self.expr_call_lang_item_fn_mut(
+ gen_future_span,
+ hir::LangItem::GetContext,
+ arena_vec![self; task_context],
+ Some(expr_hir_id),
+ );
+ let call = self.expr_call_lang_item_fn(
+ span,
+ hir::LangItem::FuturePoll,
+ arena_vec![self; new_unchecked, get_context],
+ Some(expr_hir_id),
+ );
+ self.arena.alloc(self.expr_unsafe(call))
+ };
+
+ // `::std::task::Poll::Ready(result) => break result`
+ let loop_node_id = self.next_node_id();
+ let loop_hir_id = self.lower_node_id(loop_node_id);
+ let ready_arm = {
+ let x_ident = Ident::with_dummy_span(sym::result);
+ let (x_pat, x_pat_hid) = self.pat_ident(gen_future_span, x_ident);
+ let x_expr = self.expr_ident(gen_future_span, x_ident, x_pat_hid);
+ let ready_field = self.single_pat_field(gen_future_span, x_pat);
+ let ready_pat = self.pat_lang_item_variant(
+ span,
+ hir::LangItem::PollReady,
+ ready_field,
+ Some(expr_hir_id),
+ );
+ let break_x = self.with_loop_scope(loop_node_id, move |this| {
+ let expr_break =
+ hir::ExprKind::Break(this.lower_loop_destination(None), Some(x_expr));
+ this.arena.alloc(this.expr(gen_future_span, expr_break, ThinVec::new()))
+ });
+ self.arm(ready_pat, break_x)
+ };
+
+ // `::std::task::Poll::Pending => {}`
+ let pending_arm = {
+ let pending_pat = self.pat_lang_item_variant(
+ span,
+ hir::LangItem::PollPending,
+ &[],
+ Some(expr_hir_id),
+ );
+ let empty_block = self.expr_block_empty(span);
+ self.arm(pending_pat, empty_block)
+ };
+
+ let inner_match_stmt = {
+ let match_expr = self.expr_match(
+ span,
+ poll_expr,
+ arena_vec![self; ready_arm, pending_arm],
+ hir::MatchSource::AwaitDesugar,
+ );
+ self.stmt_expr(span, match_expr)
+ };
+
+ // task_context = yield ();
+ let yield_stmt = {
+ let unit = self.expr_unit(span);
+ let yield_expr = self.expr(
+ span,
+ hir::ExprKind::Yield(unit, hir::YieldSource::Await { expr: Some(expr_hir_id) }),
+ ThinVec::new(),
+ );
+ let yield_expr = self.arena.alloc(yield_expr);
+
+ if let Some(task_context_hid) = self.task_context {
+ let lhs = self.expr_ident(span, task_context_ident, task_context_hid);
+ let assign = self.expr(
+ span,
+ hir::ExprKind::Assign(lhs, yield_expr, self.lower_span(span)),
+ AttrVec::new(),
+ );
+ self.stmt_expr(span, assign)
+ } else {
+ // Use of `await` outside of an async context. Return `yield_expr` so that we can
+ // proceed with type checking.
+ self.stmt(span, hir::StmtKind::Semi(yield_expr))
+ }
+ };
+
+ let loop_block = self.block_all(span, arena_vec![self; inner_match_stmt, yield_stmt], None);
+
+ // loop { .. }
+ let loop_expr = self.arena.alloc(hir::Expr {
+ hir_id: loop_hir_id,
+ kind: hir::ExprKind::Loop(
+ loop_block,
+ None,
+ hir::LoopSource::Loop,
+ self.lower_span(span),
+ ),
+ span: self.lower_span(span),
+ });
+
+ // mut __awaitee => loop { ... }
+ let awaitee_arm = self.arm(awaitee_pat, loop_expr);
+
+ // `match ::std::future::IntoFuture::into_future(<expr>) { ... }`
+ let into_future_span = self.mark_span_with_reason(
+ DesugaringKind::Await,
+ dot_await_span,
+ self.allow_into_future.clone(),
+ );
+ let into_future_expr = self.expr_call_lang_item_fn(
+ into_future_span,
+ hir::LangItem::IntoFutureIntoFuture,
+ arena_vec![self; expr],
+ Some(expr_hir_id),
+ );
+
+ // match <into_future_expr> {
+ // mut __awaitee => loop { .. }
+ // }
+ hir::ExprKind::Match(
+ into_future_expr,
+ arena_vec![self; awaitee_arm],
+ hir::MatchSource::AwaitDesugar,
+ )
+ }
+
+ fn lower_expr_closure(
+ &mut self,
+ binder: &ClosureBinder,
+ capture_clause: CaptureBy,
+ closure_id: NodeId,
+ movability: Movability,
+ decl: &FnDecl,
+ body: &Expr,
+ fn_decl_span: Span,
+ ) -> hir::ExprKind<'hir> {
+ let (binder_clause, generic_params) = self.lower_closure_binder(binder);
+
+ let (body_id, generator_option) = self.with_new_scopes(move |this| {
+ let prev = this.current_item;
+ this.current_item = Some(fn_decl_span);
+ let mut generator_kind = None;
+ let body_id = this.lower_fn_body(decl, |this| {
+ let e = this.lower_expr_mut(body);
+ generator_kind = this.generator_kind;
+ e
+ });
+ let generator_option =
+ this.generator_movability_for_fn(&decl, fn_decl_span, generator_kind, movability);
+ this.current_item = prev;
+ (body_id, generator_option)
+ });
+
+ let bound_generic_params = self.lower_lifetime_binder(closure_id, generic_params);
+ // Lower outside new scope to preserve `is_in_loop_condition`.
+ let fn_decl = self.lower_fn_decl(decl, None, FnDeclKind::Closure, None);
+
+ let c = self.arena.alloc(hir::Closure {
+ binder: binder_clause,
+ capture_clause,
+ bound_generic_params,
+ fn_decl,
+ body: body_id,
+ fn_decl_span: self.lower_span(fn_decl_span),
+ movability: generator_option,
+ });
+
+ hir::ExprKind::Closure(c)
+ }
+
+ fn generator_movability_for_fn(
+ &mut self,
+ decl: &FnDecl,
+ fn_decl_span: Span,
+ generator_kind: Option<hir::GeneratorKind>,
+ movability: Movability,
+ ) -> Option<hir::Movability> {
+ match generator_kind {
+ Some(hir::GeneratorKind::Gen) => {
+ if decl.inputs.len() > 1 {
+ struct_span_err!(
+ self.tcx.sess,
+ fn_decl_span,
+ E0628,
+ "too many parameters for a generator (expected 0 or 1 parameters)"
+ )
+ .emit();
+ }
+ Some(movability)
+ }
+ Some(hir::GeneratorKind::Async(_)) => {
+ panic!("non-`async` closure body turned `async` during lowering");
+ }
+ None => {
+ if movability == Movability::Static {
+ struct_span_err!(
+ self.tcx.sess,
+ fn_decl_span,
+ E0697,
+ "closures cannot be static"
+ )
+ .emit();
+ }
+ None
+ }
+ }
+ }
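
Two hypothetical nightly examples (requiring `#![feature(generators)]`, not part of this commit) of what the two error paths above reject:

    // E0697: `static` on a closure that contains no `yield`, so it never
    // became a generator (the `None` arm above).
    let _ = static || {};

    // E0628: a generator closure may take at most one (resume) parameter.
    let _ = |a: u32, b: u32| yield a + b;
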
+
+ fn lower_closure_binder<'c>(
+ &mut self,
+ binder: &'c ClosureBinder,
+ ) -> (hir::ClosureBinder, &'c [GenericParam]) {
+ let (binder, params) = match binder {
+ ClosureBinder::NotPresent => (hir::ClosureBinder::Default, &[][..]),
+ &ClosureBinder::For { span, ref generic_params } => {
+ let span = self.lower_span(span);
+ (hir::ClosureBinder::For { span }, &**generic_params)
+ }
+ };
+
+ (binder, params)
+ }
+
+ fn lower_expr_async_closure(
+ &mut self,
+ binder: &ClosureBinder,
+ capture_clause: CaptureBy,
+ closure_id: NodeId,
+ inner_closure_id: NodeId,
+ decl: &FnDecl,
+ body: &Expr,
+ fn_decl_span: Span,
+ ) -> hir::ExprKind<'hir> {
+ if let &ClosureBinder::For { span, .. } = binder {
+ self.tcx.sess.span_err(
+ span,
+ "`for<...>` binders on `async` closures are not currently supported",
+ );
+ }
+
+ let (binder_clause, generic_params) = self.lower_closure_binder(binder);
+
+ let outer_decl =
+ FnDecl { inputs: decl.inputs.clone(), output: FnRetTy::Default(fn_decl_span) };
+
+ let body = self.with_new_scopes(|this| {
+ // FIXME(cramertj): allow `async` non-`move` closures with arguments.
+ if capture_clause == CaptureBy::Ref && !decl.inputs.is_empty() {
+ struct_span_err!(
+ this.tcx.sess,
+ fn_decl_span,
+ E0708,
+ "`async` non-`move` closures with parameters are not currently supported",
+ )
+ .help(
+ "consider using `let` statements to manually capture \
+ variables by reference before entering an `async move` closure",
+ )
+ .emit();
+ }
+
+ // Transform `async |x: u8| -> X { ... }` into
+ // `|x: u8| future::from_generator(|| -> X { ... })`.
+ let body_id = this.lower_fn_body(&outer_decl, |this| {
+ let async_ret_ty =
+ if let FnRetTy::Ty(ty) = &decl.output { Some(ty.clone()) } else { None };
+ let async_body = this.make_async_expr(
+ capture_clause,
+ inner_closure_id,
+ async_ret_ty,
+ body.span,
+ hir::AsyncGeneratorKind::Closure,
+ |this| this.with_new_scopes(|this| this.lower_expr_mut(body)),
+ );
+ this.expr(fn_decl_span, async_body, ThinVec::new())
+ });
+ body_id
+ });
+
+ let bound_generic_params = self.lower_lifetime_binder(closure_id, generic_params);
+
+ // We need to lower the declaration outside the new scope, because we
+ // have to conserve the state of being inside a loop condition for the
+ // closure argument types.
+ let fn_decl = self.lower_fn_decl(&outer_decl, None, FnDeclKind::Closure, None);
+
+ let c = self.arena.alloc(hir::Closure {
+ binder: binder_clause,
+ capture_clause,
+ bound_generic_params,
+ fn_decl,
+ body,
+ fn_decl_span: self.lower_span(fn_decl_span),
+ movability: None,
+ });
+ hir::ExprKind::Closure(c)
+ }
+
+ /// Destructure the LHS of complex assignments.
+ /// For instance, lower `(a, b) = t` to `{ let (lhs1, lhs2) = t; a = lhs1; b = lhs2; }`.
+ fn lower_expr_assign(
+ &mut self,
+ lhs: &Expr,
+ rhs: &Expr,
+ eq_sign_span: Span,
+ whole_span: Span,
+ ) -> hir::ExprKind<'hir> {
+ // Return early in case of an ordinary assignment.
+ fn is_ordinary(lower_ctx: &mut LoweringContext<'_, '_>, lhs: &Expr) -> bool {
+ match &lhs.kind {
+ ExprKind::Array(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Underscore => false,
+ // Check for tuple struct constructor.
+ ExprKind::Call(callee, ..) => lower_ctx.extract_tuple_struct_path(callee).is_none(),
+ ExprKind::Paren(e) => {
+ match e.kind {
+ // We special-case `(..)` for consistency with patterns.
+ ExprKind::Range(None, None, RangeLimits::HalfOpen) => false,
+ _ => is_ordinary(lower_ctx, e),
+ }
+ }
+ _ => true,
+ }
+ }
+ if is_ordinary(self, lhs) {
+ return hir::ExprKind::Assign(
+ self.lower_expr(lhs),
+ self.lower_expr(rhs),
+ self.lower_span(eq_sign_span),
+ );
+ }
+
+ let mut assignments = vec![];
+
+ // The LHS becomes a pattern: `(lhs1, lhs2)`.
+ let pat = self.destructure_assign(lhs, eq_sign_span, &mut assignments);
+ let rhs = self.lower_expr(rhs);
+
+ // Introduce a `let` for destructuring: `let (lhs1, lhs2) = t`.
+ let destructure_let = self.stmt_let_pat(
+ None,
+ whole_span,
+ Some(rhs),
+ pat,
+ hir::LocalSource::AssignDesugar(self.lower_span(eq_sign_span)),
+ );
+
+ // `a = lhs1; b = lhs2;`.
+ let stmts = self
+ .arena
+ .alloc_from_iter(std::iter::once(destructure_let).chain(assignments.into_iter()));
+
+ // Wrap everything in a block.
+ hir::ExprKind::Block(&self.block_all(whole_span, stmts, None), None)
+ }
+
+ /// If the given expression is a path to a tuple struct, returns that path.
+ /// It is not a complete check, but just tries to reject most paths early
+ /// if they are not tuple structs.
+ /// Type checking will take care of the full validation later.
+ fn extract_tuple_struct_path<'a>(
+ &mut self,
+ expr: &'a Expr,
+ ) -> Option<(&'a Option<QSelf>, &'a Path)> {
+ if let ExprKind::Path(qself, path) = &expr.kind {
+ // Does the path resolve to something disallowed in a tuple struct/variant pattern?
+ if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
+ if partial_res.unresolved_segments() == 0
+ && !partial_res.base_res().expected_in_tuple_struct_pat()
+ {
+ return None;
+ }
+ }
+ return Some((qself, path));
+ }
+ None
+ }
+
+ /// If the given expression is a path to a unit struct, returns that path.
+ /// It is not a complete check, but just tries to reject most paths early
+ /// if they are not unit structs.
+ /// Type checking will take care of the full validation later.
+ fn extract_unit_struct_path<'a>(
+ &mut self,
+ expr: &'a Expr,
+ ) -> Option<(&'a Option<QSelf>, &'a Path)> {
+ if let ExprKind::Path(qself, path) = &expr.kind {
+ // Does the path resolve to something disallowed in a unit struct/variant pattern?
+ if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
+ if partial_res.unresolved_segments() == 0
+ && !partial_res.base_res().expected_in_unit_struct_pat()
+ {
+ return None;
+ }
+ }
+ return Some((qself, path));
+ }
+ None
+ }
+
+ /// Convert the LHS of a destructuring assignment to a pattern.
+ /// Each sub-assignment is recorded in `assignments`.
+ fn destructure_assign(
+ &mut self,
+ lhs: &Expr,
+ eq_sign_span: Span,
+ assignments: &mut Vec<hir::Stmt<'hir>>,
+ ) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(self.destructure_assign_mut(lhs, eq_sign_span, assignments))
+ }
+
+ fn destructure_assign_mut(
+ &mut self,
+ lhs: &Expr,
+ eq_sign_span: Span,
+ assignments: &mut Vec<hir::Stmt<'hir>>,
+ ) -> hir::Pat<'hir> {
+ match &lhs.kind {
+ // Underscore pattern.
+ ExprKind::Underscore => {
+ return self.pat_without_dbm(lhs.span, hir::PatKind::Wild);
+ }
+ // Slice patterns.
+ ExprKind::Array(elements) => {
+ let (pats, rest) =
+ self.destructure_sequence(elements, "slice", eq_sign_span, assignments);
+ let slice_pat = if let Some((i, span)) = rest {
+ let (before, after) = pats.split_at(i);
+ hir::PatKind::Slice(
+ before,
+ Some(self.arena.alloc(self.pat_without_dbm(span, hir::PatKind::Wild))),
+ after,
+ )
+ } else {
+ hir::PatKind::Slice(pats, None, &[])
+ };
+ return self.pat_without_dbm(lhs.span, slice_pat);
+ }
+ // Tuple structs.
+ ExprKind::Call(callee, args) => {
+ if let Some((qself, path)) = self.extract_tuple_struct_path(callee) {
+ let (pats, rest) = self.destructure_sequence(
+ args,
+ "tuple struct or variant",
+ eq_sign_span,
+ assignments,
+ );
+ let qpath = self.lower_qpath(
+ callee.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ // Destructure like a tuple struct.
+ let tuple_struct_pat =
+ hir::PatKind::TupleStruct(qpath, pats, rest.map(|r| r.0));
+ return self.pat_without_dbm(lhs.span, tuple_struct_pat);
+ }
+ }
+ // Unit structs and enum variants.
+ ExprKind::Path(..) => {
+ if let Some((qself, path)) = self.extract_unit_struct_path(lhs) {
+ let qpath = self.lower_qpath(
+ lhs.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ // Destructure like a unit struct.
+ let unit_struct_pat = hir::PatKind::Path(qpath);
+ return self.pat_without_dbm(lhs.span, unit_struct_pat);
+ }
+ }
+ // Structs.
+ ExprKind::Struct(se) => {
+ let field_pats = self.arena.alloc_from_iter(se.fields.iter().map(|f| {
+ let pat = self.destructure_assign(&f.expr, eq_sign_span, assignments);
+ hir::PatField {
+ hir_id: self.next_id(),
+ ident: self.lower_ident(f.ident),
+ pat,
+ is_shorthand: f.is_shorthand,
+ span: self.lower_span(f.span),
+ }
+ }));
+ let qpath = self.lower_qpath(
+ lhs.id,
+ &se.qself,
+ &se.path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ let fields_omitted = match &se.rest {
+ StructRest::Base(e) => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ e.span,
+ "functional record updates are not allowed in destructuring \
+ assignments",
+ )
+ .span_suggestion(
+ e.span,
+ "consider removing the trailing pattern",
+ "",
+ rustc_errors::Applicability::MachineApplicable,
+ )
+ .emit();
+ true
+ }
+ StructRest::Rest(_) => true,
+ StructRest::None => false,
+ };
+ let struct_pat = hir::PatKind::Struct(qpath, field_pats, fields_omitted);
+ return self.pat_without_dbm(lhs.span, struct_pat);
+ }
+ // Tuples.
+ ExprKind::Tup(elements) => {
+ let (pats, rest) =
+ self.destructure_sequence(elements, "tuple", eq_sign_span, assignments);
+ let tuple_pat = hir::PatKind::Tuple(pats, rest.map(|r| r.0));
+ return self.pat_without_dbm(lhs.span, tuple_pat);
+ }
+ ExprKind::Paren(e) => {
+ // We special-case `(..)` for consistency with patterns.
+ if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
+ let tuple_pat = hir::PatKind::Tuple(&[], Some(0));
+ return self.pat_without_dbm(lhs.span, tuple_pat);
+ } else {
+ return self.destructure_assign_mut(e, eq_sign_span, assignments);
+ }
+ }
+ _ => {}
+ }
+ // Treat all other cases as a normal lvalue.
+ let ident = Ident::new(sym::lhs, self.lower_span(lhs.span));
+ let (pat, binding) = self.pat_ident_mut(lhs.span, ident);
+ let ident = self.expr_ident(lhs.span, ident, binding);
+ let assign =
+ hir::ExprKind::Assign(self.lower_expr(lhs), ident, self.lower_span(eq_sign_span));
+ let expr = self.expr(lhs.span, assign, ThinVec::new());
+ assignments.push(self.stmt_expr(lhs.span, expr));
+ pat
+ }
+
+ /// Destructure a sequence of expressions occurring on the LHS of an assignment.
+ /// Such a sequence occurs in a tuple (struct)/slice.
+ /// Return a sequence of corresponding patterns, and the index and the span of `..` if it
+ /// exists.
+ /// Each sub-assignment is recorded in `assignments`.
+ fn destructure_sequence(
+ &mut self,
+ elements: &[AstP<Expr>],
+ ctx: &str,
+ eq_sign_span: Span,
+ assignments: &mut Vec<hir::Stmt<'hir>>,
+ ) -> (&'hir [hir::Pat<'hir>], Option<(usize, Span)>) {
+ let mut rest = None;
+ let elements =
+ self.arena.alloc_from_iter(elements.iter().enumerate().filter_map(|(i, e)| {
+ // Check for `..` pattern.
+ if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
+ if let Some((_, prev_span)) = rest {
+ self.ban_extra_rest_pat(e.span, prev_span, ctx);
+ } else {
+ rest = Some((i, e.span));
+ }
+ None
+ } else {
+ Some(self.destructure_assign_mut(e, eq_sign_span, assignments))
+ }
+ }));
+ (elements, rest)
+ }
+
+ /// Desugar `<start>..=<end>` into `std::ops::RangeInclusive::new(<start>, <end>)`.
+ fn lower_expr_range_closed(&mut self, span: Span, e1: &Expr, e2: &Expr) -> hir::ExprKind<'hir> {
+ let e1 = self.lower_expr_mut(e1);
+ let e2 = self.lower_expr_mut(e2);
+ let fn_path =
+ hir::QPath::LangItem(hir::LangItem::RangeInclusiveNew, self.lower_span(span), None);
+ let fn_expr =
+ self.arena.alloc(self.expr(span, hir::ExprKind::Path(fn_path), ThinVec::new()));
+ hir::ExprKind::Call(fn_expr, arena_vec![self; e1, e2])
+ }
+
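+ /// Lower the remaining range forms into a struct literal of the
+ /// corresponding `std::ops` lang item, e.g. (an illustrative sketch)
+ /// `a..b` into `Range { start: a, end: b }` and `..=b` into
+ /// `RangeToInclusive { end: b }`.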
+ fn lower_expr_range(
+ &mut self,
+ span: Span,
+ e1: Option<&Expr>,
+ e2: Option<&Expr>,
+ lims: RangeLimits,
+ ) -> hir::ExprKind<'hir> {
+ use rustc_ast::RangeLimits::*;
+
+ let lang_item = match (e1, e2, lims) {
+ (None, None, HalfOpen) => hir::LangItem::RangeFull,
+ (Some(..), None, HalfOpen) => hir::LangItem::RangeFrom,
+ (None, Some(..), HalfOpen) => hir::LangItem::RangeTo,
+ (Some(..), Some(..), HalfOpen) => hir::LangItem::Range,
+ (None, Some(..), Closed) => hir::LangItem::RangeToInclusive,
+ (Some(..), Some(..), Closed) => unreachable!(),
+ (_, None, Closed) => self.diagnostic().span_fatal(span, "inclusive range with no end"),
+ };
+
+ let fields = self.arena.alloc_from_iter(
+ e1.iter().map(|e| (sym::start, e)).chain(e2.iter().map(|e| (sym::end, e))).map(
+ |(s, e)| {
+ let expr = self.lower_expr(&e);
+ let ident = Ident::new(s, self.lower_span(e.span));
+ self.expr_field(ident, expr, e.span)
+ },
+ ),
+ );
+
+ hir::ExprKind::Struct(
+ self.arena.alloc(hir::QPath::LangItem(lang_item, self.lower_span(span), None)),
+ fields,
+ None,
+ )
+ }
+
+ fn lower_label(&self, opt_label: Option<Label>) -> Option<Label> {
+ let label = opt_label?;
+ Some(Label { ident: self.lower_ident(label.ident) })
+ }
+
+ fn lower_loop_destination(&mut self, destination: Option<(NodeId, Label)>) -> hir::Destination {
+ let target_id = match destination {
+ Some((id, _)) => {
+ if let Some(loop_id) = self.resolver.get_label_res(id) {
+ Ok(self.lower_node_id(loop_id))
+ } else {
+ Err(hir::LoopIdError::UnresolvedLabel)
+ }
+ }
+ None => self
+ .loop_scope
+ .map(|id| Ok(self.lower_node_id(id)))
+ .unwrap_or(Err(hir::LoopIdError::OutsideLoopScope)),
+ };
+ let label = self.lower_label(destination.map(|(_, label)| label));
+ hir::Destination { label, target_id }
+ }
+
+ fn lower_jump_destination(&mut self, id: NodeId, opt_label: Option<Label>) -> hir::Destination {
+ if self.is_in_loop_condition && opt_label.is_none() {
+ hir::Destination {
+ label: None,
+ target_id: Err(hir::LoopIdError::UnlabeledCfInWhileCondition),
+ }
+ } else {
+ self.lower_loop_destination(opt_label.map(|label| (id, label)))
+ }
+ }
+
+ fn with_catch_scope<T>(&mut self, catch_id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
+ let old_scope = self.catch_scope.replace(catch_id);
+ let result = f(self);
+ self.catch_scope = old_scope;
+ result
+ }
+
+ fn with_loop_scope<T>(&mut self, loop_id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
+ // We're no longer in the base loop's condition; we're in another loop.
+ let was_in_loop_condition = self.is_in_loop_condition;
+ self.is_in_loop_condition = false;
+
+ let old_scope = self.loop_scope.replace(loop_id);
+ let result = f(self);
+ self.loop_scope = old_scope;
+
+ self.is_in_loop_condition = was_in_loop_condition;
+
+ result
+ }
+
+ fn with_loop_condition_scope<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
+ let was_in_loop_condition = self.is_in_loop_condition;
+ self.is_in_loop_condition = true;
+
+ let result = f(self);
+
+ self.is_in_loop_condition = was_in_loop_condition;
+
+ result
+ }
+
+ fn lower_expr_field(&mut self, f: &ExprField) -> hir::ExprField<'hir> {
+ hir::ExprField {
+ hir_id: self.next_id(),
+ ident: self.lower_ident(f.ident),
+ expr: self.lower_expr(&f.expr),
+ span: self.lower_span(f.span),
+ is_shorthand: f.is_shorthand,
+ }
+ }
+
+ fn lower_expr_yield(&mut self, span: Span, opt_expr: Option<&Expr>) -> hir::ExprKind<'hir> {
+ match self.generator_kind {
+ Some(hir::GeneratorKind::Gen) => {}
+ Some(hir::GeneratorKind::Async(_)) => {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0727,
+ "`async` generators are not yet supported"
+ )
+ .emit();
+ }
+ None => self.generator_kind = Some(hir::GeneratorKind::Gen),
+ }
+
+ let expr =
+ opt_expr.as_ref().map(|x| self.lower_expr(x)).unwrap_or_else(|| self.expr_unit(span));
+
+ hir::ExprKind::Yield(expr, hir::YieldSource::Yield)
+ }
+
+ /// Desugar `ExprForLoop` from: `[opt_ident]: for <pat> in <head> <body>` into:
+ /// ```ignore (pseudo-rust)
+ /// {
+ /// let result = match IntoIterator::into_iter(<head>) {
+ /// mut iter => {
+ /// [opt_ident]: loop {
+ /// match Iterator::next(&mut iter) {
+ /// None => break,
+ /// Some(<pat>) => <body>,
+ /// };
+ /// }
+ /// }
+ /// };
+ /// result
+ /// }
+ /// ```
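+ /// For example, `for x in 0..3 { body(x) }` lowers to roughly (an
+ /// illustrative sketch, modulo spans, attributes, and the
+ /// result-wrapping block shown above):
+ /// ```ignore (pseudo-rust)
+ /// match IntoIterator::into_iter(0..3) {
+ /// mut iter => loop {
+ /// match Iterator::next(&mut iter) {
+ /// None => break,
+ /// Some(x) => body(x),
+ /// };
+ /// },
+ /// }
+ /// ```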
+ fn lower_expr_for(
+ &mut self,
+ e: &Expr,
+ pat: &Pat,
+ head: &Expr,
+ body: &Block,
+ opt_label: Option<Label>,
+ ) -> hir::Expr<'hir> {
+ let head = self.lower_expr_mut(head);
+ let pat = self.lower_pat(pat);
+ let for_span =
+ self.mark_span_with_reason(DesugaringKind::ForLoop, self.lower_span(e.span), None);
+ let head_span = self.mark_span_with_reason(DesugaringKind::ForLoop, head.span, None);
+ let pat_span = self.mark_span_with_reason(DesugaringKind::ForLoop, pat.span, None);
+
+ // `None => break`
+ let none_arm = {
+ let break_expr =
+ self.with_loop_scope(e.id, |this| this.expr_break_alloc(for_span, ThinVec::new()));
+ let pat = self.pat_none(for_span);
+ self.arm(pat, break_expr)
+ };
+
+ // `Some(<pat>) => <body>`
+ let some_arm = {
+ let some_pat = self.pat_some(pat_span, pat);
+ let body_block = self.with_loop_scope(e.id, |this| this.lower_block(body, false));
+ let body_expr = self.arena.alloc(self.expr_block(body_block, ThinVec::new()));
+ self.arm(some_pat, body_expr)
+ };
+
+ // `mut iter`
+ let iter = Ident::with_dummy_span(sym::iter);
+ let (iter_pat, iter_pat_nid) =
+ self.pat_ident_binding_mode(head_span, iter, hir::BindingAnnotation::Mutable);
+
+ // `match Iterator::next(&mut iter) { ... }`
+ let match_expr = {
+ let iter = self.expr_ident(head_span, iter, iter_pat_nid);
+ let ref_mut_iter = self.expr_mut_addr_of(head_span, iter);
+ let next_expr = self.expr_call_lang_item_fn(
+ head_span,
+ hir::LangItem::IteratorNext,
+ arena_vec![self; ref_mut_iter],
+ None,
+ );
+ let arms = arena_vec![self; none_arm, some_arm];
+
+ self.expr_match(head_span, next_expr, arms, hir::MatchSource::ForLoopDesugar)
+ };
+ let match_stmt = self.stmt_expr(for_span, match_expr);
+
+ let loop_block = self.block_all(for_span, arena_vec![self; match_stmt], None);
+
+ // `[opt_ident]: loop { ... }`
+ let kind = hir::ExprKind::Loop(
+ loop_block,
+ self.lower_label(opt_label),
+ hir::LoopSource::ForLoop,
+ self.lower_span(for_span.with_hi(head.span.hi())),
+ );
+ let loop_expr =
+ self.arena.alloc(hir::Expr { hir_id: self.lower_node_id(e.id), kind, span: for_span });
+
+ // `mut iter => { ... }`
+ let iter_arm = self.arm(iter_pat, loop_expr);
+
+ // `match ::std::iter::IntoIterator::into_iter(<head>) { ... }`
+ let into_iter_expr = {
+ self.expr_call_lang_item_fn(
+ head_span,
+ hir::LangItem::IntoIterIntoIter,
+ arena_vec![self; head],
+ None,
+ )
+ };
+
+ let match_expr = self.arena.alloc(self.expr_match(
+ for_span,
+ into_iter_expr,
+ arena_vec![self; iter_arm],
+ hir::MatchSource::ForLoopDesugar,
+ ));
+
+ let attrs: Vec<_> = e.attrs.iter().map(|a| self.lower_attr(a)).collect();
+
+ // This is effectively `{ let _result = ...; _result }`.
+ // The construct was introduced in #21984 and is necessary to make sure that
+ // temporaries in the `head` expression are dropped and do not leak to the
+ // surrounding scope of the `match` since the `match` is not a terminating scope.
+ //
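+ // For illustration: in `for x in 0..make_vec().len() { .. }`
+ // (hypothetical name), the temporary `Vec` built while evaluating the
+ // head is dropped when this wrapper expression ends instead of leaking
+ // into the surrounding scope.
+ //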
+ // Also, add the attributes to the outer returned expr node.
+ self.expr_drop_temps_mut(for_span, match_expr, attrs.into())
+ }
+
+ /// Desugar `ExprKind::Try` from: `<expr>?` into:
+ /// ```ignore (pseudo-rust)
+ /// match Try::branch(<expr>) {
+ /// ControlFlow::Continue(val) => #[allow(unreachable_code)] val,
+ /// ControlFlow::Break(residual) =>
+ /// #[allow(unreachable_code)]
+ /// // If there is an enclosing `try {...}`:
+ /// break 'catch_target Try::from_residual(residual),
+ /// // Otherwise:
+ /// return Try::from_residual(residual),
+ /// }
+ /// ```
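+ /// For example, `f()?` lowers to roughly (an illustrative sketch,
+ /// modulo spans and attributes):
+ /// ```ignore (pseudo-rust)
+ /// match Try::branch(f()) {
+ /// ControlFlow::Continue(val) => val,
+ /// ControlFlow::Break(residual) => return Try::from_residual(residual),
+ /// }
+ /// ```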
+ fn lower_expr_try(&mut self, span: Span, sub_expr: &Expr) -> hir::ExprKind<'hir> {
+ let unstable_span = self.mark_span_with_reason(
+ DesugaringKind::QuestionMark,
+ span,
+ self.allow_try_trait.clone(),
+ );
+ let try_span = self.tcx.sess.source_map().end_point(span);
+ let try_span = self.mark_span_with_reason(
+ DesugaringKind::QuestionMark,
+ try_span,
+ self.allow_try_trait.clone(),
+ );
+
+ // `Try::branch(<expr>)`
+ let scrutinee = {
+ // expand <expr>
+ let sub_expr = self.lower_expr_mut(sub_expr);
+
+ self.expr_call_lang_item_fn(
+ unstable_span,
+ hir::LangItem::TryTraitBranch,
+ arena_vec![self; sub_expr],
+ None,
+ )
+ };
+
+ // `#[allow(unreachable_code)]`
+ let attr = {
+ // `allow(unreachable_code)`
+ let allow = {
+ let allow_ident = Ident::new(sym::allow, self.lower_span(span));
+ let uc_ident = Ident::new(sym::unreachable_code, self.lower_span(span));
+ let uc_nested = attr::mk_nested_word_item(uc_ident);
+ attr::mk_list_item(allow_ident, vec![uc_nested])
+ };
+ attr::mk_attr_outer(allow)
+ };
+ let attrs = vec![attr];
+
+ // `ControlFlow::Continue(val) => #[allow(unreachable_code)] val,`
+ let continue_arm = {
+ let val_ident = Ident::with_dummy_span(sym::val);
+ let (val_pat, val_pat_nid) = self.pat_ident(span, val_ident);
+ let val_expr = self.arena.alloc(self.expr_ident_with_attrs(
+ span,
+ val_ident,
+ val_pat_nid,
+ ThinVec::from(attrs.clone()),
+ ));
+ let continue_pat = self.pat_cf_continue(unstable_span, val_pat);
+ self.arm(continue_pat, val_expr)
+ };
+
+ // `ControlFlow::Break(residual) =>
+ // #[allow(unreachable_code)]
+ // return Try::from_residual(residual),`
+ let break_arm = {
+ let residual_ident = Ident::with_dummy_span(sym::residual);
+ let (residual_local, residual_local_nid) = self.pat_ident(try_span, residual_ident);
+ let residual_expr = self.expr_ident_mut(try_span, residual_ident, residual_local_nid);
+ let from_residual_expr = self.wrap_in_try_constructor(
+ hir::LangItem::TryTraitFromResidual,
+ try_span,
+ self.arena.alloc(residual_expr),
+ unstable_span,
+ );
+ let thin_attrs = ThinVec::from(attrs);
+ let ret_expr = if let Some(catch_node) = self.catch_scope {
+ let target_id = Ok(self.lower_node_id(catch_node));
+ self.arena.alloc(self.expr(
+ try_span,
+ hir::ExprKind::Break(
+ hir::Destination { label: None, target_id },
+ Some(from_residual_expr),
+ ),
+ thin_attrs,
+ ))
+ } else {
+ self.arena.alloc(self.expr(
+ try_span,
+ hir::ExprKind::Ret(Some(from_residual_expr)),
+ thin_attrs,
+ ))
+ };
+
+ let break_pat = self.pat_cf_break(try_span, residual_local);
+ self.arm(break_pat, ret_expr)
+ };
+
+ hir::ExprKind::Match(
+ scrutinee,
+ arena_vec![self; break_arm, continue_arm],
+ hir::MatchSource::TryDesugar,
+ )
+ }
+
+ /// Desugar `ExprKind::Yeet` from: `do yeet <expr>` into:
+ /// ```ignore (pseudo-rust)
+ /// // If there is an enclosing `try {...}`:
+ /// break 'catch_target FromResidual::from_residual(Yeet(residual)),
+ /// // Otherwise:
+ /// return FromResidual::from_residual(Yeet(residual)),
+ /// ```
+ /// But to simplify this, there's a `from_yeet` lang item function which
+ /// handles the combined `FromResidual::from_residual(Yeet(residual))`.
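+ ///
+ /// For example, `do yeet 42` outside any `try` block lowers to roughly
+ /// (an illustrative sketch) `return from_yeet(42)`, with `from_yeet`
+ /// being the `TryTraitFromYeet` lang item used below.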
+ fn lower_expr_yeet(&mut self, span: Span, sub_expr: Option<&Expr>) -> hir::ExprKind<'hir> {
+ // The expression (if present) or `()` otherwise.
+ let (yeeted_span, yeeted_expr) = if let Some(sub_expr) = sub_expr {
+ (sub_expr.span, self.lower_expr(sub_expr))
+ } else {
+ (self.mark_span_with_reason(DesugaringKind::YeetExpr, span, None), self.expr_unit(span))
+ };
+
+ let unstable_span = self.mark_span_with_reason(
+ DesugaringKind::YeetExpr,
+ span,
+ self.allow_try_trait.clone(),
+ );
+
+ let from_yeet_expr = self.wrap_in_try_constructor(
+ hir::LangItem::TryTraitFromYeet,
+ unstable_span,
+ yeeted_expr,
+ yeeted_span,
+ );
+
+ if let Some(catch_node) = self.catch_scope {
+ let target_id = Ok(self.lower_node_id(catch_node));
+ hir::ExprKind::Break(hir::Destination { label: None, target_id }, Some(from_yeet_expr))
+ } else {
+ hir::ExprKind::Ret(Some(from_yeet_expr))
+ }
+ }
+
+ // =========================================================================
+ // Helper methods for building HIR.
+ // =========================================================================
+
+ /// Wrap the given `expr` in a terminating scope using `hir::ExprKind::DropTemps`.
+ ///
+ /// In terms of drop order, it has the same effect as wrapping `expr` in
+ /// `{ let _t = $expr; _t }` but should provide better compile-time performance.
+ ///
+ /// The drop order can be important in e.g. `if expr { .. }`.
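+ ///
+ /// For illustration: in `if f(&make_guard()) { .. }` (hypothetical
+ /// names), the temporary returned by `make_guard()` is dropped once the
+ /// wrapped condition has been evaluated, rather than at the end of the
+ /// enclosing block.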
+ pub(super) fn expr_drop_temps(
+ &mut self,
+ span: Span,
+ expr: &'hir hir::Expr<'hir>,
+ attrs: AttrVec,
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_drop_temps_mut(span, expr, attrs))
+ }
+
+ pub(super) fn expr_drop_temps_mut(
+ &mut self,
+ span: Span,
+ expr: &'hir hir::Expr<'hir>,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::DropTemps(expr), attrs)
+ }
+
+ fn expr_match(
+ &mut self,
+ span: Span,
+ arg: &'hir hir::Expr<'hir>,
+ arms: &'hir [hir::Arm<'hir>],
+ source: hir::MatchSource,
+ ) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::Match(arg, arms, source), ThinVec::new())
+ }
+
+ fn expr_break(&mut self, span: Span, attrs: AttrVec) -> hir::Expr<'hir> {
+ let expr_break = hir::ExprKind::Break(self.lower_loop_destination(None), None);
+ self.expr(span, expr_break, attrs)
+ }
+
+ fn expr_break_alloc(&mut self, span: Span, attrs: AttrVec) -> &'hir hir::Expr<'hir> {
+ let expr_break = self.expr_break(span, attrs);
+ self.arena.alloc(expr_break)
+ }
+
+ fn expr_mut_addr_of(&mut self, span: Span, e: &'hir hir::Expr<'hir>) -> hir::Expr<'hir> {
+ self.expr(
+ span,
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e),
+ ThinVec::new(),
+ )
+ }
+
+ fn expr_unit(&mut self, sp: Span) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr(sp, hir::ExprKind::Tup(&[]), ThinVec::new()))
+ }
+
+ fn expr_call_mut(
+ &mut self,
+ span: Span,
+ e: &'hir hir::Expr<'hir>,
+ args: &'hir [hir::Expr<'hir>],
+ ) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::Call(e, args), ThinVec::new())
+ }
+
+ fn expr_call(
+ &mut self,
+ span: Span,
+ e: &'hir hir::Expr<'hir>,
+ args: &'hir [hir::Expr<'hir>],
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_call_mut(span, e, args))
+ }
+
+ fn expr_call_lang_item_fn_mut(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ args: &'hir [hir::Expr<'hir>],
+ hir_id: Option<hir::HirId>,
+ ) -> hir::Expr<'hir> {
+ let path =
+ self.arena.alloc(self.expr_lang_item_path(span, lang_item, ThinVec::new(), hir_id));
+ self.expr_call_mut(span, path, args)
+ }
+
+ fn expr_call_lang_item_fn(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ args: &'hir [hir::Expr<'hir>],
+ hir_id: Option<hir::HirId>,
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_call_lang_item_fn_mut(span, lang_item, args, hir_id))
+ }
+
+ fn expr_lang_item_path(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ attrs: AttrVec,
+ hir_id: Option<hir::HirId>,
+ ) -> hir::Expr<'hir> {
+ self.expr(
+ span,
+ hir::ExprKind::Path(hir::QPath::LangItem(lang_item, self.lower_span(span), hir_id)),
+ attrs,
+ )
+ }
+
+ pub(super) fn expr_ident(
+ &mut self,
+ sp: Span,
+ ident: Ident,
+ binding: hir::HirId,
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_ident_mut(sp, ident, binding))
+ }
+
+ pub(super) fn expr_ident_mut(
+ &mut self,
+ sp: Span,
+ ident: Ident,
+ binding: hir::HirId,
+ ) -> hir::Expr<'hir> {
+ self.expr_ident_with_attrs(sp, ident, binding, ThinVec::new())
+ }
+
+ fn expr_ident_with_attrs(
+ &mut self,
+ span: Span,
+ ident: Ident,
+ binding: hir::HirId,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ let expr_path = hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(span),
+ res: Res::Local(binding),
+ segments: arena_vec![self; hir::PathSegment::from_ident(ident)],
+ }),
+ ));
+
+ self.expr(span, expr_path, attrs)
+ }
+
+ fn expr_unsafe(&mut self, expr: &'hir hir::Expr<'hir>) -> hir::Expr<'hir> {
+ let hir_id = self.next_id();
+ let span = expr.span;
+ self.expr(
+ span,
+ hir::ExprKind::Block(
+ self.arena.alloc(hir::Block {
+ stmts: &[],
+ expr: Some(expr),
+ hir_id,
+ rules: hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::CompilerGenerated),
+ span: self.lower_span(span),
+ targeted_by_break: false,
+ }),
+ None,
+ ),
+ ThinVec::new(),
+ )
+ }
+
+ fn expr_block_empty(&mut self, span: Span) -> &'hir hir::Expr<'hir> {
+ let blk = self.block_all(span, &[], None);
+ let expr = self.expr_block(blk, ThinVec::new());
+ self.arena.alloc(expr)
+ }
+
+ pub(super) fn expr_block(
+ &mut self,
+ b: &'hir hir::Block<'hir>,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ self.expr(b.span, hir::ExprKind::Block(b, None), attrs)
+ }
+
+ pub(super) fn expr(
+ &mut self,
+ span: Span,
+ kind: hir::ExprKind<'hir>,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ let hir_id = self.next_id();
+ self.lower_attrs(hir_id, &attrs);
+ hir::Expr { hir_id, kind, span: self.lower_span(span) }
+ }
+
+ fn expr_field(
+ &mut self,
+ ident: Ident,
+ expr: &'hir hir::Expr<'hir>,
+ span: Span,
+ ) -> hir::ExprField<'hir> {
+ hir::ExprField {
+ hir_id: self.next_id(),
+ ident,
+ span: self.lower_span(span),
+ expr,
+ is_shorthand: false,
+ }
+ }
+
+ fn arm(&mut self, pat: &'hir hir::Pat<'hir>, expr: &'hir hir::Expr<'hir>) -> hir::Arm<'hir> {
+ hir::Arm {
+ hir_id: self.next_id(),
+ pat,
+ guard: None,
+ span: self.lower_span(expr.span),
+ body: expr,
+ }
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs
new file mode 100644
index 000000000..d5af74d47
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/index.rs
@@ -0,0 +1,346 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::definitions;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::*;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::span_bug;
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::{Span, DUMMY_SP};
+
+use tracing::debug;
+
+/// A visitor that walks over the HIR and collects `Node`s into a HIR map.
+pub(super) struct NodeCollector<'a, 'hir> {
+ /// Source map
+ source_map: &'a SourceMap,
+ bodies: &'a SortedMap<ItemLocalId, &'hir Body<'hir>>,
+
+ /// Outputs
+ nodes: IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>,
+ parenting: FxHashMap<LocalDefId, ItemLocalId>,
+
+ /// The parent of this node
+ parent_node: hir::ItemLocalId,
+
+ owner: LocalDefId,
+
+ definitions: &'a definitions::Definitions,
+}
+
+#[tracing::instrument(level = "debug", skip(sess, definitions, bodies))]
+pub(super) fn index_hir<'hir>(
+ sess: &Session,
+ definitions: &definitions::Definitions,
+ item: hir::OwnerNode<'hir>,
+ bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
+) -> (IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, FxHashMap<LocalDefId, ItemLocalId>) {
+ let mut nodes = IndexVec::new();
+ // This node's parent should never be accessed: the owner's parent is computed by the
+ // hir_owner_parent query. Make it invalid (= ItemLocalId::MAX) to force an ICE whenever it is
+ // used.
+ nodes.push(Some(ParentedNode { parent: ItemLocalId::INVALID, node: item.into() }));
+ let mut collector = NodeCollector {
+ source_map: sess.source_map(),
+ definitions,
+ owner: item.def_id(),
+ parent_node: ItemLocalId::new(0),
+ nodes,
+ bodies,
+ parenting: FxHashMap::default(),
+ };
+
+ match item {
+ OwnerNode::Crate(citem) => {
+ collector.visit_mod(&citem, citem.spans.inner_span, hir::CRATE_HIR_ID)
+ }
+ OwnerNode::Item(item) => collector.visit_item(item),
+ OwnerNode::TraitItem(item) => collector.visit_trait_item(item),
+ OwnerNode::ImplItem(item) => collector.visit_impl_item(item),
+ OwnerNode::ForeignItem(item) => collector.visit_foreign_item(item),
+ };
+
+ (collector.nodes, collector.parenting)
+}
+
+impl<'a, 'hir> NodeCollector<'a, 'hir> {
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn insert(&mut self, span: Span, hir_id: HirId, node: Node<'hir>) {
+ debug_assert_eq!(self.owner, hir_id.owner);
+ debug_assert_ne!(hir_id.local_id.as_u32(), 0);
+
+ // Make sure that the DepNode of some node coincides with the HirId
+ // owner of that node.
+ if cfg!(debug_assertions) {
+ if hir_id.owner != self.owner {
+ span_bug!(
+ span,
+ "inconsistent DepNode at `{:?}` for `{:?}`: \
+ current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})",
+ self.source_map.span_to_diagnostic_string(span),
+ node,
+ self.definitions.def_path(self.owner).to_string_no_crate_verbose(),
+ self.owner,
+ self.definitions.def_path(hir_id.owner).to_string_no_crate_verbose(),
+ hir_id.owner,
+ )
+ }
+ }
+
+ self.nodes.insert(hir_id.local_id, ParentedNode { parent: self.parent_node, node });
+ }
+
+ fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_node_id: HirId, f: F) {
+ debug_assert_eq!(parent_node_id.owner, self.owner);
+ let parent_node = self.parent_node;
+ self.parent_node = parent_node_id.local_id;
+ f(self);
+ self.parent_node = parent_node;
+ }
+
+ fn insert_nested(&mut self, item: LocalDefId) {
+ self.parenting.insert(item, self.parent_node);
+ }
+}
+
+impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
+ /// Because we want to track parent items and so forth, enable
+ /// deep walking so that we walk nested items in the context of
+ /// their outer items.
+
+ fn visit_nested_item(&mut self, item: ItemId) {
+ debug!("visit_nested_item: {:?}", item);
+ self.insert_nested(item.def_id);
+ }
+
+ fn visit_nested_trait_item(&mut self, item_id: TraitItemId) {
+ self.insert_nested(item_id.def_id);
+ }
+
+ fn visit_nested_impl_item(&mut self, item_id: ImplItemId) {
+ self.insert_nested(item_id.def_id);
+ }
+
+ fn visit_nested_foreign_item(&mut self, foreign_id: ForeignItemId) {
+ self.insert_nested(foreign_id.def_id);
+ }
+
+ fn visit_nested_body(&mut self, id: BodyId) {
+ debug_assert_eq!(id.hir_id.owner, self.owner);
+ let body = self.bodies[&id.hir_id.local_id];
+ self.visit_body(body);
+ }
+
+ fn visit_param(&mut self, param: &'hir Param<'hir>) {
+ let node = Node::Param(param);
+ self.insert(param.pat.span, param.hir_id, node);
+ self.with_parent(param.hir_id, |this| {
+ intravisit::walk_param(this, param);
+ });
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_item(&mut self, i: &'hir Item<'hir>) {
+ debug_assert_eq!(i.def_id, self.owner);
+ self.with_parent(i.hir_id(), |this| {
+ if let ItemKind::Struct(ref struct_def, _) = i.kind {
+ // If this is a tuple or unit-like struct, register the constructor.
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+ this.insert(i.span, ctor_hir_id, Node::Ctor(struct_def));
+ }
+ }
+ intravisit::walk_item(this, i);
+ });
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_foreign_item(&mut self, fi: &'hir ForeignItem<'hir>) {
+ debug_assert_eq!(fi.def_id, self.owner);
+ self.with_parent(fi.hir_id(), |this| {
+ intravisit::walk_foreign_item(this, fi);
+ });
+ }
+
+ fn visit_generic_param(&mut self, param: &'hir GenericParam<'hir>) {
+ self.insert(param.span, param.hir_id, Node::GenericParam(param));
+ intravisit::walk_generic_param(self, param);
+ }
+
+ fn visit_const_param_default(&mut self, param: HirId, ct: &'hir AnonConst) {
+ self.with_parent(param, |this| {
+ intravisit::walk_const_param_default(this, ct);
+ })
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) {
+ debug_assert_eq!(ti.def_id, self.owner);
+ self.with_parent(ti.hir_id(), |this| {
+ intravisit::walk_trait_item(this, ti);
+ });
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_impl_item(&mut self, ii: &'hir ImplItem<'hir>) {
+ debug_assert_eq!(ii.def_id, self.owner);
+ self.with_parent(ii.hir_id(), |this| {
+ intravisit::walk_impl_item(this, ii);
+ });
+ }
+
+ fn visit_pat(&mut self, pat: &'hir Pat<'hir>) {
+ self.insert(pat.span, pat.hir_id, Node::Pat(pat));
+
+ self.with_parent(pat.hir_id, |this| {
+ intravisit::walk_pat(this, pat);
+ });
+ }
+
+ fn visit_arm(&mut self, arm: &'hir Arm<'hir>) {
+ let node = Node::Arm(arm);
+
+ self.insert(arm.span, arm.hir_id, node);
+
+ self.with_parent(arm.hir_id, |this| {
+ intravisit::walk_arm(this, arm);
+ });
+ }
+
+ fn visit_anon_const(&mut self, constant: &'hir AnonConst) {
+ self.insert(DUMMY_SP, constant.hir_id, Node::AnonConst(constant));
+
+ self.with_parent(constant.hir_id, |this| {
+ intravisit::walk_anon_const(this, constant);
+ });
+ }
+
+ fn visit_expr(&mut self, expr: &'hir Expr<'hir>) {
+ self.insert(expr.span, expr.hir_id, Node::Expr(expr));
+
+ self.with_parent(expr.hir_id, |this| {
+ intravisit::walk_expr(this, expr);
+ });
+ }
+
+ fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) {
+ self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt));
+
+ self.with_parent(stmt.hir_id, |this| {
+ intravisit::walk_stmt(this, stmt);
+ });
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'hir PathSegment<'hir>) {
+ if let Some(hir_id) = path_segment.hir_id {
+ self.insert(path_span, hir_id, Node::PathSegment(path_segment));
+ }
+ intravisit::walk_path_segment(self, path_span, path_segment);
+ }
+
+ fn visit_ty(&mut self, ty: &'hir Ty<'hir>) {
+ self.insert(ty.span, ty.hir_id, Node::Ty(ty));
+
+ self.with_parent(ty.hir_id, |this| {
+ intravisit::walk_ty(this, ty);
+ });
+ }
+
+ fn visit_infer(&mut self, inf: &'hir InferArg) {
+ self.insert(inf.span, inf.hir_id, Node::Infer(inf));
+
+ self.with_parent(inf.hir_id, |this| {
+ intravisit::walk_inf(this, inf);
+ });
+ }
+
+ fn visit_trait_ref(&mut self, tr: &'hir TraitRef<'hir>) {
+ self.insert(tr.path.span, tr.hir_ref_id, Node::TraitRef(tr));
+
+ self.with_parent(tr.hir_ref_id, |this| {
+ intravisit::walk_trait_ref(this, tr);
+ });
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: intravisit::FnKind<'hir>,
+ fd: &'hir FnDecl<'hir>,
+ b: BodyId,
+ s: Span,
+ id: HirId,
+ ) {
+ assert_eq!(self.owner, id.owner);
+ assert_eq!(self.parent_node, id.local_id);
+ intravisit::walk_fn(self, fk, fd, b, s, id);
+ }
+
+ fn visit_block(&mut self, block: &'hir Block<'hir>) {
+ self.insert(block.span, block.hir_id, Node::Block(block));
+ self.with_parent(block.hir_id, |this| {
+ intravisit::walk_block(this, block);
+ });
+ }
+
+ fn visit_local(&mut self, l: &'hir Local<'hir>) {
+ self.insert(l.span, l.hir_id, Node::Local(l));
+ self.with_parent(l.hir_id, |this| {
+ intravisit::walk_local(this, l);
+ })
+ }
+
+ fn visit_lifetime(&mut self, lifetime: &'hir Lifetime) {
+ self.insert(lifetime.span, lifetime.hir_id, Node::Lifetime(lifetime));
+ }
+
+ fn visit_variant(&mut self, v: &'hir Variant<'hir>, g: &'hir Generics<'hir>, item_id: HirId) {
+ self.insert(v.span, v.id, Node::Variant(v));
+ self.with_parent(v.id, |this| {
+ // Register the constructor of this variant.
+ if let Some(ctor_hir_id) = v.data.ctor_hir_id() {
+ this.insert(v.span, ctor_hir_id, Node::Ctor(&v.data));
+ }
+ intravisit::walk_variant(this, v, g, item_id);
+ });
+ }
+
+ fn visit_field_def(&mut self, field: &'hir FieldDef<'hir>) {
+ self.insert(field.span, field.hir_id, Node::Field(field));
+ self.with_parent(field.hir_id, |this| {
+ intravisit::walk_field_def(this, field);
+ });
+ }
+
+ fn visit_assoc_type_binding(&mut self, type_binding: &'hir TypeBinding<'hir>) {
+ self.insert(type_binding.span, type_binding.hir_id, Node::TypeBinding(type_binding));
+ self.with_parent(type_binding.hir_id, |this| {
+ intravisit::walk_assoc_type_binding(this, type_binding)
+ })
+ }
+
+ fn visit_trait_item_ref(&mut self, ii: &'hir TraitItemRef) {
+ // Do not visit the duplicate information in TraitItemRef. We want to
+ // map the actual nodes, not the duplicate ones in the *Ref.
+ let TraitItemRef { id, ident: _, kind: _, span: _ } = *ii;
+
+ self.visit_nested_trait_item(id);
+ }
+
+ fn visit_impl_item_ref(&mut self, ii: &'hir ImplItemRef) {
+ // Do not visit the duplicate information in ImplItemRef. We want to
+ // map the actual nodes, not the duplicate ones in the *Ref.
+ let ImplItemRef { id, ident: _, kind: _, span: _, trait_item_def_id: _ } = *ii;
+
+ self.visit_nested_impl_item(id);
+ }
+
+ fn visit_foreign_item_ref(&mut self, fi: &'hir ForeignItemRef) {
+ // Do not visit the duplicate information in ForeignItemRef. We want to
+ // map the actual nodes, not the duplicate ones in the *Ref.
+ let ForeignItemRef { id, ident: _, span: _ } = *fi;
+
+ self.visit_nested_foreign_item(id);
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
new file mode 100644
index 000000000..ee4c0036f
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -0,0 +1,1513 @@
+use super::ResolverAstLoweringExt;
+use super::{AstOwner, ImplTraitContext, ImplTraitPosition};
+use super::{FnDeclKind, LoweringContext, ParamMode};
+
+use rustc_ast::ptr::P;
+use rustc_ast::visit::AssocCtxt;
+use rustc_ast::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::PredicateOrigin;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::ty::{DefIdTree, ResolverAstLowering, TyCtxt};
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Span;
+use rustc_target::spec::abi;
+use smallvec::{smallvec, SmallVec};
+
+use std::iter;
+
+pub(super) struct ItemLowerer<'a, 'hir> {
+ pub(super) tcx: TyCtxt<'hir>,
+ pub(super) resolver: &'a mut ResolverAstLowering,
+ pub(super) ast_index: &'a IndexVec<LocalDefId, AstOwner<'a>>,
+ pub(super) owners: &'a mut IndexVec<LocalDefId, hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>>>,
+}
+
+/// A ty alias *may* have two where clauses. To give the best diagnostics, we set the span
+/// to the preferred where clause if it exists; otherwise, we fall back to the other where
+/// clause if that one exists.
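+///
+/// For illustration (hypothetical input): `type Foo<T> where T: Clone = T
+/// where T: Copy;` carries both a pre-`=` and a post-`=` where clause;
+/// `prefer_first` selects which of the two spans the lowered generics keep.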
+fn add_ty_alias_where_clause(
+ generics: &mut ast::Generics,
+ mut where_clauses: (TyAliasWhereClause, TyAliasWhereClause),
+ prefer_first: bool,
+) {
+ if !prefer_first {
+ where_clauses = (where_clauses.1, where_clauses.0);
+ }
+ if where_clauses.0.0 || !where_clauses.1.0 {
+ generics.where_clause.has_where_token = where_clauses.0.0;
+ generics.where_clause.span = where_clauses.0.1;
+ } else {
+ generics.where_clause.has_where_token = where_clauses.1.0;
+ generics.where_clause.span = where_clauses.1.1;
+ }
+}
+
+impl<'a, 'hir> ItemLowerer<'a, 'hir> {
+ fn with_lctx(
+ &mut self,
+ owner: NodeId,
+ f: impl FnOnce(&mut LoweringContext<'_, 'hir>) -> hir::OwnerNode<'hir>,
+ ) {
+ let mut lctx = LoweringContext {
+ // Pseudo-globals.
+ tcx: self.tcx,
+ resolver: self.resolver,
+ arena: self.tcx.hir_arena,
+
+ // HirId handling.
+ bodies: Vec::new(),
+ attrs: SortedMap::default(),
+ children: FxHashMap::default(),
+ current_hir_id_owner: CRATE_DEF_ID,
+ item_local_id_counter: hir::ItemLocalId::new(0),
+ node_id_to_local_id: Default::default(),
+ local_id_to_def_id: SortedMap::new(),
+ trait_map: Default::default(),
+
+ // Lowering state.
+ catch_scope: None,
+ loop_scope: None,
+ is_in_loop_condition: false,
+ is_in_trait_impl: false,
+ is_in_dyn_type: false,
+ generator_kind: None,
+ task_context: None,
+ current_item: None,
+ impl_trait_defs: Vec::new(),
+ impl_trait_bounds: Vec::new(),
+ allow_try_trait: Some([sym::try_trait_v2, sym::yeet_desugar_details][..].into()),
+ allow_gen_future: Some([sym::gen_future][..].into()),
+ allow_into_future: Some([sym::into_future][..].into()),
+ };
+ lctx.with_hir_id_owner(owner, |lctx| f(lctx));
+
+ for (def_id, info) in lctx.children {
+ self.owners.ensure_contains_elem(def_id, || hir::MaybeOwner::Phantom);
+ debug_assert!(matches!(self.owners[def_id], hir::MaybeOwner::Phantom));
+ self.owners[def_id] = info;
+ }
+ }
+
+ pub(super) fn lower_node(
+ &mut self,
+ def_id: LocalDefId,
+ ) -> hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>> {
+ self.owners.ensure_contains_elem(def_id, || hir::MaybeOwner::Phantom);
+ if let hir::MaybeOwner::Phantom = self.owners[def_id] {
+ let node = self.ast_index[def_id];
+ match node {
+ AstOwner::NonOwner => {}
+ AstOwner::Crate(c) => self.lower_crate(c),
+ AstOwner::Item(item) => self.lower_item(item),
+ AstOwner::AssocItem(item, ctxt) => self.lower_assoc_item(item, ctxt),
+ AstOwner::ForeignItem(item) => self.lower_foreign_item(item),
+ }
+ }
+
+ self.owners[def_id]
+ }
+
+ #[instrument(level = "debug", skip(self, c))]
+ fn lower_crate(&mut self, c: &Crate) {
+ debug_assert_eq!(self.resolver.node_id_to_def_id[&CRATE_NODE_ID], CRATE_DEF_ID);
+ self.with_lctx(CRATE_NODE_ID, |lctx| {
+ let module = lctx.lower_mod(&c.items, &c.spans);
+ lctx.lower_attrs(hir::CRATE_HIR_ID, &c.attrs);
+ hir::OwnerNode::Crate(lctx.arena.alloc(module))
+ })
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_item(&mut self, item: &Item) {
+ self.with_lctx(item.id, |lctx| hir::OwnerNode::Item(lctx.lower_item(item)))
+ }
+
+ fn lower_assoc_item(&mut self, item: &AssocItem, ctxt: AssocCtxt) {
+ let def_id = self.resolver.node_id_to_def_id[&item.id];
+
+ let parent_id = self.tcx.local_parent(def_id);
+ let parent_hir = self.lower_node(parent_id).unwrap();
+ self.with_lctx(item.id, |lctx| {
+ // Evaluate with the lifetimes in `params` in-scope.
+ // This is used to track which lifetimes have already been defined,
+ // and which need to be replicated when lowering an async fn.
+ match parent_hir.node().expect_item().kind {
+ hir::ItemKind::Impl(hir::Impl { ref of_trait, .. }) => {
+ lctx.is_in_trait_impl = of_trait.is_some();
+ }
+ _ => {}
+ };
+
+ match ctxt {
+ AssocCtxt::Trait => hir::OwnerNode::TraitItem(lctx.lower_trait_item(item)),
+ AssocCtxt::Impl => hir::OwnerNode::ImplItem(lctx.lower_impl_item(item)),
+ }
+ })
+ }
+
+ fn lower_foreign_item(&mut self, item: &ForeignItem) {
+ self.with_lctx(item.id, |lctx| hir::OwnerNode::ForeignItem(lctx.lower_foreign_item(item)))
+ }
+}
+
+impl<'hir> LoweringContext<'_, 'hir> {
+ pub(super) fn lower_mod(&mut self, items: &[P<Item>], spans: &ModSpans) -> hir::Mod<'hir> {
+ hir::Mod {
+ spans: hir::ModSpans {
+ inner_span: self.lower_span(spans.inner_span),
+ inject_use_span: self.lower_span(spans.inject_use_span),
+ },
+ item_ids: self.arena.alloc_from_iter(items.iter().flat_map(|x| self.lower_item_ref(x))),
+ }
+ }
+
+ pub(super) fn lower_item_ref(&mut self, i: &Item) -> SmallVec<[hir::ItemId; 1]> {
+ let mut node_ids = smallvec![hir::ItemId { def_id: self.local_def_id(i.id) }];
+ if let ItemKind::Use(ref use_tree) = &i.kind {
+ self.lower_item_id_use_tree(use_tree, i.id, &mut node_ids);
+ }
+ node_ids
+ }
+
+ fn lower_item_id_use_tree(
+ &mut self,
+ tree: &UseTree,
+ base_id: NodeId,
+ vec: &mut SmallVec<[hir::ItemId; 1]>,
+ ) {
+ match tree.kind {
+ UseTreeKind::Nested(ref nested_vec) => {
+ for &(ref nested, id) in nested_vec {
+ vec.push(hir::ItemId { def_id: self.local_def_id(id) });
+ self.lower_item_id_use_tree(nested, id, vec);
+ }
+ }
+ UseTreeKind::Glob => {}
+ UseTreeKind::Simple(_, id1, id2) => {
+ for (_, &id) in
+ iter::zip(self.expect_full_res_from_use(base_id).skip(1), &[id1, id2])
+ {
+ vec.push(hir::ItemId { def_id: self.local_def_id(id) });
+ }
+ }
+ }
+ }
+
+ fn lower_item(&mut self, i: &Item) -> &'hir hir::Item<'hir> {
+ let mut ident = i.ident;
+ let vis_span = self.lower_span(i.vis.span);
+ let hir_id = self.lower_node_id(i.id);
+ let attrs = self.lower_attrs(hir_id, &i.attrs);
+ let kind = self.lower_item_kind(i.span, i.id, hir_id, &mut ident, attrs, vis_span, &i.kind);
+ let item = hir::Item {
+ def_id: hir_id.expect_owner(),
+ ident: self.lower_ident(ident),
+ kind,
+ vis_span,
+ span: self.lower_span(i.span),
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_item_kind(
+ &mut self,
+ span: Span,
+ id: NodeId,
+ hir_id: hir::HirId,
+ ident: &mut Ident,
+ attrs: Option<&'hir [Attribute]>,
+ vis_span: Span,
+ i: &ItemKind,
+ ) -> hir::ItemKind<'hir> {
+ match *i {
+ ItemKind::ExternCrate(orig_name) => hir::ItemKind::ExternCrate(orig_name),
+ ItemKind::Use(ref use_tree) => {
+ // Start with an empty prefix.
+ let prefix = Path { segments: vec![], span: use_tree.span, tokens: None };
+
+ self.lower_use_tree(use_tree, &prefix, id, vis_span, ident, attrs)
+ }
+ ItemKind::Static(ref t, m, ref e) => {
+ let (ty, body_id) = self.lower_const_item(t, span, e.as_deref());
+ hir::ItemKind::Static(ty, m, body_id)
+ }
+ ItemKind::Const(_, ref t, ref e) => {
+ let (ty, body_id) = self.lower_const_item(t, span, e.as_deref());
+ hir::ItemKind::Const(ty, body_id)
+ }
+ ItemKind::Fn(box Fn {
+ sig: FnSig { ref decl, header, span: fn_sig_span },
+ ref generics,
+ ref body,
+ ..
+ }) => {
+ self.with_new_scopes(|this| {
+ this.current_item = Some(ident.span);
+
+ // Note: we don't need to change the return type from `T` to
+ // `impl Future<Output = T>` here because lower_body
+ // only cares about the input argument patterns in the function
+ // declaration (decl), not the return types.
+ let asyncness = header.asyncness;
+ let body_id =
+ this.lower_maybe_async_body(span, &decl, asyncness, body.as_deref());
+
+ let itctx = ImplTraitContext::Universal;
+ let (generics, decl) = this.lower_generics(generics, id, itctx, |this| {
+ let ret_id = asyncness.opt_return_id();
+ this.lower_fn_decl(&decl, Some(id), FnDeclKind::Fn, ret_id)
+ });
+ let sig = hir::FnSig {
+ decl,
+ header: this.lower_fn_header(header),
+ span: this.lower_span(fn_sig_span),
+ };
+ hir::ItemKind::Fn(sig, generics, body_id)
+ })
+ }
+ ItemKind::Mod(_, ref mod_kind) => match mod_kind {
+ ModKind::Loaded(items, _, spans) => {
+ hir::ItemKind::Mod(self.lower_mod(items, spans))
+ }
+ ModKind::Unloaded => panic!("`mod` items should have been loaded by now"),
+ },
+ ItemKind::ForeignMod(ref fm) => hir::ItemKind::ForeignMod {
+ abi: fm.abi.map_or(abi::Abi::FALLBACK, |abi| self.lower_abi(abi)),
+ items: self
+ .arena
+ .alloc_from_iter(fm.items.iter().map(|x| self.lower_foreign_item_ref(x))),
+ },
+ ItemKind::GlobalAsm(ref asm) => {
+ hir::ItemKind::GlobalAsm(self.lower_inline_asm(span, asm))
+ }
+ ItemKind::TyAlias(box TyAlias {
+ ref generics,
+ where_clauses,
+ ty: Some(ref ty),
+ ..
+ }) => {
+ // We lower
+ //
+ // type Foo = impl Trait
+ //
+ // to
+ //
+ // type Foo = Foo1
+ // opaque type Foo1: Trait
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, where_clauses, true);
+ let (generics, ty) = self.lower_generics(
+ &generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_ty(ty, ImplTraitContext::TypeAliasesOpaqueTy),
+ );
+ hir::ItemKind::TyAlias(ty, generics)
+ }
+ ItemKind::TyAlias(box TyAlias {
+ ref generics, ref where_clauses, ty: None, ..
+ }) => {
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, *where_clauses, true);
+ let (generics, ty) = self.lower_generics(
+ &generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.arena.alloc(this.ty(span, hir::TyKind::Err)),
+ );
+ hir::ItemKind::TyAlias(ty, generics)
+ }
+ ItemKind::Enum(ref enum_definition, ref generics) => {
+ let (generics, variants) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ this.arena.alloc_from_iter(
+ enum_definition.variants.iter().map(|x| this.lower_variant(x)),
+ )
+ },
+ );
+ hir::ItemKind::Enum(hir::EnumDef { variants }, generics)
+ }
+ ItemKind::Struct(ref struct_def, ref generics) => {
+ let (generics, struct_def) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_variant_data(hir_id, struct_def),
+ );
+ hir::ItemKind::Struct(struct_def, generics)
+ }
+ ItemKind::Union(ref vdata, ref generics) => {
+ let (generics, vdata) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_variant_data(hir_id, vdata),
+ );
+ hir::ItemKind::Union(vdata, generics)
+ }
+ ItemKind::Impl(box Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ generics: ref ast_generics,
+ of_trait: ref trait_ref,
+ self_ty: ref ty,
+ items: ref impl_items,
+ }) => {
+ // Lower the "impl header" first. This ordering is important
+ // for in-band lifetimes! Consider `'a` here:
+ //
+ // impl Foo<'a> for u32 {
+ // fn method(&'a self) { .. }
+ // }
+ //
+ // Because we start by lowering the `Foo<'a> for u32`
+ // part, we will add `'a` to the list of generics on
+ // the impl. When we then encounter it later in the
+ // method, it will not be considered an in-band
+ // lifetime to be added, but rather a reference to a
+ // parent lifetime.
+ let itctx = ImplTraitContext::Universal;
+ let (generics, (trait_ref, lowered_ty)) =
+ self.lower_generics(ast_generics, id, itctx, |this| {
+ let trait_ref = trait_ref.as_ref().map(|trait_ref| {
+ this.lower_trait_ref(
+ trait_ref,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Trait),
+ )
+ });
+
+ let lowered_ty = this
+ .lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+
+ (trait_ref, lowered_ty)
+ });
+
+ let new_impl_items = self
+ .arena
+ .alloc_from_iter(impl_items.iter().map(|item| self.lower_impl_item_ref(item)));
+
+ // `defaultness.has_value()` is never called for an `impl`; pass `true`
+ // here so the assertion inside `lower_defaultness` cannot fail.
+ let has_val = true;
+ let (defaultness, defaultness_span) = self.lower_defaultness(defaultness, has_val);
+ let polarity = match polarity {
+ ImplPolarity::Positive => ImplPolarity::Positive,
+ ImplPolarity::Negative(s) => ImplPolarity::Negative(self.lower_span(s)),
+ };
+ hir::ItemKind::Impl(self.arena.alloc(hir::Impl {
+ unsafety: self.lower_unsafety(unsafety),
+ polarity,
+ defaultness,
+ defaultness_span,
+ constness: self.lower_constness(constness),
+ generics,
+ of_trait: trait_ref,
+ self_ty: lowered_ty,
+ items: new_impl_items,
+ }))
+ }
+ ItemKind::Trait(box Trait {
+ is_auto,
+ unsafety,
+ ref generics,
+ ref bounds,
+ ref items,
+ }) => {
+ let (generics, (unsafety, items, bounds)) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ let bounds = this.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ );
+ let items = this.arena.alloc_from_iter(
+ items.iter().map(|item| this.lower_trait_item_ref(item)),
+ );
+ let unsafety = this.lower_unsafety(unsafety);
+ (unsafety, items, bounds)
+ },
+ );
+ hir::ItemKind::Trait(is_auto, unsafety, generics, bounds, items)
+ }
+ ItemKind::TraitAlias(ref generics, ref bounds) => {
+ let (generics, bounds) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ this.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ )
+ },
+ );
+ hir::ItemKind::TraitAlias(generics, bounds)
+ }
+ ItemKind::MacroDef(MacroDef { ref body, macro_rules }) => {
+ let body = P(self.lower_mac_args(body));
+ let macro_kind = self.resolver.decl_macro_kind(self.local_def_id(id));
+ hir::ItemKind::Macro(ast::MacroDef { body, macro_rules }, macro_kind)
+ }
+ ItemKind::MacCall(..) => {
+ panic!("`TyMac` should have been expanded by now")
+ }
+ }
+ }
+
+ fn lower_const_item(
+ &mut self,
+ ty: &Ty,
+ span: Span,
+ body: Option<&Expr>,
+ ) -> (&'hir hir::Ty<'hir>, hir::BodyId) {
+ let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ (ty, self.lower_const_body(span, body))
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_use_tree(
+ &mut self,
+ tree: &UseTree,
+ prefix: &Path,
+ id: NodeId,
+ vis_span: Span,
+ ident: &mut Ident,
+ attrs: Option<&'hir [Attribute]>,
+ ) -> hir::ItemKind<'hir> {
+ let path = &tree.prefix;
+ let segments = prefix.segments.iter().chain(path.segments.iter()).cloned().collect();
+
+ match tree.kind {
+ UseTreeKind::Simple(rename, id1, id2) => {
+ *ident = tree.ident();
+
+ // First, apply the prefix to the path.
+ let mut path = Path { segments, span: path.span, tokens: None };
+
+ // Correctly resolve `self` imports.
+ if path.segments.len() > 1
+ && path.segments.last().unwrap().ident.name == kw::SelfLower
+ {
+ let _ = path.segments.pop();
+ if rename.is_none() {
+ *ident = path.segments.last().unwrap().ident;
+ }
+ }
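+ // For illustration (hypothetical tree): `use foo::bar::{self};`
+ // reaches here with the segments `foo::bar::self`; popping the `self`
+ // segment makes the import bind the name `bar`.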
+
+ let mut resolutions = self.expect_full_res_from_use(id).fuse();
+ // We want to return *something* from this function, so hold onto the first item
+ // for later.
+ let ret_res = self.lower_res(resolutions.next().unwrap_or(Res::Err));
+
+ // Here, we are looping over namespaces, if they exist for the definition
+ // being imported. We only handle type and value namespaces because we
+ // won't be dealing with macros in the rest of the compiler.
+ // Essentially, a single `use` that imports two names is desugared
+ // into two imports.
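+ //
+ // For illustration (hypothetical items): given `struct foo {}` and
+ // `fn foo() {}` in module `m`, the single `use m::foo;` resolves in
+ // both the type and the value namespace and is lowered as two imports.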
+ for new_node_id in [id1, id2] {
+ let new_id = self.local_def_id(new_node_id);
+ let Some(res) = resolutions.next() else {
+ // Associate an HirId to both ids even if there is no resolution.
+ let _old = self.children.insert(
+ new_id,
+ hir::MaybeOwner::NonOwner(hir::HirId::make_owner(new_id)),
+ );
+ debug_assert!(_old.is_none());
+ continue;
+ };
+ let ident = *ident;
+ let mut path = path.clone();
+ for seg in &mut path.segments {
+ seg.id = self.next_node_id();
+ }
+ let span = path.span;
+
+ self.with_hir_id_owner(new_node_id, |this| {
+ let res = this.lower_res(res);
+ let path = this.lower_path_extra(res, &path, ParamMode::Explicit);
+ let kind = hir::ItemKind::Use(path, hir::UseKind::Single);
+ if let Some(attrs) = attrs {
+ this.attrs.insert(hir::ItemLocalId::new(0), attrs);
+ }
+
+ let item = hir::Item {
+ def_id: new_id,
+ ident: this.lower_ident(ident),
+ kind,
+ vis_span,
+ span: this.lower_span(span),
+ };
+ hir::OwnerNode::Item(this.arena.alloc(item))
+ });
+ }
+
+ let path = self.lower_path_extra(ret_res, &path, ParamMode::Explicit);
+ hir::ItemKind::Use(path, hir::UseKind::Single)
+ }
+ UseTreeKind::Glob => {
+ let path = self.lower_path(
+ id,
+ &Path { segments, span: path.span, tokens: None },
+ ParamMode::Explicit,
+ );
+ hir::ItemKind::Use(path, hir::UseKind::Glob)
+ }
+ UseTreeKind::Nested(ref trees) => {
+ // Nested imports are desugared into simple imports.
+ // So, if we start with
+ //
+ // ```
+ // pub(x) use foo::{a, b};
+ // ```
+ //
+ // we will create three items:
+ //
+ // ```
+ // pub(x) use foo::a;
+ // pub(x) use foo::b;
+ // pub(x) use foo::{}; // <-- this is called the `ListStem`
+ // ```
+ //
+ // The first two are produced by recursively invoking
+ // `lower_use_tree` (and indeed there may be things
+ // like `use foo::{a::{b, c}}` and so forth). They
+ // wind up being directly added to
+ // `self.items`. However, the structure of this
+ // function also requires us to return one item, and
+ // for that we return the `{}` import (called the
+ // `ListStem`).
+
+ let prefix = Path { segments, span: prefix.span.to(path.span), tokens: None };
+
+ // Add all the nested `PathListItem`s to the HIR.
+ for &(ref use_tree, id) in trees {
+ let new_hir_id = self.local_def_id(id);
+
+ let mut prefix = prefix.clone();
+
+ // Give the segments new node-ids since they are being cloned.
+ for seg in &mut prefix.segments {
+ seg.id = self.next_node_id();
+ }
+
+ // Each `use` import is an item and thus owns the names in its
+ // path. Up to this point the nested import is the current owner;
+ // since we want each desugared import to own its own names, we
+ // have to adjust the owner before lowering the rest of the import.
+ self.with_hir_id_owner(id, |this| {
+ let mut ident = *ident;
+
+ let kind =
+ this.lower_use_tree(use_tree, &prefix, id, vis_span, &mut ident, attrs);
+ if let Some(attrs) = attrs {
+ this.attrs.insert(hir::ItemLocalId::new(0), attrs);
+ }
+
+ let item = hir::Item {
+ def_id: new_hir_id,
+ ident: this.lower_ident(ident),
+ kind,
+ vis_span,
+ span: this.lower_span(use_tree.span),
+ };
+ hir::OwnerNode::Item(this.arena.alloc(item))
+ });
+ }
+
+ let res = self.expect_full_res_from_use(id).next().unwrap_or(Res::Err);
+ let res = self.lower_res(res);
+ let path = self.lower_path_extra(res, &prefix, ParamMode::Explicit);
+ hir::ItemKind::Use(path, hir::UseKind::ListStem)
+ }
+ }
+ }
+
+ fn lower_foreign_item(&mut self, i: &ForeignItem) -> &'hir hir::ForeignItem<'hir> {
+ let hir_id = self.lower_node_id(i.id);
+ let def_id = hir_id.expect_owner();
+ self.lower_attrs(hir_id, &i.attrs);
+ let item = hir::ForeignItem {
+ def_id,
+ ident: self.lower_ident(i.ident),
+ kind: match i.kind {
+ ForeignItemKind::Fn(box Fn { ref sig, ref generics, .. }) => {
+ let fdec = &sig.decl;
+ let itctx = ImplTraitContext::Universal;
+ let (generics, (fn_dec, fn_args)) =
+ self.lower_generics(generics, i.id, itctx, |this| {
+ (
+ // Disallow `impl Trait` in foreign items.
+ this.lower_fn_decl(fdec, None, FnDeclKind::ExternFn, None),
+ this.lower_fn_params_to_names(fdec),
+ )
+ });
+
+ hir::ForeignItemKind::Fn(fn_dec, fn_args, generics)
+ }
+ ForeignItemKind::Static(ref t, m, _) => {
+ let ty =
+ self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ hir::ForeignItemKind::Static(ty, m)
+ }
+ ForeignItemKind::TyAlias(..) => hir::ForeignItemKind::Type,
+ ForeignItemKind::MacCall(_) => panic!("macro shouldn't exist here"),
+ },
+ vis_span: self.lower_span(i.vis.span),
+ span: self.lower_span(i.span),
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_foreign_item_ref(&mut self, i: &ForeignItem) -> hir::ForeignItemRef {
+ hir::ForeignItemRef {
+ id: hir::ForeignItemId { def_id: self.local_def_id(i.id) },
+ ident: self.lower_ident(i.ident),
+ span: self.lower_span(i.span),
+ }
+ }
+
+ fn lower_variant(&mut self, v: &Variant) -> hir::Variant<'hir> {
+ let id = self.lower_node_id(v.id);
+ self.lower_attrs(id, &v.attrs);
+ hir::Variant {
+ id,
+ data: self.lower_variant_data(id, &v.data),
+ disr_expr: v.disr_expr.as_ref().map(|e| self.lower_anon_const(e)),
+ ident: self.lower_ident(v.ident),
+ span: self.lower_span(v.span),
+ }
+ }
+
+ fn lower_variant_data(
+ &mut self,
+ parent_id: hir::HirId,
+ vdata: &VariantData,
+ ) -> hir::VariantData<'hir> {
+ match *vdata {
+ VariantData::Struct(ref fields, recovered) => hir::VariantData::Struct(
+ self.arena
+ .alloc_from_iter(fields.iter().enumerate().map(|f| self.lower_field_def(f))),
+ recovered,
+ ),
+ VariantData::Tuple(ref fields, id) => {
+ let ctor_id = self.lower_node_id(id);
+ self.alias_attrs(ctor_id, parent_id);
+ hir::VariantData::Tuple(
+ self.arena.alloc_from_iter(
+ fields.iter().enumerate().map(|f| self.lower_field_def(f)),
+ ),
+ ctor_id,
+ )
+ }
+ VariantData::Unit(id) => {
+ let ctor_id = self.lower_node_id(id);
+ self.alias_attrs(ctor_id, parent_id);
+ hir::VariantData::Unit(ctor_id)
+ }
+ }
+ }
+
+ fn lower_field_def(&mut self, (index, f): (usize, &FieldDef)) -> hir::FieldDef<'hir> {
+ let ty = if let TyKind::Path(ref qself, ref path) = f.ty.kind {
+ let t = self.lower_path_ty(
+ &f.ty,
+ qself,
+ path,
+ ParamMode::ExplicitNamed, // no `'_` in declarations (Issue #61124)
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ self.arena.alloc(t)
+ } else {
+ self.lower_ty(&f.ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ };
+ let hir_id = self.lower_node_id(f.id);
+ self.lower_attrs(hir_id, &f.attrs);
+ hir::FieldDef {
+ span: self.lower_span(f.span),
+ hir_id,
+ ident: match f.ident {
+ Some(ident) => self.lower_ident(ident),
+ // FIXME(jseyfried): positional field hygiene.
+ None => Ident::new(sym::integer(index), self.lower_span(f.span)),
+ },
+ vis_span: self.lower_span(f.vis.span),
+ ty,
+ }
+ }
+
+ fn lower_trait_item(&mut self, i: &AssocItem) -> &'hir hir::TraitItem<'hir> {
+ let hir_id = self.lower_node_id(i.id);
+ let trait_item_def_id = hir_id.expect_owner();
+
+ let (generics, kind, has_default) = match i.kind {
+ AssocItemKind::Const(_, ref ty, ref default) => {
+ let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let body = default.as_ref().map(|x| self.lower_const_body(i.span, Some(x)));
+ (hir::Generics::empty(), hir::TraitItemKind::Const(ty, body), body.is_some())
+ }
+ AssocItemKind::Fn(box Fn { ref sig, ref generics, body: None, .. }) => {
+ let names = self.lower_fn_params_to_names(&sig.decl);
+ let (generics, sig) =
+ self.lower_method_sig(generics, sig, i.id, FnDeclKind::Trait, None);
+ (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Required(names)), false)
+ }
+ AssocItemKind::Fn(box Fn { ref sig, ref generics, body: Some(ref body), .. }) => {
+ let asyncness = sig.header.asyncness;
+ let body_id =
+ self.lower_maybe_async_body(i.span, &sig.decl, asyncness, Some(&body));
+ let (generics, sig) = self.lower_method_sig(
+ generics,
+ sig,
+ i.id,
+ FnDeclKind::Trait,
+ asyncness.opt_return_id(),
+ );
+ (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)), true)
+ }
+ AssocItemKind::TyAlias(box TyAlias {
+ ref generics,
+ where_clauses,
+ ref bounds,
+ ref ty,
+ ..
+ }) => {
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, where_clauses, false);
+ let (generics, kind) = self.lower_generics(
+ &generics,
+ i.id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ let ty = ty.as_ref().map(|x| {
+ this.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ });
+ hir::TraitItemKind::Type(
+ this.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ ),
+ ty,
+ )
+ },
+ );
+ (generics, kind, ty.is_some())
+ }
+ AssocItemKind::MacCall(..) => panic!("macro item shouldn't exist at this point"),
+ };
+
+ self.lower_attrs(hir_id, &i.attrs);
+ let item = hir::TraitItem {
+ def_id: trait_item_def_id,
+ ident: self.lower_ident(i.ident),
+ generics,
+ kind,
+ span: self.lower_span(i.span),
+ defaultness: hir::Defaultness::Default { has_value: has_default },
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_trait_item_ref(&mut self, i: &AssocItem) -> hir::TraitItemRef {
+ let kind = match &i.kind {
+ AssocItemKind::Const(..) => hir::AssocItemKind::Const,
+ AssocItemKind::TyAlias(..) => hir::AssocItemKind::Type,
+ AssocItemKind::Fn(box Fn { sig, .. }) => {
+ hir::AssocItemKind::Fn { has_self: sig.decl.has_self() }
+ }
+ AssocItemKind::MacCall(..) => unimplemented!(),
+ };
+ let id = hir::TraitItemId { def_id: self.local_def_id(i.id) };
+ hir::TraitItemRef {
+ id,
+ ident: self.lower_ident(i.ident),
+ span: self.lower_span(i.span),
+ kind,
+ }
+ }
+
+ /// Construct `ExprKind::Err` for the given `span`.
+ pub(crate) fn expr_err(&mut self, span: Span) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::Err, AttrVec::new())
+ }
+
+ fn lower_impl_item(&mut self, i: &AssocItem) -> &'hir hir::ImplItem<'hir> {
+ // Since `default impl` is not yet implemented, this is always true in impls.
+ let has_value = true;
+ let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value);
+
+ let (generics, kind) = match &i.kind {
+ AssocItemKind::Const(_, ty, expr) => {
+ let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ (
+ hir::Generics::empty(),
+ hir::ImplItemKind::Const(ty, self.lower_const_body(i.span, expr.as_deref())),
+ )
+ }
+ AssocItemKind::Fn(box Fn { sig, generics, body, .. }) => {
+ self.current_item = Some(i.span);
+ let asyncness = sig.header.asyncness;
+ let body_id =
+ self.lower_maybe_async_body(i.span, &sig.decl, asyncness, body.as_deref());
+ let (generics, sig) = self.lower_method_sig(
+ generics,
+ sig,
+ i.id,
+ if self.is_in_trait_impl { FnDeclKind::Impl } else { FnDeclKind::Inherent },
+ asyncness.opt_return_id(),
+ );
+
+ (generics, hir::ImplItemKind::Fn(sig, body_id))
+ }
+ AssocItemKind::TyAlias(box TyAlias { generics, where_clauses, ty, .. }) => {
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, *where_clauses, false);
+ self.lower_generics(
+ &generics,
+ i.id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| match ty {
+ None => {
+ let ty = this.arena.alloc(this.ty(i.span, hir::TyKind::Err));
+ hir::ImplItemKind::TyAlias(ty)
+ }
+ Some(ty) => {
+ let ty = this.lower_ty(ty, ImplTraitContext::TypeAliasesOpaqueTy);
+ hir::ImplItemKind::TyAlias(ty)
+ }
+ },
+ )
+ }
+ AssocItemKind::MacCall(..) => panic!("`TyMac` should have been expanded by now"),
+ };
+
+ let hir_id = self.lower_node_id(i.id);
+ self.lower_attrs(hir_id, &i.attrs);
+ let item = hir::ImplItem {
+ def_id: hir_id.expect_owner(),
+ ident: self.lower_ident(i.ident),
+ generics,
+ kind,
+ vis_span: self.lower_span(i.vis.span),
+ span: self.lower_span(i.span),
+ defaultness,
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_impl_item_ref(&mut self, i: &AssocItem) -> hir::ImplItemRef {
+ hir::ImplItemRef {
+ id: hir::ImplItemId { def_id: self.local_def_id(i.id) },
+ ident: self.lower_ident(i.ident),
+ span: self.lower_span(i.span),
+ kind: match &i.kind {
+ AssocItemKind::Const(..) => hir::AssocItemKind::Const,
+ AssocItemKind::TyAlias(..) => hir::AssocItemKind::Type,
+ AssocItemKind::Fn(box Fn { sig, .. }) => {
+ hir::AssocItemKind::Fn { has_self: sig.decl.has_self() }
+ }
+ AssocItemKind::MacCall(..) => unimplemented!(),
+ },
+ trait_item_def_id: self.resolver.get_partial_res(i.id).map(|r| r.base_res().def_id()),
+ }
+ }
+
+ fn lower_defaultness(
+ &self,
+ d: Defaultness,
+ has_value: bool,
+ ) -> (hir::Defaultness, Option<Span>) {
+ match d {
+ Defaultness::Default(sp) => {
+ (hir::Defaultness::Default { has_value }, Some(self.lower_span(sp)))
+ }
+ Defaultness::Final => {
+ assert!(has_value);
+ (hir::Defaultness::Final, None)
+ }
+ }
+ }
+
+ fn record_body(
+ &mut self,
+ params: &'hir [hir::Param<'hir>],
+ value: hir::Expr<'hir>,
+ ) -> hir::BodyId {
+ let body = hir::Body { generator_kind: self.generator_kind, params, value };
+ let id = body.id();
+ debug_assert_eq!(id.hir_id.owner, self.current_hir_id_owner);
+ self.bodies.push((id.hir_id.local_id, self.arena.alloc(body)));
+ id
+ }
+
+ pub(super) fn lower_body(
+ &mut self,
+ f: impl FnOnce(&mut Self) -> (&'hir [hir::Param<'hir>], hir::Expr<'hir>),
+ ) -> hir::BodyId {
+ let prev_gen_kind = self.generator_kind.take();
+ let task_context = self.task_context.take();
+ let (parameters, result) = f(self);
+ let body_id = self.record_body(parameters, result);
+ self.task_context = task_context;
+ self.generator_kind = prev_gen_kind;
+ body_id
+ }
+
+ fn lower_param(&mut self, param: &Param) -> hir::Param<'hir> {
+ let hir_id = self.lower_node_id(param.id);
+ self.lower_attrs(hir_id, &param.attrs);
+ hir::Param {
+ hir_id,
+ pat: self.lower_pat(&param.pat),
+ ty_span: self.lower_span(param.ty.span),
+ span: self.lower_span(param.span),
+ }
+ }
+
+ pub(super) fn lower_fn_body(
+ &mut self,
+ decl: &FnDecl,
+ body: impl FnOnce(&mut Self) -> hir::Expr<'hir>,
+ ) -> hir::BodyId {
+ self.lower_body(|this| {
+ (
+ this.arena.alloc_from_iter(decl.inputs.iter().map(|x| this.lower_param(x))),
+ body(this),
+ )
+ })
+ }
+
+ fn lower_fn_body_block(
+ &mut self,
+ span: Span,
+ decl: &FnDecl,
+ body: Option<&Block>,
+ ) -> hir::BodyId {
+ self.lower_fn_body(decl, |this| this.lower_block_expr_opt(span, body))
+ }
+
+ fn lower_block_expr_opt(&mut self, span: Span, block: Option<&Block>) -> hir::Expr<'hir> {
+ match block {
+ Some(block) => self.lower_block_expr(block),
+ None => self.expr_err(span),
+ }
+ }
+
+ pub(super) fn lower_const_body(&mut self, span: Span, expr: Option<&Expr>) -> hir::BodyId {
+ self.lower_body(|this| {
+ (
+ &[],
+ match expr {
+ Some(expr) => this.lower_expr_mut(expr),
+ None => this.expr_err(span),
+ },
+ )
+ })
+ }
+
+ fn lower_maybe_async_body(
+ &mut self,
+ span: Span,
+ decl: &FnDecl,
+ asyncness: Async,
+ body: Option<&Block>,
+ ) -> hir::BodyId {
+ let closure_id = match asyncness {
+ Async::Yes { closure_id, .. } => closure_id,
+ Async::No => return self.lower_fn_body_block(span, decl, body),
+ };
+
+ self.lower_body(|this| {
+ let mut parameters: Vec<hir::Param<'_>> = Vec::new();
+ let mut statements: Vec<hir::Stmt<'_>> = Vec::new();
+
+            // Async function parameters are lowered into the closure body so that they are
+            // captured and so that the drop order matches that of the equivalent non-async function.
+ //
+ // from:
+ //
+ // async fn foo(<pattern>: <ty>, <pattern>: <ty>, <pattern>: <ty>) {
+ // <body>
+ // }
+ //
+ // into:
+ //
+ // fn foo(__arg0: <ty>, __arg1: <ty>, __arg2: <ty>) {
+ // async move {
+ // let __arg2 = __arg2;
+ // let <pattern> = __arg2;
+ // let __arg1 = __arg1;
+ // let <pattern> = __arg1;
+ // let __arg0 = __arg0;
+ // let <pattern> = __arg0;
+ // drop-temps { <body> } // see comments later in fn for details
+ // }
+ // }
+ //
+ // If `<pattern>` is a simple ident, then it is lowered to a single
+ // `let <pattern> = <pattern>;` statement as an optimization.
+ //
+ // Note that the body is embedded in `drop-temps`; an
+ // equivalent desugaring would be `return { <body>
+ // };`. The key point is that we wish to drop all the
+ // let-bound variables and temporaries created in the body
+ // (and its tail expression!) before we drop the
+ // parameters (c.f. rust-lang/rust#64512).
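+            //
+            // As a concrete sketch (not what is emitted verbatim), an
+            // `async fn f(x: u8) { x; }` keeps the simple binding `x` and
+            // lowers roughly to:
+            //
+            //     fn f(x: u8) {
+            //         async move {
+            //             let x = x;
+            //             drop-temps { { x; } }
+            //         }
+            //     }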
+ for (index, parameter) in decl.inputs.iter().enumerate() {
+ let parameter = this.lower_param(parameter);
+ let span = parameter.pat.span;
+
+ // Check if this is a binding pattern, if so, we can optimize and avoid adding a
+ // `let <pat> = __argN;` statement. In this case, we do not rename the parameter.
+ let (ident, is_simple_parameter) = match parameter.pat.kind {
+ hir::PatKind::Binding(
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+ _,
+ ident,
+ _,
+ ) => (ident, true),
+ // For `ref mut` or wildcard arguments, we can't reuse the binding, but
+ // we can keep the same name for the parameter.
+ // This lets rustdoc render it correctly in documentation.
+ hir::PatKind::Binding(_, _, ident, _) => (ident, false),
+ hir::PatKind::Wild => {
+ (Ident::with_dummy_span(rustc_span::symbol::kw::Underscore), false)
+ }
+ _ => {
+ // Replace the ident for bindings that aren't simple.
+ let name = format!("__arg{}", index);
+ let ident = Ident::from_str(&name);
+
+ (ident, false)
+ }
+ };
+
+ let desugared_span = this.mark_span_with_reason(DesugaringKind::Async, span, None);
+
+ // Construct a parameter representing `__argN: <ty>` to replace the parameter of the
+ // async function.
+ //
+ // If this is the simple case, this parameter will end up being the same as the
+ // original parameter, but with a different pattern id.
+ let stmt_attrs = this.attrs.get(&parameter.hir_id.local_id).copied();
+ let (new_parameter_pat, new_parameter_id) = this.pat_ident(desugared_span, ident);
+ let new_parameter = hir::Param {
+ hir_id: parameter.hir_id,
+ pat: new_parameter_pat,
+ ty_span: this.lower_span(parameter.ty_span),
+ span: this.lower_span(parameter.span),
+ };
+
+ if is_simple_parameter {
+ // If this is the simple case, then we only insert one statement that is
+ // `let <pat> = <pat>;`. We re-use the original argument's pattern so that
+ // `HirId`s are densely assigned.
+ let expr = this.expr_ident(desugared_span, ident, new_parameter_id);
+ let stmt = this.stmt_let_pat(
+ stmt_attrs,
+ desugared_span,
+ Some(expr),
+ parameter.pat,
+ hir::LocalSource::AsyncFn,
+ );
+ statements.push(stmt);
+ } else {
+ // If this is not the simple case, then we construct two statements:
+ //
+ // ```
+ // let __argN = __argN;
+ // let <pat> = __argN;
+ // ```
+ //
+ // The first statement moves the parameter into the closure and thus ensures
+ // that the drop order is correct.
+ //
+ // The second statement creates the bindings that the user wrote.
+
+ // Construct the `let mut __argN = __argN;` statement. It must be a mut binding
+ // because the user may have specified a `ref mut` binding in the next
+ // statement.
+ let (move_pat, move_id) = this.pat_ident_binding_mode(
+ desugared_span,
+ ident,
+ hir::BindingAnnotation::Mutable,
+ );
+ let move_expr = this.expr_ident(desugared_span, ident, new_parameter_id);
+ let move_stmt = this.stmt_let_pat(
+ None,
+ desugared_span,
+ Some(move_expr),
+ move_pat,
+ hir::LocalSource::AsyncFn,
+ );
+
+ // Construct the `let <pat> = __argN;` statement. We re-use the original
+ // parameter's pattern so that `HirId`s are densely assigned.
+ let pattern_expr = this.expr_ident(desugared_span, ident, move_id);
+ let pattern_stmt = this.stmt_let_pat(
+ stmt_attrs,
+ desugared_span,
+ Some(pattern_expr),
+ parameter.pat,
+ hir::LocalSource::AsyncFn,
+ );
+
+ statements.push(move_stmt);
+ statements.push(pattern_stmt);
+ };
+
+ parameters.push(new_parameter);
+ }
+
+ let body_span = body.map_or(span, |b| b.span);
+ let async_expr = this.make_async_expr(
+ CaptureBy::Value,
+ closure_id,
+ None,
+ body_span,
+ hir::AsyncGeneratorKind::Fn,
+ |this| {
+ // Create a block from the user's function body:
+ let user_body = this.lower_block_expr_opt(body_span, body);
+
+ // Transform into `drop-temps { <user-body> }`, an expression:
+ let desugared_span =
+ this.mark_span_with_reason(DesugaringKind::Async, user_body.span, None);
+ let user_body = this.expr_drop_temps(
+ desugared_span,
+ this.arena.alloc(user_body),
+ AttrVec::new(),
+ );
+
+ // As noted above, create the final block like
+ //
+ // ```
+ // {
+ // let $param_pattern = $raw_param;
+ // ...
+ // drop-temps { <user-body> }
+ // }
+ // ```
+ let body = this.block_all(
+ desugared_span,
+ this.arena.alloc_from_iter(statements),
+ Some(user_body),
+ );
+
+ this.expr_block(body, AttrVec::new())
+ },
+ );
+
+ (
+ this.arena.alloc_from_iter(parameters),
+ this.expr(body_span, async_expr, AttrVec::new()),
+ )
+ })
+ }
+
+ fn lower_method_sig(
+ &mut self,
+ generics: &Generics,
+ sig: &FnSig,
+ id: NodeId,
+ kind: FnDeclKind,
+ is_async: Option<NodeId>,
+ ) -> (&'hir hir::Generics<'hir>, hir::FnSig<'hir>) {
+ let header = self.lower_fn_header(sig.header);
+ let itctx = ImplTraitContext::Universal;
+ let (generics, decl) = self.lower_generics(generics, id, itctx, |this| {
+ this.lower_fn_decl(&sig.decl, Some(id), kind, is_async)
+ });
+ (generics, hir::FnSig { header, decl, span: self.lower_span(sig.span) })
+ }
+
+ fn lower_fn_header(&mut self, h: FnHeader) -> hir::FnHeader {
+ hir::FnHeader {
+ unsafety: self.lower_unsafety(h.unsafety),
+ asyncness: self.lower_asyncness(h.asyncness),
+ constness: self.lower_constness(h.constness),
+ abi: self.lower_extern(h.ext),
+ }
+ }
+
+ pub(super) fn lower_abi(&mut self, abi: StrLit) -> abi::Abi {
+ abi::lookup(abi.symbol_unescaped.as_str()).unwrap_or_else(|| {
+ self.error_on_invalid_abi(abi);
+ abi::Abi::Rust
+ })
+ }
+
+ pub(super) fn lower_extern(&mut self, ext: Extern) -> abi::Abi {
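+        // A plain `fn f()` has the Rust ABI, a bare `extern fn f()` has an
+        // implicit ABI and gets the fallback, and `extern "C" fn f()` names
+        // its ABI explicitly and goes through `lower_abi`.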
+ match ext {
+ Extern::None => abi::Abi::Rust,
+ Extern::Implicit(_) => abi::Abi::FALLBACK,
+ Extern::Explicit(abi, _) => self.lower_abi(abi),
+ }
+ }
+
+ fn error_on_invalid_abi(&self, abi: StrLit) {
+ struct_span_err!(self.tcx.sess, abi.span, E0703, "invalid ABI: found `{}`", abi.symbol)
+ .span_label(abi.span, "invalid ABI")
+ .help(&format!("valid ABIs: {}", abi::all_names().join(", ")))
+ .emit();
+ }
+
+ fn lower_asyncness(&mut self, a: Async) -> hir::IsAsync {
+ match a {
+ Async::Yes { .. } => hir::IsAsync::Async,
+ Async::No => hir::IsAsync::NotAsync,
+ }
+ }
+
+ fn lower_constness(&mut self, c: Const) -> hir::Constness {
+ match c {
+ Const::Yes(_) => hir::Constness::Const,
+ Const::No => hir::Constness::NotConst,
+ }
+ }
+
+ pub(super) fn lower_unsafety(&mut self, u: Unsafe) -> hir::Unsafety {
+ match u {
+ Unsafe::Yes(_) => hir::Unsafety::Unsafe,
+ Unsafe::No => hir::Unsafety::Normal,
+ }
+ }
+
+    /// Return the lowered `generics` as `hir::Generics`, paired with the result of evaluating `f`;
+    /// any `impl Trait` definitions and bounds produced while evaluating `f` are folded into the
+    /// lowered generics.
+ #[instrument(level = "debug", skip(self, f))]
+ fn lower_generics<T>(
+ &mut self,
+ generics: &Generics,
+ parent_node_id: NodeId,
+ itctx: ImplTraitContext,
+ f: impl FnOnce(&mut Self) -> T,
+ ) -> (&'hir hir::Generics<'hir>, T) {
+ debug_assert!(self.impl_trait_defs.is_empty());
+ debug_assert!(self.impl_trait_bounds.is_empty());
+
+ // Error if `?Trait` bounds in where clauses don't refer directly to type parameters.
+ // Note: we used to clone these bounds directly onto the type parameter (and avoid lowering
+        // these into HIR when we lower the where clauses), but this made it quite difficult to
+        // keep track of the `Span` info. Now, `add_implicitly_sized` in `AstConv` checks both
+        // param bounds and where clauses for `?Sized`.
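+        //
+        // For example (a sketch of what this check accepts and rejects):
+        //
+        //     fn f<T>() where T: ?Sized {}      // ok: `T` is a declared type parameter
+        //     fn g<T>() where Vec<T>: ?Sized {} // error: not a plain type parameter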
+ for pred in &generics.where_clause.predicates {
+ let WherePredicate::BoundPredicate(ref bound_pred) = *pred else {
+ continue;
+ };
+ let compute_is_param = || {
+ // Check if the where clause type is a plain type parameter.
+ match self
+ .resolver
+ .get_partial_res(bound_pred.bounded_ty.id)
+ .map(|d| (d.base_res(), d.unresolved_segments()))
+ {
+ Some((Res::Def(DefKind::TyParam, def_id), 0))
+ if bound_pred.bound_generic_params.is_empty() =>
+ {
+ generics
+ .params
+ .iter()
+ .any(|p| def_id == self.local_def_id(p.id).to_def_id())
+ }
+ // Either the `bounded_ty` is not a plain type parameter, or
+ // it's not found in the generic type parameters list.
+ _ => false,
+ }
+ };
+ // We only need to compute this once per `WherePredicate`, but don't
+ // need to compute this at all unless there is a Maybe bound.
+ let mut is_param: Option<bool> = None;
+ for bound in &bound_pred.bounds {
+ if !matches!(*bound, GenericBound::Trait(_, TraitBoundModifier::Maybe)) {
+ continue;
+ }
+ let is_param = *is_param.get_or_insert_with(compute_is_param);
+ if !is_param {
+ self.diagnostic().span_err(
+ bound.span(),
+ "`?Trait` bounds are only permitted at the \
+ point where a type parameter is declared",
+ );
+ }
+ }
+ }
+
+ let mut predicates: SmallVec<[hir::WherePredicate<'hir>; 4]> = SmallVec::new();
+ predicates.extend(generics.params.iter().filter_map(|param| {
+ self.lower_generic_bound_predicate(
+ param.ident,
+ param.id,
+ &param.kind,
+ &param.bounds,
+ itctx,
+ PredicateOrigin::GenericParam,
+ )
+ }));
+ predicates.extend(
+ generics
+ .where_clause
+ .predicates
+ .iter()
+ .map(|predicate| self.lower_where_predicate(predicate)),
+ );
+
+ let mut params: SmallVec<[hir::GenericParam<'hir>; 4]> =
+ self.lower_generic_params_mut(&generics.params).collect();
+
+ // Introduce extra lifetimes if late resolution tells us to.
+ let extra_lifetimes = self.resolver.take_extra_lifetime_params(parent_node_id);
+ params.extend(extra_lifetimes.into_iter().filter_map(|(ident, node_id, res)| {
+ self.lifetime_res_to_generic_param(ident, node_id, res)
+ }));
+
+ let has_where_clause_predicates = !generics.where_clause.predicates.is_empty();
+ let where_clause_span = self.lower_span(generics.where_clause.span);
+ let span = self.lower_span(generics.span);
+ let res = f(self);
+
+ let impl_trait_defs = std::mem::take(&mut self.impl_trait_defs);
+ params.extend(impl_trait_defs.into_iter());
+
+ let impl_trait_bounds = std::mem::take(&mut self.impl_trait_bounds);
+ predicates.extend(impl_trait_bounds.into_iter());
+
+ let lowered_generics = self.arena.alloc(hir::Generics {
+ params: self.arena.alloc_from_iter(params),
+ predicates: self.arena.alloc_from_iter(predicates),
+ has_where_clause_predicates,
+ where_clause_span,
+ span,
+ });
+
+ (lowered_generics, res)
+ }
+
+ pub(super) fn lower_generic_bound_predicate(
+ &mut self,
+ ident: Ident,
+ id: NodeId,
+ kind: &GenericParamKind,
+ bounds: &[GenericBound],
+ itctx: ImplTraitContext,
+ origin: PredicateOrigin,
+ ) -> Option<hir::WherePredicate<'hir>> {
+ // Do not create a clause if we do not have anything inside it.
+ if bounds.is_empty() {
+ return None;
+ }
+
+ let bounds = self.lower_param_bounds(bounds, itctx);
+
+ let ident = self.lower_ident(ident);
+ let param_span = ident.span;
+ let span = bounds
+ .iter()
+ .fold(Some(param_span.shrink_to_hi()), |span: Option<Span>, bound| {
+ let bound_span = bound.span();
+ // We include bounds that come from a `#[derive(_)]` but point at the user's code,
+ // as we use this method to get a span appropriate for suggestions.
+ if !bound_span.can_be_used_for_suggestions() {
+ None
+ } else if let Some(span) = span {
+ Some(span.to(bound_span))
+ } else {
+ Some(bound_span)
+ }
+ })
+ .unwrap_or(param_span.shrink_to_hi());
+ match kind {
+ GenericParamKind::Const { .. } => None,
+ GenericParamKind::Type { .. } => {
+ let def_id = self.local_def_id(id).to_def_id();
+ let ty_path = self.arena.alloc(hir::Path {
+ span: param_span,
+ res: Res::Def(DefKind::TyParam, def_id),
+ segments: self.arena.alloc_from_iter([hir::PathSegment::from_ident(ident)]),
+ });
+ let ty_id = self.next_id();
+ let bounded_ty =
+ self.ty_path(ty_id, param_span, hir::QPath::Resolved(None, ty_path));
+ Some(hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bounded_ty: self.arena.alloc(bounded_ty),
+ bounds,
+ span,
+ bound_generic_params: &[],
+ origin,
+ }))
+ }
+ GenericParamKind::Lifetime => {
+ let ident_span = self.lower_span(ident.span);
+ let ident = self.lower_ident(ident);
+ let lt_id = self.next_node_id();
+ let lifetime = self.new_named_lifetime(id, lt_id, ident_span, ident);
+ Some(hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ lifetime,
+ span,
+ bounds,
+ in_where_clause: false,
+ }))
+ }
+ }
+ }
+
+ fn lower_where_predicate(&mut self, pred: &WherePredicate) -> hir::WherePredicate<'hir> {
+ match *pred {
+ WherePredicate::BoundPredicate(WhereBoundPredicate {
+ ref bound_generic_params,
+ ref bounded_ty,
+ ref bounds,
+ span,
+ }) => hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bound_generic_params: self.lower_generic_params(bound_generic_params),
+ bounded_ty: self
+ .lower_ty(bounded_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ bounds: self.arena.alloc_from_iter(bounds.iter().map(|bound| {
+ self.lower_param_bound(
+ bound,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ )
+ })),
+ span: self.lower_span(span),
+ origin: PredicateOrigin::WhereClause,
+ }),
+ WherePredicate::RegionPredicate(WhereRegionPredicate {
+ ref lifetime,
+ ref bounds,
+ span,
+ }) => hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ span: self.lower_span(span),
+ lifetime: self.lower_lifetime(lifetime),
+ bounds: self.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ ),
+ in_where_clause: true,
+ }),
+ WherePredicate::EqPredicate(WhereEqPredicate { id, ref lhs_ty, ref rhs_ty, span }) => {
+ hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+ hir_id: self.lower_node_id(id),
+ lhs_ty: self
+ .lower_ty(lhs_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ rhs_ty: self
+ .lower_ty(rhs_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ span: self.lower_span(span),
+ })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
new file mode 100644
index 000000000..224dc3c23
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -0,0 +1,2501 @@
+//! Lowers the AST to the HIR.
+//!
+//! Since the AST and HIR are fairly similar, this is mostly a simple procedure,
+//! much like a fold. Where lowering involves a bit more work things get more
+//! interesting and there are some invariants you should know about. These mostly
+//! concern spans and IDs.
+//!
+//! Spans are assigned to AST nodes during parsing and then modified during
+//! expansion to indicate a node's origin and the process by which it was
+//! expanded. IDs are assigned to AST nodes just before lowering.
+//!
+//! For the simpler lowering steps, IDs and spans should be preserved. Unlike
+//! expansion we do not preserve the process of lowering in the spans, so spans
+//! should not be modified here. When creating a new node (as opposed to
+//! "folding" an existing one), create a new ID using `next_id()`.
+//!
+//! You must ensure that IDs are unique. That means that you should only use the
+//! ID from an AST node in a single HIR node (you can assume that AST node-IDs
+//! are unique). Every new node must have a unique ID. Avoid cloning HIR nodes.
+//! If you do, you must then set the new node's ID to a fresh one.
+//!
+//! Spans are used for error messages and for tools to map semantics back to
+//! source code. It is therefore not as important to be strict about spans as it
+//! is about IDs (you can't break the compiler by screwing up a span). Obviously, a
+//! HIR node can only have a single span. But multiple nodes can have the same
+//! span and spans don't need to be kept in order, etc. Where code is preserved
+//! by lowering, it should have the same span as in the AST. Where HIR nodes are
+//! new it is probably best to give a span for the whole AST node being lowered.
+//! All nodes should have real spans; don't use dummy spans. Tools are likely to
+//! get confused if the spans from leaf AST nodes occur in multiple places
+//! in the HIR, especially for multiple identifiers.
+
+#![feature(box_patterns)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+
+use rustc_ast::visit;
+use rustc_ast::{self as ast, *};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{struct_span_err, Applicability, Handler};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::definitions::DefPathData;
+use rustc_hir::{ConstArg, GenericArg, ItemLocalId, ParamName, TraitCandidate};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::span_bug;
+use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
+use rustc_session::parse::feature_err;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+use smallvec::SmallVec;
+use std::collections::hash_map::Entry;
+
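+// A usage sketch: `arena_vec![this; a, b]` expands to
+// `this.arena.alloc_from_iter([a, b])` and yields a `&'hir [T]`.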
+macro_rules! arena_vec {
+ ($this:expr; $($x:expr),*) => (
+ $this.arena.alloc_from_iter([$($x),*])
+ );
+}
+
+mod asm;
+mod block;
+mod expr;
+mod index;
+mod item;
+mod lifetime_collector;
+mod pat;
+mod path;
+
+struct LoweringContext<'a, 'hir> {
+ tcx: TyCtxt<'hir>,
+ resolver: &'a mut ResolverAstLowering,
+
+ /// Used to allocate HIR nodes.
+ arena: &'hir hir::Arena<'hir>,
+
+ /// Bodies inside the owner being lowered.
+ bodies: Vec<(hir::ItemLocalId, &'hir hir::Body<'hir>)>,
+ /// Attributes inside the owner being lowered.
+ attrs: SortedMap<hir::ItemLocalId, &'hir [Attribute]>,
+ /// Collect items that were created by lowering the current owner.
+ children: FxHashMap<LocalDefId, hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>>>,
+
+ generator_kind: Option<hir::GeneratorKind>,
+
+ /// When inside an `async` context, this is the `HirId` of the
+ /// `task_context` local bound to the resume argument of the generator.
+ task_context: Option<hir::HirId>,
+
+ /// Used to get the current `fn`'s def span to point to when using `await`
+ /// outside of an `async fn`.
+ current_item: Option<Span>,
+
+ catch_scope: Option<NodeId>,
+ loop_scope: Option<NodeId>,
+ is_in_loop_condition: bool,
+ is_in_trait_impl: bool,
+ is_in_dyn_type: bool,
+
+ current_hir_id_owner: LocalDefId,
+ item_local_id_counter: hir::ItemLocalId,
+ local_id_to_def_id: SortedMap<ItemLocalId, LocalDefId>,
+ trait_map: FxHashMap<ItemLocalId, Box<[TraitCandidate]>>,
+
+ impl_trait_defs: Vec<hir::GenericParam<'hir>>,
+ impl_trait_bounds: Vec<hir::WherePredicate<'hir>>,
+
+ /// NodeIds that are lowered inside the current HIR owner.
+ node_id_to_local_id: FxHashMap<NodeId, hir::ItemLocalId>,
+
+ allow_try_trait: Option<Lrc<[Symbol]>>,
+ allow_gen_future: Option<Lrc<[Symbol]>>,
+ allow_into_future: Option<Lrc<[Symbol]>>,
+}
+
+trait ResolverAstLoweringExt {
+ fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>>;
+ fn get_partial_res(&self, id: NodeId) -> Option<PartialRes>;
+ fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
+ fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
+ fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
+ fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
+ fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind;
+    /// Record a mapping from the `from` local def id to the `to` local def id in the
+    /// `generics_def_id_map` field.
+ fn record_def_id_remap(&mut self, from: LocalDefId, to: LocalDefId);
+    /// Get the previously recorded `to` local def id for a given `from` local def id, using the
+    /// `generics_def_id_map` field.
+ fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId;
+}
+
+impl ResolverAstLoweringExt for ResolverAstLowering {
+ fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>> {
+ if let ExprKind::Path(None, path) = &expr.kind {
+ // Don't perform legacy const generics rewriting if the path already
+ // has generic arguments.
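+            // (For example, an explicit `foo::<3>(x)` call is already in the
+            // new const generics style, so there is nothing to rewrite.)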
+ if path.segments.last().unwrap().args.is_some() {
+ return None;
+ }
+
+ let partial_res = self.partial_res_map.get(&expr.id)?;
+ if partial_res.unresolved_segments() != 0 {
+ return None;
+ }
+
+ if let Res::Def(DefKind::Fn, def_id) = partial_res.base_res() {
+ // We only support cross-crate argument rewriting. Uses
+ // within the same crate should be updated to use the new
+ // const generics style.
+ if def_id.is_local() {
+ return None;
+ }
+
+ if let Some(v) = self.legacy_const_generic_args.get(&def_id) {
+ return v.clone();
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Obtains resolution for a `NodeId` with a single resolution.
+ fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
+ self.partial_res_map.get(&id).copied()
+ }
+
+ /// Obtains per-namespace resolutions for `use` statement with the given `NodeId`.
+ fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>> {
+ self.import_res_map.get(&id).copied().unwrap_or_default()
+ }
+
+ /// Obtains resolution for a label with the given `NodeId`.
+ fn get_label_res(&self, id: NodeId) -> Option<NodeId> {
+ self.label_res_map.get(&id).copied()
+ }
+
+ /// Obtains resolution for a lifetime with the given `NodeId`.
+ fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes> {
+ self.lifetimes_res_map.get(&id).copied()
+ }
+
+ /// Obtain the list of lifetimes parameters to add to an item.
+ ///
+ /// Extra lifetime parameters should only be added in places that can appear
+ /// as a `binder` in `LifetimeRes`.
+ ///
+ /// The extra lifetimes that appear from the parenthesized `Fn`-trait desugaring
+ /// should appear at the enclosing `PolyTraitRef`.
+ fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)> {
+ self.extra_lifetime_params_map.remove(&id).unwrap_or_default()
+ }
+
+ fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind {
+ self.builtin_macro_kinds.get(&def_id).copied().unwrap_or(MacroKind::Bang)
+ }
+
+ /// Push a remapping into the top-most map.
+ /// Panics if no map has been pushed.
+    /// Remapping is used when lowering `-> impl Trait` return
+    /// types to create the resulting opaque type.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn record_def_id_remap(&mut self, from: LocalDefId, to: LocalDefId) {
+ self.generics_def_id_map.last_mut().expect("no map pushed").insert(from, to);
+ }
+
+ fn get_remapped_def_id(&self, mut local_def_id: LocalDefId) -> LocalDefId {
+        // `generics_def_id_map` is a stack of mappings. As we go deeper into impl trait nesting,
+        // we push new mappings, so we need to try the latest mappings first, hence `iter().rev()`.
+ //
+ // Consider:
+ //
+ // `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
+ //
+        // We would end up with a `generics_def_id_map` like:
+ //
+ // `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
+ //
+        // For the opaque type generated from `impl Sized + 'b`, we want the result to be
+        // `impl_sized#'b`, so iterating forward is the wrong thing to do.
+ for map in self.generics_def_id_map.iter().rev() {
+ if let Some(r) = map.get(&local_def_id) {
+ debug!("def_id_remapper: remapping from `{local_def_id:?}` to `{r:?}`");
+ local_def_id = *r;
+ } else {
+ debug!("def_id_remapper: no remapping for `{local_def_id:?}` found in map");
+ }
+ }
+
+ local_def_id
+ }
+}
+
+/// Context of `impl Trait` in code, which determines whether it is allowed in an HIR subtree,
+/// and if so, what meaning it has.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum ImplTraitContext {
+ /// Treat `impl Trait` as shorthand for a new universal generic parameter.
+ /// Example: `fn foo(x: impl Debug)`, where `impl Debug` is conceptually
+ /// equivalent to a fresh universal parameter like `fn foo<T: Debug>(x: T)`.
+ ///
+    /// Newly generated parameters are collected in `LoweringContext::impl_trait_defs`.
+ Universal,
+
+ /// Treat `impl Trait` as shorthand for a new opaque type.
+ /// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
+ /// equivalent to a new opaque type like `type T = impl Debug; fn foo() -> T`.
+ ReturnPositionOpaqueTy {
+        /// Origin: either `OpaqueTyOrigin::FnReturn` or `OpaqueTyOrigin::AsyncFn`.
+ origin: hir::OpaqueTyOrigin,
+ },
+    /// Treat `impl Trait` as an opaque type in a type alias (e.g. `type Foo = impl Debug;`).
+ TypeAliasesOpaqueTy,
+ /// `impl Trait` is not accepted in this position.
+ Disallowed(ImplTraitPosition),
+}
+
+/// Position in which `impl Trait` is disallowed.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum ImplTraitPosition {
+ Path,
+ Variable,
+ Type,
+ Trait,
+ AsyncBlock,
+ Bound,
+ Generic,
+ ExternFnParam,
+ ClosureParam,
+ PointerParam,
+ FnTraitParam,
+ TraitParam,
+ ImplParam,
+ ExternFnReturn,
+ ClosureReturn,
+ PointerReturn,
+ FnTraitReturn,
+ TraitReturn,
+ ImplReturn,
+}
+
+impl std::fmt::Display for ImplTraitPosition {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let name = match self {
+ ImplTraitPosition::Path => "path",
+ ImplTraitPosition::Variable => "variable binding",
+ ImplTraitPosition::Type => "type",
+ ImplTraitPosition::Trait => "trait",
+ ImplTraitPosition::AsyncBlock => "async block",
+ ImplTraitPosition::Bound => "bound",
+ ImplTraitPosition::Generic => "generic",
+ ImplTraitPosition::ExternFnParam => "`extern fn` param",
+ ImplTraitPosition::ClosureParam => "closure param",
+ ImplTraitPosition::PointerParam => "`fn` pointer param",
+ ImplTraitPosition::FnTraitParam => "`Fn` trait param",
+ ImplTraitPosition::TraitParam => "trait method param",
+ ImplTraitPosition::ImplParam => "`impl` method param",
+ ImplTraitPosition::ExternFnReturn => "`extern fn` return",
+ ImplTraitPosition::ClosureReturn => "closure return",
+ ImplTraitPosition::PointerReturn => "`fn` pointer return",
+ ImplTraitPosition::FnTraitReturn => "`Fn` trait return",
+ ImplTraitPosition::TraitReturn => "trait method return",
+ ImplTraitPosition::ImplReturn => "`impl` method return",
+ };
+
+ write!(f, "{}", name)
+ }
+}
+
+#[derive(Debug)]
+enum FnDeclKind {
+ Fn,
+ Inherent,
+ ExternFn,
+ Closure,
+ Pointer,
+ Trait,
+ Impl,
+}
+
+impl FnDeclKind {
+    fn impl_trait_return_allowed(&self) -> bool {
+        matches!(self, FnDeclKind::Fn | FnDeclKind::Inherent)
+    }
+}
+
+#[derive(Copy, Clone)]
+enum AstOwner<'a> {
+ NonOwner,
+ Crate(&'a ast::Crate),
+ Item(&'a ast::Item),
+ AssocItem(&'a ast::AssocItem, visit::AssocCtxt),
+ ForeignItem(&'a ast::ForeignItem),
+}
+
+fn index_crate<'a>(
+ node_id_to_def_id: &FxHashMap<NodeId, LocalDefId>,
+ krate: &'a Crate,
+) -> IndexVec<LocalDefId, AstOwner<'a>> {
+ let mut indexer = Indexer { node_id_to_def_id, index: IndexVec::new() };
+ indexer.index.ensure_contains_elem(CRATE_DEF_ID, || AstOwner::NonOwner);
+ indexer.index[CRATE_DEF_ID] = AstOwner::Crate(krate);
+ visit::walk_crate(&mut indexer, krate);
+ return indexer.index;
+
+ struct Indexer<'s, 'a> {
+ node_id_to_def_id: &'s FxHashMap<NodeId, LocalDefId>,
+ index: IndexVec<LocalDefId, AstOwner<'a>>,
+ }
+
+ impl<'a> visit::Visitor<'a> for Indexer<'_, 'a> {
+ fn visit_attribute(&mut self, _: &'a Attribute) {
+ // We do not want to lower expressions that appear in attributes,
+ // as they are not accessible to the rest of the HIR.
+ }
+
+ fn visit_item(&mut self, item: &'a ast::Item) {
+ let def_id = self.node_id_to_def_id[&item.id];
+ self.index.ensure_contains_elem(def_id, || AstOwner::NonOwner);
+ self.index[def_id] = AstOwner::Item(item);
+ visit::walk_item(self, item)
+ }
+
+ fn visit_assoc_item(&mut self, item: &'a ast::AssocItem, ctxt: visit::AssocCtxt) {
+ let def_id = self.node_id_to_def_id[&item.id];
+ self.index.ensure_contains_elem(def_id, || AstOwner::NonOwner);
+ self.index[def_id] = AstOwner::AssocItem(item, ctxt);
+ visit::walk_assoc_item(self, item, ctxt);
+ }
+
+ fn visit_foreign_item(&mut self, item: &'a ast::ForeignItem) {
+ let def_id = self.node_id_to_def_id[&item.id];
+ self.index.ensure_contains_elem(def_id, || AstOwner::NonOwner);
+ self.index[def_id] = AstOwner::ForeignItem(item);
+ visit::walk_foreign_item(self, item);
+ }
+ }
+}
+
+/// Compute the hash for the HIR of the full crate.
+/// This hash will then be part of the `crate_hash`, which is stored in the metadata.
+fn compute_hir_hash(
+ tcx: TyCtxt<'_>,
+ owners: &IndexVec<LocalDefId, hir::MaybeOwner<&hir::OwnerInfo<'_>>>,
+) -> Fingerprint {
+ let mut hir_body_nodes: Vec<_> = owners
+ .iter_enumerated()
+ .filter_map(|(def_id, info)| {
+ let info = info.as_owner()?;
+ let def_path_hash = tcx.hir().def_path_hash(def_id);
+ Some((def_path_hash, info))
+ })
+ .collect();
+ hir_body_nodes.sort_unstable_by_key(|bn| bn.0);
+
+ tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ hir_body_nodes.hash_stable(&mut hcx, &mut stable_hasher);
+ stable_hasher.finish()
+ })
+}
+
+pub fn lower_to_hir<'hir>(tcx: TyCtxt<'hir>, (): ()) -> hir::Crate<'hir> {
+ let sess = tcx.sess;
+ let krate = tcx.untracked_crate.steal();
+ let mut resolver = tcx.resolver_for_lowering(()).steal();
+
+ let ast_index = index_crate(&resolver.node_id_to_def_id, &krate);
+ let mut owners = IndexVec::from_fn_n(
+ |_| hir::MaybeOwner::Phantom,
+ tcx.definitions_untracked().def_index_count(),
+ );
+
+ for def_id in ast_index.indices() {
+ item::ItemLowerer {
+ tcx,
+ resolver: &mut resolver,
+ ast_index: &ast_index,
+ owners: &mut owners,
+ }
+ .lower_node(def_id);
+ }
+
+ // Drop AST to free memory
+ std::mem::drop(ast_index);
+ sess.time("drop_ast", || std::mem::drop(krate));
+
+ // Discard hygiene data, which isn't required after lowering to HIR.
+ if !sess.opts.unstable_opts.keep_hygiene_data {
+ rustc_span::hygiene::clear_syntax_context_map();
+ }
+
+ let hir_hash = compute_hir_hash(tcx, &owners);
+ hir::Crate { owners, hir_hash }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum ParamMode {
+ /// Any path in a type context.
+ Explicit,
+ /// Path in a type definition, where the anonymous lifetime `'_` is not allowed.
+ ExplicitNamed,
+ /// The `module::Type` in `module::Type::method` in an expression.
+ Optional,
+}
+
+enum ParenthesizedGenericArgs {
+ Ok,
+ Err,
+}
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ fn create_def(
+ &mut self,
+ parent: LocalDefId,
+ node_id: ast::NodeId,
+ data: DefPathData,
+ ) -> LocalDefId {
+ debug_assert_ne!(node_id, ast::DUMMY_NODE_ID);
+ assert!(
+ self.opt_local_def_id(node_id).is_none(),
+ "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}",
+ node_id,
+ data,
+ self.tcx.hir().def_key(self.local_def_id(node_id)),
+ );
+
+ let def_id = self.tcx.create_def(parent, data);
+
+ debug!("create_def: def_id_to_node_id[{:?}] <-> {:?}", def_id, node_id);
+ self.resolver.node_id_to_def_id.insert(node_id, def_id);
+
+ def_id
+ }
+
+ fn next_node_id(&mut self) -> NodeId {
+ let start = self.resolver.next_node_id;
+ let next = start.as_u32().checked_add(1).expect("input too large; ran out of NodeIds");
+ self.resolver.next_node_id = ast::NodeId::from_u32(next);
+ start
+ }
+
+ /// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
+ /// resolver (if any), after applying any remapping from `get_remapped_def_id`.
+ ///
+ /// For example, in a function like `fn foo<'a>(x: &'a u32)`,
+ /// invoking with the id from the `ast::Lifetime` node found inside
+ /// the `&'a u32` type would return the `LocalDefId` of the
+ /// `'a` parameter declared on `foo`.
+ ///
+ /// This function also applies remapping from `get_remapped_def_id`.
+ /// These are used when synthesizing opaque types from `-> impl Trait` return types and so forth.
+ /// For example, in a function like `fn foo<'a>() -> impl Debug + 'a`,
+ /// we would create an opaque type `type FooReturn<'a1> = impl Debug + 'a1`.
+ /// When lowering the `Debug + 'a` bounds, we add a remapping to map `'a` to `'a1`.
+ fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
+ self.resolver
+ .node_id_to_def_id
+ .get(&node)
+ .map(|local_def_id| self.resolver.get_remapped_def_id(*local_def_id))
+ }
+
+ fn local_def_id(&self, node: NodeId) -> LocalDefId {
+ self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{:?}`", node))
+ }
+
+ /// Freshen the `LoweringContext` and ready it to lower a nested item.
+ /// The lowered item is registered into `self.children`.
+ ///
+ /// This function sets up `HirId` lowering infrastructure,
+ /// and stashes the shared mutable state to avoid pollution by the closure.
+ #[instrument(level = "debug", skip(self, f))]
+ fn with_hir_id_owner(
+ &mut self,
+ owner: NodeId,
+ f: impl FnOnce(&mut Self) -> hir::OwnerNode<'hir>,
+ ) {
+ let def_id = self.local_def_id(owner);
+
+ let current_attrs = std::mem::take(&mut self.attrs);
+ let current_bodies = std::mem::take(&mut self.bodies);
+ let current_node_ids = std::mem::take(&mut self.node_id_to_local_id);
+ let current_id_to_def_id = std::mem::take(&mut self.local_id_to_def_id);
+ let current_trait_map = std::mem::take(&mut self.trait_map);
+ let current_owner = std::mem::replace(&mut self.current_hir_id_owner, def_id);
+ let current_local_counter =
+ std::mem::replace(&mut self.item_local_id_counter, hir::ItemLocalId::new(1));
+ let current_impl_trait_defs = std::mem::take(&mut self.impl_trait_defs);
+ let current_impl_trait_bounds = std::mem::take(&mut self.impl_trait_bounds);
+
+ // Do not reset `next_node_id` and `node_id_to_def_id`:
+        // we want `f` to be able to refer to the `LocalDefId`s that the caller created,
+        // and the caller to refer to some of the subdefinitions' nodes' `LocalDefId`s.
+
+ // Always allocate the first `HirId` for the owner itself.
+ let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::new(0));
+ debug_assert_eq!(_old, None);
+
+ let item = f(self);
+ debug_assert_eq!(def_id, item.def_id());
+ // `f` should have consumed all the elements in these vectors when constructing `item`.
+ debug_assert!(self.impl_trait_defs.is_empty());
+ debug_assert!(self.impl_trait_bounds.is_empty());
+ let info = self.make_owner_info(item);
+
+ self.attrs = current_attrs;
+ self.bodies = current_bodies;
+ self.node_id_to_local_id = current_node_ids;
+ self.local_id_to_def_id = current_id_to_def_id;
+ self.trait_map = current_trait_map;
+ self.current_hir_id_owner = current_owner;
+ self.item_local_id_counter = current_local_counter;
+ self.impl_trait_defs = current_impl_trait_defs;
+ self.impl_trait_bounds = current_impl_trait_bounds;
+
+ let _old = self.children.insert(def_id, hir::MaybeOwner::Owner(info));
+ debug_assert!(_old.is_none())
+ }
+
+ /// Installs the remapping `remap` in scope while `f` is being executed.
+ /// This causes references to the `LocalDefId` keys to be changed to
+ /// refer to the values instead.
+ ///
+ /// The remapping is used when one piece of AST expands to multiple
+ /// pieces of HIR. For example, the function `fn foo<'a>(...) -> impl Debug + 'a`,
+ /// expands to both a function definition (`foo`) and a TAIT for the return value,
+ /// both of which have a lifetime parameter `'a`. The remapping allows us to
+ /// rewrite the `'a` in the return value to refer to the
+ /// `'a` declared on the TAIT, instead of the function.
+ fn with_remapping<R>(
+ &mut self,
+ remap: FxHashMap<LocalDefId, LocalDefId>,
+ f: impl FnOnce(&mut Self) -> R,
+ ) -> R {
+ self.resolver.generics_def_id_map.push(remap);
+ let res = f(self);
+ self.resolver.generics_def_id_map.pop();
+ res
+ }
+
+ fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> &'hir hir::OwnerInfo<'hir> {
+ let attrs = std::mem::take(&mut self.attrs);
+ let mut bodies = std::mem::take(&mut self.bodies);
+ let local_id_to_def_id = std::mem::take(&mut self.local_id_to_def_id);
+ let trait_map = std::mem::take(&mut self.trait_map);
+
+ #[cfg(debug_assertions)]
+ for (id, attrs) in attrs.iter() {
+ // Verify that we do not store empty slices in the map.
+ if attrs.is_empty() {
+ panic!("Stored empty attributes for {:?}", id);
+ }
+ }
+
+ bodies.sort_by_key(|(k, _)| *k);
+ let bodies = SortedMap::from_presorted_elements(bodies);
+ let (hash_including_bodies, hash_without_bodies) = self.hash_owner(node, &bodies);
+ let (nodes, parenting) =
+ index::index_hir(self.tcx.sess, &*self.tcx.definitions_untracked(), node, &bodies);
+ let nodes = hir::OwnerNodes {
+ hash_including_bodies,
+ hash_without_bodies,
+ nodes,
+ bodies,
+ local_id_to_def_id,
+ };
+ let attrs = {
+ let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ attrs.hash_stable(&mut hcx, &mut stable_hasher);
+ stable_hasher.finish()
+ });
+ hir::AttributeMap { map: attrs, hash }
+ };
+
+ self.arena.alloc(hir::OwnerInfo { nodes, parenting, attrs, trait_map })
+ }
+
+    /// Hash the HIR node twice: once deeply (including bodies) and once shallowly. This allows us
+    /// to differentiate queries which depend on the full HIR tree from those which only depend on
+    /// the item signature.
+ fn hash_owner(
+ &mut self,
+ node: hir::OwnerNode<'hir>,
+ bodies: &SortedMap<hir::ItemLocalId, &'hir hir::Body<'hir>>,
+ ) -> (Fingerprint, Fingerprint) {
+ self.tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ hcx.with_hir_bodies(true, node.def_id(), bodies, |hcx| {
+ node.hash_stable(hcx, &mut stable_hasher)
+ });
+ let hash_including_bodies = stable_hasher.finish();
+ let mut stable_hasher = StableHasher::new();
+ hcx.with_hir_bodies(false, node.def_id(), bodies, |hcx| {
+ node.hash_stable(hcx, &mut stable_hasher)
+ });
+ let hash_without_bodies = stable_hasher.finish();
+ (hash_including_bodies, hash_without_bodies)
+ })
+ }
+
+ /// This method allocates a new `HirId` for the given `NodeId` and stores it in
+ /// the `LoweringContext`'s `NodeId => HirId` map.
+ /// Take care not to call this method if the resulting `HirId` is then not
+ /// actually used in the HIR, as that would trigger an assertion in the
+ /// `HirIdValidator` later on, which makes sure that all `NodeId`s got mapped
+ /// properly. Calling the method twice with the same `NodeId` is fine though.
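+    /// (Sketch of the behavior: the first call for a given `NodeId` allocates
+    /// the next `ItemLocalId` within the current owner; later calls with the
+    /// same `NodeId` return the same `HirId`.)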
+ fn lower_node_id(&mut self, ast_node_id: NodeId) -> hir::HirId {
+ assert_ne!(ast_node_id, DUMMY_NODE_ID);
+
+ match self.node_id_to_local_id.entry(ast_node_id) {
+ Entry::Occupied(o) => {
+ hir::HirId { owner: self.current_hir_id_owner, local_id: *o.get() }
+ }
+ Entry::Vacant(v) => {
+ // Generate a new `HirId`.
+ let owner = self.current_hir_id_owner;
+ let local_id = self.item_local_id_counter;
+ let hir_id = hir::HirId { owner, local_id };
+
+ v.insert(local_id);
+ self.item_local_id_counter.increment_by(1);
+
+ assert_ne!(local_id, hir::ItemLocalId::new(0));
+ if let Some(def_id) = self.opt_local_def_id(ast_node_id) {
+                    // Do not override a `MaybeOwner::Owner` that may already be here.
+ self.children.entry(def_id).or_insert(hir::MaybeOwner::NonOwner(hir_id));
+ self.local_id_to_def_id.insert(local_id, def_id);
+ }
+
+ if let Some(traits) = self.resolver.trait_map.remove(&ast_node_id) {
+ self.trait_map.insert(hir_id.local_id, traits.into_boxed_slice());
+ }
+
+ hir_id
+ }
+ }
+ }
+
+ /// Generate a new `HirId` without a backing `NodeId`.
+ fn next_id(&mut self) -> hir::HirId {
+ let owner = self.current_hir_id_owner;
+ let local_id = self.item_local_id_counter;
+ assert_ne!(local_id, hir::ItemLocalId::new(0));
+ self.item_local_id_counter.increment_by(1);
+ hir::HirId { owner, local_id }
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn lower_res(&mut self, res: Res<NodeId>) -> Res {
+ let res: Result<Res, ()> = res.apply_id(|id| {
+ let owner = self.current_hir_id_owner;
+ let local_id = self.node_id_to_local_id.get(&id).copied().ok_or(())?;
+ Ok(hir::HirId { owner, local_id })
+ });
+ trace!(?res);
+
+ // We may fail to find a HirId when the Res points to a Local from an enclosing HIR owner.
+        // This can happen when trying to lower the return type `x` in erroneous code like
+        //   `async fn foo(x: u8) -> x {}`.
+ // In that case, `x` is lowered as a function parameter, and the return type is lowered as
+ // an opaque type as a synthesized HIR owner.
+ res.unwrap_or(Res::Err)
+ }
+
+ fn expect_full_res(&mut self, id: NodeId) -> Res<NodeId> {
+ self.resolver.get_partial_res(id).map_or(Res::Err, |pr| {
+ if pr.unresolved_segments() != 0 {
+ panic!("path not fully resolved: {:?}", pr);
+ }
+ pr.base_res()
+ })
+ }
+
+ fn expect_full_res_from_use(&mut self, id: NodeId) -> impl Iterator<Item = Res<NodeId>> {
+ self.resolver.get_import_res(id).present_items()
+ }
+
+ fn diagnostic(&self) -> &Handler {
+ self.tcx.sess.diagnostic()
+ }
+
+ /// Reuses the span but adds information like the kind of the desugaring and features that are
+ /// allowed inside this span.
+ fn mark_span_with_reason(
+ &self,
+ reason: DesugaringKind,
+ span: Span,
+ allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ ) -> Span {
+ self.tcx.with_stable_hashing_context(|hcx| {
+ span.mark_with_reason(allow_internal_unstable, reason, self.tcx.sess.edition(), hcx)
+ })
+ }
+
+ /// Intercept all spans entering HIR.
+ /// Mark a span as relative to the current owning item.
+ fn lower_span(&self, span: Span) -> Span {
+ if self.tcx.sess.opts.unstable_opts.incremental_relative_spans {
+ span.with_parent(Some(self.current_hir_id_owner))
+ } else {
+ // Do not make spans relative when not using incremental compilation.
+ span
+ }
+ }
+
+ fn lower_ident(&self, ident: Ident) -> Ident {
+ Ident::new(ident.name, self.lower_span(ident.span))
+ }
+
+ /// Converts a lifetime into a new generic parameter.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lifetime_res_to_generic_param(
+ &mut self,
+ ident: Ident,
+ node_id: NodeId,
+ res: LifetimeRes,
+ ) -> Option<hir::GenericParam<'hir>> {
+ let (name, kind) = match res {
+ LifetimeRes::Param { .. } => {
+ (hir::ParamName::Plain(ident), hir::LifetimeParamKind::Explicit)
+ }
+ LifetimeRes::Fresh { param, .. } => {
+ // Late resolution delegates to us the creation of the `LocalDefId`.
+ let _def_id = self.create_def(
+ self.current_hir_id_owner,
+ param,
+ DefPathData::LifetimeNs(kw::UnderscoreLifetime),
+ );
+ debug!(?_def_id);
+
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ }
+ LifetimeRes::Static | LifetimeRes::Error => return None,
+ res => panic!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, ident, ident.span
+ ),
+ };
+ let hir_id = self.lower_node_id(node_id);
+ Some(hir::GenericParam {
+ hir_id,
+ name,
+ span: self.lower_span(ident.span),
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ })
+ }
+
+ /// Lowers a lifetime binder that defines `generic_params`, returning the corresponding HIR
+ /// nodes. The returned list includes any "extra" lifetime parameters that were added by the
+ /// name resolver owing to lifetime elision; this also populates the resolver's node-id->def-id
+ /// map, so that later calls to `opt_node_id_to_def_id` that refer to these extra lifetime
+ /// parameters will be successful.
+ #[tracing::instrument(level = "debug", skip(self))]
+ #[inline]
+ fn lower_lifetime_binder(
+ &mut self,
+ binder: NodeId,
+ generic_params: &[GenericParam],
+ ) -> &'hir [hir::GenericParam<'hir>] {
+ let mut generic_params: Vec<_> = self.lower_generic_params_mut(generic_params).collect();
+ let extra_lifetimes = self.resolver.take_extra_lifetime_params(binder);
+ debug!(?extra_lifetimes);
+ generic_params.extend(extra_lifetimes.into_iter().filter_map(|(ident, node_id, res)| {
+ self.lifetime_res_to_generic_param(ident, node_id, res)
+ }));
+ let generic_params = self.arena.alloc_from_iter(generic_params);
+ debug!(?generic_params);
+
+ generic_params
+ }
+
+ fn with_dyn_type_scope<T>(&mut self, in_scope: bool, f: impl FnOnce(&mut Self) -> T) -> T {
+ let was_in_dyn_type = self.is_in_dyn_type;
+ self.is_in_dyn_type = in_scope;
+
+ let result = f(self);
+
+ self.is_in_dyn_type = was_in_dyn_type;
+
+ result
+ }
+
+ fn with_new_scopes<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
+ let was_in_loop_condition = self.is_in_loop_condition;
+ self.is_in_loop_condition = false;
+
+ let catch_scope = self.catch_scope.take();
+ let loop_scope = self.loop_scope.take();
+ let ret = f(self);
+ self.catch_scope = catch_scope;
+ self.loop_scope = loop_scope;
+
+ self.is_in_loop_condition = was_in_loop_condition;
+
+ ret
+ }
+
+ fn lower_attrs(&mut self, id: hir::HirId, attrs: &[Attribute]) -> Option<&'hir [Attribute]> {
+ if attrs.is_empty() {
+ None
+ } else {
+ debug_assert_eq!(id.owner, self.current_hir_id_owner);
+ let ret = self.arena.alloc_from_iter(attrs.iter().map(|a| self.lower_attr(a)));
+ debug_assert!(!ret.is_empty());
+ self.attrs.insert(id.local_id, ret);
+ Some(ret)
+ }
+ }
+
+ fn lower_attr(&self, attr: &Attribute) -> Attribute {
+ // Note that we explicitly do not walk the path. Since we don't really
+ // lower attributes (we use the AST version) there is nowhere to keep
+ // the `HirId`s. We don't actually need HIR version of attributes anyway.
+ // Tokens are also not needed after macro expansion and parsing.
+ let kind = match attr.kind {
+ AttrKind::Normal(ref item, _) => AttrKind::Normal(
+ AttrItem {
+ path: item.path.clone(),
+ args: self.lower_mac_args(&item.args),
+ tokens: None,
+ },
+ None,
+ ),
+ AttrKind::DocComment(comment_kind, data) => AttrKind::DocComment(comment_kind, data),
+ };
+
+ Attribute { kind, id: attr.id, style: attr.style, span: self.lower_span(attr.span) }
+ }
+
+ fn alias_attrs(&mut self, id: hir::HirId, target_id: hir::HirId) {
+ debug_assert_eq!(id.owner, self.current_hir_id_owner);
+ debug_assert_eq!(target_id.owner, self.current_hir_id_owner);
+ if let Some(&a) = self.attrs.get(&target_id.local_id) {
+ debug_assert!(!a.is_empty());
+ self.attrs.insert(id.local_id, a);
+ }
+ }
+
+ fn lower_mac_args(&self, args: &MacArgs) -> MacArgs {
+ match *args {
+ MacArgs::Empty => MacArgs::Empty,
+ MacArgs::Delimited(dspan, delim, ref tokens) => {
+ // This is either a non-key-value attribute, or a `macro_rules!` body.
+                // We either do not have any nonterminals present (in the case of an attribute),
+                // or we have tokens available for all nonterminals in the case of a nested
+                // `macro_rules!`, e.g.:
+ //
+ // ```rust
+ // macro_rules! outer {
+ // ($e:expr) => {
+ // macro_rules! inner {
+ // () => { $e }
+ // }
+ // }
+ // }
+ // ```
+ //
+                // In both cases, we don't want to synthesize any tokens.
+ MacArgs::Delimited(dspan, delim, tokens.flattened())
+ }
+ // This is an inert key-value attribute - it will never be visible to macros
+ // after it gets lowered to HIR. Therefore, we can extract literals to handle
+ // nonterminals in `#[doc]` (e.g. `#[doc = $e]`).
+ MacArgs::Eq(eq_span, MacArgsEq::Ast(ref expr)) => {
+ // In valid code the value always ends up as a single literal. Otherwise, a dummy
+ // literal suffices because the error is handled elsewhere.
+ let lit = if let ExprKind::Lit(lit) = &expr.kind {
+ lit.clone()
+ } else {
+ Lit {
+ token: token::Lit::new(token::LitKind::Err, kw::Empty, None),
+ kind: LitKind::Err(kw::Empty),
+ span: DUMMY_SP,
+ }
+ };
+ MacArgs::Eq(eq_span, MacArgsEq::Hir(lit))
+ }
+ MacArgs::Eq(_, MacArgsEq::Hir(ref lit)) => {
+ unreachable!("in literal form when lowering mac args eq: {:?}", lit)
+ }
+ }
+ }
+
+ /// Given an associated type constraint like one of these:
+ ///
+ /// ```ignore (illustrative)
+ /// T: Iterator<Item: Debug>
+ /// ^^^^^^^^^^^
+ /// T: Iterator<Item = Debug>
+ /// ^^^^^^^^^^^^
+ /// ```
+ ///
+ /// returns a `hir::TypeBinding` representing `Item`.
+ #[instrument(level = "debug", skip(self))]
+ fn lower_assoc_ty_constraint(
+ &mut self,
+ constraint: &AssocConstraint,
+ itctx: ImplTraitContext,
+ ) -> hir::TypeBinding<'hir> {
+ debug!("lower_assoc_ty_constraint(constraint={:?}, itctx={:?})", constraint, itctx);
+        // Lower the generic arguments of the identifier in the constraint.
+ let gen_args = if let Some(ref gen_args) = constraint.gen_args {
+ let gen_args_ctor = match gen_args {
+ GenericArgs::AngleBracketed(ref data) => {
+ self.lower_angle_bracketed_parameter_data(data, ParamMode::Explicit, itctx).0
+ }
+ GenericArgs::Parenthesized(ref data) => {
+ self.emit_bad_parenthesized_trait_in_assoc_ty(data);
+ self.lower_angle_bracketed_parameter_data(
+ &data.as_angle_bracketed_args(),
+ ParamMode::Explicit,
+ itctx,
+ )
+ .0
+ }
+ };
+ gen_args_ctor.into_generic_args(self)
+ } else {
+ self.arena.alloc(hir::GenericArgs::none())
+ };
+
+ let kind = match constraint.kind {
+ AssocConstraintKind::Equality { ref term } => {
+ let term = match term {
+ Term::Ty(ref ty) => self.lower_ty(ty, itctx).into(),
+ Term::Const(ref c) => self.lower_anon_const(c).into(),
+ };
+ hir::TypeBindingKind::Equality { term }
+ }
+ AssocConstraintKind::Bound { ref bounds } => {
+ // Piggy-back on the `impl Trait` context to figure out the correct behavior.
+ let (desugar_to_impl_trait, itctx) = match itctx {
+ // We are in the return position:
+ //
+ // fn foo() -> impl Iterator<Item: Debug>
+ //
+ // so desugar to
+ //
+ // fn foo() -> impl Iterator<Item = impl Debug>
+ ImplTraitContext::ReturnPositionOpaqueTy { .. }
+ | ImplTraitContext::TypeAliasesOpaqueTy { .. } => (true, itctx),
+
+ // We are in the argument position, but within a dyn type:
+ //
+ // fn foo(x: dyn Iterator<Item: Debug>)
+ //
+ // so desugar to
+ //
+ // fn foo(x: dyn Iterator<Item = impl Debug>)
+ ImplTraitContext::Universal if self.is_in_dyn_type => (true, itctx),
+
+ // In `type Foo = dyn Iterator<Item: Debug>` we desugar to
+ // `type Foo = dyn Iterator<Item = impl Debug>` but we have to override the
+ // "impl trait context" to permit `impl Debug` in this position (it desugars
+ // then to an opaque type).
+ //
+ // FIXME: this is only needed until `impl Trait` is allowed in type aliases.
+ ImplTraitContext::Disallowed(_) if self.is_in_dyn_type => {
+ (true, ImplTraitContext::TypeAliasesOpaqueTy)
+ }
+
+ // We are in the parameter position, but not within a dyn type:
+ //
+ // fn foo(x: impl Iterator<Item: Debug>)
+ //
+ // so we leave it as is and this gets expanded in astconv to a bound like
+ // `<T as Iterator>::Item: Debug` where `T` is the type parameter for the
+ // `impl Iterator`.
+ _ => (false, itctx),
+ };
+
+ if desugar_to_impl_trait {
+ // Desugar `AssocTy: Bounds` into `AssocTy = impl Bounds`. We do this by
+ // constructing the HIR for `impl bounds...` and then lowering that.
+
+ let parent_def_id = self.current_hir_id_owner;
+ let impl_trait_node_id = self.next_node_id();
+ self.create_def(parent_def_id, impl_trait_node_id, DefPathData::ImplTrait);
+
+ self.with_dyn_type_scope(false, |this| {
+ let node_id = this.next_node_id();
+ let ty = this.lower_ty(
+ &Ty {
+ id: node_id,
+ kind: TyKind::ImplTrait(impl_trait_node_id, bounds.clone()),
+ span: this.lower_span(constraint.span),
+ tokens: None,
+ },
+ itctx,
+ );
+
+ hir::TypeBindingKind::Equality { term: ty.into() }
+ })
+ } else {
+                    // Desugar `AssocTy: Bounds` into a type binding that
+                    // later desugars into a trait predicate.
+ let bounds = self.lower_param_bounds(bounds, itctx);
+
+ hir::TypeBindingKind::Constraint { bounds }
+ }
+ }
+ };
+
+ hir::TypeBinding {
+ hir_id: self.lower_node_id(constraint.id),
+ ident: self.lower_ident(constraint.ident),
+ gen_args,
+ kind,
+ span: self.lower_span(constraint.span),
+ }
+ }
+
+ fn emit_bad_parenthesized_trait_in_assoc_ty(&self, data: &ParenthesizedArgs) {
+ let mut err = self.tcx.sess.struct_span_err(
+ data.span,
+ "parenthesized generic arguments cannot be used in associated type constraints",
+ );
+        // Suggest removing empty parentheses: `Trait()` -> `Trait`.
+ if data.inputs.is_empty() {
+ let parentheses_span =
+ data.inputs_span.shrink_to_lo().to(data.inputs_span.shrink_to_hi());
+ err.multipart_suggestion(
+ "remove these parentheses",
+ vec![(parentheses_span, String::new())],
+ Applicability::MaybeIncorrect,
+ );
+ }
+        // Suggest replacing the parentheses with angle brackets: `Trait(params...)` -> `Trait<params...>`.
+ else {
+            // Span from the opening parenthesis to the start of the first argument.
+ let open_param = data.inputs_span.shrink_to_lo().to(data
+ .inputs
+ .first()
+ .unwrap()
+ .span
+ .shrink_to_lo());
+            // Span from the end of the last argument to the closing parenthesis.
+ let close_param =
+ data.inputs.last().unwrap().span.shrink_to_hi().to(data.inputs_span.shrink_to_hi());
+ err.multipart_suggestion(
+                "use angle brackets instead",
+ vec![(open_param, String::from("<")), (close_param, String::from(">"))],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_generic_arg(
+ &mut self,
+ arg: &ast::GenericArg,
+ itctx: ImplTraitContext,
+ ) -> hir::GenericArg<'hir> {
+ match arg {
+ ast::GenericArg::Lifetime(lt) => GenericArg::Lifetime(self.lower_lifetime(&lt)),
+ ast::GenericArg::Type(ty) => {
+ match ty.kind {
+ TyKind::Infer if self.tcx.features().generic_arg_infer => {
+ return GenericArg::Infer(hir::InferArg {
+ hir_id: self.lower_node_id(ty.id),
+ span: self.lower_span(ty.span),
+ });
+ }
+ // We parse const arguments as path types as we cannot distinguish them during
+ // parsing. We try to resolve that ambiguity by attempting resolution in both the
+ // type and value namespaces. If we resolved the path in the value namespace, we
+ // transform it into a generic const argument.
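+                    // For example, in `Foo<N>` the argument `N` is first parsed as a type
+                    // path; if `N` resolves in the value namespace (say, to `const N: usize`),
+                    // it is lowered to a const argument below.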
+ TyKind::Path(ref qself, ref path) => {
+ if let Some(partial_res) = self.resolver.get_partial_res(ty.id) {
+ let res = partial_res.base_res();
+ if !res.matches_ns(Namespace::TypeNS) {
+ debug!(
+ "lower_generic_arg: Lowering type argument as const argument: {:?}",
+ ty,
+ );
+
+ // Construct an AnonConst where the expr is the "ty"'s path.
+
+ let parent_def_id = self.current_hir_id_owner;
+ let node_id = self.next_node_id();
+
+ // Add a definition for the in-band const def.
+ self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+
+ let span = self.lower_span(ty.span);
+ let path_expr = Expr {
+ id: ty.id,
+ kind: ExprKind::Path(qself.clone(), path.clone()),
+ span,
+ attrs: AttrVec::new(),
+ tokens: None,
+ };
+
+ let ct = self.with_new_scopes(|this| hir::AnonConst {
+ hir_id: this.lower_node_id(node_id),
+ body: this.lower_const_body(path_expr.span, Some(&path_expr)),
+ });
+ return GenericArg::Const(ConstArg { value: ct, span });
+ }
+ }
+ }
+ _ => {}
+ }
+ GenericArg::Type(self.lower_ty_direct(&ty, itctx))
+ }
+ ast::GenericArg::Const(ct) => GenericArg::Const(ConstArg {
+ value: self.lower_anon_const(&ct),
+ span: self.lower_span(ct.value.span),
+ }),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_ty(&mut self, t: &Ty, itctx: ImplTraitContext) -> &'hir hir::Ty<'hir> {
+ self.arena.alloc(self.lower_ty_direct(t, itctx))
+ }
+
+ fn lower_path_ty(
+ &mut self,
+ t: &Ty,
+ qself: &Option<QSelf>,
+ path: &Path,
+ param_mode: ParamMode,
+ itctx: ImplTraitContext,
+ ) -> hir::Ty<'hir> {
+ // Check whether we should interpret this as a bare trait object.
+ // This check mirrors the one in late resolution. We only introduce this special case in
+        // the rare cases where we need to lower `Fresh` anonymous lifetimes.
+ // The other cases when a qpath should be opportunistically made a trait object are handled
+ // by `ty_path`.
+ if qself.is_none()
+ && let Some(partial_res) = self.resolver.get_partial_res(t.id)
+ && partial_res.unresolved_segments() == 0
+ && let Res::Def(DefKind::Trait | DefKind::TraitAlias, _) = partial_res.base_res()
+ {
+ let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
+ let bound = this.lower_poly_trait_ref(
+ &PolyTraitRef {
+ bound_generic_params: vec![],
+ trait_ref: TraitRef { path: path.clone(), ref_id: t.id },
+ span: t.span
+ },
+ itctx,
+ );
+ let bounds = this.arena.alloc_from_iter([bound]);
+ let lifetime_bound = this.elided_dyn_bound(t.span);
+ (bounds, lifetime_bound)
+ });
+ let kind = hir::TyKind::TraitObject(bounds, lifetime_bound, TraitObjectSyntax::None);
+ return hir::Ty { kind, span: self.lower_span(t.span), hir_id: self.next_id() };
+ }
+
+ let id = self.lower_node_id(t.id);
+ let qpath = self.lower_qpath(t.id, qself, path, param_mode, itctx);
+ self.ty_path(id, t.span, qpath)
+ }
+
+ fn ty(&mut self, span: Span, kind: hir::TyKind<'hir>) -> hir::Ty<'hir> {
+ hir::Ty { hir_id: self.next_id(), kind, span: self.lower_span(span) }
+ }
+
+ fn ty_tup(&mut self, span: Span, tys: &'hir [hir::Ty<'hir>]) -> hir::Ty<'hir> {
+ self.ty(span, hir::TyKind::Tup(tys))
+ }
+
+ fn lower_ty_direct(&mut self, t: &Ty, itctx: ImplTraitContext) -> hir::Ty<'hir> {
+ let kind = match t.kind {
+ TyKind::Infer => hir::TyKind::Infer,
+ TyKind::Err => hir::TyKind::Err,
+ TyKind::Slice(ref ty) => hir::TyKind::Slice(self.lower_ty(ty, itctx)),
+ TyKind::Ptr(ref mt) => hir::TyKind::Ptr(self.lower_mt(mt, itctx)),
+ TyKind::Rptr(ref region, ref mt) => {
+ let region = region.unwrap_or_else(|| {
+ let id = if let Some(LifetimeRes::ElidedAnchor { start, end }) =
+ self.resolver.get_lifetime_res(t.id)
+ {
+ debug_assert_eq!(start.plus(1), end);
+ start
+ } else {
+ self.next_node_id()
+ };
+ let span = self.tcx.sess.source_map().next_point(t.span.shrink_to_lo());
+ Lifetime { ident: Ident::new(kw::UnderscoreLifetime, span), id }
+ });
+ let lifetime = self.lower_lifetime(&region);
+ hir::TyKind::Rptr(lifetime, self.lower_mt(mt, itctx))
+ }
+ TyKind::BareFn(ref f) => {
+ let generic_params = self.lower_lifetime_binder(t.id, &f.generic_params);
+ hir::TyKind::BareFn(self.arena.alloc(hir::BareFnTy {
+ generic_params,
+ unsafety: self.lower_unsafety(f.unsafety),
+ abi: self.lower_extern(f.ext),
+ decl: self.lower_fn_decl(&f.decl, None, FnDeclKind::Pointer, None),
+ param_names: self.lower_fn_params_to_names(&f.decl),
+ }))
+ }
+ TyKind::Never => hir::TyKind::Never,
+ TyKind::Tup(ref tys) => hir::TyKind::Tup(
+ self.arena.alloc_from_iter(tys.iter().map(|ty| self.lower_ty_direct(ty, itctx))),
+ ),
+ TyKind::Paren(ref ty) => {
+ return self.lower_ty_direct(ty, itctx);
+ }
+ TyKind::Path(ref qself, ref path) => {
+ return self.lower_path_ty(t, qself, path, ParamMode::Explicit, itctx);
+ }
+ TyKind::ImplicitSelf => {
+ let res = self.expect_full_res(t.id);
+ let res = self.lower_res(res);
+ hir::TyKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ res,
+ segments: arena_vec![self; hir::PathSegment::from_ident(
+ Ident::with_dummy_span(kw::SelfUpper)
+ )],
+ span: self.lower_span(t.span),
+ }),
+ ))
+ }
+ TyKind::Array(ref ty, ref length) => {
+ hir::TyKind::Array(self.lower_ty(ty, itctx), self.lower_array_length(length))
+ }
+ TyKind::Typeof(ref expr) => hir::TyKind::Typeof(self.lower_anon_const(expr)),
+ TyKind::TraitObject(ref bounds, kind) => {
+ let mut lifetime_bound = None;
+ let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
+ let bounds =
+ this.arena.alloc_from_iter(bounds.iter().filter_map(
+ |bound| match *bound {
+ GenericBound::Trait(
+ ref ty,
+ TraitBoundModifier::None | TraitBoundModifier::MaybeConst,
+ ) => Some(this.lower_poly_trait_ref(ty, itctx)),
+ // `~const ?Bound` will cause an error during AST validation
+ // anyways, so treat it like `?Bound` as compilation proceeds.
+ GenericBound::Trait(
+ _,
+ TraitBoundModifier::Maybe | TraitBoundModifier::MaybeConstMaybe,
+ ) => None,
+ GenericBound::Outlives(ref lifetime) => {
+ if lifetime_bound.is_none() {
+ lifetime_bound = Some(this.lower_lifetime(lifetime));
+ }
+ None
+ }
+ },
+ ));
+ let lifetime_bound =
+ lifetime_bound.unwrap_or_else(|| this.elided_dyn_bound(t.span));
+ (bounds, lifetime_bound)
+ });
+ hir::TyKind::TraitObject(bounds, lifetime_bound, kind)
+ }
+ TyKind::ImplTrait(def_node_id, ref bounds) => {
+ let span = t.span;
+ match itctx {
+ ImplTraitContext::ReturnPositionOpaqueTy { origin } => {
+ self.lower_opaque_impl_trait(span, origin, def_node_id, bounds, itctx)
+ }
+ ImplTraitContext::TypeAliasesOpaqueTy => {
+ let nested_itctx = ImplTraitContext::TypeAliasesOpaqueTy;
+ self.lower_opaque_impl_trait(
+ span,
+ hir::OpaqueTyOrigin::TyAlias,
+ def_node_id,
+ bounds,
+ nested_itctx,
+ )
+ }
+ ImplTraitContext::Universal => {
+ let span = t.span;
+ let ident = Ident::from_str_and_span(&pprust::ty_to_string(t), span);
+ let (param, bounds, path) =
+ self.lower_generic_and_bounds(def_node_id, span, ident, bounds);
+ self.impl_trait_defs.push(param);
+ if let Some(bounds) = bounds {
+ self.impl_trait_bounds.push(bounds);
+ }
+ path
+ }
+ ImplTraitContext::Disallowed(position) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ t.span,
+ E0562,
+ "`impl Trait` only allowed in function and inherent method return types, not in {}",
+ position
+ );
+ err.emit();
+ hir::TyKind::Err
+ }
+ }
+ }
+ TyKind::MacCall(_) => panic!("`TyKind::MacCall` should have been expanded by now"),
+ TyKind::CVarArgs => {
+ self.tcx.sess.delay_span_bug(
+ t.span,
+ "`TyKind::CVarArgs` should have been handled elsewhere",
+ );
+ hir::TyKind::Err
+ }
+ };
+
+ hir::Ty { kind, span: self.lower_span(t.span), hir_id: self.lower_node_id(t.id) }
+ }
+
+ /// Lowers a `ReturnPositionOpaqueTy` (`-> impl Trait`) or a `TypeAliasesOpaqueTy` (`type F =
+ /// impl Trait`): this creates the associated Opaque Type (TAIT) definition and then returns a
+ /// HIR type that references the TAIT.
+ ///
+ /// Given a function definition like:
+ ///
+ /// ```rust
+ /// fn test<'a, T: Debug>(x: &'a T) -> impl Debug + 'a {
+ /// x
+ /// }
+ /// ```
+ ///
+ /// we will create a TAIT definition in the HIR like
+ ///
+ /// ```
+ /// type TestReturn<'a, T, 'x> = impl Debug + 'x
+ /// ```
+ ///
+ /// and return a type like `TestReturn<'static, T, 'a>`, so that the function looks like:
+ ///
+ /// ```rust
+ /// fn test<'a, T: Debug>(x: &'a T) -> TestReturn<'static, T, 'a>
+ /// ```
+ ///
+ /// Note the subtlety around type parameters! The new TAIT, `TestReturn`, inherits all the
+ /// type parameters from the function `test` (this is implemented in the query layer, they aren't
+ /// added explicitly in the HIR). But this includes all the lifetimes, and we only want to
+ /// capture the lifetimes that are referenced in the bounds. Therefore, we add *extra* lifetime parameters
+ /// for the lifetimes that get captured (`'x`, in our example above) and reference those.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_opaque_impl_trait(
+ &mut self,
+ span: Span,
+ origin: hir::OpaqueTyOrigin,
+ opaque_ty_node_id: NodeId,
+ bounds: &GenericBounds,
+ itctx: ImplTraitContext,
+ ) -> hir::TyKind<'hir> {
+        // Make sure we know that some funky desugaring has been going on here.
+        // Unlike other desugarings, such as the one for `for` loops, which
+        // explicitly opt out of span tracking, we do want to track this one:
+        // not tracking it makes lints in rustc and clippy very fragile, as
+        // frequently opened issues show.
+ let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
+
+ let opaque_ty_def_id = self.local_def_id(opaque_ty_node_id);
+ debug!(?opaque_ty_def_id);
+
+ // Contains the new lifetime definitions created for the TAIT (if any).
+ let mut collected_lifetimes = Vec::new();
+
+ // If this came from a TAIT (as opposed to a function that returns an RPIT), we only want
+ // to capture the lifetimes that appear in the bounds. So visit the bounds to find out
+ // exactly which ones those are.
+ let lifetimes_to_remap = if origin == hir::OpaqueTyOrigin::TyAlias {
+ // in a TAIT like `type Foo<'a> = impl Foo<'a>`, we don't keep all the lifetime parameters
+ Vec::new()
+ } else {
+ // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` example,
+ // we only keep the lifetimes that appear in the `impl Debug` itself:
+ lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
+ };
+ debug!(?lifetimes_to_remap);
+
+ self.with_hir_id_owner(opaque_ty_node_id, |lctx| {
+ let mut new_remapping = FxHashMap::default();
+
+ // If this opaque type is only capturing a subset of the lifetimes (those that appear
+ // in bounds), then create the new lifetime parameters required and create a mapping
+ // from the old `'a` (on the function) to the new `'a` (on the opaque type).
+ collected_lifetimes = lctx.create_lifetime_defs(
+ opaque_ty_def_id,
+ &lifetimes_to_remap,
+ &mut new_remapping,
+ );
+ debug!(?collected_lifetimes);
+ debug!(?new_remapping);
+
+ // Install the remapping from old to new (if any):
+ lctx.with_remapping(new_remapping, |lctx| {
+ // This creates HIR lifetime definitions as `hir::GenericParam`, in the given
+ // example `type TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection
+ // containing `&['x]`.
+ let lifetime_defs = lctx.arena.alloc_from_iter(collected_lifetimes.iter().map(
+ |&(new_node_id, lifetime)| {
+ let hir_id = lctx.lower_node_id(new_node_id);
+ debug_assert_ne!(lctx.opt_local_def_id(new_node_id), None);
+
+ let (name, kind) = if lifetime.ident.name == kw::UnderscoreLifetime {
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ } else {
+ (
+ hir::ParamName::Plain(lifetime.ident),
+ hir::LifetimeParamKind::Explicit,
+ )
+ };
+
+ hir::GenericParam {
+ hir_id,
+ name,
+ span: lifetime.ident.span,
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ }
+ },
+ ));
+ debug!(?lifetime_defs);
+
+ // Then when we lower the param bounds, references to 'a are remapped to 'a1, so we
+ // get back Debug + 'a1, which is suitable for use on the TAIT.
+ let hir_bounds = lctx.lower_param_bounds(bounds, itctx);
+ debug!(?hir_bounds);
+
+ let opaque_ty_item = hir::OpaqueTy {
+                    generics: lctx.arena.alloc(hir::Generics {
+ params: lifetime_defs,
+ predicates: &[],
+ has_where_clause_predicates: false,
+ where_clause_span: lctx.lower_span(span),
+ span: lctx.lower_span(span),
+ }),
+ bounds: hir_bounds,
+ origin,
+ };
+ debug!(?opaque_ty_item);
+
+ lctx.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
+ })
+ });
+
+ // This creates HIR lifetime arguments as `hir::GenericArg`, in the given example `type
+ // TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection containing `&['x]`.
+ let lifetimes =
+ self.arena.alloc_from_iter(collected_lifetimes.into_iter().map(|(_, lifetime)| {
+ let id = self.next_node_id();
+ let span = lifetime.ident.span;
+
+ let ident = if lifetime.ident.name == kw::UnderscoreLifetime {
+ Ident::with_dummy_span(kw::UnderscoreLifetime)
+ } else {
+ lifetime.ident
+ };
+
+ let l = self.new_named_lifetime(lifetime.id, id, span, ident);
+ hir::GenericArg::Lifetime(l)
+ }));
+ debug!(?lifetimes);
+
+ // `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
+ hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, lifetimes)
+ }
+
+ /// Registers a new opaque type with the proper `NodeId`s and
+ /// returns the lowered node-ID for the opaque type.
+ fn generate_opaque_type(
+ &mut self,
+ opaque_ty_id: LocalDefId,
+ opaque_ty_item: hir::OpaqueTy<'hir>,
+ span: Span,
+ opaque_ty_span: Span,
+ ) -> hir::OwnerNode<'hir> {
+ let opaque_ty_item_kind = hir::ItemKind::OpaqueTy(opaque_ty_item);
+ // Generate an `type Foo = impl Trait;` declaration.
+ trace!("registering opaque type with id {:#?}", opaque_ty_id);
+ let opaque_ty_item = hir::Item {
+ def_id: opaque_ty_id,
+ ident: Ident::empty(),
+ kind: opaque_ty_item_kind,
+ vis_span: self.lower_span(span.shrink_to_lo()),
+ span: self.lower_span(opaque_ty_span),
+ };
+ hir::OwnerNode::Item(self.arena.alloc(opaque_ty_item))
+ }
+
+    /// Given a `parent_def_id`, a list of `lifetimes_in_bounds`, and a `remapping` hash to be
+ /// filled, this function creates new definitions for `Param` and `Fresh` lifetimes, inserts the
+ /// new definition, adds it to the remapping with the definition of the given lifetime and
+ /// returns a list of lifetimes to be lowered afterwards.
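+    /// For example, when lowering the bounds `Debug + 'a` of an opaque type, a new
+    /// definition for `'a` is created under the opaque type's `DefId` and the original
+    /// `'a` is remapped to it.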
+ fn create_lifetime_defs(
+ &mut self,
+ parent_def_id: LocalDefId,
+ lifetimes_in_bounds: &[Lifetime],
+ remapping: &mut FxHashMap<LocalDefId, LocalDefId>,
+ ) -> Vec<(NodeId, Lifetime)> {
+ let mut result = Vec::new();
+
+ for lifetime in lifetimes_in_bounds {
+ let res = self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error);
+ debug!(?res);
+
+ match res {
+ LifetimeRes::Param { param: old_def_id, binder: _ } => {
+ if remapping.get(&old_def_id).is_none() {
+ let node_id = self.next_node_id();
+
+ let new_def_id = self.create_def(
+ parent_def_id,
+ node_id,
+ DefPathData::LifetimeNs(lifetime.ident.name),
+ );
+ remapping.insert(old_def_id, new_def_id);
+
+ result.push((node_id, *lifetime));
+ }
+ }
+
+ LifetimeRes::Fresh { param, binder: _ } => {
+ debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
+ if let Some(old_def_id) = self.opt_local_def_id(param) && remapping.get(&old_def_id).is_none() {
+ let node_id = self.next_node_id();
+
+ let new_def_id = self.create_def(
+ parent_def_id,
+ node_id,
+ DefPathData::LifetimeNs(kw::UnderscoreLifetime),
+ );
+ remapping.insert(old_def_id, new_def_id);
+
+ result.push((node_id, *lifetime));
+ }
+ }
+
+ LifetimeRes::Static | LifetimeRes::Error => {}
+
+ res => {
+ let bug_msg = format!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, lifetime.ident, lifetime.ident.span
+ );
+ span_bug!(lifetime.ident.span, "{}", bug_msg);
+ }
+ }
+ }
+
+ result
+ }
+
+ fn lower_fn_params_to_names(&mut self, decl: &FnDecl) -> &'hir [Ident] {
+ // Skip the `...` (`CVarArgs`) trailing arguments from the AST,
+ // as they are not explicit in HIR/Ty function signatures.
+ // (instead, the `c_variadic` flag is set to `true`)
+ let mut inputs = &decl.inputs[..];
+ if decl.c_variadic() {
+ inputs = &inputs[..inputs.len() - 1];
+ }
+ self.arena.alloc_from_iter(inputs.iter().map(|param| match param.pat.kind {
+ PatKind::Ident(_, ident, _) => self.lower_ident(ident),
+ _ => Ident::new(kw::Empty, self.lower_span(param.pat.span)),
+ }))
+ }
+
+    // Lowers a function declaration.
+    //
+    // `decl`: the unlowered (AST) function declaration.
+    // `fn_node_id`: if `Some`, impl Trait arguments are lowered into generic parameters on the
+    //      node's DefId, otherwise impl Trait is disallowed. Must be `Some` if
+    //      `make_ret_async` is also `Some`.
+    // `kind`: the kind of declaration being lowered; among other things, this determines
+    //      whether `impl Trait` is allowed in return position (see
+    //      `FnDeclKind::impl_trait_return_allowed`), guarding against trait declarations
+    //      and implementations where `impl Trait` is disallowed.
+    // `make_ret_async`: if `Some`, converts `-> T` into `-> impl Future<Output = T>` in the
+    //      return type. This is used for `async fn` declarations. The `NodeId` is the ID of the
+    //      return type `impl Trait` item.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_fn_decl(
+ &mut self,
+ decl: &FnDecl,
+ fn_node_id: Option<NodeId>,
+ kind: FnDeclKind,
+ make_ret_async: Option<NodeId>,
+ ) -> &'hir hir::FnDecl<'hir> {
+ let c_variadic = decl.c_variadic();
+
+ // Skip the `...` (`CVarArgs`) trailing arguments from the AST,
+ // as they are not explicit in HIR/Ty function signatures.
+ // (instead, the `c_variadic` flag is set to `true`)
+ let mut inputs = &decl.inputs[..];
+ if c_variadic {
+ inputs = &inputs[..inputs.len() - 1];
+ }
+ let inputs = self.arena.alloc_from_iter(inputs.iter().map(|param| {
+ if fn_node_id.is_some() {
+ self.lower_ty_direct(&param.ty, ImplTraitContext::Universal)
+ } else {
+ self.lower_ty_direct(
+ &param.ty,
+ ImplTraitContext::Disallowed(match kind {
+ FnDeclKind::Fn | FnDeclKind::Inherent => {
+ unreachable!("fn should allow in-band lifetimes")
+ }
+ FnDeclKind::ExternFn => ImplTraitPosition::ExternFnParam,
+ FnDeclKind::Closure => ImplTraitPosition::ClosureParam,
+ FnDeclKind::Pointer => ImplTraitPosition::PointerParam,
+ FnDeclKind::Trait => ImplTraitPosition::TraitParam,
+ FnDeclKind::Impl => ImplTraitPosition::ImplParam,
+ }),
+ )
+ }
+ }));
+
+ let output = if let Some(ret_id) = make_ret_async {
+ self.lower_async_fn_ret_ty(
+ &decl.output,
+                fn_node_id.expect("`make_ret_async` but no `fn_node_id`"),
+ ret_id,
+ )
+ } else {
+ match decl.output {
+ FnRetTy::Ty(ref ty) => {
+ let context = match fn_node_id {
+ Some(fn_node_id) if kind.impl_trait_return_allowed() => {
+ let fn_def_id = self.local_def_id(fn_node_id);
+ ImplTraitContext::ReturnPositionOpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
+ }
+ }
+ _ => ImplTraitContext::Disallowed(match kind {
+ FnDeclKind::Fn | FnDeclKind::Inherent => {
+ unreachable!("fn should allow in-band lifetimes")
+ }
+ FnDeclKind::ExternFn => ImplTraitPosition::ExternFnReturn,
+ FnDeclKind::Closure => ImplTraitPosition::ClosureReturn,
+ FnDeclKind::Pointer => ImplTraitPosition::PointerReturn,
+ FnDeclKind::Trait => ImplTraitPosition::TraitReturn,
+ FnDeclKind::Impl => ImplTraitPosition::ImplReturn,
+ }),
+ };
+ hir::FnRetTy::Return(self.lower_ty(ty, context))
+ }
+ FnRetTy::Default(span) => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
+ }
+ };
+
+ self.arena.alloc(hir::FnDecl {
+ inputs,
+ output,
+ c_variadic,
+ implicit_self: decl.inputs.get(0).map_or(hir::ImplicitSelfKind::None, |arg| {
+ use BindingMode::{ByRef, ByValue};
+ let is_mutable_pat = matches!(
+ arg.pat.kind,
+ PatKind::Ident(ByValue(Mutability::Mut) | ByRef(Mutability::Mut), ..)
+ );
+
+ match arg.ty.kind {
+ TyKind::ImplicitSelf if is_mutable_pat => hir::ImplicitSelfKind::Mut,
+ TyKind::ImplicitSelf => hir::ImplicitSelfKind::Imm,
+ // Given we are only considering `ImplicitSelf` types, we needn't consider
+ // the case where we have a mutable pattern to a reference as that would
+ // no longer be an `ImplicitSelf`.
+ TyKind::Rptr(_, ref mt)
+ if mt.ty.kind.is_implicit_self() && mt.mutbl == ast::Mutability::Mut =>
+ {
+ hir::ImplicitSelfKind::MutRef
+ }
+ TyKind::Rptr(_, ref mt) if mt.ty.kind.is_implicit_self() => {
+ hir::ImplicitSelfKind::ImmRef
+ }
+ _ => hir::ImplicitSelfKind::None,
+ }
+ }),
+ })
+ }
+
+ // Transforms `-> T` for `async fn` into `-> OpaqueTy { .. }`
+ // combined with the following definition of `OpaqueTy`:
+ //
+ // type OpaqueTy<generics_from_parent_fn> = impl Future<Output = T>;
+ //
+ // `output`: unlowered output type (`T` in `-> T`)
+ // `fn_def_id`: `DefId` of the parent function (used to create child impl trait definition)
+ // `opaque_ty_node_id`: `NodeId` of the opaque `impl Trait` type that should be created
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_async_fn_ret_ty(
+ &mut self,
+ output: &FnRetTy,
+ fn_node_id: NodeId,
+ opaque_ty_node_id: NodeId,
+ ) -> hir::FnRetTy<'hir> {
+ let span = output.span();
+
+ let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
+
+ let opaque_ty_def_id = self.local_def_id(opaque_ty_node_id);
+ let fn_def_id = self.local_def_id(fn_node_id);
+
+ // When we create the opaque type for this async fn, it is going to have
+ // to capture all the lifetimes involved in the signature (including in the
+ // return type). This is done by introducing lifetime parameters for:
+ //
+ // - all the explicitly declared lifetimes from the impl and function itself;
+ // - all the elided lifetimes in the fn arguments;
+ // - all the elided lifetimes in the return type.
+ //
+ // So for example in this snippet:
+ //
+ // ```rust
+ // impl<'a> Foo<'a> {
+ // async fn bar<'b>(&self, x: &'b Vec<f64>, y: &str) -> &u32 {
+ // // ^ '0 ^ '1 ^ '2
+ // // elided lifetimes used below
+ // }
+ // }
+ // ```
+ //
+ // we would create an opaque type like:
+ //
+ // ```
+ // type Bar<'a, 'b, '0, '1, '2> = impl Future<Output = &'2 u32>;
+ // ```
+ //
+ // and we would then desugar `bar` to the equivalent of:
+ //
+ // ```rust
+ // impl<'a> Foo<'a> {
+ // fn bar<'b, '0, '1>(&'0 self, x: &'b Vec<f64>, y: &'1 str) -> Bar<'a, 'b, '0, '1, '_>
+ // }
+ // ```
+ //
+ // Note that the final parameter to `Bar` is `'_`, not `'2` --
+ // this is because the elided lifetimes from the return type
+ // should be figured out using the ordinary elision rules, and
+ // this desugaring achieves that.
+
+ // Calculate all the lifetimes that should be captured
+ // by the opaque type. This should include all in-scope
+ // lifetime parameters, including those defined in-band.
+
+ // Contains the new lifetime definitions created for the TAIT (if any) generated for the
+ // return type.
+ let mut collected_lifetimes = Vec::new();
+ let mut new_remapping = FxHashMap::default();
+
+ let extra_lifetime_params = self.resolver.take_extra_lifetime_params(opaque_ty_node_id);
+ debug!(?extra_lifetime_params);
+ for (ident, outer_node_id, outer_res) in extra_lifetime_params {
+ let outer_def_id = self.local_def_id(outer_node_id);
+ let inner_node_id = self.next_node_id();
+
+ // Add a definition for the in scope lifetime def.
+ let inner_def_id = self.create_def(
+ opaque_ty_def_id,
+ inner_node_id,
+ DefPathData::LifetimeNs(ident.name),
+ );
+ new_remapping.insert(outer_def_id, inner_def_id);
+
+ let inner_res = match outer_res {
+ // Input lifetime like `'a`:
+ LifetimeRes::Param { param, .. } => {
+ LifetimeRes::Param { param, binder: fn_node_id }
+ }
+ // Input lifetime like `'1`:
+ LifetimeRes::Fresh { param, .. } => {
+ LifetimeRes::Fresh { param, binder: fn_node_id }
+ }
+ LifetimeRes::Static | LifetimeRes::Error => continue,
+ res => {
+ panic!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, ident, ident.span
+ )
+ }
+ };
+
+ let lifetime = Lifetime { id: outer_node_id, ident };
+ collected_lifetimes.push((inner_node_id, lifetime, Some(inner_res)));
+ }
+
+ debug!(?collected_lifetimes);
+
+ // We only want to capture the lifetimes that appear in the bounds. So visit the bounds to
+ // find out exactly which ones those are.
+ // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` example,
+ // we only keep the lifetimes that appear in the `impl Debug` itself:
+ let lifetimes_to_remap = lifetime_collector::lifetimes_in_ret_ty(&self.resolver, output);
+ debug!(?lifetimes_to_remap);
+
+ self.with_hir_id_owner(opaque_ty_node_id, |this| {
+ // If this opaque type is only capturing a subset of the lifetimes (those that appear
+ // in bounds), then create the new lifetime parameters required and create a mapping
+ // from the old `'a` (on the function) to the new `'a` (on the opaque type).
+ collected_lifetimes.extend(
+ this.create_lifetime_defs(
+ opaque_ty_def_id,
+ &lifetimes_to_remap,
+ &mut new_remapping,
+ )
+ .into_iter()
+ .map(|(new_node_id, lifetime)| (new_node_id, lifetime, None)),
+ );
+ debug!(?collected_lifetimes);
+ debug!(?new_remapping);
+
+ // Install the remapping from old to new (if any):
+ this.with_remapping(new_remapping, |this| {
+ // We have to be careful to get elision right here. The
+ // idea is that we create a lifetime parameter for each
+ // lifetime in the return type. So, given a return type
+ // like `async fn foo(..) -> &[&u32]`, we lower to `impl
+ // Future<Output = &'1 [ &'2 u32 ]>`.
+ //
+ // Then, we will create `fn foo(..) -> Foo<'_, '_>`, and
+ // hence the elision takes place at the fn site.
+ let future_bound =
+ this.lower_async_fn_output_type_to_future_bound(output, fn_def_id, span);
+
+ let generic_params = this.arena.alloc_from_iter(collected_lifetimes.iter().map(
+ |&(new_node_id, lifetime, _)| {
+ let hir_id = this.lower_node_id(new_node_id);
+ debug_assert_ne!(this.opt_local_def_id(new_node_id), None);
+
+ let (name, kind) = if lifetime.ident.name == kw::UnderscoreLifetime {
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ } else {
+ (
+ hir::ParamName::Plain(lifetime.ident),
+ hir::LifetimeParamKind::Explicit,
+ )
+ };
+
+ hir::GenericParam {
+ hir_id,
+ name,
+ span: lifetime.ident.span,
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ }
+ },
+ ));
+ debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
+
+ let opaque_ty_item = hir::OpaqueTy {
+ generics: this.arena.alloc(hir::Generics {
+ params: generic_params,
+ predicates: &[],
+ has_where_clause_predicates: false,
+ where_clause_span: this.lower_span(span),
+ span: this.lower_span(span),
+ }),
+ bounds: arena_vec![this; future_bound],
+ origin: hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+ };
+
+ trace!("exist ty from async fn def id: {:#?}", opaque_ty_def_id);
+ this.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
+ })
+ });
+
+ // As documented above, we need to create the lifetime
+ // arguments to our opaque type. Continuing with our example,
+ // we're creating the type arguments for the return type:
+ //
+ // ```
+ // Bar<'a, 'b, '0, '1, '_>
+ // ```
+ //
+ // For the "input" lifetime parameters, we wish to create
+ // references to the parameters themselves, including the
+ // "implicit" ones created from parameter types (`'a`, `'b`,
+ // '`0`, `'1`).
+ //
+ // For the "output" lifetime parameters, we just want to
+ // generate `'_`.
+ let generic_args = self.arena.alloc_from_iter(collected_lifetimes.into_iter().map(
+ |(_, lifetime, res)| {
+ let id = self.next_node_id();
+ let span = lifetime.ident.span;
+
+ let ident = if lifetime.ident.name == kw::UnderscoreLifetime {
+ Ident::with_dummy_span(kw::UnderscoreLifetime)
+ } else {
+ lifetime.ident
+ };
+
+ let res = res.unwrap_or(
+ self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error),
+ );
+ let l = self.new_named_lifetime_with_res(id, span, ident, res);
+ hir::GenericArg::Lifetime(l)
+ },
+ ));
+
+ // Create the `Foo<...>` reference itself. Note that the `type
+ // Foo = impl Trait` is, internally, created as a child of the
+ // async fn, so the *type parameters* are inherited. It's
+ // only the lifetime parameters that we must supply.
+ let opaque_ty_ref =
+ hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, generic_args);
+ let opaque_ty = self.ty(opaque_ty_span, opaque_ty_ref);
+ hir::FnRetTy::Return(self.arena.alloc(opaque_ty))
+ }
+
+ /// Transforms `-> T` into `Future<Output = T>`.
+ fn lower_async_fn_output_type_to_future_bound(
+ &mut self,
+ output: &FnRetTy,
+ fn_def_id: LocalDefId,
+ span: Span,
+ ) -> hir::GenericBound<'hir> {
+ // Compute the `T` in `Future<Output = T>` from the return type.
+ let output_ty = match output {
+ FnRetTy::Ty(ty) => {
+ // Not `OpaqueTyOrigin::AsyncFn`: that's only used for the
+ // `impl Future` opaque type that `async fn` implicitly
+ // generates.
+ let context = ImplTraitContext::ReturnPositionOpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
+ };
+ self.lower_ty(ty, context)
+ }
+ FnRetTy::Default(ret_ty_span) => self.arena.alloc(self.ty_tup(*ret_ty_span, &[])),
+ };
+
+ // "<Output = T>"
+ let future_args = self.arena.alloc(hir::GenericArgs {
+ args: &[],
+ bindings: arena_vec![self; self.output_ty_binding(span, output_ty)],
+ parenthesized: false,
+ span_ext: DUMMY_SP,
+ });
+
+ hir::GenericBound::LangItemTrait(
+ // ::std::future::Future<future_params>
+ hir::LangItem::Future,
+ self.lower_span(span),
+ self.next_id(),
+ future_args,
+ )
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn lower_param_bound(
+ &mut self,
+ tpb: &GenericBound,
+ itctx: ImplTraitContext,
+ ) -> hir::GenericBound<'hir> {
+ match tpb {
+ GenericBound::Trait(p, modifier) => hir::GenericBound::Trait(
+ self.lower_poly_trait_ref(p, itctx),
+ self.lower_trait_bound_modifier(*modifier),
+ ),
+ GenericBound::Outlives(lifetime) => {
+ hir::GenericBound::Outlives(self.lower_lifetime(lifetime))
+ }
+ }
+ }
+
+ fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime {
+ let span = self.lower_span(l.ident.span);
+ let ident = self.lower_ident(l.ident);
+ self.new_named_lifetime(l.id, l.id, span, ident)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn new_named_lifetime_with_res(
+ &mut self,
+ id: NodeId,
+ span: Span,
+ ident: Ident,
+ res: LifetimeRes,
+ ) -> hir::Lifetime {
+ let name = match res {
+ LifetimeRes::Param { param, .. } => {
+ let p_name = ParamName::Plain(ident);
+ let param = self.resolver.get_remapped_def_id(param);
+
+ hir::LifetimeName::Param(param, p_name)
+ }
+ LifetimeRes::Fresh { param, .. } => {
+ debug_assert_eq!(ident.name, kw::UnderscoreLifetime);
+ let param = self.local_def_id(param);
+
+ hir::LifetimeName::Param(param, ParamName::Fresh)
+ }
+ LifetimeRes::Infer => hir::LifetimeName::Infer,
+ LifetimeRes::Static => hir::LifetimeName::Static,
+ LifetimeRes::Error => hir::LifetimeName::Error,
+ res => panic!("Unexpected lifetime resolution {:?} for {:?} at {:?}", res, ident, span),
+ };
+
+ debug!(?name);
+ hir::Lifetime { hir_id: self.lower_node_id(id), span: self.lower_span(span), name }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn new_named_lifetime(
+ &mut self,
+ id: NodeId,
+ new_id: NodeId,
+ span: Span,
+ ident: Ident,
+ ) -> hir::Lifetime {
+ let res = self.resolver.get_lifetime_res(id).unwrap_or(LifetimeRes::Error);
+ self.new_named_lifetime_with_res(new_id, span, ident, res)
+ }
+
+ fn lower_generic_params_mut<'s>(
+ &'s mut self,
+ params: &'s [GenericParam],
+ ) -> impl Iterator<Item = hir::GenericParam<'hir>> + Captures<'a> + Captures<'s> {
+ params.iter().map(move |param| self.lower_generic_param(param))
+ }
+
+ fn lower_generic_params(&mut self, params: &[GenericParam]) -> &'hir [hir::GenericParam<'hir>] {
+ self.arena.alloc_from_iter(self.lower_generic_params_mut(params))
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn lower_generic_param(&mut self, param: &GenericParam) -> hir::GenericParam<'hir> {
+ let (name, kind) = self.lower_generic_param_kind(param);
+
+ let hir_id = self.lower_node_id(param.id);
+ self.lower_attrs(hir_id, &param.attrs);
+ hir::GenericParam {
+ hir_id,
+ name,
+ span: self.lower_span(param.span()),
+ pure_wrt_drop: self.tcx.sess.contains_name(&param.attrs, sym::may_dangle),
+ kind,
+ colon_span: param.colon_span.map(|s| self.lower_span(s)),
+ }
+ }
+
+ fn lower_generic_param_kind(
+ &mut self,
+ param: &GenericParam,
+ ) -> (hir::ParamName, hir::GenericParamKind<'hir>) {
+ match param.kind {
+ GenericParamKind::Lifetime => {
+ // AST resolution emitted an error on those parameters, so we lower them using
+ // `ParamName::Error`.
+ let param_name =
+ if let Some(LifetimeRes::Error) = self.resolver.get_lifetime_res(param.id) {
+ ParamName::Error
+ } else {
+ let ident = self.lower_ident(param.ident);
+ ParamName::Plain(ident)
+ };
+ let kind =
+ hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit };
+
+ (param_name, kind)
+ }
+ GenericParamKind::Type { ref default, .. } => {
+ let kind = hir::GenericParamKind::Type {
+ default: default.as_ref().map(|x| {
+ self.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ }),
+ synthetic: false,
+ };
+
+ (hir::ParamName::Plain(self.lower_ident(param.ident)), kind)
+ }
+ GenericParamKind::Const { ref ty, kw_span: _, ref default } => {
+ let ty = self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let default = default.as_ref().map(|def| self.lower_anon_const(def));
+ (
+ hir::ParamName::Plain(self.lower_ident(param.ident)),
+ hir::GenericParamKind::Const { ty, default },
+ )
+ }
+ }
+ }
+
+ fn lower_trait_ref(&mut self, p: &TraitRef, itctx: ImplTraitContext) -> hir::TraitRef<'hir> {
+ let path = match self.lower_qpath(p.ref_id, &None, &p.path, ParamMode::Explicit, itctx) {
+ hir::QPath::Resolved(None, path) => path,
+ qpath => panic!("lower_trait_ref: unexpected QPath `{:?}`", qpath),
+ };
+ hir::TraitRef { path, hir_ref_id: self.lower_node_id(p.ref_id) }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_poly_trait_ref(
+ &mut self,
+ p: &PolyTraitRef,
+ itctx: ImplTraitContext,
+ ) -> hir::PolyTraitRef<'hir> {
+ let bound_generic_params =
+ self.lower_lifetime_binder(p.trait_ref.ref_id, &p.bound_generic_params);
+ let trait_ref = self.lower_trait_ref(&p.trait_ref, itctx);
+ hir::PolyTraitRef { bound_generic_params, trait_ref, span: self.lower_span(p.span) }
+ }
+
+ fn lower_mt(&mut self, mt: &MutTy, itctx: ImplTraitContext) -> hir::MutTy<'hir> {
+ hir::MutTy { ty: self.lower_ty(&mt.ty, itctx), mutbl: mt.mutbl }
+ }
+
+ fn lower_param_bounds(
+ &mut self,
+ bounds: &[GenericBound],
+ itctx: ImplTraitContext,
+ ) -> hir::GenericBounds<'hir> {
+ self.arena.alloc_from_iter(self.lower_param_bounds_mut(bounds, itctx))
+ }
+
+ fn lower_param_bounds_mut<'s>(
+ &'s mut self,
+ bounds: &'s [GenericBound],
+ itctx: ImplTraitContext,
+ ) -> impl Iterator<Item = hir::GenericBound<'hir>> + Captures<'s> + Captures<'a> {
+ bounds.iter().map(move |bound| self.lower_param_bound(bound, itctx))
+ }
+
+ fn lower_generic_and_bounds(
+ &mut self,
+ node_id: NodeId,
+ span: Span,
+ ident: Ident,
+ bounds: &[GenericBound],
+ ) -> (hir::GenericParam<'hir>, Option<hir::WherePredicate<'hir>>, hir::TyKind<'hir>) {
+ // Add a definition for the in-band `Param`.
+ let def_id = self.local_def_id(node_id);
+
+ // Set the name to `impl Bound1 + Bound2`.
+ let param = hir::GenericParam {
+ hir_id: self.lower_node_id(node_id),
+ name: ParamName::Plain(self.lower_ident(ident)),
+ pure_wrt_drop: false,
+ span: self.lower_span(span),
+ kind: hir::GenericParamKind::Type { default: None, synthetic: true },
+ colon_span: None,
+ };
+
+ let preds = self.lower_generic_bound_predicate(
+ ident,
+ node_id,
+ &GenericParamKind::Type { default: None },
+ bounds,
+ ImplTraitContext::Universal,
+ hir::PredicateOrigin::ImplTrait,
+ );
+
+ let ty = hir::TyKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(span),
+ res: Res::Def(DefKind::TyParam, def_id.to_def_id()),
+ segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
+ }),
+ ));
+
+ (param, preds, ty)
+ }
+
+ /// Lowers a block directly to an expression, presuming that it
+ /// has no attributes and is not targeted by a `break`.
+ fn lower_block_expr(&mut self, b: &Block) -> hir::Expr<'hir> {
+ let block = self.lower_block(b, false);
+ self.expr_block(block, AttrVec::new())
+ }
+
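+    /// Lowers an array length. Under the `generic_arg_infer` feature, `[T; _]` lowers to
+    /// an inferred length; otherwise using `_` is a feature error and the `_` is lowered
+    /// as an ordinary anonymous constant.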
+ fn lower_array_length(&mut self, c: &AnonConst) -> hir::ArrayLen {
+ match c.value.kind {
+ ExprKind::Underscore => {
+ if self.tcx.features().generic_arg_infer {
+ hir::ArrayLen::Infer(self.lower_node_id(c.id), c.value.span)
+ } else {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::generic_arg_infer,
+ c.value.span,
+ "using `_` for array lengths is unstable",
+ )
+ .emit();
+ hir::ArrayLen::Body(self.lower_anon_const(c))
+ }
+ }
+ _ => hir::ArrayLen::Body(self.lower_anon_const(c)),
+ }
+ }
+
+ fn lower_anon_const(&mut self, c: &AnonConst) -> hir::AnonConst {
+ self.with_new_scopes(|this| hir::AnonConst {
+ hir_id: this.lower_node_id(c.id),
+ body: this.lower_const_body(c.value.span, Some(&c.value)),
+ })
+ }
+
+ fn lower_unsafe_source(&mut self, u: UnsafeSource) -> hir::UnsafeSource {
+ match u {
+ CompilerGenerated => hir::UnsafeSource::CompilerGenerated,
+ UserProvided => hir::UnsafeSource::UserProvided,
+ }
+ }
+
+ fn lower_trait_bound_modifier(&mut self, f: TraitBoundModifier) -> hir::TraitBoundModifier {
+ match f {
+ TraitBoundModifier::None => hir::TraitBoundModifier::None,
+ TraitBoundModifier::MaybeConst => hir::TraitBoundModifier::MaybeConst,
+
+ // `MaybeConstMaybe` will cause an error during AST validation, but we need to pick a
+ // placeholder for compilation to proceed.
+ TraitBoundModifier::MaybeConstMaybe | TraitBoundModifier::Maybe => {
+ hir::TraitBoundModifier::Maybe
+ }
+ }
+ }
+
+ // Helper methods for building HIR.
+
+ fn stmt(&mut self, span: Span, kind: hir::StmtKind<'hir>) -> hir::Stmt<'hir> {
+ hir::Stmt { span: self.lower_span(span), kind, hir_id: self.next_id() }
+ }
+
+ fn stmt_expr(&mut self, span: Span, expr: hir::Expr<'hir>) -> hir::Stmt<'hir> {
+ self.stmt(span, hir::StmtKind::Expr(self.arena.alloc(expr)))
+ }
+
+ fn stmt_let_pat(
+ &mut self,
+ attrs: Option<&'hir [Attribute]>,
+ span: Span,
+ init: Option<&'hir hir::Expr<'hir>>,
+ pat: &'hir hir::Pat<'hir>,
+ source: hir::LocalSource,
+ ) -> hir::Stmt<'hir> {
+ let hir_id = self.next_id();
+ if let Some(a) = attrs {
+ debug_assert!(!a.is_empty());
+ self.attrs.insert(hir_id.local_id, a);
+ }
+ let local = hir::Local {
+ hir_id,
+ init,
+ pat,
+ els: None,
+ source,
+ span: self.lower_span(span),
+ ty: None,
+ };
+ self.stmt(span, hir::StmtKind::Local(self.arena.alloc(local)))
+ }
+
+ fn block_expr(&mut self, expr: &'hir hir::Expr<'hir>) -> &'hir hir::Block<'hir> {
+ self.block_all(expr.span, &[], Some(expr))
+ }
+
+ fn block_all(
+ &mut self,
+ span: Span,
+ stmts: &'hir [hir::Stmt<'hir>],
+ expr: Option<&'hir hir::Expr<'hir>>,
+ ) -> &'hir hir::Block<'hir> {
+ let blk = hir::Block {
+ stmts,
+ expr,
+ hir_id: self.next_id(),
+ rules: hir::BlockCheckMode::DefaultBlock,
+ span: self.lower_span(span),
+ targeted_by_break: false,
+ };
+ self.arena.alloc(blk)
+ }
+
+ fn pat_cf_continue(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
+ let field = self.single_pat_field(span, pat);
+ self.pat_lang_item_variant(span, hir::LangItem::ControlFlowContinue, field, None)
+ }
+
+ fn pat_cf_break(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
+ let field = self.single_pat_field(span, pat);
+ self.pat_lang_item_variant(span, hir::LangItem::ControlFlowBreak, field, None)
+ }
+
+ fn pat_some(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
+ let field = self.single_pat_field(span, pat);
+ self.pat_lang_item_variant(span, hir::LangItem::OptionSome, field, None)
+ }
+
+ fn pat_none(&mut self, span: Span) -> &'hir hir::Pat<'hir> {
+ self.pat_lang_item_variant(span, hir::LangItem::OptionNone, &[], None)
+ }
+
+ fn single_pat_field(
+ &mut self,
+ span: Span,
+ pat: &'hir hir::Pat<'hir>,
+ ) -> &'hir [hir::PatField<'hir>] {
+ let field = hir::PatField {
+ hir_id: self.next_id(),
+ ident: Ident::new(sym::integer(0), self.lower_span(span)),
+ is_shorthand: false,
+ pat,
+ span: self.lower_span(span),
+ };
+ arena_vec![self; field]
+ }
+
+ fn pat_lang_item_variant(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ fields: &'hir [hir::PatField<'hir>],
+ hir_id: Option<hir::HirId>,
+ ) -> &'hir hir::Pat<'hir> {
+ let qpath = hir::QPath::LangItem(lang_item, self.lower_span(span), hir_id);
+ self.pat(span, hir::PatKind::Struct(qpath, fields, false))
+ }
+
+ fn pat_ident(&mut self, span: Span, ident: Ident) -> (&'hir hir::Pat<'hir>, hir::HirId) {
+ self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::Unannotated)
+ }
+
+ fn pat_ident_mut(&mut self, span: Span, ident: Ident) -> (hir::Pat<'hir>, hir::HirId) {
+ self.pat_ident_binding_mode_mut(span, ident, hir::BindingAnnotation::Unannotated)
+ }
+
+ fn pat_ident_binding_mode(
+ &mut self,
+ span: Span,
+ ident: Ident,
+ bm: hir::BindingAnnotation,
+ ) -> (&'hir hir::Pat<'hir>, hir::HirId) {
+ let (pat, hir_id) = self.pat_ident_binding_mode_mut(span, ident, bm);
+ (self.arena.alloc(pat), hir_id)
+ }
+
+ fn pat_ident_binding_mode_mut(
+ &mut self,
+ span: Span,
+ ident: Ident,
+ bm: hir::BindingAnnotation,
+ ) -> (hir::Pat<'hir>, hir::HirId) {
+ let hir_id = self.next_id();
+
+ (
+ hir::Pat {
+ hir_id,
+ kind: hir::PatKind::Binding(bm, hir_id, self.lower_ident(ident), None),
+ span: self.lower_span(span),
+ default_binding_modes: true,
+ },
+ hir_id,
+ )
+ }
+
+ fn pat(&mut self, span: Span, kind: hir::PatKind<'hir>) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(hir::Pat {
+ hir_id: self.next_id(),
+ kind,
+ span: self.lower_span(span),
+ default_binding_modes: true,
+ })
+ }
+
+ fn pat_without_dbm(&mut self, span: Span, kind: hir::PatKind<'hir>) -> hir::Pat<'hir> {
+ hir::Pat {
+ hir_id: self.next_id(),
+ kind,
+ span: self.lower_span(span),
+ default_binding_modes: false,
+ }
+ }
+
+ fn ty_path(
+ &mut self,
+ mut hir_id: hir::HirId,
+ span: Span,
+ qpath: hir::QPath<'hir>,
+ ) -> hir::Ty<'hir> {
+ let kind = match qpath {
+ hir::QPath::Resolved(None, path) => {
+ // Turn trait object paths into `TyKind::TraitObject` instead.
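+                // E.g. a bare trait object like `Box<Write>` (without `dyn`) still
+                // resolves its path to a trait, so it is lowered as a trait object
+                // with an elided lifetime bound.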
+ match path.res {
+ Res::Def(DefKind::Trait | DefKind::TraitAlias, _) => {
+ let principal = hir::PolyTraitRef {
+ bound_generic_params: &[],
+ trait_ref: hir::TraitRef { path, hir_ref_id: hir_id },
+ span: self.lower_span(span),
+ };
+
+ // The original ID is taken by the `PolyTraitRef`,
+ // so the `Ty` itself needs a different one.
+ hir_id = self.next_id();
+ hir::TyKind::TraitObject(
+ arena_vec![self; principal],
+ self.elided_dyn_bound(span),
+ TraitObjectSyntax::None,
+ )
+ }
+ _ => hir::TyKind::Path(hir::QPath::Resolved(None, path)),
+ }
+ }
+ _ => hir::TyKind::Path(qpath),
+ };
+
+ hir::Ty { hir_id, kind, span: self.lower_span(span) }
+ }
+
+ /// Invoked to create the lifetime argument(s) for an elided trait object
+ /// bound, like the bound in `Box<dyn Debug>`. This method is not invoked
+ /// when the bound is written, even if it is written with `'_` like in
+ /// `Box<dyn Debug + '_>`. In those cases, `lower_lifetime` is invoked.
+ fn elided_dyn_bound(&mut self, span: Span) -> hir::Lifetime {
+ let r = hir::Lifetime {
+ hir_id: self.next_id(),
+ span: self.lower_span(span),
+ name: hir::LifetimeName::ImplicitObjectLifetimeDefault,
+ };
+ debug!("elided_dyn_bound: r={:?}", r);
+ r
+ }
+}
+
+/// Helper struct for delayed construction of GenericArgs.
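+/// Arguments are accumulated in a `SmallVec` and only arena-allocated at the end,
+/// when `into_generic_args` is called.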
+struct GenericArgsCtor<'hir> {
+ args: SmallVec<[hir::GenericArg<'hir>; 4]>,
+ bindings: &'hir [hir::TypeBinding<'hir>],
+ parenthesized: bool,
+ span: Span,
+}
+
+impl<'hir> GenericArgsCtor<'hir> {
+ fn is_empty(&self) -> bool {
+ self.args.is_empty() && self.bindings.is_empty() && !self.parenthesized
+ }
+
+ fn into_generic_args(self, this: &LoweringContext<'_, 'hir>) -> &'hir hir::GenericArgs<'hir> {
+ let ga = hir::GenericArgs {
+ args: this.arena.alloc_from_iter(self.args),
+ bindings: self.bindings,
+ parenthesized: self.parenthesized,
+ span_ext: this.lower_span(self.span),
+ };
+ this.arena.alloc(ga)
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/lifetime_collector.rs b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
new file mode 100644
index 000000000..81006e00f
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
@@ -0,0 +1,115 @@
+use super::ResolverAstLoweringExt;
+use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
+use rustc_ast::{
+ FnRetTy, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, TraitBoundModifier, Ty,
+ TyKind,
+};
+use rustc_hir::def::LifetimeRes;
+use rustc_middle::span_bug;
+use rustc_middle::ty::ResolverAstLowering;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::Span;
+
+struct LifetimeCollectVisitor<'ast> {
+ resolver: &'ast ResolverAstLowering,
+ current_binders: Vec<NodeId>,
+ collected_lifetimes: Vec<Lifetime>,
+}
+
+impl<'ast> LifetimeCollectVisitor<'ast> {
+ fn new(resolver: &'ast ResolverAstLowering) -> Self {
+ Self { resolver, current_binders: Vec::new(), collected_lifetimes: Vec::new() }
+ }
+
+ fn record_lifetime_use(&mut self, lifetime: Lifetime) {
+ match self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error) {
+ LifetimeRes::Param { binder, .. } | LifetimeRes::Fresh { binder, .. } => {
+ if !self.current_binders.contains(&binder) {
+ if !self.collected_lifetimes.contains(&lifetime) {
+ self.collected_lifetimes.push(lifetime);
+ }
+ }
+ }
+ LifetimeRes::Static | LifetimeRes::Error => {
+ if !self.collected_lifetimes.contains(&lifetime) {
+ self.collected_lifetimes.push(lifetime);
+ }
+ }
+ LifetimeRes::Infer => {}
+ res => {
+ let bug_msg = format!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, lifetime.ident, lifetime.ident.span
+ );
+ span_bug!(lifetime.ident.span, "{}", bug_msg);
+ }
+ }
+ }
+
+    /// This collects lifetimes that are elided, for nodes like `Foo<T>` where there are no
+    /// explicit lifetime nodes. It is equivalent to having "pseudo" lifetime nodes introduced
+    /// for each of the node ids in the range `start..end`.
+ fn record_elided_anchor(&mut self, node_id: NodeId, span: Span) {
+ if let Some(LifetimeRes::ElidedAnchor { start, end }) =
+ self.resolver.get_lifetime_res(node_id)
+ {
+ for i in start..end {
+ let lifetime = Lifetime { id: i, ident: Ident::new(kw::UnderscoreLifetime, span) };
+ self.record_lifetime_use(lifetime);
+ }
+ }
+ }
+}
+
+impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
+ fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, _: LifetimeCtxt) {
+ self.record_lifetime_use(*lifetime);
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
+ self.record_elided_anchor(path_segment.id, path_span);
+ visit::walk_path_segment(self, path_span, path_segment);
+ }
+
+ fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
+ self.current_binders.push(t.trait_ref.ref_id);
+
+ visit::walk_poly_trait_ref(self, t, m);
+
+ self.current_binders.pop();
+ }
+
+ fn visit_ty(&mut self, t: &'ast Ty) {
+ match t.kind {
+ TyKind::BareFn(_) => {
+ self.current_binders.push(t.id);
+ visit::walk_ty(self, t);
+ self.current_binders.pop();
+ }
+ TyKind::Rptr(None, _) => {
+ self.record_elided_anchor(t.id, t.span);
+ visit::walk_ty(self, t);
+ }
+ _ => {
+ visit::walk_ty(self, t);
+ }
+ }
+ }
+}
+
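+/// Collects the lifetimes that appear in a function's return type, e.g. `'a` in
+/// `-> impl Debug + 'a`; used when lowering the return type of an `async fn`.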
+pub fn lifetimes_in_ret_ty(resolver: &ResolverAstLowering, ret_ty: &FnRetTy) -> Vec<Lifetime> {
+ let mut visitor = LifetimeCollectVisitor::new(resolver);
+ visitor.visit_fn_ret_ty(ret_ty);
+ visitor.collected_lifetimes
+}
+
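+/// Collects the lifetimes that appear in a set of bounds, e.g. `'a` in
+/// `impl Debug + 'a`; used when lowering return-position `impl Trait`.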
+pub fn lifetimes_in_bounds(
+ resolver: &ResolverAstLowering,
+ bounds: &GenericBounds,
+) -> Vec<Lifetime> {
+ let mut visitor = LifetimeCollectVisitor::new(resolver);
+ for bound in bounds {
+ visitor.visit_param_bound(bound, BoundKind::Bound);
+ }
+ visitor.collected_lifetimes
+}
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
new file mode 100644
index 000000000..bd2e76e55
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -0,0 +1,350 @@
+use super::ResolverAstLoweringExt;
+use super::{ImplTraitContext, LoweringContext, ParamMode};
+use crate::ImplTraitPosition;
+
+use rustc_ast::ptr::P;
+use rustc_ast::*;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_span::symbol::Ident;
+use rustc_span::{source_map::Spanned, Span};
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ pub(crate) fn lower_pat(&mut self, pattern: &Pat) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(self.lower_pat_mut(pattern))
+ }
+
+ pub(crate) fn lower_pat_mut(&mut self, mut pattern: &Pat) -> hir::Pat<'hir> {
+ ensure_sufficient_stack(|| {
+ // loop here to avoid recursion
+ let node = loop {
+ match pattern.kind {
+ PatKind::Wild => break hir::PatKind::Wild,
+ PatKind::Ident(ref binding_mode, ident, ref sub) => {
+ let lower_sub = |this: &mut Self| sub.as_ref().map(|s| this.lower_pat(&*s));
+ break self.lower_pat_ident(pattern, binding_mode, ident, lower_sub);
+ }
+ PatKind::Lit(ref e) => {
+ break hir::PatKind::Lit(self.lower_expr_within_pat(e, false));
+ }
+ PatKind::TupleStruct(ref qself, ref path, ref pats) => {
+ let qpath = self.lower_qpath(
+ pattern.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
+ break hir::PatKind::TupleStruct(qpath, pats, ddpos);
+ }
+ PatKind::Or(ref pats) => {
+ break hir::PatKind::Or(
+ self.arena.alloc_from_iter(pats.iter().map(|x| self.lower_pat_mut(x))),
+ );
+ }
+ PatKind::Path(ref qself, ref path) => {
+ let qpath = self.lower_qpath(
+ pattern.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ break hir::PatKind::Path(qpath);
+ }
+ PatKind::Struct(ref qself, ref path, ref fields, etc) => {
+ let qpath = self.lower_qpath(
+ pattern.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+
+ let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::PatField {
+ hir_id: self.next_id(),
+ ident: self.lower_ident(f.ident),
+ pat: self.lower_pat(&f.pat),
+ is_shorthand: f.is_shorthand,
+ span: self.lower_span(f.span),
+ }));
+ break hir::PatKind::Struct(qpath, fs, etc);
+ }
+ PatKind::Tuple(ref pats) => {
+ let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple");
+ break hir::PatKind::Tuple(pats, ddpos);
+ }
+ PatKind::Box(ref inner) => {
+ break hir::PatKind::Box(self.lower_pat(inner));
+ }
+ PatKind::Ref(ref inner, mutbl) => {
+ break hir::PatKind::Ref(self.lower_pat(inner), mutbl);
+ }
+ PatKind::Range(ref e1, ref e2, Spanned { node: ref end, .. }) => {
+ break hir::PatKind::Range(
+ e1.as_deref().map(|e| self.lower_expr_within_pat(e, true)),
+ e2.as_deref().map(|e| self.lower_expr_within_pat(e, true)),
+ self.lower_range_end(end, e2.is_some()),
+ );
+ }
+ PatKind::Slice(ref pats) => break self.lower_pat_slice(pats),
+ PatKind::Rest => {
+ // If we reach here the `..` pattern is not semantically allowed.
+ break self.ban_illegal_rest_pat(pattern.span);
+ }
+ // return inner to be processed in next loop
+ PatKind::Paren(ref inner) => pattern = inner,
+ PatKind::MacCall(_) => panic!("{:?} shouldn't exist here", pattern.span),
+ }
+ };
+
+ self.pat_with_node_id_of(pattern, node)
+ })
+ }
+
+ fn lower_pat_tuple(
+ &mut self,
+ pats: &[P<Pat>],
+ ctx: &str,
+ ) -> (&'hir [hir::Pat<'hir>], Option<usize>) {
+ let mut elems = Vec::with_capacity(pats.len());
+ let mut rest = None;
+
+ let mut iter = pats.iter().enumerate();
+ for (idx, pat) in iter.by_ref() {
+ // Interpret the first `..` pattern as a sub-tuple pattern.
+ // Note that unlike for slice patterns,
+ // where `xs @ ..` is a legal sub-slice pattern,
+ // it is not a legal sub-tuple pattern.
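+            // For example, in `(a, .., b)` the `..` becomes the sub-tuple rest
+            // pattern, while `(xs @ .., b)` is rejected with the error below.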
+ match pat.kind {
+ // Found a sub-tuple rest pattern
+ PatKind::Rest => {
+ rest = Some((idx, pat.span));
+ break;
+ }
+ // Found a sub-tuple pattern `$binding_mode $ident @ ..`.
+ // This is not allowed as a sub-tuple pattern
+ PatKind::Ident(ref _bm, ident, Some(ref sub)) if sub.is_rest() => {
+ let sp = pat.span;
+ self.diagnostic()
+ .struct_span_err(
+ sp,
+ &format!("`{} @` is not allowed in a {}", ident.name, ctx),
+ )
+ .span_label(sp, "this is only allowed in slice patterns")
+ .help("remove this and bind each tuple field independently")
+ .span_suggestion_verbose(
+ sp,
+ &format!("if you don't need to use the contents of {}, discard the tuple's remaining fields", ident),
+ "..",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ _ => {}
+ }
+
+ // It was not a sub-tuple pattern so lower it normally.
+ elems.push(self.lower_pat_mut(pat));
+ }
+
+ for (_, pat) in iter {
+ // There was a previous sub-tuple pattern; make sure we don't allow more...
+ if pat.is_rest() {
+ // ...but there was one again, so error.
+ self.ban_extra_rest_pat(pat.span, rest.unwrap().1, ctx);
+ } else {
+ elems.push(self.lower_pat_mut(pat));
+ }
+ }
+
+ (self.arena.alloc_from_iter(elems), rest.map(|(ddpos, _)| ddpos))
+ }
+
+ /// Lower a slice pattern of form `[pat_0, ..., pat_n]` into
+ /// `hir::PatKind::Slice(before, slice, after)`.
+ ///
+    /// When encountering `($binding_mode $ident @)? ..` (`slice`),
+    /// this is interpreted as a sub-slice pattern semantically.
+    /// The patterns that follow it end up in `after`; any further sub-slice patterns
+    /// there are errors.
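+    /// For example, `[a, xs @ .., b]` lowers with `before = [a]`, `slice = xs @ _`,
+    /// and `after = [b]`.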
+ fn lower_pat_slice(&mut self, pats: &[P<Pat>]) -> hir::PatKind<'hir> {
+ let mut before = Vec::new();
+ let mut after = Vec::new();
+ let mut slice = None;
+ let mut prev_rest_span = None;
+
+ // Lowers `$bm $ident @ ..` to `$bm $ident @ _`.
+ let lower_rest_sub = |this: &mut Self, pat, bm, ident, sub| {
+ let lower_sub = |this: &mut Self| Some(this.pat_wild_with_node_id_of(sub));
+ let node = this.lower_pat_ident(pat, bm, ident, lower_sub);
+ this.pat_with_node_id_of(pat, node)
+ };
+
+ let mut iter = pats.iter();
+ // Lower all the patterns until the first occurrence of a sub-slice pattern.
+ for pat in iter.by_ref() {
+ match pat.kind {
+ // Found a sub-slice pattern `..`. Record, lower it to `_`, and stop here.
+ PatKind::Rest => {
+ prev_rest_span = Some(pat.span);
+ slice = Some(self.pat_wild_with_node_id_of(pat));
+ break;
+ }
+ // Found a sub-slice pattern `$binding_mode $ident @ ..`.
+ // Record, lower it to `$binding_mode $ident @ _`, and stop here.
+ PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
+ prev_rest_span = Some(sub.span);
+ slice = Some(self.arena.alloc(lower_rest_sub(self, pat, bm, ident, sub)));
+ break;
+ }
+ // It was not a subslice pattern so lower it normally.
+ _ => before.push(self.lower_pat_mut(pat)),
+ }
+ }
+
+ // Lower all the patterns after the first sub-slice pattern.
+ for pat in iter {
+ // There was a previous subslice pattern; make sure we don't allow more.
+ let rest_span = match pat.kind {
+ PatKind::Rest => Some(pat.span),
+ PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
+ // #69103: Lower into `binding @ _` as above to avoid ICEs.
+ after.push(lower_rest_sub(self, pat, bm, ident, sub));
+ Some(sub.span)
+ }
+ _ => None,
+ };
+ if let Some(rest_span) = rest_span {
+ // We have e.g., `[a, .., b, ..]`. That's no good, error!
+ self.ban_extra_rest_pat(rest_span, prev_rest_span.unwrap(), "slice");
+ } else {
+ // Lower the pattern normally.
+ after.push(self.lower_pat_mut(pat));
+ }
+ }
+
+ hir::PatKind::Slice(
+ self.arena.alloc_from_iter(before),
+ slice,
+ self.arena.alloc_from_iter(after),
+ )
+ }
+
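+ /// Lower an identifier pattern. The identifier either introduces a fresh
+ /// binding or resolves to an existing definition, in which case it is
+ /// lowered to a path pattern. An illustrative sketch (not compiler output):
+ ///
+ /// ```rust,ignore (illustrative)
+ /// match opt {
+ /// None => {} // `None` resolves to a unit variant: lowered as a path pattern.
+ /// x => {} // `x` resolves to no existing definition: a fresh binding.
+ /// }
+ /// ```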
+ fn lower_pat_ident(
+ &mut self,
+ p: &Pat,
+ binding_mode: &BindingMode,
+ ident: Ident,
+ lower_sub: impl FnOnce(&mut Self) -> Option<&'hir hir::Pat<'hir>>,
+ ) -> hir::PatKind<'hir> {
+ match self.resolver.get_partial_res(p.id).map(|d| d.base_res()) {
+ // `None` can occur in body-less function signatures
+ res @ (None | Some(Res::Local(_))) => {
+ let canonical_id = match res {
+ Some(Res::Local(id)) => id,
+ _ => p.id,
+ };
+
+ hir::PatKind::Binding(
+ self.lower_binding_mode(binding_mode),
+ self.lower_node_id(canonical_id),
+ self.lower_ident(ident),
+ lower_sub(self),
+ )
+ }
+ Some(res) => hir::PatKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(ident.span),
+ res: self.lower_res(res),
+ segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
+ }),
+ )),
+ }
+ }
+
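+ /// Map an AST binding mode to its HIR annotation. As an illustrative
+ /// summary: `x` is `Unannotated`, `ref x` is `Ref`, `mut x` is `Mutable`,
+ /// and `ref mut x` is `RefMut`.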
+ fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation {
+ match *b {
+ BindingMode::ByValue(Mutability::Not) => hir::BindingAnnotation::Unannotated,
+ BindingMode::ByRef(Mutability::Not) => hir::BindingAnnotation::Ref,
+ BindingMode::ByValue(Mutability::Mut) => hir::BindingAnnotation::Mutable,
+ BindingMode::ByRef(Mutability::Mut) => hir::BindingAnnotation::RefMut,
+ }
+ }
+
+ fn pat_wild_with_node_id_of(&mut self, p: &Pat) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(self.pat_with_node_id_of(p, hir::PatKind::Wild))
+ }
+
+ /// Construct a `Pat` with the `HirId` of `p.id` lowered.
+ fn pat_with_node_id_of(&mut self, p: &Pat, kind: hir::PatKind<'hir>) -> hir::Pat<'hir> {
+ hir::Pat {
+ hir_id: self.lower_node_id(p.id),
+ kind,
+ span: self.lower_span(p.span),
+ default_binding_modes: true,
+ }
+ }
+
+ /// Emit a friendly error for extra `..` patterns in a tuple/tuple struct/slice pattern.
+ pub(crate) fn ban_extra_rest_pat(&self, sp: Span, prev_sp: Span, ctx: &str) {
+ self.diagnostic()
+ .struct_span_err(sp, &format!("`..` can only be used once per {} pattern", ctx))
+ .span_label(sp, &format!("can only be used once per {} pattern", ctx))
+ .span_label(prev_sp, "previously used here")
+ .emit();
+ }
+
+ /// Used to ban the `..` pattern in places it shouldn't be semantically.
+ fn ban_illegal_rest_pat(&self, sp: Span) -> hir::PatKind<'hir> {
+ self.diagnostic()
+ .struct_span_err(sp, "`..` patterns are not allowed here")
+ .note("only allowed in tuple, tuple struct, and slice patterns")
+ .emit();
+
+ // We're not in a list context so `..` can be reasonably treated
+ // as `_` because it should always be valid and roughly matches the
+ // intent of `..` (notice that the rest of a single slot is that slot).
+ hir::PatKind::Wild
+ }
+
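+ /// Lower the end of a range pattern. Note that a range with no end, such
+ /// as `X..`, has nothing to exclude, so it is lowered as an inclusive
+ /// range; as an illustrative sketch, `1..` matches like `1..=MAX` for the
+ /// scrutinee's type.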
+ fn lower_range_end(&mut self, e: &RangeEnd, has_end: bool) -> hir::RangeEnd {
+ match *e {
+ RangeEnd::Excluded if has_end => hir::RangeEnd::Excluded,
+ // No end; so `X..` behaves like `RangeFrom`.
+ RangeEnd::Excluded | RangeEnd::Included(_) => hir::RangeEnd::Included,
+ }
+ }
+
+ /// Matches `'-' lit | lit` (cf. `parser::Parser::parse_literal_maybe_minus`),
+ /// or paths for ranges.
+ //
+ // FIXME: do we want to allow `expr -> pattern` conversion to create path expressions?
+ // That means making this work:
+ //
+ // ```rust,ignore (FIXME)
+ // struct S;
+ // macro_rules! m {
+ // ($a:expr) => {
+ // let $a = S;
+ // }
+ // }
+ // m!(S);
+ // ```
+ fn lower_expr_within_pat(&mut self, expr: &Expr, allow_paths: bool) -> &'hir hir::Expr<'hir> {
+ match expr.kind {
+ ExprKind::Lit(..) | ExprKind::ConstBlock(..) | ExprKind::Err => {}
+ ExprKind::Path(..) if allow_paths => {}
+ ExprKind::Unary(UnOp::Neg, ref inner) if matches!(inner.kind, ExprKind::Lit(_)) => {}
+ _ => {
+ self.diagnostic()
+ .span_err(expr.span, "arbitrary expressions aren't allowed in patterns");
+ return self.arena.alloc(self.expr_err(expr.span));
+ }
+ }
+ self.lower_expr(expr)
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
new file mode 100644
index 000000000..393be3b45
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -0,0 +1,406 @@
+use crate::ImplTraitPosition;
+
+use super::ResolverAstLoweringExt;
+use super::{GenericArgsCtor, LifetimeRes, ParenthesizedGenericArgs};
+use super::{ImplTraitContext, LoweringContext, ParamMode};
+
+use rustc_ast::{self as ast, *};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, PartialRes, Res};
+use rustc_hir::GenericArg;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{BytePos, Span, DUMMY_SP};
+
+use smallvec::smallvec;
+use tracing::debug;
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ #[instrument(level = "trace", skip(self))]
+ pub(crate) fn lower_qpath(
+ &mut self,
+ id: NodeId,
+ qself: &Option<QSelf>,
+ p: &Path,
+ param_mode: ParamMode,
+ itctx: ImplTraitContext,
+ ) -> hir::QPath<'hir> {
+ let qself_position = qself.as_ref().map(|q| q.position);
+ let qself = qself.as_ref().map(|q| self.lower_ty(&q.ty, itctx));
+
+ let partial_res =
+ self.resolver.get_partial_res(id).unwrap_or_else(|| PartialRes::new(Res::Err));
+
+ let path_span_lo = p.span.shrink_to_lo();
+ let proj_start = p.segments.len() - partial_res.unresolved_segments();
+ let path = self.arena.alloc(hir::Path {
+ res: self.lower_res(partial_res.base_res()),
+ segments: self.arena.alloc_from_iter(p.segments[..proj_start].iter().enumerate().map(
+ |(i, segment)| {
+ let param_mode = match (qself_position, param_mode) {
+ (Some(j), ParamMode::Optional) if i < j => {
+ // This segment is part of the trait path in a
+ // qualified path, one of `a`, `b` or `Trait`
+ // in `<X as a::b::Trait>::T::U::method`.
+ ParamMode::Explicit
+ }
+ _ => param_mode,
+ };
+
+ let parenthesized_generic_args = match partial_res.base_res() {
+ // `a::b::Trait(Args)`
+ Res::Def(DefKind::Trait, _) if i + 1 == proj_start => {
+ ParenthesizedGenericArgs::Ok
+ }
+ // `a::b::Trait(Args)::TraitItem`
+ Res::Def(DefKind::AssocFn, _)
+ | Res::Def(DefKind::AssocConst, _)
+ | Res::Def(DefKind::AssocTy, _)
+ if i + 2 == proj_start =>
+ {
+ ParenthesizedGenericArgs::Ok
+ }
+ // Avoid duplicated errors.
+ Res::Err => ParenthesizedGenericArgs::Ok,
+ // Everything else is an error.
+ _ => ParenthesizedGenericArgs::Err,
+ };
+
+ self.lower_path_segment(
+ p.span,
+ segment,
+ param_mode,
+ parenthesized_generic_args,
+ itctx,
+ )
+ },
+ )),
+ span: self.lower_span(
+ p.segments[..proj_start]
+ .last()
+ .map_or(path_span_lo, |segment| path_span_lo.to(segment.span())),
+ ),
+ });
+
+ // Simple case, either no projections, or only fully-qualified.
+ // E.g., `std::mem::size_of` or `<I as Iterator>::Item`.
+ if partial_res.unresolved_segments() == 0 {
+ return hir::QPath::Resolved(qself, path);
+ }
+
+ // Create the innermost type that we're projecting from.
+ let mut ty = if path.segments.is_empty() {
+ // If the base path is empty, there must be an explicit
+ // qualified self type in the syntax, e.g., `&i32` in `<&i32>::clone`.
+ qself.expect("missing QSelf for <T>::...")
+ } else {
+ // Otherwise, the base path is an implicit `Self` type path,
+ // e.g., `Vec` in `Vec::new` or `<I as Iterator>::Item` in
+ // `<I as Iterator>::Item::default`.
+ let new_id = self.next_id();
+ self.arena.alloc(self.ty_path(new_id, path.span, hir::QPath::Resolved(qself, path)))
+ };
+
+ // Everything after the base path is an associated "extension",
+ // of which all but the last are associated types,
+ // e.g., for `std::vec::Vec::<T>::IntoIter::Item::clone`:
+ // * base path is `std::vec::Vec<T>`
+ // * "extensions" are `IntoIter`, `Item` and `clone`
+ // * type nodes are:
+ // 1. `std::vec::Vec<T>` (created above)
+ // 2. `<std::vec::Vec<T>>::IntoIter`
+ // 3. `<<std::vec::Vec<T>>::IntoIter>::Item`
+ // * final path is `<<<std::vec::Vec<T>>::IntoIter>::Item>::clone`
+ for (i, segment) in p.segments.iter().enumerate().skip(proj_start) {
+ let hir_segment = self.arena.alloc(self.lower_path_segment(
+ p.span,
+ segment,
+ param_mode,
+ ParenthesizedGenericArgs::Err,
+ itctx,
+ ));
+ let qpath = hir::QPath::TypeRelative(ty, hir_segment);
+
+ // It's finished, return the extension of the right node type.
+ if i == p.segments.len() - 1 {
+ return qpath;
+ }
+
+ // Wrap the associated extension in another type node.
+ let new_id = self.next_id();
+ ty = self.arena.alloc(self.ty_path(new_id, path_span_lo.to(segment.span()), qpath));
+ }
+
+ // We should've returned in the for loop above.
+
+ self.diagnostic().span_bug(
+ p.span,
+ &format!(
+ "lower_qpath: no final extension segment in {}..{}",
+ proj_start,
+ p.segments.len()
+ ),
+ );
+ }
+
+ pub(crate) fn lower_path_extra(
+ &mut self,
+ res: Res,
+ p: &Path,
+ param_mode: ParamMode,
+ ) -> &'hir hir::Path<'hir> {
+ self.arena.alloc(hir::Path {
+ res,
+ segments: self.arena.alloc_from_iter(p.segments.iter().map(|segment| {
+ self.lower_path_segment(
+ p.span,
+ segment,
+ param_mode,
+ ParenthesizedGenericArgs::Err,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ )
+ })),
+ span: self.lower_span(p.span),
+ })
+ }
+
+ pub(crate) fn lower_path(
+ &mut self,
+ id: NodeId,
+ p: &Path,
+ param_mode: ParamMode,
+ ) -> &'hir hir::Path<'hir> {
+ let res = self.expect_full_res(id);
+ let res = self.lower_res(res);
+ self.lower_path_extra(res, p, param_mode)
+ }
+
+ pub(crate) fn lower_path_segment(
+ &mut self,
+ path_span: Span,
+ segment: &PathSegment,
+ param_mode: ParamMode,
+ parenthesized_generic_args: ParenthesizedGenericArgs,
+ itctx: ImplTraitContext,
+ ) -> hir::PathSegment<'hir> {
+ debug!("path_span: {:?}, lower_path_segment(segment: {:?})", path_span, segment,);
+ let (mut generic_args, infer_args) = if let Some(ref generic_args) = segment.args {
+ let msg = "parenthesized type parameters may only be used with a `Fn` trait";
+ match **generic_args {
+ GenericArgs::AngleBracketed(ref data) => {
+ self.lower_angle_bracketed_parameter_data(data, param_mode, itctx)
+ }
+ GenericArgs::Parenthesized(ref data) => match parenthesized_generic_args {
+ ParenthesizedGenericArgs::Ok => self.lower_parenthesized_parameter_data(data),
+ ParenthesizedGenericArgs::Err => {
+ let mut err = struct_span_err!(self.tcx.sess, data.span, E0214, "{}", msg);
+ err.span_label(data.span, "only `Fn` traits may use parentheses");
+ // Suggest replacing the parentheses with angle brackets: `Trait(params...)` becomes `Trait<params...>`.
+ if !data.inputs.is_empty() {
+ // From the start of the span to the first character of the first argument.
+ let open_param = data.inputs_span.shrink_to_lo().to(data
+ .inputs
+ .first()
+ .unwrap()
+ .span
+ .shrink_to_lo());
+ // From just after the last character of the last argument to the end of the span.
+ let close_param = data
+ .inputs
+ .last()
+ .unwrap()
+ .span
+ .shrink_to_hi()
+ .to(data.inputs_span.shrink_to_hi());
+ err.multipart_suggestion(
+ &format!("use angle brackets instead",),
+ vec![
+ (open_param, String::from("<")),
+ (close_param, String::from(">")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ (
+ self.lower_angle_bracketed_parameter_data(
+ &data.as_angle_bracketed_args(),
+ param_mode,
+ itctx,
+ )
+ .0,
+ false,
+ )
+ }
+ },
+ }
+ } else {
+ (
+ GenericArgsCtor {
+ args: Default::default(),
+ bindings: &[],
+ parenthesized: false,
+ span: path_span.shrink_to_hi(),
+ },
+ param_mode == ParamMode::Optional,
+ )
+ };
+
+ let has_lifetimes =
+ generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
+ if !generic_args.parenthesized && !has_lifetimes {
+ self.maybe_insert_elided_lifetimes_in_path(
+ path_span,
+ segment.id,
+ segment.ident.span,
+ &mut generic_args,
+ );
+ }
+
+ let res = self.expect_full_res(segment.id);
+ let id = self.lower_node_id(segment.id);
+ debug!(
+ "lower_path_segment: ident={:?} original-id={:?} new-id={:?}",
+ segment.ident, segment.id, id,
+ );
+
+ hir::PathSegment {
+ ident: self.lower_ident(segment.ident),
+ hir_id: Some(id),
+ res: Some(self.lower_res(res)),
+ infer_args,
+ args: if generic_args.is_empty() && generic_args.span.is_empty() {
+ None
+ } else {
+ Some(generic_args.into_generic_args(self))
+ },
+ }
+ }
+
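+ /// If the resolver recorded elided lifetimes for this segment, insert
+ /// them explicitly at the front of its generic arguments. A sketch of the
+ /// effect, where `Ref` is a hypothetical type with one lifetime parameter:
+ ///
+ /// ```rust,ignore (illustrative)
+ /// struct Ref<'a>(&'a u8);
+ /// fn f(x: Ref) {} // lowered as if it were written `fn f(x: Ref<'_>)`
+ /// ```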
+ fn maybe_insert_elided_lifetimes_in_path(
+ &mut self,
+ path_span: Span,
+ segment_id: NodeId,
+ segment_ident_span: Span,
+ generic_args: &mut GenericArgsCtor<'hir>,
+ ) {
+ let (start, end) = match self.resolver.get_lifetime_res(segment_id) {
+ Some(LifetimeRes::ElidedAnchor { start, end }) => (start, end),
+ None => return,
+ Some(_) => panic!(),
+ };
+ let expected_lifetimes = end.as_usize() - start.as_usize();
+ debug!(expected_lifetimes);
+
+ // Note: these spans are used for diagnostics when the lifetimes can't be inferred.
+ // See `rustc_resolve::late::lifetimes::LifetimeContext::add_missing_lifetime_specifiers_label`.
+ let elided_lifetime_span = if generic_args.span.is_empty() {
+ // If there are no brackets, use the identifier span.
+ // HACK: we use find_ancestor_inside to properly suggest elided spans in paths
+ // originating from macros, since the segment's span might be from a macro arg.
+ segment_ident_span.find_ancestor_inside(path_span).unwrap_or(path_span)
+ } else if generic_args.is_empty() {
+ // If there are brackets, but not generic arguments, then use the opening bracket
+ generic_args.span.with_hi(generic_args.span.lo() + BytePos(1))
+ } else {
+ // Else use an empty span right after the opening bracket.
+ generic_args.span.with_lo(generic_args.span.lo() + BytePos(1)).shrink_to_lo()
+ };
+
+ generic_args.args.insert_many(
+ 0,
+ (start.as_u32()..end.as_u32()).map(|i| {
+ let id = NodeId::from_u32(i);
+ let l = self.lower_lifetime(&Lifetime {
+ id,
+ ident: Ident::new(kw::UnderscoreLifetime, elided_lifetime_span),
+ });
+ GenericArg::Lifetime(l)
+ }),
+ );
+ }
+
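+ /// Lower angle-bracketed generic arguments such as `Seg<'a, T, N, Assoc = U>`
+ /// (an illustrative segment): lifetimes, types and consts become generic
+ /// args, while `Assoc = U` becomes an associated type binding. The returned
+ /// flag is `infer_args`: `true` when no explicit type or const argument was
+ /// given and explicit parameters are not required.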
+ pub(crate) fn lower_angle_bracketed_parameter_data(
+ &mut self,
+ data: &AngleBracketedArgs,
+ param_mode: ParamMode,
+ itctx: ImplTraitContext,
+ ) -> (GenericArgsCtor<'hir>, bool) {
+ let has_non_lt_args = data.args.iter().any(|arg| match arg {
+ AngleBracketedArg::Arg(ast::GenericArg::Lifetime(_))
+ | AngleBracketedArg::Constraint(_) => false,
+ AngleBracketedArg::Arg(ast::GenericArg::Type(_) | ast::GenericArg::Const(_)) => true,
+ });
+ let args = data
+ .args
+ .iter()
+ .filter_map(|arg| match arg {
+ AngleBracketedArg::Arg(arg) => Some(self.lower_generic_arg(arg, itctx)),
+ AngleBracketedArg::Constraint(_) => None,
+ })
+ .collect();
+ let bindings = self.arena.alloc_from_iter(data.args.iter().filter_map(|arg| match arg {
+ AngleBracketedArg::Constraint(c) => Some(self.lower_assoc_ty_constraint(c, itctx)),
+ AngleBracketedArg::Arg(_) => None,
+ }));
+ let ctor = GenericArgsCtor { args, bindings, parenthesized: false, span: data.span };
+ (ctor, !has_non_lt_args && param_mode == ParamMode::Optional)
+ }
+
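+ /// Lower parenthesized, `Fn`-sugar generic arguments. A sketch of the
+ /// desugaring (illustrative): `Fn(A, B) -> C` is lowered as if it were
+ /// written `Fn<(A, B), Output = C>`, i.e. a single tuple type argument
+ /// plus an `Output` associated type binding.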
+ fn lower_parenthesized_parameter_data(
+ &mut self,
+ data: &ParenthesizedArgs,
+ ) -> (GenericArgsCtor<'hir>, bool) {
+ // Switch to `PassThrough` mode for anonymous lifetimes; this
+ // means that we permit things like `&Ref<T>`, where `Ref` has
+ // a hidden lifetime parameter. This is needed for backwards
+ // compatibility, even in contexts like an impl header where
+ // we generally don't permit such things (see #51008).
+ let ParenthesizedArgs { span, inputs, inputs_span, output } = data;
+ let inputs = self.arena.alloc_from_iter(inputs.iter().map(|ty| {
+ self.lower_ty_direct(ty, ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitParam))
+ }));
+ let output_ty = match output {
+ FnRetTy::Ty(ty) => {
+ self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitReturn))
+ }
+ FnRetTy::Default(_) => self.arena.alloc(self.ty_tup(*span, &[])),
+ };
+ let args = smallvec![GenericArg::Type(self.ty_tup(*inputs_span, inputs))];
+ let binding = self.output_ty_binding(output_ty.span, output_ty);
+ (
+ GenericArgsCtor {
+ args,
+ bindings: arena_vec![self; binding],
+ parenthesized: true,
+ span: data.inputs_span,
+ },
+ false,
+ )
+ }
+
+ /// An associated type binding `Output = $ty`.
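+ /// For example (illustrative), the `-> C` in `Fn(A, B) -> C` is lowered
+ /// to the binding `Output = C`.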
+ pub(crate) fn output_ty_binding(
+ &mut self,
+ span: Span,
+ ty: &'hir hir::Ty<'hir>,
+ ) -> hir::TypeBinding<'hir> {
+ let ident = Ident::with_dummy_span(hir::FN_OUTPUT_NAME);
+ let kind = hir::TypeBindingKind::Equality { term: ty.into() };
+ let args = arena_vec![self;];
+ let bindings = arena_vec![self;];
+ let gen_args = self.arena.alloc(hir::GenericArgs {
+ args,
+ bindings,
+ parenthesized: false,
+ span_ext: DUMMY_SP,
+ });
+ hir::TypeBinding {
+ hir_id: self.next_id(),
+ gen_args,
+ span: self.lower_span(span),
+ ident,
+ kind,
+ }
+ }
+}