summaryrefslogtreecommitdiffstats
path: root/src/tools/rust-analyzer/crates/hir-expand/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
commit698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree173a775858bd501c378080a10dca74132f05bc50 /src/tools/rust-analyzer/crates/hir-expand/src
parentInitial commit. (diff)
downloadrustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1.upstream/1.64.0+dfsg1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/tools/rust-analyzer/crates/hir-expand/src')
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/ast_id_map.rs181
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/builtin_attr_macro.rs130
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/builtin_derive_macro.rs249
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/builtin_fn_macro.rs669
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/db.rs509
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/eager.rs266
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/fixup.rs382
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/hygiene.rs256
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/lib.rs1000
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/mod_path.rs276
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/name.rs433
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/proc_macro.rs81
-rw-r--r--src/tools/rust-analyzer/crates/hir-expand/src/quote.rs284
13 files changed, 4716 insertions, 0 deletions
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/ast_id_map.rs b/src/tools/rust-analyzer/crates/hir-expand/src/ast_id_map.rs
new file mode 100644
index 000000000..c1ddef03b
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/ast_id_map.rs
@@ -0,0 +1,181 @@
+//! `AstIdMap` allows to create stable IDs for "large" syntax nodes like items
+//! and macro calls.
+//!
+//! Specifically, it enumerates all items in a file and uses position of an
+//! item as an ID. That way, ids don't change unless the set of items itself
+//! changes.
+
+use std::{
+ any::type_name,
+ fmt,
+ hash::{BuildHasher, BuildHasherDefault, Hash, Hasher},
+ marker::PhantomData,
+};
+
+use la_arena::{Arena, Idx};
+use profile::Count;
+use rustc_hash::FxHasher;
+use syntax::{ast, match_ast, AstNode, AstPtr, SyntaxNode, SyntaxNodePtr};
+
+/// `AstId` points to an AST node in a specific file.
+pub struct FileAstId<N: AstNode> {
+ raw: ErasedFileAstId,
+ _ty: PhantomData<fn() -> N>,
+}
+
+impl<N: AstNode> Clone for FileAstId<N> {
+ fn clone(&self) -> FileAstId<N> {
+ *self
+ }
+}
+impl<N: AstNode> Copy for FileAstId<N> {}
+
+impl<N: AstNode> PartialEq for FileAstId<N> {
+ fn eq(&self, other: &Self) -> bool {
+ self.raw == other.raw
+ }
+}
+impl<N: AstNode> Eq for FileAstId<N> {}
+impl<N: AstNode> Hash for FileAstId<N> {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ self.raw.hash(hasher);
+ }
+}
+
+impl<N: AstNode> fmt::Debug for FileAstId<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "FileAstId::<{}>({})", type_name::<N>(), self.raw.into_raw())
+ }
+}
+
+impl<N: AstNode> FileAstId<N> {
+ // Can't make this a From implementation because of coherence
+ pub fn upcast<M: AstNode>(self) -> FileAstId<M>
+ where
+ N: Into<M>,
+ {
+ FileAstId { raw: self.raw, _ty: PhantomData }
+ }
+}
+
+type ErasedFileAstId = Idx<SyntaxNodePtr>;
+
+/// Maps items' `SyntaxNode`s to `ErasedFileAstId`s and back.
+#[derive(Default)]
+pub struct AstIdMap {
+ /// Maps stable id to unstable ptr.
+ arena: Arena<SyntaxNodePtr>,
+ /// Reverse: map ptr to id.
+ map: hashbrown::HashMap<Idx<SyntaxNodePtr>, (), ()>,
+ _c: Count<Self>,
+}
+
+impl fmt::Debug for AstIdMap {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AstIdMap").field("arena", &self.arena).finish()
+ }
+}
+
+impl PartialEq for AstIdMap {
+ fn eq(&self, other: &Self) -> bool {
+ self.arena == other.arena
+ }
+}
+impl Eq for AstIdMap {}
+
+impl AstIdMap {
+ pub(crate) fn from_source(node: &SyntaxNode) -> AstIdMap {
+ assert!(node.parent().is_none());
+ let mut res = AstIdMap::default();
+ // By walking the tree in breadth-first order we make sure that parents
+ // get lower ids than children. That is, adding a new child does not
+ // change parent's id. This means that, say, adding a new function to a
+ // trait does not change ids of top-level items, which helps caching.
+ bdfs(node, |it| {
+ match_ast! {
+ match it {
+ ast::Item(module_item) => {
+ res.alloc(module_item.syntax());
+ true
+ },
+ ast::BlockExpr(block) => {
+ res.alloc(block.syntax());
+ true
+ },
+ _ => false,
+ }
+ }
+ });
+ res.map = hashbrown::HashMap::with_capacity_and_hasher(res.arena.len(), ());
+ for (idx, ptr) in res.arena.iter() {
+ let hash = hash_ptr(ptr);
+ match res.map.raw_entry_mut().from_hash(hash, |idx2| *idx2 == idx) {
+ hashbrown::hash_map::RawEntryMut::Occupied(_) => unreachable!(),
+ hashbrown::hash_map::RawEntryMut::Vacant(entry) => {
+ entry.insert_with_hasher(hash, idx, (), |&idx| hash_ptr(&res.arena[idx]));
+ }
+ }
+ }
+ res
+ }
+
+ pub fn ast_id<N: AstNode>(&self, item: &N) -> FileAstId<N> {
+ let raw = self.erased_ast_id(item.syntax());
+ FileAstId { raw, _ty: PhantomData }
+ }
+ fn erased_ast_id(&self, item: &SyntaxNode) -> ErasedFileAstId {
+ let ptr = SyntaxNodePtr::new(item);
+ let hash = hash_ptr(&ptr);
+ match self.map.raw_entry().from_hash(hash, |&idx| self.arena[idx] == ptr) {
+ Some((&idx, &())) => idx,
+ None => panic!(
+ "Can't find {:?} in AstIdMap:\n{:?}",
+ item,
+ self.arena.iter().map(|(_id, i)| i).collect::<Vec<_>>(),
+ ),
+ }
+ }
+
+ pub fn get<N: AstNode>(&self, id: FileAstId<N>) -> AstPtr<N> {
+ AstPtr::try_from_raw(self.arena[id.raw].clone()).unwrap()
+ }
+
+ fn alloc(&mut self, item: &SyntaxNode) -> ErasedFileAstId {
+ self.arena.alloc(SyntaxNodePtr::new(item))
+ }
+}
+
+fn hash_ptr(ptr: &SyntaxNodePtr) -> u64 {
+ let mut hasher = BuildHasherDefault::<FxHasher>::default().build_hasher();
+ ptr.hash(&mut hasher);
+ hasher.finish()
+}
+
+/// Walks the subtree in bdfs order, calling `f` for each node. What is bdfs
+/// order? It is a mix of breadth-first and depth-first orders. Nodes for which
+/// `f` returns true are visited breadth-first, all the other nodes are explored
+/// depth-first.
+///
+/// In other words, the size of the bfs queue is bound by the number of "true"
+/// nodes.
+fn bdfs(node: &SyntaxNode, mut f: impl FnMut(SyntaxNode) -> bool) {
+ let mut curr_layer = vec![node.clone()];
+ let mut next_layer = vec![];
+ while !curr_layer.is_empty() {
+ curr_layer.drain(..).for_each(|node| {
+ let mut preorder = node.preorder();
+ while let Some(event) = preorder.next() {
+ match event {
+ syntax::WalkEvent::Enter(node) => {
+ if f(node.clone()) {
+ next_layer.extend(node.children());
+ preorder.skip_subtree();
+ }
+ }
+ syntax::WalkEvent::Leave(_) => {}
+ }
+ }
+ });
+ std::mem::swap(&mut curr_layer, &mut next_layer);
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/builtin_attr_macro.rs b/src/tools/rust-analyzer/crates/hir-expand/src/builtin_attr_macro.rs
new file mode 100644
index 000000000..0c886ac4d
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/builtin_attr_macro.rs
@@ -0,0 +1,130 @@
+//! Builtin attributes.
+
+use crate::{db::AstDatabase, name, ExpandResult, MacroCallId, MacroCallKind};
+
+macro_rules! register_builtin {
+ ( $(($name:ident, $variant:ident) => $expand:ident),* ) => {
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+ pub enum BuiltinAttrExpander {
+ $($variant),*
+ }
+
+ impl BuiltinAttrExpander {
+ pub fn expand(
+ &self,
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+ ) -> ExpandResult<tt::Subtree> {
+ let expander = match *self {
+ $( BuiltinAttrExpander::$variant => $expand, )*
+ };
+ expander(db, id, tt)
+ }
+
+ fn find_by_name(name: &name::Name) -> Option<Self> {
+ match name {
+ $( id if id == &name::name![$name] => Some(BuiltinAttrExpander::$variant), )*
+ _ => None,
+ }
+ }
+ }
+
+ };
+}
+
+impl BuiltinAttrExpander {
+ pub fn is_derive(self) -> bool {
+ matches!(self, BuiltinAttrExpander::Derive)
+ }
+ pub fn is_test(self) -> bool {
+ matches!(self, BuiltinAttrExpander::Test)
+ }
+ pub fn is_bench(self) -> bool {
+ matches!(self, BuiltinAttrExpander::Bench)
+ }
+}
+
+register_builtin! {
+ (bench, Bench) => dummy_attr_expand,
+ (cfg_accessible, CfgAccessible) => dummy_attr_expand,
+ (cfg_eval, CfgEval) => dummy_attr_expand,
+ (derive, Derive) => derive_attr_expand,
+ (global_allocator, GlobalAllocator) => dummy_attr_expand,
+ (test, Test) => dummy_attr_expand,
+ (test_case, TestCase) => dummy_attr_expand
+}
+
+pub fn find_builtin_attr(ident: &name::Name) -> Option<BuiltinAttrExpander> {
+ BuiltinAttrExpander::find_by_name(ident)
+}
+
+fn dummy_attr_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ ExpandResult::ok(tt.clone())
+}
+
+/// We generate a very specific expansion here, as we do not actually expand the `#[derive]` attribute
+/// itself in name res, but we do want to expand it to something for the IDE layer, so that the input
+/// derive attributes can be downmapped, and resolved as proper paths.
+/// This is basically a hack, that simplifies the hacks we need in a lot of ide layer places to
+/// somewhat inconsistently resolve derive attributes.
+///
+/// As such, we expand `#[derive(Foo, bar::Bar)]` into
+/// ```
+/// #[Foo]
+/// #[bar::Bar]
+/// ();
+/// ```
+/// which allows fallback path resolution in hir::Semantics to properly identify our derives.
+/// Since we do not expand the attribute in nameres though, we keep the original item.
+///
+/// The ideal expansion here would be for the `#[derive]` to re-emit the annotated item and somehow
+/// use the input paths in its output as well.
+/// But that would bring two problems with it, for one every derive would duplicate the item token tree
+/// wasting a lot of memory, and it would also require some way to use a path in a way that makes it
+/// always resolve as a derive without nameres recollecting them.
+/// So this hacky approach is a lot more friendly for us, though it does require a bit of support in
+/// [`hir::Semantics`] to make this work.
+fn derive_attr_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let loc = db.lookup_intern_macro_call(id);
+ let derives = match &loc.kind {
+ MacroCallKind::Attr { attr_args, is_derive: true, .. } => &attr_args.0,
+ _ => return ExpandResult::ok(Default::default()),
+ };
+ pseudo_derive_attr_expansion(tt, derives)
+}
+
+pub fn pseudo_derive_attr_expansion(
+ tt: &tt::Subtree,
+ args: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let mk_leaf = |char| {
+ tt::TokenTree::Leaf(tt::Leaf::Punct(tt::Punct {
+ char,
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ }))
+ };
+
+ let mut token_trees = Vec::new();
+ for tt in (&args.token_trees)
+ .split(|tt| matches!(tt, tt::TokenTree::Leaf(tt::Leaf::Punct(tt::Punct { char: ',', .. }))))
+ {
+ token_trees.push(mk_leaf('#'));
+ token_trees.push(mk_leaf('['));
+ token_trees.extend(tt.iter().cloned());
+ token_trees.push(mk_leaf(']'));
+ }
+ token_trees.push(mk_leaf('('));
+ token_trees.push(mk_leaf(')'));
+ token_trees.push(mk_leaf(';'));
+ ExpandResult::ok(tt::Subtree { delimiter: tt.delimiter, token_trees })
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/builtin_derive_macro.rs b/src/tools/rust-analyzer/crates/hir-expand/src/builtin_derive_macro.rs
new file mode 100644
index 000000000..79989bc2e
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/builtin_derive_macro.rs
@@ -0,0 +1,249 @@
+//! Builtin derives.
+
+use base_db::{CrateOrigin, LangCrateOrigin};
+use tracing::debug;
+
+use syntax::{
+ ast::{self, AstNode, HasGenericParams, HasModuleItem, HasName},
+ match_ast,
+};
+use tt::TokenId;
+
+use crate::{db::AstDatabase, name, quote, ExpandError, ExpandResult, MacroCallId};
+
+macro_rules! register_builtin {
+ ( $($trait:ident => $expand:ident),* ) => {
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+ pub enum BuiltinDeriveExpander {
+ $($trait),*
+ }
+
+ impl BuiltinDeriveExpander {
+ pub fn expand(
+ &self,
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+ ) -> ExpandResult<tt::Subtree> {
+ let expander = match *self {
+ $( BuiltinDeriveExpander::$trait => $expand, )*
+ };
+ expander(db, id, tt)
+ }
+
+ fn find_by_name(name: &name::Name) -> Option<Self> {
+ match name {
+ $( id if id == &name::name![$trait] => Some(BuiltinDeriveExpander::$trait), )*
+ _ => None,
+ }
+ }
+ }
+
+ };
+}
+
+register_builtin! {
+ Copy => copy_expand,
+ Clone => clone_expand,
+ Default => default_expand,
+ Debug => debug_expand,
+ Hash => hash_expand,
+ Ord => ord_expand,
+ PartialOrd => partial_ord_expand,
+ Eq => eq_expand,
+ PartialEq => partial_eq_expand
+}
+
+pub fn find_builtin_derive(ident: &name::Name) -> Option<BuiltinDeriveExpander> {
+ BuiltinDeriveExpander::find_by_name(ident)
+}
+
+struct BasicAdtInfo {
+ name: tt::Ident,
+ type_or_const_params: usize,
+}
+
+fn parse_adt(tt: &tt::Subtree) -> Result<BasicAdtInfo, ExpandError> {
+ let (parsed, token_map) = mbe::token_tree_to_syntax_node(tt, mbe::TopEntryPoint::MacroItems);
+ let macro_items = ast::MacroItems::cast(parsed.syntax_node()).ok_or_else(|| {
+ debug!("derive node didn't parse");
+ ExpandError::Other("invalid item definition".into())
+ })?;
+ let item = macro_items.items().next().ok_or_else(|| {
+ debug!("no module item parsed");
+ ExpandError::Other("no item found".into())
+ })?;
+ let node = item.syntax();
+ let (name, params) = match_ast! {
+ match node {
+ ast::Struct(it) => (it.name(), it.generic_param_list()),
+ ast::Enum(it) => (it.name(), it.generic_param_list()),
+ ast::Union(it) => (it.name(), it.generic_param_list()),
+ _ => {
+ debug!("unexpected node is {:?}", node);
+ return Err(ExpandError::Other("expected struct, enum or union".into()))
+ },
+ }
+ };
+ let name = name.ok_or_else(|| {
+ debug!("parsed item has no name");
+ ExpandError::Other("missing name".into())
+ })?;
+ let name_token_id =
+ token_map.token_by_range(name.syntax().text_range()).unwrap_or_else(TokenId::unspecified);
+ let name_token = tt::Ident { id: name_token_id, text: name.text().into() };
+ let type_or_const_params =
+ params.map_or(0, |type_param_list| type_param_list.type_or_const_params().count());
+ Ok(BasicAdtInfo { name: name_token, type_or_const_params })
+}
+
+fn make_type_args(n: usize, bound: Vec<tt::TokenTree>) -> Vec<tt::TokenTree> {
+ let mut result = Vec::<tt::TokenTree>::with_capacity(n * 2);
+ result.push(
+ tt::Leaf::Punct(tt::Punct {
+ char: '<',
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ })
+ .into(),
+ );
+ for i in 0..n {
+ if i > 0 {
+ result.push(
+ tt::Leaf::Punct(tt::Punct {
+ char: ',',
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ })
+ .into(),
+ );
+ }
+ result.push(
+ tt::Leaf::Ident(tt::Ident {
+ id: tt::TokenId::unspecified(),
+ text: format!("T{}", i).into(),
+ })
+ .into(),
+ );
+ result.extend(bound.iter().cloned());
+ }
+ result.push(
+ tt::Leaf::Punct(tt::Punct {
+ char: '>',
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ })
+ .into(),
+ );
+ result
+}
+
+fn expand_simple_derive(tt: &tt::Subtree, trait_path: tt::Subtree) -> ExpandResult<tt::Subtree> {
+ let info = match parse_adt(tt) {
+ Ok(info) => info,
+ Err(e) => return ExpandResult::only_err(e),
+ };
+ let name = info.name;
+ let trait_path_clone = trait_path.token_trees.clone();
+ let bound = (quote! { : ##trait_path_clone }).token_trees;
+ let type_params = make_type_args(info.type_or_const_params, bound);
+ let type_args = make_type_args(info.type_or_const_params, Vec::new());
+ let trait_path = trait_path.token_trees;
+ let expanded = quote! {
+ impl ##type_params ##trait_path for #name ##type_args {}
+ };
+ ExpandResult::ok(expanded)
+}
+
+fn find_builtin_crate(db: &dyn AstDatabase, id: MacroCallId) -> tt::TokenTree {
+ // FIXME: make hygiene work for builtin derive macro
+ // such that $crate can be used here.
+ let cg = db.crate_graph();
+ let krate = db.lookup_intern_macro_call(id).krate;
+
+ let tt = if matches!(cg[krate].origin, CrateOrigin::Lang(LangCrateOrigin::Core)) {
+ cov_mark::hit!(test_copy_expand_in_core);
+ quote! { crate }
+ } else {
+ quote! { core }
+ };
+
+ tt.token_trees[0].clone()
+}
+
+fn copy_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::marker::Copy })
+}
+
+fn clone_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::clone::Clone })
+}
+
+fn default_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::default::Default })
+}
+
+fn debug_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::fmt::Debug })
+}
+
+fn hash_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::hash::Hash })
+}
+
+fn eq_expand(db: &dyn AstDatabase, id: MacroCallId, tt: &tt::Subtree) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::cmp::Eq })
+}
+
+fn partial_eq_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::cmp::PartialEq })
+}
+
+fn ord_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::cmp::Ord })
+}
+
+fn partial_ord_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let krate = find_builtin_crate(db, id);
+ expand_simple_derive(tt, quote! { #krate::cmp::PartialOrd })
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/builtin_fn_macro.rs b/src/tools/rust-analyzer/crates/hir-expand/src/builtin_fn_macro.rs
new file mode 100644
index 000000000..76da7c9f1
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/builtin_fn_macro.rs
@@ -0,0 +1,669 @@
+//! Builtin macro
+
+use base_db::{AnchoredPath, Edition, FileId};
+use cfg::CfgExpr;
+use either::Either;
+use mbe::{parse_exprs_with_sep, parse_to_token_tree};
+use syntax::{
+ ast::{self, AstToken},
+ SmolStr,
+};
+
+use crate::{db::AstDatabase, name, quote, ExpandError, ExpandResult, MacroCallId, MacroCallLoc};
+
+macro_rules! register_builtin {
+ ( LAZY: $(($name:ident, $kind: ident) => $expand:ident),* , EAGER: $(($e_name:ident, $e_kind: ident) => $e_expand:ident),* ) => {
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+ pub enum BuiltinFnLikeExpander {
+ $($kind),*
+ }
+
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+ pub enum EagerExpander {
+ $($e_kind),*
+ }
+
+ impl BuiltinFnLikeExpander {
+ pub fn expand(
+ &self,
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+ ) -> ExpandResult<tt::Subtree> {
+ let expander = match *self {
+ $( BuiltinFnLikeExpander::$kind => $expand, )*
+ };
+ expander(db, id, tt)
+ }
+ }
+
+ impl EagerExpander {
+ pub fn expand(
+ &self,
+ db: &dyn AstDatabase,
+ arg_id: MacroCallId,
+ tt: &tt::Subtree,
+ ) -> ExpandResult<ExpandedEager> {
+ let expander = match *self {
+ $( EagerExpander::$e_kind => $e_expand, )*
+ };
+ expander(db, arg_id, tt)
+ }
+ }
+
+ fn find_by_name(ident: &name::Name) -> Option<Either<BuiltinFnLikeExpander, EagerExpander>> {
+ match ident {
+ $( id if id == &name::name![$name] => Some(Either::Left(BuiltinFnLikeExpander::$kind)), )*
+ $( id if id == &name::name![$e_name] => Some(Either::Right(EagerExpander::$e_kind)), )*
+ _ => return None,
+ }
+ }
+ };
+}
+
+#[derive(Debug, Default)]
+pub struct ExpandedEager {
+ pub(crate) subtree: tt::Subtree,
+ /// The included file ID of the include macro.
+ pub(crate) included_file: Option<FileId>,
+}
+
+impl ExpandedEager {
+ fn new(subtree: tt::Subtree) -> Self {
+ ExpandedEager { subtree, included_file: None }
+ }
+}
+
+pub fn find_builtin_macro(
+ ident: &name::Name,
+) -> Option<Either<BuiltinFnLikeExpander, EagerExpander>> {
+ find_by_name(ident)
+}
+
+register_builtin! {
+ LAZY:
+ (column, Column) => column_expand,
+ (file, File) => file_expand,
+ (line, Line) => line_expand,
+ (module_path, ModulePath) => module_path_expand,
+ (assert, Assert) => assert_expand,
+ (stringify, Stringify) => stringify_expand,
+ (format_args, FormatArgs) => format_args_expand,
+ (const_format_args, ConstFormatArgs) => format_args_expand,
+ // format_args_nl only differs in that it adds a newline in the end,
+ // so we use the same stub expansion for now
+ (format_args_nl, FormatArgsNl) => format_args_expand,
+ (llvm_asm, LlvmAsm) => asm_expand,
+ (asm, Asm) => asm_expand,
+ (global_asm, GlobalAsm) => global_asm_expand,
+ (cfg, Cfg) => cfg_expand,
+ (core_panic, CorePanic) => panic_expand,
+ (std_panic, StdPanic) => panic_expand,
+ (unreachable, Unreachable) => unreachable_expand,
+ (log_syntax, LogSyntax) => log_syntax_expand,
+ (trace_macros, TraceMacros) => trace_macros_expand,
+
+ EAGER:
+ (compile_error, CompileError) => compile_error_expand,
+ (concat, Concat) => concat_expand,
+ (concat_idents, ConcatIdents) => concat_idents_expand,
+ (concat_bytes, ConcatBytes) => concat_bytes_expand,
+ (include, Include) => include_expand,
+ (include_bytes, IncludeBytes) => include_bytes_expand,
+ (include_str, IncludeStr) => include_str_expand,
+ (env, Env) => env_expand,
+ (option_env, OptionEnv) => option_env_expand
+}
+
+const DOLLAR_CRATE: tt::Ident =
+ tt::Ident { text: SmolStr::new_inline("$crate"), id: tt::TokenId::unspecified() };
+
+fn module_path_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // Just return a dummy result.
+ ExpandResult::ok(quote! { "module::path" })
+}
+
+fn line_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // dummy implementation for type-checking purposes
+ let line_num = 0;
+ let expanded = quote! {
+ #line_num
+ };
+
+ ExpandResult::ok(expanded)
+}
+
+fn log_syntax_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ ExpandResult::ok(quote! {})
+}
+
+fn trace_macros_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ ExpandResult::ok(quote! {})
+}
+
+fn stringify_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let pretty = tt::pretty(&tt.token_trees);
+
+ let expanded = quote! {
+ #pretty
+ };
+
+ ExpandResult::ok(expanded)
+}
+
+fn column_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // dummy implementation for type-checking purposes
+ let col_num = 0;
+ let expanded = quote! {
+ #col_num
+ };
+
+ ExpandResult::ok(expanded)
+}
+
+fn assert_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let args = parse_exprs_with_sep(tt, ',');
+ let expanded = match &*args {
+ [cond, panic_args @ ..] => {
+ let comma = tt::Subtree {
+ delimiter: None,
+ token_trees: vec![tt::TokenTree::Leaf(tt::Leaf::Punct(tt::Punct {
+ char: ',',
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ }))],
+ };
+ let cond = cond.clone();
+ let panic_args = itertools::Itertools::intersperse(panic_args.iter().cloned(), comma);
+ quote! {{
+ if !#cond {
+ #DOLLAR_CRATE::panic!(##panic_args);
+ }
+ }}
+ }
+ [] => quote! {{}},
+ };
+
+ ExpandResult::ok(expanded)
+}
+
+fn file_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // FIXME: RA purposefully lacks knowledge of absolute file names
+ // so just return "".
+ let file_name = "";
+
+ let expanded = quote! {
+ #file_name
+ };
+
+ ExpandResult::ok(expanded)
+}
+
+fn format_args_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // We expand `format_args!("", a1, a2)` to
+ // ```
+ // std::fmt::Arguments::new_v1(&[], &[
+ // std::fmt::ArgumentV1::new(&arg1,std::fmt::Display::fmt),
+ // std::fmt::ArgumentV1::new(&arg2,std::fmt::Display::fmt),
+ // ])
+ // ```,
+ // which is still not really correct, but close enough for now
+ let mut args = parse_exprs_with_sep(tt, ',');
+
+ if args.is_empty() {
+ return ExpandResult::only_err(mbe::ExpandError::NoMatchingRule.into());
+ }
+ for arg in &mut args {
+ // Remove `key =`.
+ if matches!(arg.token_trees.get(1), Some(tt::TokenTree::Leaf(tt::Leaf::Punct(p))) if p.char == '=' && p.spacing != tt::Spacing::Joint)
+ {
+ arg.token_trees.drain(..2);
+ }
+ }
+ let _format_string = args.remove(0);
+ let arg_tts = args.into_iter().flat_map(|arg| {
+ quote! { std::fmt::ArgumentV1::new(&(#arg), std::fmt::Display::fmt), }
+ }.token_trees);
+ let expanded = quote! {
+ std::fmt::Arguments::new_v1(&[], &[##arg_tts])
+ };
+ ExpandResult::ok(expanded)
+}
+
+fn asm_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // We expand all assembly snippets to `format_args!` invocations to get format syntax
+ // highlighting for them.
+
+ let mut literals = Vec::new();
+ for tt in tt.token_trees.chunks(2) {
+ match tt {
+ [tt::TokenTree::Leaf(tt::Leaf::Literal(lit))]
+ | [tt::TokenTree::Leaf(tt::Leaf::Literal(lit)), tt::TokenTree::Leaf(tt::Leaf::Punct(tt::Punct { char: ',', id: _, spacing: _ }))] =>
+ {
+ let krate = DOLLAR_CRATE.clone();
+ literals.push(quote!(#krate::format_args!(#lit);));
+ }
+ _ => break,
+ }
+ }
+
+ let expanded = quote! {{
+ ##literals
+ loop {}
+ }};
+ ExpandResult::ok(expanded)
+}
+
+fn global_asm_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ _tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ // Expand to nothing (at item-level)
+ ExpandResult::ok(quote! {})
+}
+
+fn cfg_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let loc = db.lookup_intern_macro_call(id);
+ let expr = CfgExpr::parse(tt);
+ let enabled = db.crate_graph()[loc.krate].cfg_options.check(&expr) != Some(false);
+ let expanded = if enabled { quote!(true) } else { quote!(false) };
+ ExpandResult::ok(expanded)
+}
+
+fn panic_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(id);
+ // Expand to a macro call `$crate::panic::panic_{edition}`
+ let mut call = if db.crate_graph()[loc.krate].edition >= Edition::Edition2021 {
+ quote!(#DOLLAR_CRATE::panic::panic_2021!)
+ } else {
+ quote!(#DOLLAR_CRATE::panic::panic_2015!)
+ };
+
+ // Pass the original arguments
+ call.token_trees.push(tt::TokenTree::Subtree(tt.clone()));
+ ExpandResult::ok(call)
+}
+
+fn unreachable_expand(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<tt::Subtree> {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(id);
+ // Expand to a macro call `$crate::panic::unreachable_{edition}`
+ let mut call = if db.crate_graph()[loc.krate].edition >= Edition::Edition2021 {
+ quote!(#DOLLAR_CRATE::panic::unreachable_2021!)
+ } else {
+ quote!(#DOLLAR_CRATE::panic::unreachable_2015!)
+ };
+
+ // Pass the original arguments
+ call.token_trees.push(tt::TokenTree::Subtree(tt.clone()));
+ ExpandResult::ok(call)
+}
+
+fn unquote_str(lit: &tt::Literal) -> Option<String> {
+ let lit = ast::make::tokens::literal(&lit.to_string());
+ let token = ast::String::cast(lit)?;
+ token.value().map(|it| it.into_owned())
+}
+
+fn unquote_byte_string(lit: &tt::Literal) -> Option<Vec<u8>> {
+ let lit = ast::make::tokens::literal(&lit.to_string());
+ let token = ast::ByteString::cast(lit)?;
+ token.value().map(|it| it.into_owned())
+}
+
+fn compile_error_expand(
+ _db: &dyn AstDatabase,
+ _id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let err = match &*tt.token_trees {
+ [tt::TokenTree::Leaf(tt::Leaf::Literal(it))] => {
+ let text = it.text.as_str();
+ if text.starts_with('"') && text.ends_with('"') {
+ // FIXME: does not handle raw strings
+ ExpandError::Other(text[1..text.len() - 1].into())
+ } else {
+ ExpandError::Other("`compile_error!` argument must be a string".into())
+ }
+ }
+ _ => ExpandError::Other("`compile_error!` argument must be a string".into()),
+ };
+
+ ExpandResult { value: ExpandedEager::new(quote! {}), err: Some(err) }
+}
+
+fn concat_expand(
+ _db: &dyn AstDatabase,
+ _arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let mut err = None;
+ let mut text = String::new();
+ for (i, mut t) in tt.token_trees.iter().enumerate() {
+ // FIXME: hack on top of a hack: `$e:expr` captures get surrounded in parentheses
+ // to ensure the right parsing order, so skip the parentheses here. Ideally we'd
+ // implement rustc's model. cc https://github.com/rust-lang/rust-analyzer/pull/10623
+ if let tt::TokenTree::Subtree(tt::Subtree { delimiter: Some(delim), token_trees }) = t {
+ if let [tt] = &**token_trees {
+ if delim.kind == tt::DelimiterKind::Parenthesis {
+ t = tt;
+ }
+ }
+ }
+
+ match t {
+ tt::TokenTree::Leaf(tt::Leaf::Literal(it)) if i % 2 == 0 => {
+ // concat works with string and char literals, so remove any quotes.
+ // It also works with integer, float and boolean literals, so just use the rest
+ // as-is.
+ let component = unquote_str(it).unwrap_or_else(|| it.text.to_string());
+ text.push_str(&component);
+ }
+ // handle boolean literals
+ tt::TokenTree::Leaf(tt::Leaf::Ident(id))
+ if i % 2 == 0 && (id.text == "true" || id.text == "false") =>
+ {
+ text.push_str(id.text.as_str());
+ }
+ tt::TokenTree::Leaf(tt::Leaf::Punct(punct)) if i % 2 == 1 && punct.char == ',' => (),
+ _ => {
+ err.get_or_insert(mbe::ExpandError::UnexpectedToken.into());
+ }
+ }
+ }
+ ExpandResult { value: ExpandedEager::new(quote!(#text)), err }
+}
+
+/// Expands the built-in `concat_bytes!` macro.
+///
+/// Walks the comma-separated arguments: byte literals are kept as-is,
+/// byte-string literals are unquoted into their components, and bracketed
+/// array arguments are handled by `concat_bytes_expand_subtree`. The
+/// collected pieces are re-emitted as one bracketed array expression.
+fn concat_bytes_expand(
+ _db: &dyn AstDatabase,
+ _arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let mut bytes = Vec::new();
+ let mut err = None;
+ for (i, t) in tt.token_trees.iter().enumerate() {
+ match t {
+ tt::TokenTree::Leaf(tt::Leaf::Literal(lit)) => {
+ // Re-parse the literal text so we can classify it by SyntaxKind.
+ let token = ast::make::tokens::literal(&lit.to_string());
+ match token.kind() {
+ syntax::SyntaxKind::BYTE => bytes.push(token.text().to_string()),
+ syntax::SyntaxKind::BYTE_STRING => {
+ let components = unquote_byte_string(lit).unwrap_or_else(Vec::new);
+ components.into_iter().for_each(|x| bytes.push(x.to_string()));
+ }
+ _ => {
+ err.get_or_insert(mbe::ExpandError::UnexpectedToken.into());
+ break;
+ }
+ }
+ }
+ // Odd positions must be the separating commas.
+ tt::TokenTree::Leaf(tt::Leaf::Punct(punct)) if i % 2 == 1 && punct.char == ',' => (),
+ tt::TokenTree::Subtree(tree)
+ if tree.delimiter_kind() == Some(tt::DelimiterKind::Bracket) =>
+ {
+ if let Err(e) = concat_bytes_expand_subtree(tree, &mut bytes) {
+ err.get_or_insert(e);
+ break;
+ }
+ }
+ _ => {
+ err.get_or_insert(mbe::ExpandError::UnexpectedToken.into());
+ break;
+ }
+ }
+ }
+ // The joined elements are emitted as a single ident token; the quote below
+ // wraps it in brackets to form the array expression.
+ let ident = tt::Ident { text: bytes.join(", ").into(), id: tt::TokenId::unspecified() };
+ ExpandResult { value: ExpandedEager::new(quote!([#ident])), err }
+}
+
+/// Handles one bracketed array argument of `concat_bytes!` (e.g. `[b'a', 1]`),
+/// pushing each byte or integer literal into `bytes`. Anything other than such
+/// literals separated by commas is an `UnexpectedToken` error.
+fn concat_bytes_expand_subtree(
+ tree: &tt::Subtree,
+ bytes: &mut Vec<String>,
+) -> Result<(), ExpandError> {
+ for (ti, tt) in tree.token_trees.iter().enumerate() {
+ match tt {
+ tt::TokenTree::Leaf(tt::Leaf::Literal(lit)) => {
+ let lit = ast::make::tokens::literal(&lit.to_string());
+ match lit.kind() {
+ syntax::SyntaxKind::BYTE | syntax::SyntaxKind::INT_NUMBER => {
+ bytes.push(lit.text().to_string())
+ }
+ _ => {
+ return Err(mbe::ExpandError::UnexpectedToken.into());
+ }
+ }
+ }
+ // Odd positions must be the separating commas.
+ tt::TokenTree::Leaf(tt::Leaf::Punct(punct)) if ti % 2 == 1 && punct.char == ',' => (),
+ _ => {
+ return Err(mbe::ExpandError::UnexpectedToken.into());
+ }
+ }
+ }
+ Ok(())
+}
+
+/// Expands the built-in `concat_idents!` macro by concatenating the
+/// comma-separated identifier arguments into one identifier token.
+fn concat_idents_expand(
+ _db: &dyn AstDatabase,
+ _arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let mut err = None;
+ let mut ident = String::new();
+ for (i, t) in tt.token_trees.iter().enumerate() {
+ match t {
+ tt::TokenTree::Leaf(tt::Leaf::Ident(id)) => {
+ ident.push_str(id.text.as_str());
+ }
+ // Odd positions must be the separating commas.
+ tt::TokenTree::Leaf(tt::Leaf::Punct(punct)) if i % 2 == 1 && punct.char == ',' => (),
+ _ => {
+ err.get_or_insert(mbe::ExpandError::UnexpectedToken.into());
+ }
+ }
+ }
+ let ident = tt::Ident { text: ident.into(), id: tt::TokenId::unspecified() };
+ ExpandResult { value: ExpandedEager::new(quote!(#ident)), err }
+}
+
+/// Resolves `path_str` relative to the file containing the macro call.
+///
+/// Errors if the path cannot be resolved, or if it resolves back to the
+/// calling file itself while `allow_recursion` is false (this guards
+/// `include!` against including itself).
+fn relative_file(
+ db: &dyn AstDatabase,
+ call_id: MacroCallId,
+ path_str: &str,
+ allow_recursion: bool,
+) -> Result<FileId, ExpandError> {
+ let call_site = call_id.as_file().original_file(db);
+ let path = AnchoredPath { anchor: call_site, path: path_str };
+ let res = db
+ .resolve_path(path)
+ .ok_or_else(|| ExpandError::Other(format!("failed to load file `{path_str}`").into()))?;
+ // Prevent include itself
+ if res == call_site && !allow_recursion {
+ Err(ExpandError::Other(format!("recursive inclusion of `{path_str}`").into()))
+ } else {
+ Ok(res)
+ }
+}
+
+/// Extracts the first token of `tt` as an unquoted string literal, erroring
+/// out with `ConversionError` when it is absent or not a string literal.
+fn parse_string(tt: &tt::Subtree) -> Result<String, ExpandError> {
+ tt.token_trees
+ .get(0)
+ .and_then(|tt| match tt {
+ tt::TokenTree::Leaf(tt::Leaf::Literal(it)) => unquote_str(it),
+ _ => None,
+ })
+ .ok_or(mbe::ExpandError::ConversionError.into())
+}
+
+/// Expands `include!`: resolves the string-literal path argument, reads the
+/// target file's text and re-parses it into a token tree. On success the
+/// resulting expansion also records the included `FileId`.
+fn include_expand(
+ db: &dyn AstDatabase,
+ arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let res = (|| {
+ let path = parse_string(tt)?;
+ let file_id = relative_file(db, arg_id, &path, false)?;
+
+ let subtree =
+ parse_to_token_tree(&db.file_text(file_id)).ok_or(mbe::ExpandError::ConversionError)?.0;
+ Ok((subtree, file_id))
+ })();
+
+ match res {
+ Ok((subtree, file_id)) => {
+ ExpandResult::ok(ExpandedEager { subtree, included_file: Some(file_id) })
+ }
+ Err(e) => ExpandResult::only_err(e),
+ }
+}
+
+/// Expands `include_bytes!`. Only validates the path argument; the expansion
+/// itself is a dummy empty byte-string literal (see the FIXME below).
+fn include_bytes_expand(
+ _db: &dyn AstDatabase,
+ _arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ if let Err(e) = parse_string(tt) {
+ return ExpandResult::only_err(e);
+ }
+
+ // FIXME: actually read the file here if the user asked for macro expansion
+ let res = tt::Subtree {
+ delimiter: None,
+ token_trees: vec![tt::TokenTree::Leaf(tt::Leaf::Literal(tt::Literal {
+ text: r#"b"""#.into(),
+ id: tt::TokenId::unspecified(),
+ }))],
+ };
+ ExpandResult::ok(ExpandedEager::new(res))
+}
+
+/// Expands `include_str!` to the contents of the referenced file, or to an
+/// empty string literal when the file cannot be loaded (see comment below).
+fn include_str_expand(
+ db: &dyn AstDatabase,
+ arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let path = match parse_string(tt) {
+ Ok(it) => it,
+ Err(e) => return ExpandResult::only_err(e),
+ };
+
+ // FIXME: we're not able to read excluded files (which is most of them because
+ // it's unusual to `include_str!` a Rust file), but we can return an empty string.
+ // Ideally, we'd be able to offer a precise expansion if the user asks for macro
+ // expansion.
+ let file_id = match relative_file(db, arg_id, &path, true) {
+ Ok(file_id) => file_id,
+ Err(_) => {
+ return ExpandResult::ok(ExpandedEager::new(quote!("")));
+ }
+ };
+
+ let text = db.file_text(file_id);
+ let text = &*text;
+
+ ExpandResult::ok(ExpandedEager::new(quote!(#text)))
+}
+
+/// Looks up `key` in the build environment of the crate containing the macro
+/// call (shared helper for `env!` and `option_env!`).
+fn get_env_inner(db: &dyn AstDatabase, arg_id: MacroCallId, key: &str) -> Option<String> {
+ let krate = db.lookup_intern_macro_call(arg_id).krate;
+ db.crate_graph()[krate].env.get(key)
+}
+
+/// Expands `env!`. A missing variable expands to a placeholder string rather
+/// than failing, so type inference (and `include!(concat!(env!(..), ..))`
+/// chains) keep working; only a missing `OUT_DIR` is diagnosed.
+fn env_expand(
+ db: &dyn AstDatabase,
+ arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let key = match parse_string(tt) {
+ Ok(it) => it,
+ Err(e) => return ExpandResult::only_err(e),
+ };
+
+ let mut err = None;
+ let s = get_env_inner(db, arg_id, &key).unwrap_or_else(|| {
+ // The only variable rust-analyzer ever sets is `OUT_DIR`, so only diagnose that to avoid
+ // unnecessary diagnostics for eg. `CARGO_PKG_NAME`.
+ if key == "OUT_DIR" {
+ err = Some(ExpandError::Other(
+ r#"`OUT_DIR` not set, enable "build scripts" to fix"#.into(),
+ ));
+ }
+
+ // If the variable is unset, still return a dummy string to help type inference along.
+ // We cannot use an empty string here, because for
+ // `include!(concat!(env!("OUT_DIR"), "/foo.rs"))` will become
+ // `include!("foo.rs"), which might go to infinite loop
+ "__RA_UNIMPLEMENTED__".to_string()
+ });
+ let expanded = quote! { #s };
+
+ ExpandResult { value: ExpandedEager::new(expanded), err }
+}
+
+/// Expands `option_env!` to `std::option::Option::Some(<value>)` or
+/// `std::option::Option::None::<&str>` depending on whether the variable is
+/// known to the crate's build environment.
+fn option_env_expand(
+ db: &dyn AstDatabase,
+ arg_id: MacroCallId,
+ tt: &tt::Subtree,
+) -> ExpandResult<ExpandedEager> {
+ let key = match parse_string(tt) {
+ Ok(it) => it,
+ Err(e) => return ExpandResult::only_err(e),
+ };
+
+ let expanded = match get_env_inner(db, arg_id, &key) {
+ None => quote! { std::option::Option::None::<&str> },
+ // Spell out the full `std::option::Option::Some` path: `Some` is not an
+ // item of the `std::option` module, so the previous `std::option::Some`
+ // would not resolve in the expansion (and it was inconsistent with the
+ // `None` arm above).
+ Some(s) => quote! { std::option::Option::Some(#s) },
+ };
+
+ ExpandResult::ok(ExpandedEager::new(expanded))
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/db.rs b/src/tools/rust-analyzer/crates/hir-expand/src/db.rs
new file mode 100644
index 000000000..bd60c3d26
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/db.rs
@@ -0,0 +1,509 @@
+//! Defines database & queries for macro expansion.
+
+use std::sync::Arc;
+
+use base_db::{salsa, SourceDatabase};
+use either::Either;
+use limit::Limit;
+use mbe::syntax_node_to_token_tree;
+use rustc_hash::FxHashSet;
+use syntax::{
+ ast::{self, HasAttrs, HasDocComments},
+ AstNode, GreenNode, Parse, SyntaxNode, SyntaxToken, T,
+};
+
+use crate::{
+ ast_id_map::AstIdMap, builtin_attr_macro::pseudo_derive_attr_expansion, fixup,
+ hygiene::HygieneFrame, BuiltinAttrExpander, BuiltinDeriveExpander, BuiltinFnLikeExpander,
+ ExpandError, ExpandResult, ExpandTo, HirFileId, HirFileIdRepr, MacroCallId, MacroCallKind,
+ MacroCallLoc, MacroDefId, MacroDefKind, MacroFile, ProcMacroExpander,
+};
+
+/// Total limit on the number of tokens produced by any macro invocation.
+///
+/// If an invocation produces more tokens than this limit, it will not be stored in the database and
+/// an error will be emitted.
+///
+/// Actual max for `analysis-stats .` at some point: 30672.
+static TOKEN_LIMIT: Limit = Limit::new(524_288);
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+/// A macro definition in a form ready to expand a call's token tree; one
+/// variant per kind of macro known to rust-analyzer.
+pub enum TokenExpander {
+ /// Old-style `macro_rules` or the new macros 2.0
+ DeclarativeMacro { mac: mbe::DeclarativeMacro, def_site_token_map: mbe::TokenMap },
+ /// Stuff like `line!` and `file!`.
+ Builtin(BuiltinFnLikeExpander),
+ /// `global_allocator` and such.
+ BuiltinAttr(BuiltinAttrExpander),
+ /// `derive(Copy)` and such.
+ BuiltinDerive(BuiltinDeriveExpander),
+ /// The thing we love the most here in rust-analyzer -- procedural macros.
+ ProcMacro(ProcMacroExpander),
+}
+
+impl TokenExpander {
+ /// Expands the macro-call argument `tt` using this expander; proc macros
+ /// are routed through the `expand_proc_macro` salsa query instead of being
+ /// run directly.
+ fn expand(
+ &self,
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+ tt: &tt::Subtree,
+ ) -> ExpandResult<tt::Subtree> {
+ match self {
+ TokenExpander::DeclarativeMacro { mac, .. } => mac.expand(tt).map_err(Into::into),
+ TokenExpander::Builtin(it) => it.expand(db, id, tt).map_err(Into::into),
+ TokenExpander::BuiltinAttr(it) => it.expand(db, id, tt),
+ TokenExpander::BuiltinDerive(it) => it.expand(db, id, tt),
+ TokenExpander::ProcMacro(_) => {
+ // We store the result in salsa db to prevent non-deterministic behavior in
+ // some proc-macro implementation
+ // See #4315 for details
+ db.expand_proc_macro(id)
+ }
+ }
+ }
+
+ /// Maps a token id from the call site into the expansion. Only declarative
+ /// macros remap ids; for all other expanders this is the identity.
+ pub(crate) fn map_id_down(&self, id: tt::TokenId) -> tt::TokenId {
+ match self {
+ TokenExpander::DeclarativeMacro { mac, .. } => mac.map_id_down(id),
+ TokenExpander::Builtin(..)
+ | TokenExpander::BuiltinAttr(..)
+ | TokenExpander::BuiltinDerive(..)
+ | TokenExpander::ProcMacro(..) => id,
+ }
+ }
+
+ /// Maps a token id from the expansion back to its origin (call site or
+ /// definition site). Non-declarative expanders always report `Origin::Call`.
+ pub(crate) fn map_id_up(&self, id: tt::TokenId) -> (tt::TokenId, mbe::Origin) {
+ match self {
+ TokenExpander::DeclarativeMacro { mac, .. } => mac.map_id_up(id),
+ TokenExpander::Builtin(..)
+ | TokenExpander::BuiltinAttr(..)
+ | TokenExpander::BuiltinDerive(..)
+ | TokenExpander::ProcMacro(..) => (id, mbe::Origin::Call),
+ }
+ }
+}
+
+// FIXME: rename to ExpandDatabase
+#[salsa::query_group(AstDatabaseStorage)]
+pub trait AstDatabase: SourceDatabase {
+ /// Computes the map from `AstId`s to syntax nodes for the given file.
+ fn ast_id_map(&self, file_id: HirFileId) -> Arc<AstIdMap>;
+
+ /// Main public API -- parses a hir file, not caring whether it's a real
+ /// file or a macro expansion.
+ #[salsa::transparent]
+ fn parse_or_expand(&self, file_id: HirFileId) -> Option<SyntaxNode>;
+ /// Implementation for the macro case.
+ fn parse_macro_expansion(
+ &self,
+ macro_file: MacroFile,
+ ) -> ExpandResult<Option<(Parse<SyntaxNode>, Arc<mbe::TokenMap>)>>;
+
+ /// Macro ids. That's probably the tricksiest bit in rust-analyzer, and the
+ /// reason why we use salsa at all.
+ ///
+ /// We encode macro definitions into ids of macro calls, this what allows us
+ /// to be incremental.
+ #[salsa::interned]
+ fn intern_macro_call(&self, macro_call: MacroCallLoc) -> MacroCallId;
+
+ /// Lowers syntactic macro call to a token tree representation.
+ #[salsa::transparent]
+ fn macro_arg(
+ &self,
+ id: MacroCallId,
+ ) -> Option<Arc<(tt::Subtree, mbe::TokenMap, fixup::SyntaxFixupUndoInfo)>>;
+ /// Extracts syntax node, corresponding to a macro call. That's a firewall
+ /// query, only typing in the macro call itself changes the returned
+ /// subtree.
+ fn macro_arg_text(&self, id: MacroCallId) -> Option<GreenNode>;
+ /// Gets the expander for this macro. This compiles declarative macros, and
+ /// just fetches procedural ones.
+ fn macro_def(&self, id: MacroDefId) -> Result<Arc<TokenExpander>, mbe::ParseError>;
+
+ /// Expand macro call to a token tree. This query is LRUed (we keep 128 or so results in memory)
+ fn macro_expand(&self, macro_call: MacroCallId) -> ExpandResult<Option<Arc<tt::Subtree>>>;
+ /// Special case of the previous query for procedural macros. We can't LRU
+ /// proc macros, since they are not deterministic in general, and
+ /// non-determinism breaks salsa in a very, very, very bad way. @edwin0cheng
+ /// heroically debugged this once!
+ fn expand_proc_macro(&self, call: MacroCallId) -> ExpandResult<tt::Subtree>;
+ /// Firewall query that returns the error from the `macro_expand` query.
+ fn macro_expand_error(&self, macro_call: MacroCallId) -> Option<ExpandError>;
+
+ /// Computes the hygiene information for the given file/expansion.
+ fn hygiene_frame(&self, file_id: HirFileId) -> Arc<HygieneFrame>;
+}
+
+/// This expands the given macro call, but with different arguments. This is
+/// used for completion, where we want to see what 'would happen' if we insert a
+/// token. The `token_to_map` mapped down into the expansion, with the mapped
+/// token returned.
+///
+/// Returns the root node of the speculative expansion together with the token
+/// inside it that corresponds to `token_to_map`, if one can be found.
+pub fn expand_speculative(
+ db: &dyn AstDatabase,
+ actual_macro_call: MacroCallId,
+ speculative_args: &SyntaxNode,
+ token_to_map: SyntaxToken,
+) -> Option<(SyntaxNode, SyntaxToken)> {
+ let loc = db.lookup_intern_macro_call(actual_macro_call);
+ let macro_def = db.macro_def(loc.def).ok()?;
+ let token_range = token_to_map.text_range();
+
+ // Build the subtree and token mapping for the speculative args
+ let censor = censor_for_macro_input(&loc, speculative_args);
+ let mut fixups = fixup::fixup_syntax(speculative_args);
+ fixups.replace.extend(censor.into_iter().map(|node| (node.into(), Vec::new())));
+ let (mut tt, spec_args_tmap, _) = mbe::syntax_node_to_token_tree_with_modifications(
+ speculative_args,
+ fixups.token_map,
+ fixups.next_id,
+ fixups.replace,
+ fixups.append,
+ );
+
+ // For attribute macros, also lower the attribute's own input token tree and
+ // try to locate `token_to_map` inside it first.
+ let (attr_arg, token_id) = match loc.kind {
+ MacroCallKind::Attr { invoc_attr_index, is_derive, .. } => {
+ let attr = if is_derive {
+ // for pseudo-derive expansion we actually pass the attribute itself only
+ ast::Attr::cast(speculative_args.clone())
+ } else {
+ // Attributes may have an input token tree, build the subtree and map for this as well
+ // then try finding a token id for our token if it is inside this input subtree.
+ let item = ast::Item::cast(speculative_args.clone())?;
+ item.doc_comments_and_attrs().nth(invoc_attr_index as usize).and_then(Either::left)
+ }?;
+ match attr.token_tree() {
+ Some(token_tree) => {
+ let (mut tree, map) = syntax_node_to_token_tree(attr.token_tree()?.syntax());
+ tree.delimiter = None;
+
+ let shift = mbe::Shift::new(&tt);
+ shift.shift_all(&mut tree);
+
+ let token_id = if token_tree.syntax().text_range().contains_range(token_range) {
+ let attr_input_start =
+ token_tree.left_delimiter_token()?.text_range().start();
+ let range = token_range.checked_sub(attr_input_start)?;
+ let token_id = shift.shift(map.token_by_range(range)?);
+ Some(token_id)
+ } else {
+ None
+ };
+ (Some(tree), token_id)
+ }
+ _ => (None, None),
+ }
+ }
+ _ => (None, None),
+ };
+ let token_id = match token_id {
+ Some(token_id) => token_id,
+ // token wasn't inside an attribute input so it has to be in the general macro input
+ None => {
+ let range = token_range.checked_sub(speculative_args.text_range().start())?;
+ let token_id = spec_args_tmap.token_by_range(range)?;
+ macro_def.map_id_down(token_id)
+ }
+ };
+
+ // Do the actual expansion, we need to directly expand the proc macro due to the attribute args
+ // Otherwise the expand query will fetch the non speculative attribute args and pass those instead.
+ let mut speculative_expansion = match loc.def.kind {
+ MacroDefKind::ProcMacro(expander, ..) => {
+ tt.delimiter = None;
+ expander.expand(db, loc.krate, &tt, attr_arg.as_ref())
+ }
+ MacroDefKind::BuiltInAttr(BuiltinAttrExpander::Derive, _) => {
+ pseudo_derive_attr_expansion(&tt, attr_arg.as_ref()?)
+ }
+ _ => macro_def.expand(db, actual_macro_call, &tt),
+ };
+
+ let expand_to = macro_expand_to(db, actual_macro_call);
+ fixup::reverse_fixups(&mut speculative_expansion.value, &spec_args_tmap, &fixups.undo_info);
+ let (node, rev_tmap) = token_tree_to_syntax_node(&speculative_expansion.value, expand_to);
+
+ // Map the token id back to a range in the expansion and pick the token there.
+ let range = rev_tmap.first_range_by_token(token_id, token_to_map.kind())?;
+ let token = node.syntax_node().covering_element(range).into_token()?;
+ Some((node.syntax_node(), token))
+}
+
+/// Builds the `AstIdMap` for a file by parsing/expanding it; files that fail
+/// to parse/expand get an empty (default) map.
+fn ast_id_map(db: &dyn AstDatabase, file_id: HirFileId) -> Arc<AstIdMap> {
+ let map = db.parse_or_expand(file_id).map(|it| AstIdMap::from_source(&it)).unwrap_or_default();
+ Arc::new(map)
+}
+
+/// Returns the syntax tree of a real file, or the parsed expansion of a
+/// macro file.
+fn parse_or_expand(db: &dyn AstDatabase, file_id: HirFileId) -> Option<SyntaxNode> {
+ match file_id.0 {
+ HirFileIdRepr::FileId(file_id) => Some(db.parse(file_id).tree().syntax().clone()),
+ HirFileIdRepr::MacroFile(macro_file) => {
+ // FIXME: Note how we convert from `Parse` to `SyntaxNode` here,
+ // forgetting about parse errors.
+ db.parse_macro_expansion(macro_file).value.map(|(it, _)| it.syntax_node())
+ }
+ }
+}
+
+/// Expands the macro behind `macro_file` and parses the resulting token tree
+/// into a syntax tree; the parser entry point is chosen from the call site's
+/// `ExpandTo`. Expansion errors are logged (with call-site context) and
+/// propagated in the `ExpandResult`.
+fn parse_macro_expansion(
+ db: &dyn AstDatabase,
+ macro_file: MacroFile,
+) -> ExpandResult<Option<(Parse<SyntaxNode>, Arc<mbe::TokenMap>)>> {
+ let _p = profile::span("parse_macro_expansion");
+ let result = db.macro_expand(macro_file.macro_call_id);
+
+ if let Some(err) = &result.err {
+ // Note:
+ // The final goal we would like to make all parse_macro success,
+ // such that the following log will not call anyway.
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ let node = loc.kind.to_node(db);
+
+ // collect parent information for warning log
+ let parents =
+ std::iter::successors(loc.kind.file_id().call_node(db), |it| it.file_id.call_node(db))
+ .map(|n| format!("{:#}", n.value))
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ tracing::debug!(
+ "fail on macro_parse: (reason: {:?} macro_call: {:#}) parents: {}",
+ err,
+ node.value,
+ parents
+ );
+ }
+ let tt = match result.value {
+ Some(tt) => tt,
+ None => return ExpandResult { value: None, err: result.err },
+ };
+
+ let expand_to = macro_expand_to(db, macro_file.macro_call_id);
+
+ tracing::debug!("expanded = {}", tt.as_debug_string());
+ tracing::debug!("kind = {:?}", expand_to);
+
+ let (parse, rev_token_map) = token_tree_to_syntax_node(&tt, expand_to);
+
+ ExpandResult { value: Some((parse, Arc::new(rev_token_map))), err: result.err }
+}
+
+/// Lowers the macro-call argument to a token tree, applying syntax fixups and
+/// censoring attributes (see `censor_for_macro_input`). Proc-macro inputs get
+/// their outer delimiter stripped.
+fn macro_arg(
+ db: &dyn AstDatabase,
+ id: MacroCallId,
+) -> Option<Arc<(tt::Subtree, mbe::TokenMap, fixup::SyntaxFixupUndoInfo)>> {
+ let arg = db.macro_arg_text(id)?;
+ let loc = db.lookup_intern_macro_call(id);
+
+ let node = SyntaxNode::new_root(arg);
+ let censor = censor_for_macro_input(&loc, &node);
+ let mut fixups = fixup::fixup_syntax(&node);
+ // Censored nodes are replaced with nothing in the lowered token tree.
+ fixups.replace.extend(censor.into_iter().map(|node| (node.into(), Vec::new())));
+ let (mut tt, tmap, _) = mbe::syntax_node_to_token_tree_with_modifications(
+ &node,
+ fixups.token_map,
+ fixups.next_id,
+ fixups.replace,
+ fixups.append,
+ );
+
+ if loc.def.is_proc_macro() {
+ // proc macros expect their inputs without parentheses, MBEs expect it with them included
+ tt.delimiter = None;
+ }
+
+ Some(Arc::new((tt, tmap, fixups.undo_info)))
+}
+
+/// Computes the set of syntax nodes to hide from the macro's input: for
+/// derives, the `#[derive]` attributes up to and including the invoking one;
+/// for attribute macros, the invoking attribute itself. Fn-like macros (and
+/// derive-style attribute calls) censor nothing.
+fn censor_for_macro_input(loc: &MacroCallLoc, node: &SyntaxNode) -> FxHashSet<SyntaxNode> {
+ (|| {
+ let censor = match loc.kind {
+ MacroCallKind::FnLike { .. } => return None,
+ MacroCallKind::Derive { derive_attr_index, .. } => {
+ cov_mark::hit!(derive_censoring);
+ ast::Item::cast(node.clone())?
+ .attrs()
+ .take(derive_attr_index as usize + 1)
+ // FIXME
+ .filter(|attr| attr.simple_name().as_deref() == Some("derive"))
+ .map(|it| it.syntax().clone())
+ .collect()
+ }
+ MacroCallKind::Attr { is_derive: true, .. } => return None,
+ MacroCallKind::Attr { invoc_attr_index, .. } => {
+ cov_mark::hit!(attribute_macro_attr_censoring);
+ ast::Item::cast(node.clone())?
+ .doc_comments_and_attrs()
+ .nth(invoc_attr_index as usize)
+ .and_then(Either::left)
+ .map(|attr| attr.syntax().clone())
+ .into_iter()
+ .collect()
+ }
+ };
+ Some(censor)
+ })()
+ .unwrap_or_default()
+}
+
+/// Extracts the raw (green-tree) text of a macro call's argument. Fn-like
+/// calls with unbalanced delimiters are rejected up front (see comment below).
+fn macro_arg_text(db: &dyn AstDatabase, id: MacroCallId) -> Option<GreenNode> {
+ let loc = db.lookup_intern_macro_call(id);
+ let arg = loc.kind.arg(db)?;
+ if matches!(loc.kind, MacroCallKind::FnLike { .. }) {
+ let first = arg.first_child_or_token().map_or(T![.], |it| it.kind());
+ let last = arg.last_child_or_token().map_or(T![.], |it| it.kind());
+ let well_formed_tt =
+ matches!((first, last), (T!['('], T![')']) | (T!['['], T![']']) | (T!['{'], T!['}']));
+ if !well_formed_tt {
+ // Don't expand malformed (unbalanced) macro invocations. This is
+ // less than ideal, but trying to expand unbalanced macro calls
+ // sometimes produces pathological, deeply nested code which breaks
+ // all kinds of things.
+ //
+ // Some day, we'll have explicit recursion counters for all
+ // recursive things, at which point this code might be removed.
+ cov_mark::hit!(issue9358_bad_macro_stack_overflow);
+ return None;
+ }
+ }
+ Some(arg.green().into())
+}
+
+/// Builds the `TokenExpander` for a macro definition. Declarative macros are
+/// compiled from their definition token tree here; built-in and procedural
+/// macros just get wrapped. Eager macros deliberately error (see FIXME).
+fn macro_def(db: &dyn AstDatabase, id: MacroDefId) -> Result<Arc<TokenExpander>, mbe::ParseError> {
+ match id.kind {
+ MacroDefKind::Declarative(ast_id) => {
+ let (mac, def_site_token_map) = match ast_id.to_node(db) {
+ ast::Macro::MacroRules(macro_rules) => {
+ let arg = macro_rules
+ .token_tree()
+ .ok_or_else(|| mbe::ParseError::Expected("expected a token tree".into()))?;
+ let (tt, def_site_token_map) = mbe::syntax_node_to_token_tree(arg.syntax());
+ let mac = mbe::DeclarativeMacro::parse_macro_rules(&tt)?;
+ (mac, def_site_token_map)
+ }
+ ast::Macro::MacroDef(macro_def) => {
+ let arg = macro_def
+ .body()
+ .ok_or_else(|| mbe::ParseError::Expected("expected a token tree".into()))?;
+ let (tt, def_site_token_map) = mbe::syntax_node_to_token_tree(arg.syntax());
+ let mac = mbe::DeclarativeMacro::parse_macro2(&tt)?;
+ (mac, def_site_token_map)
+ }
+ };
+ Ok(Arc::new(TokenExpander::DeclarativeMacro { mac, def_site_token_map }))
+ }
+ MacroDefKind::BuiltIn(expander, _) => Ok(Arc::new(TokenExpander::Builtin(expander))),
+ MacroDefKind::BuiltInAttr(expander, _) => {
+ Ok(Arc::new(TokenExpander::BuiltinAttr(expander)))
+ }
+ MacroDefKind::BuiltInDerive(expander, _) => {
+ Ok(Arc::new(TokenExpander::BuiltinDerive(expander)))
+ }
+ MacroDefKind::BuiltInEager(..) => {
+ // FIXME: Return a random error here just to make the types align.
+ // This obviously should do something real instead.
+ Err(mbe::ParseError::UnexpectedToken("unexpected eager macro".into()))
+ }
+ MacroDefKind::ProcMacro(expander, ..) => Ok(Arc::new(TokenExpander::ProcMacro(expander))),
+ }
+}
+
+/// Expands a macro call to a token tree. Eager calls short-circuit to their
+/// pre-computed expansion; otherwise the argument is lowered, expanded with
+/// the definition's `TokenExpander`, checked against `TOKEN_LIMIT`, and the
+/// input fixups are undone on the output.
+fn macro_expand(db: &dyn AstDatabase, id: MacroCallId) -> ExpandResult<Option<Arc<tt::Subtree>>> {
+ let _p = profile::span("macro_expand");
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(id);
+ if let Some(eager) = &loc.eager {
+ return ExpandResult {
+ value: Some(eager.arg_or_expansion.clone()),
+ // FIXME: There could be errors here!
+ err: None,
+ };
+ }
+
+ let macro_arg = match db.macro_arg(id) {
+ Some(it) => it,
+ None => {
+ return ExpandResult::only_err(ExpandError::Other(
+ "Failed to lower macro args to token tree".into(),
+ ))
+ }
+ };
+
+ let expander = match db.macro_def(loc.def) {
+ Ok(it) => it,
+ // FIXME: This is weird -- we effectively report macro *definition*
+ // errors lazily, when we try to expand the macro. Instead, they should
+ // be reported at the definition site (when we construct a def map).
+ Err(err) => {
+ return ExpandResult::only_err(ExpandError::Other(
+ format!("invalid macro definition: {}", err).into(),
+ ))
+ }
+ };
+ let ExpandResult { value: mut tt, err } = expander.expand(db, id, &macro_arg.0);
+ // Set a hard limit for the expanded tt
+ let count = tt.count();
+ if TOKEN_LIMIT.check(count).is_err() {
+ return ExpandResult::only_err(ExpandError::Other(
+ format!(
+ "macro invocation exceeds token limit: produced {} tokens, limit is {}",
+ count,
+ TOKEN_LIMIT.inner(),
+ )
+ .into(),
+ ));
+ }
+
+ fixup::reverse_fixups(&mut tt, &macro_arg.1, &macro_arg.2);
+
+ ExpandResult { value: Some(Arc::new(tt)), err }
+}
+
+/// Firewall query: exposes only the error half of `macro_expand`'s result.
+fn macro_expand_error(db: &dyn AstDatabase, macro_call: MacroCallId) -> Option<ExpandError> {
+ db.macro_expand(macro_call).err
+}
+
+/// Expands a procedural macro call. For attribute proc macros the attribute's
+/// own argument token tree is passed along, with its token ids shifted past
+/// those of the item input.
+fn expand_proc_macro(db: &dyn AstDatabase, id: MacroCallId) -> ExpandResult<tt::Subtree> {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(id);
+ let macro_arg = match db.macro_arg(id) {
+ Some(it) => it,
+ None => {
+ return ExpandResult::only_err(ExpandError::Other("No arguments for proc-macro".into()))
+ }
+ };
+
+ let expander = match loc.def.kind {
+ MacroDefKind::ProcMacro(expander, ..) => expander,
+ _ => unreachable!(),
+ };
+
+ let attr_arg = match &loc.kind {
+ MacroCallKind::Attr { attr_args, .. } => {
+ let mut attr_args = attr_args.0.clone();
+ mbe::Shift::new(&macro_arg.0).shift_all(&mut attr_args);
+ Some(attr_args)
+ }
+ _ => None,
+ };
+
+ expander.expand(db, loc.krate, &macro_arg.0, attr_arg.as_ref())
+}
+
+/// Constructs the `HygieneFrame` for a file (query implementation).
+fn hygiene_frame(db: &dyn AstDatabase, file_id: HirFileId) -> Arc<HygieneFrame> {
+ Arc::new(HygieneFrame::new(db, file_id))
+}
+
+/// Returns what syntactic position (`ExpandTo`) the given call expands into.
+fn macro_expand_to(db: &dyn AstDatabase, id: MacroCallId) -> ExpandTo {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(id);
+ loc.kind.expand_to()
+}
+
+/// Parses an expansion's token tree with the parser entry point matching its
+/// `ExpandTo` position (statements, items, pattern, type, or expression).
+fn token_tree_to_syntax_node(
+ tt: &tt::Subtree,
+ expand_to: ExpandTo,
+) -> (Parse<SyntaxNode>, mbe::TokenMap) {
+ let entry_point = match expand_to {
+ ExpandTo::Statements => mbe::TopEntryPoint::MacroStmts,
+ ExpandTo::Items => mbe::TopEntryPoint::MacroItems,
+ ExpandTo::Pattern => mbe::TopEntryPoint::Pattern,
+ ExpandTo::Type => mbe::TopEntryPoint::Type,
+ ExpandTo::Expr => mbe::TopEntryPoint::Expr,
+ };
+ mbe::token_tree_to_syntax_node(tt, entry_point)
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/eager.rs b/src/tools/rust-analyzer/crates/hir-expand/src/eager.rs
new file mode 100644
index 000000000..5fd099aea
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/eager.rs
@@ -0,0 +1,266 @@
+//! Eager expansion related utils
+//!
+//! Here is a dump of a discussion from Vadim Petrochenkov about Eager Expansion and
+//! Its name resolution :
+//!
+//! > Eagerly expanded macros (and also macros eagerly expanded by eagerly expanded macros,
+//! > which actually happens in practice too!) are resolved at the location of the "root" macro
+//! > that performs the eager expansion on its arguments.
+//! > If some name cannot be resolved at the eager expansion time it's considered unresolved,
+//! > even if becomes available later (e.g. from a glob import or other macro).
+//!
+//! > Eagerly expanded macros don't add anything to the module structure of the crate and
+//! > don't build any speculative module structures, i.e. they are expanded in a "flat"
+//! > way even if tokens in them look like modules.
+//!
+//! > In other words, it kinda works for simple cases for which it was originally intended,
+//! > and we need to live with it because it's available on stable and widely relied upon.
+//!
+//!
+//! See the full discussion : <https://rust-lang.zulipchat.com/#narrow/stream/131828-t-compiler/topic/Eager.20expansion.20of.20built-in.20macros>
+use std::sync::Arc;
+
+use base_db::CrateId;
+use syntax::{ted, SyntaxNode};
+
+use crate::{
+ ast::{self, AstNode},
+ db::AstDatabase,
+ hygiene::Hygiene,
+ mod_path::ModPath,
+ EagerCallInfo, ExpandError, ExpandResult, ExpandTo, InFile, MacroCallId, MacroCallKind,
+ MacroCallLoc, MacroDefId, MacroDefKind, UnresolvedMacro,
+};
+
+#[derive(Debug)]
+/// Proof token that an error has already been reported to an `ErrorSink`;
+/// unconstructible outside this module (`_private`).
+pub struct ErrorEmitted {
+ _private: (),
+}
+
+/// Sink for expansion errors, with helpers that report the error side of
+/// `Option`/`Result`/`ExpandResult` values and convert them into
+/// `Result<_, ErrorEmitted>`.
+pub trait ErrorSink {
+ fn emit(&mut self, err: ExpandError);
+
+ /// Unwraps `opt`, emitting `error()` and returning `ErrorEmitted` on `None`.
+ fn option<T>(
+ &mut self,
+ opt: Option<T>,
+ error: impl FnOnce() -> ExpandError,
+ ) -> Result<T, ErrorEmitted> {
+ match opt {
+ Some(it) => Ok(it),
+ None => {
+ self.emit(error());
+ Err(ErrorEmitted { _private: () })
+ }
+ }
+ }
+
+ /// Like `option`, but computes the `Option` lazily.
+ fn option_with<T>(
+ &mut self,
+ opt: impl FnOnce() -> Option<T>,
+ error: impl FnOnce() -> ExpandError,
+ ) -> Result<T, ErrorEmitted> {
+ self.option(opt(), error)
+ }
+
+ /// Reports the `Err` variant of `res` to the sink, if any.
+ fn result<T>(&mut self, res: Result<T, ExpandError>) -> Result<T, ErrorEmitted> {
+ match res {
+ Ok(it) => Ok(it),
+ Err(e) => {
+ self.emit(e);
+ Err(ErrorEmitted { _private: () })
+ }
+ }
+ }
+
+ /// Unpacks an `ExpandResult`: emits its error (if any), and fails only when
+ /// there is no value at all. A value accompanied by an error still succeeds.
+ fn expand_result_option<T>(&mut self, res: ExpandResult<Option<T>>) -> Result<T, ErrorEmitted> {
+ match (res.value, res.err) {
+ (None, Some(err)) => {
+ self.emit(err);
+ Err(ErrorEmitted { _private: () })
+ }
+ (Some(value), opt_err) => {
+ if let Some(err) = opt_err {
+ self.emit(err);
+ }
+ Ok(value)
+ }
+ (None, None) => unreachable!("`ExpandResult` without value or error"),
+ }
+ }
+}
+
+// Any `FnMut(ExpandError)` closure can serve directly as an error sink.
+impl ErrorSink for &'_ mut dyn FnMut(ExpandError) {
+ fn emit(&mut self, err: ExpandError) {
+ self(err);
+ }
+}
+
+/// Eagerly expands `macro_call`, which must be a `BuiltInEager` macro
+/// (panics otherwise).
+///
+/// The argument is first interned as its own macro call so that macros nested
+/// inside it can be expanded recursively (`eager_macro_recur`); the eager
+/// expander is then run on the fully-expanded argument and the result is
+/// interned as the final call.
+///
+/// Errors are reported through `diagnostic_sink`; unresolved nested macro
+/// paths surface as `UnresolvedMacro`.
+pub fn expand_eager_macro(
+ db: &dyn AstDatabase,
+ krate: CrateId,
+ macro_call: InFile<ast::MacroCall>,
+ def: MacroDefId,
+ resolver: &dyn Fn(ModPath) -> Option<MacroDefId>,
+ diagnostic_sink: &mut dyn FnMut(ExpandError),
+) -> Result<Result<MacroCallId, ErrorEmitted>, UnresolvedMacro> {
+ let hygiene = Hygiene::new(db, macro_call.file_id);
+ let parsed_args = macro_call
+ .value
+ .token_tree()
+ .map(|tt| mbe::syntax_node_to_token_tree(tt.syntax()).0)
+ .unwrap_or_default();
+
+ let ast_map = db.ast_id_map(macro_call.file_id);
+ let call_id = InFile::new(macro_call.file_id, ast_map.ast_id(&macro_call.value));
+ let expand_to = ExpandTo::from_call_site(&macro_call.value);
+
+ // Note:
+ // When `lazy_expand` is called, its *parent* file must be already exists.
+ // Here we store an eager macro id for the argument expanded subtree here
+ // for that purpose.
+ let arg_id = db.intern_macro_call(MacroCallLoc {
+ def,
+ krate,
+ eager: Some(EagerCallInfo {
+ arg_or_expansion: Arc::new(parsed_args.clone()),
+ included_file: None,
+ }),
+ kind: MacroCallKind::FnLike { ast_id: call_id, expand_to: ExpandTo::Expr },
+ });
+
+ let parsed_args = mbe::token_tree_to_syntax_node(&parsed_args, mbe::TopEntryPoint::Expr).0;
+ let result = match eager_macro_recur(
+ db,
+ &hygiene,
+ InFile::new(arg_id.as_file(), parsed_args.syntax_node()),
+ krate,
+ resolver,
+ diagnostic_sink,
+ ) {
+ Ok(Ok(it)) => it,
+ Ok(Err(err)) => return Ok(Err(err)),
+ Err(err) => return Err(err),
+ };
+ let subtree = to_subtree(&result);
+
+ if let MacroDefKind::BuiltInEager(eager, _) = def.kind {
+ let res = eager.expand(db, arg_id, &subtree);
+ if let Some(err) = res.err {
+ diagnostic_sink(err);
+ }
+
+ let loc = MacroCallLoc {
+ def,
+ krate,
+ eager: Some(EagerCallInfo {
+ arg_or_expansion: Arc::new(res.value.subtree),
+ included_file: res.value.included_file,
+ }),
+ kind: MacroCallKind::FnLike { ast_id: call_id, expand_to },
+ };
+
+ Ok(Ok(db.intern_macro_call(loc)))
+ } else {
+ panic!("called `expand_eager_macro` on non-eager macro def {:?}", def);
+ }
+}
+
+/// Lowers a syntax node to a token tree, stripping the outermost delimiter.
+fn to_subtree(node: &SyntaxNode) -> tt::Subtree {
+ let mut subtree = mbe::syntax_node_to_token_tree(node).0;
+ subtree.delimiter = None;
+ subtree
+}
+
+/// Expands a (non-eager) macro call through the regular lazy expansion path
+/// and returns the parsed expansion node together with any expansion error.
+fn lazy_expand(
+ db: &dyn AstDatabase,
+ def: &MacroDefId,
+ macro_call: InFile<ast::MacroCall>,
+ krate: CrateId,
+) -> ExpandResult<Option<InFile<SyntaxNode>>> {
+ let ast_id = db.ast_id_map(macro_call.file_id).ast_id(&macro_call.value);
+
+ let expand_to = ExpandTo::from_call_site(&macro_call.value);
+ let id = def.as_lazy_macro(
+ db,
+ krate,
+ MacroCallKind::FnLike { ast_id: macro_call.with_value(ast_id), expand_to },
+ );
+
+ let err = db.macro_expand_error(id);
+ let value = db.parse_or_expand(id.as_file()).map(|node| InFile::new(id.as_file(), node));
+
+ ExpandResult { value, err }
+}
+
+/// Recursively expands every macro call inside `curr`, replacing each call
+/// node in a mutable copy of the tree with its expansion: eager macros go
+/// through `expand_eager_macro`, everything else through the lazy path.
+/// Returns the rewritten tree (or, when the root itself is a macro call, its
+/// expansion directly).
+fn eager_macro_recur(
+ db: &dyn AstDatabase,
+ hygiene: &Hygiene,
+ curr: InFile<SyntaxNode>,
+ krate: CrateId,
+ macro_resolver: &dyn Fn(ModPath) -> Option<MacroDefId>,
+ mut diagnostic_sink: &mut dyn FnMut(ExpandError),
+) -> Result<Result<SyntaxNode, ErrorEmitted>, UnresolvedMacro> {
+ let original = curr.value.clone_for_update();
+
+ let children = original.descendants().filter_map(ast::MacroCall::cast);
+ let mut replacements = Vec::new();
+
+ // Collect replacement
+ for child in children {
+ let def = match child.path().and_then(|path| ModPath::from_src(db, path, hygiene)) {
+ Some(path) => macro_resolver(path.clone()).ok_or_else(|| UnresolvedMacro { path })?,
+ None => {
+ diagnostic_sink(ExpandError::Other("malformed macro invocation".into()));
+ continue;
+ }
+ };
+ let insert = match def.kind {
+ MacroDefKind::BuiltInEager(..) => {
+ let id = match expand_eager_macro(
+ db,
+ krate,
+ curr.with_value(child.clone()),
+ def,
+ macro_resolver,
+ diagnostic_sink,
+ ) {
+ Ok(Ok(it)) => it,
+ Ok(Err(err)) => return Ok(Err(err)),
+ Err(err) => return Err(err),
+ };
+ db.parse_or_expand(id.as_file())
+ .expect("successful macro expansion should be parseable")
+ .clone_for_update()
+ }
+ MacroDefKind::Declarative(_)
+ | MacroDefKind::BuiltIn(..)
+ | MacroDefKind::BuiltInAttr(..)
+ | MacroDefKind::BuiltInDerive(..)
+ | MacroDefKind::ProcMacro(..) => {
+ let res = lazy_expand(db, &def, curr.with_value(child.clone()), krate);
+ let val = match diagnostic_sink.expand_result_option(res) {
+ Ok(it) => it,
+ Err(err) => return Ok(Err(err)),
+ };
+
+ // replace macro inside
+ let hygiene = Hygiene::new(db, val.file_id);
+ match eager_macro_recur(db, &hygiene, val, krate, macro_resolver, diagnostic_sink) {
+ Ok(Ok(it)) => it,
+ Ok(Err(err)) => return Ok(Err(err)),
+ Err(err) => return Err(err),
+ }
+ }
+ };
+
+ // check if the whole original syntax is replaced
+ if child.syntax() == &original {
+ return Ok(Ok(insert));
+ }
+
+ replacements.push((child, insert));
+ }
+
+ // Apply replacements innermost-first so outer edits don't invalidate them.
+ replacements.into_iter().rev().for_each(|(old, new)| ted::replace(old.syntax(), new));
+ Ok(Ok(original))
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/fixup.rs b/src/tools/rust-analyzer/crates/hir-expand/src/fixup.rs
new file mode 100644
index 000000000..9999790fa
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/fixup.rs
@@ -0,0 +1,382 @@
+//! To make attribute macros work reliably when typing, we need to take care to
+//! fix up syntax errors in the code we're passing to them.
+use std::mem;
+
+use mbe::{SyntheticToken, SyntheticTokenId, TokenMap};
+use rustc_hash::FxHashMap;
+use syntax::{
+ ast::{self, AstNode},
+ match_ast, SyntaxElement, SyntaxKind, SyntaxNode, TextRange,
+};
+use tt::Subtree;
+
+/// The result of calculating fixes for a syntax node -- a bunch of changes
+/// (appending to and replacing nodes), the information that is needed to
+/// reverse those changes afterwards, and a token map.
+#[derive(Debug)]
+pub(crate) struct SyntaxFixups {
+ pub(crate) append: FxHashMap<SyntaxElement, Vec<SyntheticToken>>,
+ pub(crate) replace: FxHashMap<SyntaxElement, Vec<SyntheticToken>>,
+ pub(crate) undo_info: SyntaxFixupUndoInfo,
+ pub(crate) token_map: TokenMap,
+ pub(crate) next_id: u32,
+}
+
+/// This is the information needed to reverse the fixups.
+#[derive(Debug, PartialEq, Eq)]
+pub struct SyntaxFixupUndoInfo {
+ original: Vec<Subtree>,
+}
+
+const EMPTY_ID: SyntheticTokenId = SyntheticTokenId(!0);
+
+pub(crate) fn fixup_syntax(node: &SyntaxNode) -> SyntaxFixups {
+ let mut append = FxHashMap::<SyntaxElement, _>::default();
+ let mut replace = FxHashMap::<SyntaxElement, _>::default();
+ let mut preorder = node.preorder();
+ let mut original = Vec::new();
+ let mut token_map = TokenMap::default();
+ let mut next_id = 0;
+ while let Some(event) = preorder.next() {
+ let node = match event {
+ syntax::WalkEvent::Enter(node) => node,
+ syntax::WalkEvent::Leave(_) => continue,
+ };
+
+ if can_handle_error(&node) && has_error_to_handle(&node) {
+ // the node contains an error node, we have to completely replace it by something valid
+ let (original_tree, new_tmap, new_next_id) =
+ mbe::syntax_node_to_token_tree_with_modifications(
+ &node,
+ mem::take(&mut token_map),
+ next_id,
+ Default::default(),
+ Default::default(),
+ );
+ token_map = new_tmap;
+ next_id = new_next_id;
+ let idx = original.len() as u32;
+ original.push(original_tree);
+ let replacement = SyntheticToken {
+ kind: SyntaxKind::IDENT,
+ text: "__ra_fixup".into(),
+ range: node.text_range(),
+ id: SyntheticTokenId(idx),
+ };
+ replace.insert(node.clone().into(), vec![replacement]);
+ preorder.skip_subtree();
+ continue;
+ }
+
+ // In some other situations, we can fix things by just appending some tokens.
+ let end_range = TextRange::empty(node.text_range().end());
+ match_ast! {
+ match node {
+ ast::FieldExpr(it) => {
+ if it.name_ref().is_none() {
+ // incomplete field access: some_expr.|
+ append.insert(node.clone().into(), vec![
+ SyntheticToken {
+ kind: SyntaxKind::IDENT,
+ text: "__ra_fixup".into(),
+ range: end_range,
+ id: EMPTY_ID,
+ },
+ ]);
+ }
+ },
+ ast::ExprStmt(it) => {
+ if it.semicolon_token().is_none() {
+ append.insert(node.clone().into(), vec![
+ SyntheticToken {
+ kind: SyntaxKind::SEMICOLON,
+ text: ";".into(),
+ range: end_range,
+ id: EMPTY_ID,
+ },
+ ]);
+ }
+ },
+ ast::LetStmt(it) => {
+ if it.semicolon_token().is_none() {
+ append.insert(node.clone().into(), vec![
+ SyntheticToken {
+ kind: SyntaxKind::SEMICOLON,
+ text: ";".into(),
+ range: end_range,
+ id: EMPTY_ID,
+ },
+ ]);
+ }
+ },
+ ast::IfExpr(it) => {
+ if it.condition().is_none() {
+ // insert placeholder token after the if token
+ let if_token = match it.if_token() {
+ Some(t) => t,
+ None => continue,
+ };
+ append.insert(if_token.into(), vec![
+ SyntheticToken {
+ kind: SyntaxKind::IDENT,
+ text: "__ra_fixup".into(),
+ range: end_range,
+ id: EMPTY_ID,
+ },
+ ]);
+ }
+ if it.then_branch().is_none() {
+ append.insert(node.clone().into(), vec![
+ SyntheticToken {
+ kind: SyntaxKind::L_CURLY,
+ text: "{".into(),
+ range: end_range,
+ id: EMPTY_ID,
+ },
+ SyntheticToken {
+ kind: SyntaxKind::R_CURLY,
+ text: "}".into(),
+ range: end_range,
+ id: EMPTY_ID,
+ },
+ ]);
+ }
+ },
+ // FIXME: foo::
+ // FIXME: for, loop, match etc.
+ _ => (),
+ }
+ }
+ }
+ SyntaxFixups {
+ append,
+ replace,
+ token_map,
+ next_id,
+ undo_info: SyntaxFixupUndoInfo { original },
+ }
+}
+
+fn has_error(node: &SyntaxNode) -> bool {
+ node.children().any(|c| c.kind() == SyntaxKind::ERROR)
+}
+
+fn can_handle_error(node: &SyntaxNode) -> bool {
+ ast::Expr::can_cast(node.kind())
+}
+
+fn has_error_to_handle(node: &SyntaxNode) -> bool {
+ has_error(node) || node.children().any(|c| !can_handle_error(&c) && has_error_to_handle(&c))
+}
+
+pub(crate) fn reverse_fixups(
+ tt: &mut Subtree,
+ token_map: &TokenMap,
+ undo_info: &SyntaxFixupUndoInfo,
+) {
+ tt.token_trees.retain(|tt| match tt {
+ tt::TokenTree::Leaf(leaf) => {
+ token_map.synthetic_token_id(leaf.id()).is_none()
+ || token_map.synthetic_token_id(leaf.id()) != Some(EMPTY_ID)
+ }
+ tt::TokenTree::Subtree(st) => st.delimiter.map_or(true, |d| {
+ token_map.synthetic_token_id(d.id).is_none()
+ || token_map.synthetic_token_id(d.id) != Some(EMPTY_ID)
+ }),
+ });
+ tt.token_trees.iter_mut().for_each(|tt| match tt {
+ tt::TokenTree::Subtree(tt) => reverse_fixups(tt, token_map, undo_info),
+ tt::TokenTree::Leaf(leaf) => {
+ if let Some(id) = token_map.synthetic_token_id(leaf.id()) {
+ let original = &undo_info.original[id.0 as usize];
+ *tt = tt::TokenTree::Subtree(original.clone());
+ }
+ }
+ });
+}
+
+#[cfg(test)]
+mod tests {
+ use expect_test::{expect, Expect};
+
+ use super::reverse_fixups;
+
+ #[track_caller]
+ fn check(ra_fixture: &str, mut expect: Expect) {
+ let parsed = syntax::SourceFile::parse(ra_fixture);
+ let fixups = super::fixup_syntax(&parsed.syntax_node());
+ let (mut tt, tmap, _) = mbe::syntax_node_to_token_tree_with_modifications(
+ &parsed.syntax_node(),
+ fixups.token_map,
+ fixups.next_id,
+ fixups.replace,
+ fixups.append,
+ );
+
+ let mut actual = tt.to_string();
+ actual.push('\n');
+
+ expect.indent(false);
+ expect.assert_eq(&actual);
+
+ // the fixed-up tree should be syntactically valid
+ let (parse, _) = mbe::token_tree_to_syntax_node(&tt, ::mbe::TopEntryPoint::MacroItems);
+ assert_eq!(
+ parse.errors(),
+ &[],
+ "parse has syntax errors. parse tree:\n{:#?}",
+ parse.syntax_node()
+ );
+
+ reverse_fixups(&mut tt, &tmap, &fixups.undo_info);
+
+ // the fixed-up + reversed version should be equivalent to the original input
+ // (but token IDs don't matter)
+ let (original_as_tt, _) = mbe::syntax_node_to_token_tree(&parsed.syntax_node());
+ assert_eq!(tt.to_string(), original_as_tt.to_string());
+ }
+
+ #[test]
+ fn incomplete_field_expr_1() {
+ check(
+ r#"
+fn foo() {
+ a.
+}
+"#,
+ expect![[r#"
+fn foo () {a . __ra_fixup}
+"#]],
+ )
+ }
+
+ #[test]
+ fn incomplete_field_expr_2() {
+ check(
+ r#"
+fn foo() {
+ a. ;
+}
+"#,
+ expect![[r#"
+fn foo () {a . __ra_fixup ;}
+"#]],
+ )
+ }
+
+ #[test]
+ fn incomplete_field_expr_3() {
+ check(
+ r#"
+fn foo() {
+ a. ;
+ bar();
+}
+"#,
+ expect![[r#"
+fn foo () {a . __ra_fixup ; bar () ;}
+"#]],
+ )
+ }
+
+ #[test]
+ fn incomplete_let() {
+ check(
+ r#"
+fn foo() {
+ let x = a
+}
+"#,
+ expect![[r#"
+fn foo () {let x = a ;}
+"#]],
+ )
+ }
+
+ #[test]
+ fn incomplete_field_expr_in_let() {
+ check(
+ r#"
+fn foo() {
+ let x = a.
+}
+"#,
+ expect![[r#"
+fn foo () {let x = a . __ra_fixup ;}
+"#]],
+ )
+ }
+
+ #[test]
+ fn field_expr_before_call() {
+ // another case that easily happens while typing
+ check(
+ r#"
+fn foo() {
+ a.b
+ bar();
+}
+"#,
+ expect![[r#"
+fn foo () {a . b ; bar () ;}
+"#]],
+ )
+ }
+
+ #[test]
+ fn extraneous_comma() {
+ check(
+ r#"
+fn foo() {
+ bar(,);
+}
+"#,
+ expect![[r#"
+fn foo () {__ra_fixup ;}
+"#]],
+ )
+ }
+
+ #[test]
+ fn fixup_if_1() {
+ check(
+ r#"
+fn foo() {
+ if a
+}
+"#,
+ expect![[r#"
+fn foo () {if a {}}
+"#]],
+ )
+ }
+
+ #[test]
+ fn fixup_if_2() {
+ check(
+ r#"
+fn foo() {
+ if
+}
+"#,
+ expect![[r#"
+fn foo () {if __ra_fixup {}}
+"#]],
+ )
+ }
+
+ #[test]
+ fn fixup_if_3() {
+ check(
+ r#"
+fn foo() {
+ if {}
+}
+"#,
+ // the {} gets parsed as the condition, I think?
+ expect![[r#"
+fn foo () {if {} {}}
+"#]],
+ )
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/hygiene.rs b/src/tools/rust-analyzer/crates/hir-expand/src/hygiene.rs
new file mode 100644
index 000000000..d60734372
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/hygiene.rs
@@ -0,0 +1,256 @@
+//! This module handles hygiene information.
+//!
+//! Specifically, `ast` + `Hygiene` allows you to create a `Name`. Note that, at
+//! this moment, this is horribly incomplete and handles only `$crate`.
+use std::sync::Arc;
+
+use base_db::CrateId;
+use db::TokenExpander;
+use either::Either;
+use mbe::Origin;
+use syntax::{
+ ast::{self, HasDocComments},
+ AstNode, SyntaxKind, SyntaxNode, TextRange, TextSize,
+};
+
+use crate::{
+ db::{self, AstDatabase},
+ fixup,
+ name::{AsName, Name},
+ HirFileId, HirFileIdRepr, InFile, MacroCallKind, MacroCallLoc, MacroDefKind, MacroFile,
+};
+
+#[derive(Clone, Debug)]
+pub struct Hygiene {
+ frames: Option<HygieneFrames>,
+}
+
+impl Hygiene {
+ pub fn new(db: &dyn AstDatabase, file_id: HirFileId) -> Hygiene {
+ Hygiene { frames: Some(HygieneFrames::new(db, file_id)) }
+ }
+
+ pub fn new_unhygienic() -> Hygiene {
+ Hygiene { frames: None }
+ }
+
+ // FIXME: this should just return name
+ pub fn name_ref_to_name(
+ &self,
+ db: &dyn AstDatabase,
+ name_ref: ast::NameRef,
+ ) -> Either<Name, CrateId> {
+ if let Some(frames) = &self.frames {
+ if name_ref.text() == "$crate" {
+ if let Some(krate) = frames.root_crate(db, name_ref.syntax()) {
+ return Either::Right(krate);
+ }
+ }
+ }
+
+ Either::Left(name_ref.as_name())
+ }
+
+ pub fn local_inner_macros(&self, db: &dyn AstDatabase, path: ast::Path) -> Option<CrateId> {
+ let mut token = path.syntax().first_token()?.text_range();
+ let frames = self.frames.as_ref()?;
+ let mut current = &frames.0;
+
+ loop {
+ let (mapped, origin) = current.expansion.as_ref()?.map_ident_up(db, token)?;
+ if origin == Origin::Def {
+ return if current.local_inner {
+ frames.root_crate(db, path.syntax())
+ } else {
+ None
+ };
+ }
+ current = current.call_site.as_ref()?;
+ token = mapped.value;
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+struct HygieneFrames(Arc<HygieneFrame>);
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct HygieneFrame {
+ expansion: Option<HygieneInfo>,
+
+    // Indicates whether this is a local inner macro
+ local_inner: bool,
+ krate: Option<CrateId>,
+
+ call_site: Option<Arc<HygieneFrame>>,
+ def_site: Option<Arc<HygieneFrame>>,
+}
+
+impl HygieneFrames {
+ fn new(db: &dyn AstDatabase, file_id: HirFileId) -> Self {
+ // Note that this intentionally avoids the `hygiene_frame` query to avoid blowing up memory
+ // usage. The query is only helpful for nested `HygieneFrame`s as it avoids redundant work.
+ HygieneFrames(Arc::new(HygieneFrame::new(db, file_id)))
+ }
+
+ fn root_crate(&self, db: &dyn AstDatabase, node: &SyntaxNode) -> Option<CrateId> {
+ let mut token = node.first_token()?.text_range();
+ let mut result = self.0.krate;
+ let mut current = self.0.clone();
+
+ while let Some((mapped, origin)) =
+ current.expansion.as_ref().and_then(|it| it.map_ident_up(db, token))
+ {
+ result = current.krate;
+
+ let site = match origin {
+ Origin::Def => &current.def_site,
+ Origin::Call => &current.call_site,
+ };
+
+ let site = match site {
+ None => break,
+ Some(it) => it,
+ };
+
+ current = site.clone();
+ token = mapped.value;
+ }
+
+ result
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct HygieneInfo {
+ file: MacroFile,
+ /// The start offset of the `macro_rules!` arguments or attribute input.
+ attr_input_or_mac_def_start: Option<InFile<TextSize>>,
+
+ macro_def: Arc<TokenExpander>,
+ macro_arg: Arc<(tt::Subtree, mbe::TokenMap, fixup::SyntaxFixupUndoInfo)>,
+ macro_arg_shift: mbe::Shift,
+ exp_map: Arc<mbe::TokenMap>,
+}
+
+impl HygieneInfo {
+ fn map_ident_up(
+ &self,
+ db: &dyn AstDatabase,
+ token: TextRange,
+ ) -> Option<(InFile<TextRange>, Origin)> {
+ let token_id = self.exp_map.token_by_range(token)?;
+ let (mut token_id, origin) = self.macro_def.map_id_up(token_id);
+
+ let loc = db.lookup_intern_macro_call(self.file.macro_call_id);
+
+ let (token_map, tt) = match &loc.kind {
+ MacroCallKind::Attr { attr_args, .. } => match self.macro_arg_shift.unshift(token_id) {
+ Some(unshifted) => {
+ token_id = unshifted;
+ (&attr_args.1, self.attr_input_or_mac_def_start?)
+ }
+ None => (
+ &self.macro_arg.1,
+ InFile::new(loc.kind.file_id(), loc.kind.arg(db)?.text_range().start()),
+ ),
+ },
+ _ => match origin {
+ mbe::Origin::Call => (
+ &self.macro_arg.1,
+ InFile::new(loc.kind.file_id(), loc.kind.arg(db)?.text_range().start()),
+ ),
+ mbe::Origin::Def => match (&*self.macro_def, &self.attr_input_or_mac_def_start) {
+ (TokenExpander::DeclarativeMacro { def_site_token_map, .. }, Some(tt)) => {
+ (def_site_token_map, *tt)
+ }
+ _ => panic!("`Origin::Def` used with non-`macro_rules!` macro"),
+ },
+ },
+ };
+
+ let range = token_map.first_range_by_token(token_id, SyntaxKind::IDENT)?;
+ Some((tt.with_value(range + tt.value), origin))
+ }
+}
+
+fn make_hygiene_info(
+ db: &dyn AstDatabase,
+ macro_file: MacroFile,
+ loc: &MacroCallLoc,
+) -> Option<HygieneInfo> {
+ let def = loc.def.ast_id().left().and_then(|id| {
+ let def_tt = match id.to_node(db) {
+ ast::Macro::MacroRules(mac) => mac.token_tree()?,
+ ast::Macro::MacroDef(mac) => mac.body()?,
+ };
+ Some(InFile::new(id.file_id, def_tt))
+ });
+ let attr_input_or_mac_def = def.or_else(|| match loc.kind {
+ MacroCallKind::Attr { ast_id, invoc_attr_index, .. } => {
+ let tt = ast_id
+ .to_node(db)
+ .doc_comments_and_attrs()
+ .nth(invoc_attr_index as usize)
+ .and_then(Either::left)?
+ .token_tree()?;
+ Some(InFile::new(ast_id.file_id, tt))
+ }
+ _ => None,
+ });
+
+ let macro_def = db.macro_def(loc.def).ok()?;
+ let (_, exp_map) = db.parse_macro_expansion(macro_file).value?;
+ let macro_arg = db.macro_arg(macro_file.macro_call_id)?;
+
+ Some(HygieneInfo {
+ file: macro_file,
+ attr_input_or_mac_def_start: attr_input_or_mac_def
+ .map(|it| it.map(|tt| tt.syntax().text_range().start())),
+ macro_arg_shift: mbe::Shift::new(&macro_arg.0),
+ macro_arg,
+ macro_def,
+ exp_map,
+ })
+}
+
+impl HygieneFrame {
+ pub(crate) fn new(db: &dyn AstDatabase, file_id: HirFileId) -> HygieneFrame {
+ let (info, krate, local_inner) = match file_id.0 {
+ HirFileIdRepr::FileId(_) => (None, None, false),
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ let info =
+ make_hygiene_info(db, macro_file, &loc).map(|info| (loc.kind.file_id(), info));
+ match loc.def.kind {
+ MacroDefKind::Declarative(_) => {
+ (info, Some(loc.def.krate), loc.def.local_inner)
+ }
+ MacroDefKind::BuiltIn(..) => (info, Some(loc.def.krate), false),
+ MacroDefKind::BuiltInAttr(..) => (info, None, false),
+ MacroDefKind::BuiltInDerive(..) => (info, None, false),
+ MacroDefKind::BuiltInEager(..) => (info, None, false),
+ MacroDefKind::ProcMacro(..) => (info, None, false),
+ }
+ }
+ };
+
+ let (calling_file, info) = match info {
+ None => {
+ return HygieneFrame {
+ expansion: None,
+ local_inner,
+ krate,
+ call_site: None,
+ def_site: None,
+ };
+ }
+ Some(it) => it,
+ };
+
+ let def_site = info.attr_input_or_mac_def_start.map(|it| db.hygiene_frame(it.file_id));
+ let call_site = Some(db.hygiene_frame(calling_file));
+
+ HygieneFrame { expansion: Some(info), local_inner, krate, call_site, def_site }
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/lib.rs b/src/tools/rust-analyzer/crates/hir-expand/src/lib.rs
new file mode 100644
index 000000000..252293090
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/lib.rs
@@ -0,0 +1,1000 @@
+//! `hir_expand` deals with macro expansion.
+//!
+//! Specifically, it implements a concept of `MacroFile` -- a file whose syntax
+//! tree originates not from the text of some `FileId`, but from some macro
+//! expansion.
+
+#![warn(rust_2018_idioms, unused_lifetimes, semicolon_in_expressions_from_macros)]
+
+pub mod db;
+pub mod ast_id_map;
+pub mod name;
+pub mod hygiene;
+pub mod builtin_attr_macro;
+pub mod builtin_derive_macro;
+pub mod builtin_fn_macro;
+pub mod proc_macro;
+pub mod quote;
+pub mod eager;
+pub mod mod_path;
+mod fixup;
+
+pub use mbe::{Origin, ValueResult};
+
+use std::{fmt, hash::Hash, iter, sync::Arc};
+
+use base_db::{impl_intern_key, salsa, CrateId, FileId, FileRange, ProcMacroKind};
+use either::Either;
+use syntax::{
+ algo::{self, skip_trivia_token},
+ ast::{self, AstNode, HasDocComments},
+ Direction, SyntaxNode, SyntaxToken,
+};
+
+use crate::{
+ ast_id_map::FileAstId,
+ builtin_attr_macro::BuiltinAttrExpander,
+ builtin_derive_macro::BuiltinDeriveExpander,
+ builtin_fn_macro::{BuiltinFnLikeExpander, EagerExpander},
+ db::TokenExpander,
+ mod_path::ModPath,
+ proc_macro::ProcMacroExpander,
+};
+
+pub type ExpandResult<T> = ValueResult<T, ExpandError>;
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum ExpandError {
+ UnresolvedProcMacro(CrateId),
+ Mbe(mbe::ExpandError),
+ Other(Box<str>),
+}
+
+impl From<mbe::ExpandError> for ExpandError {
+ fn from(mbe: mbe::ExpandError) -> Self {
+ Self::Mbe(mbe)
+ }
+}
+
+impl fmt::Display for ExpandError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ExpandError::UnresolvedProcMacro(_) => f.write_str("unresolved proc-macro"),
+ ExpandError::Mbe(it) => it.fmt(f),
+ ExpandError::Other(it) => f.write_str(it),
+ }
+ }
+}
+
+/// Input to the analyzer is a set of files, where each file is identified by
+/// `FileId` and contains source code. However, another source of source code in
+/// Rust are macros: each macro can be thought of as producing a "temporary
+/// file". To assign an id to such a file, we use the id of the macro call that
+/// produced the file. So, a `HirFileId` is either a `FileId` (source code
+/// written by user), or a `MacroCallId` (source code produced by macro).
+///
+/// What is a `MacroCallId`? Simplifying, it's a `HirFileId` of a file
+/// containing the call plus the offset of the macro call in the file. Note that
+/// this is a recursive definition! However, the size_of of `HirFileId` is
+/// finite (because everything bottoms out at the real `FileId`) and small
+/// (`MacroCallId` uses the location interning. You can check details here:
+/// <https://en.wikipedia.org/wiki/String_interning>).
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct HirFileId(HirFileIdRepr);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+enum HirFileIdRepr {
+ FileId(FileId),
+ MacroFile(MacroFile),
+}
+
+impl From<FileId> for HirFileId {
+ fn from(id: FileId) -> Self {
+ HirFileId(HirFileIdRepr::FileId(id))
+ }
+}
+
+impl From<MacroFile> for HirFileId {
+ fn from(id: MacroFile) -> Self {
+ HirFileId(HirFileIdRepr::MacroFile(id))
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct MacroFile {
+ pub macro_call_id: MacroCallId,
+}
+
+/// `MacroCallId` identifies a particular macro invocation, like
+/// `println!("Hello, {}", world)`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct MacroCallId(salsa::InternId);
+impl_intern_key!(MacroCallId);
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct MacroCallLoc {
+ pub def: MacroDefId,
+ pub(crate) krate: CrateId,
+ eager: Option<EagerCallInfo>,
+ pub kind: MacroCallKind,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct MacroDefId {
+ pub krate: CrateId,
+ pub kind: MacroDefKind,
+ pub local_inner: bool,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum MacroDefKind {
+ Declarative(AstId<ast::Macro>),
+ BuiltIn(BuiltinFnLikeExpander, AstId<ast::Macro>),
+ // FIXME: maybe just Builtin and rename BuiltinFnLikeExpander to BuiltinExpander
+ BuiltInAttr(BuiltinAttrExpander, AstId<ast::Macro>),
+ BuiltInDerive(BuiltinDeriveExpander, AstId<ast::Macro>),
+ BuiltInEager(EagerExpander, AstId<ast::Macro>),
+ ProcMacro(ProcMacroExpander, ProcMacroKind, AstId<ast::Fn>),
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct EagerCallInfo {
+ /// NOTE: This can be *either* the expansion result, *or* the argument to the eager macro!
+ arg_or_expansion: Arc<tt::Subtree>,
+ included_file: Option<FileId>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub enum MacroCallKind {
+ FnLike {
+ ast_id: AstId<ast::MacroCall>,
+ expand_to: ExpandTo,
+ },
+ Derive {
+ ast_id: AstId<ast::Adt>,
+ /// Syntactical index of the invoking `#[derive]` attribute.
+ ///
+ /// Outer attributes are counted first, then inner attributes. This does not support
+ /// out-of-line modules, which may have attributes spread across 2 files!
+ derive_attr_index: u32,
+ /// Index of the derive macro in the derive attribute
+ derive_index: u32,
+ },
+ Attr {
+ ast_id: AstId<ast::Item>,
+ attr_args: Arc<(tt::Subtree, mbe::TokenMap)>,
+ /// Syntactical index of the invoking `#[attribute]`.
+ ///
+ /// Outer attributes are counted first, then inner attributes. This does not support
+ /// out-of-line modules, which may have attributes spread across 2 files!
+ invoc_attr_index: u32,
+ /// Whether this attribute is the `#[derive]` attribute.
+ is_derive: bool,
+ },
+}
+
+impl HirFileId {
+    /// For macro-expansion files, returns the original source file the
+    /// expansion originated from.
+ pub fn original_file(self, db: &dyn db::AstDatabase) -> FileId {
+ let mut file_id = self;
+ loop {
+ match file_id.0 {
+ HirFileIdRepr::FileId(id) => break id,
+ HirFileIdRepr::MacroFile(MacroFile { macro_call_id }) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_call_id);
+ file_id = match loc.eager {
+ Some(EagerCallInfo { included_file: Some(file), .. }) => file.into(),
+ _ => loc.kind.file_id(),
+ };
+ }
+ }
+ }
+ }
+
+ pub fn expansion_level(self, db: &dyn db::AstDatabase) -> u32 {
+ let mut level = 0;
+ let mut curr = self;
+ while let HirFileIdRepr::MacroFile(macro_file) = curr.0 {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+
+ level += 1;
+ curr = loc.kind.file_id();
+ }
+ level
+ }
+
+ /// If this is a macro call, returns the syntax node of the call.
+ pub fn call_node(self, db: &dyn db::AstDatabase) -> Option<InFile<SyntaxNode>> {
+ match self.0 {
+ HirFileIdRepr::FileId(_) => None,
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ Some(loc.kind.to_node(db))
+ }
+ }
+ }
+
+ /// If this is a macro call, returns the syntax node of the very first macro call this file resides in.
+ pub fn original_call_node(self, db: &dyn db::AstDatabase) -> Option<(FileId, SyntaxNode)> {
+ let mut call = match self.0 {
+ HirFileIdRepr::FileId(_) => return None,
+ HirFileIdRepr::MacroFile(MacroFile { macro_call_id }) => {
+ db.lookup_intern_macro_call(macro_call_id).kind.to_node(db)
+ }
+ };
+ loop {
+ match call.file_id.0 {
+ HirFileIdRepr::FileId(file_id) => break Some((file_id, call.value)),
+ HirFileIdRepr::MacroFile(MacroFile { macro_call_id }) => {
+ call = db.lookup_intern_macro_call(macro_call_id).kind.to_node(db);
+ }
+ }
+ }
+ }
+
+ /// Return expansion information if it is a macro-expansion file
+ pub fn expansion_info(self, db: &dyn db::AstDatabase) -> Option<ExpansionInfo> {
+ match self.0 {
+ HirFileIdRepr::FileId(_) => None,
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+
+ let arg_tt = loc.kind.arg(db)?;
+
+ let macro_def = db.macro_def(loc.def).ok()?;
+ let (parse, exp_map) = db.parse_macro_expansion(macro_file).value?;
+ let macro_arg = db.macro_arg(macro_file.macro_call_id)?;
+
+ let def = loc.def.ast_id().left().and_then(|id| {
+ let def_tt = match id.to_node(db) {
+ ast::Macro::MacroRules(mac) => mac.token_tree()?,
+ ast::Macro::MacroDef(_)
+ if matches!(*macro_def, TokenExpander::BuiltinAttr(_)) =>
+ {
+ return None
+ }
+ ast::Macro::MacroDef(mac) => mac.body()?,
+ };
+ Some(InFile::new(id.file_id, def_tt))
+ });
+ let attr_input_or_mac_def = def.or_else(|| match loc.kind {
+ MacroCallKind::Attr { ast_id, invoc_attr_index, .. } => {
+ let tt = ast_id
+ .to_node(db)
+ .doc_comments_and_attrs()
+ .nth(invoc_attr_index as usize)
+ .and_then(Either::left)?
+ .token_tree()?;
+ Some(InFile::new(ast_id.file_id, tt))
+ }
+ _ => None,
+ });
+
+ Some(ExpansionInfo {
+ expanded: InFile::new(self, parse.syntax_node()),
+ arg: InFile::new(loc.kind.file_id(), arg_tt),
+ attr_input_or_mac_def,
+ macro_arg_shift: mbe::Shift::new(&macro_arg.0),
+ macro_arg,
+ macro_def,
+ exp_map,
+ })
+ }
+ }
+ }
+
+    /// Indicates whether this is a macro file generated for a builtin derive
+ pub fn is_builtin_derive(&self, db: &dyn db::AstDatabase) -> Option<InFile<ast::Attr>> {
+ match self.0 {
+ HirFileIdRepr::FileId(_) => None,
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ let attr = match loc.def.kind {
+ MacroDefKind::BuiltInDerive(..) => loc.kind.to_node(db),
+ _ => return None,
+ };
+ Some(attr.with_value(ast::Attr::cast(attr.value.clone())?))
+ }
+ }
+ }
+
+ pub fn is_custom_derive(&self, db: &dyn db::AstDatabase) -> bool {
+ match self.0 {
+ HirFileIdRepr::FileId(_) => false,
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ matches!(loc.def.kind, MacroDefKind::ProcMacro(_, ProcMacroKind::CustomDerive, _))
+ }
+ }
+ }
+
+ /// Return whether this file is an include macro
+ pub fn is_include_macro(&self, db: &dyn db::AstDatabase) -> bool {
+ match self.0 {
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ matches!(loc.eager, Some(EagerCallInfo { included_file: Some(_), .. }))
+ }
+ _ => false,
+ }
+ }
+
+ /// Return whether this file is an attr macro
+ pub fn is_attr_macro(&self, db: &dyn db::AstDatabase) -> bool {
+ match self.0 {
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ matches!(loc.kind, MacroCallKind::Attr { .. })
+ }
+ _ => false,
+ }
+ }
+
+ /// Return whether this file is the pseudo expansion of the derive attribute.
+ /// See [`crate::builtin_attr_macro::derive_attr_expand`].
+ pub fn is_derive_attr_pseudo_expansion(&self, db: &dyn db::AstDatabase) -> bool {
+ match self.0 {
+ HirFileIdRepr::MacroFile(macro_file) => {
+ let loc: MacroCallLoc = db.lookup_intern_macro_call(macro_file.macro_call_id);
+ matches!(loc.kind, MacroCallKind::Attr { is_derive: true, .. })
+ }
+ _ => false,
+ }
+ }
+
+ pub fn is_macro(self) -> bool {
+ matches!(self.0, HirFileIdRepr::MacroFile(_))
+ }
+
+ pub fn macro_file(self) -> Option<MacroFile> {
+ match self.0 {
+ HirFileIdRepr::FileId(_) => None,
+ HirFileIdRepr::MacroFile(m) => Some(m),
+ }
+ }
+}
+
+impl MacroDefId {
+ pub fn as_lazy_macro(
+ self,
+ db: &dyn db::AstDatabase,
+ krate: CrateId,
+ kind: MacroCallKind,
+ ) -> MacroCallId {
+ db.intern_macro_call(MacroCallLoc { def: self, krate, eager: None, kind })
+ }
+
+ pub fn ast_id(&self) -> Either<AstId<ast::Macro>, AstId<ast::Fn>> {
+ let id = match self.kind {
+ MacroDefKind::ProcMacro(.., id) => return Either::Right(id),
+ MacroDefKind::Declarative(id)
+ | MacroDefKind::BuiltIn(_, id)
+ | MacroDefKind::BuiltInAttr(_, id)
+ | MacroDefKind::BuiltInDerive(_, id)
+ | MacroDefKind::BuiltInEager(_, id) => id,
+ };
+ Either::Left(id)
+ }
+
+ pub fn is_proc_macro(&self) -> bool {
+ matches!(self.kind, MacroDefKind::ProcMacro(..))
+ }
+
+ pub fn is_attribute(&self) -> bool {
+ matches!(
+ self.kind,
+ MacroDefKind::BuiltInAttr(..) | MacroDefKind::ProcMacro(_, ProcMacroKind::Attr, _)
+ )
+ }
+}
+
+// FIXME: attribute indices do not account for `cfg_attr`, which means that we'll strip the whole
+// `cfg_attr` instead of just one of the attributes it expands to
+
+impl MacroCallKind {
+ /// Returns the file containing the macro invocation.
+ fn file_id(&self) -> HirFileId {
+ match *self {
+ MacroCallKind::FnLike { ast_id: InFile { file_id, .. }, .. }
+ | MacroCallKind::Derive { ast_id: InFile { file_id, .. }, .. }
+ | MacroCallKind::Attr { ast_id: InFile { file_id, .. }, .. } => file_id,
+ }
+ }
+
+ pub fn to_node(&self, db: &dyn db::AstDatabase) -> InFile<SyntaxNode> {
+ match self {
+ MacroCallKind::FnLike { ast_id, .. } => {
+ ast_id.with_value(ast_id.to_node(db).syntax().clone())
+ }
+ MacroCallKind::Derive { ast_id, derive_attr_index, .. } => {
+ // FIXME: handle `cfg_attr`
+ ast_id.with_value(ast_id.to_node(db)).map(|it| {
+ it.doc_comments_and_attrs()
+ .nth(*derive_attr_index as usize)
+ .and_then(|it| match it {
+ Either::Left(attr) => Some(attr.syntax().clone()),
+ Either::Right(_) => None,
+ })
+ .unwrap_or_else(|| it.syntax().clone())
+ })
+ }
+ MacroCallKind::Attr { ast_id, is_derive: true, invoc_attr_index, .. } => {
+ // FIXME: handle `cfg_attr`
+ ast_id.with_value(ast_id.to_node(db)).map(|it| {
+ it.doc_comments_and_attrs()
+ .nth(*invoc_attr_index as usize)
+ .and_then(|it| match it {
+ Either::Left(attr) => Some(attr.syntax().clone()),
+ Either::Right(_) => None,
+ })
+ .unwrap_or_else(|| it.syntax().clone())
+ })
+ }
+ MacroCallKind::Attr { ast_id, .. } => {
+ ast_id.with_value(ast_id.to_node(db).syntax().clone())
+ }
+ }
+ }
+
+ /// Returns the original file range that best describes the location of this macro call.
+ ///
+ /// Unlike `MacroCallKind::original_call_range`, this also spans the item of attributes and derives.
+ pub fn original_call_range_with_body(self, db: &dyn db::AstDatabase) -> FileRange {
+ let mut kind = self;
+ let file_id = loop {
+ match kind.file_id().0 {
+ HirFileIdRepr::MacroFile(file) => {
+ kind = db.lookup_intern_macro_call(file.macro_call_id).kind;
+ }
+ HirFileIdRepr::FileId(file_id) => break file_id,
+ }
+ };
+
+ let range = match kind {
+ MacroCallKind::FnLike { ast_id, .. } => ast_id.to_node(db).syntax().text_range(),
+ MacroCallKind::Derive { ast_id, .. } => ast_id.to_node(db).syntax().text_range(),
+ MacroCallKind::Attr { ast_id, .. } => ast_id.to_node(db).syntax().text_range(),
+ };
+
+ FileRange { range, file_id }
+ }
+
+ /// Returns the original file range that best describes the location of this macro call.
+ ///
+ /// Here we try to roughly match what rustc does to improve diagnostics: fn-like macros
+ /// get the whole `ast::MacroCall`, attribute macros get the attribute's range, and derives
+ /// get only the specific derive that is being referred to.
+ pub fn original_call_range(self, db: &dyn db::AstDatabase) -> FileRange {
+ let mut kind = self;
+ let file_id = loop {
+ match kind.file_id().0 {
+ HirFileIdRepr::MacroFile(file) => {
+ kind = db.lookup_intern_macro_call(file.macro_call_id).kind;
+ }
+ HirFileIdRepr::FileId(file_id) => break file_id,
+ }
+ };
+
+ let range = match kind {
+ MacroCallKind::FnLike { ast_id, .. } => ast_id.to_node(db).syntax().text_range(),
+ MacroCallKind::Derive { ast_id, derive_attr_index, .. } => {
+ // FIXME: should be the range of the macro name, not the whole derive
+ ast_id
+ .to_node(db)
+ .doc_comments_and_attrs()
+ .nth(derive_attr_index as usize)
+ .expect("missing derive")
+ .expect_left("derive is a doc comment?")
+ .syntax()
+ .text_range()
+ }
+ MacroCallKind::Attr { ast_id, invoc_attr_index, .. } => ast_id
+ .to_node(db)
+ .doc_comments_and_attrs()
+ .nth(invoc_attr_index as usize)
+ .expect("missing attribute")
+ .expect_left("attribute macro is a doc comment?")
+ .syntax()
+ .text_range(),
+ };
+
+ FileRange { range, file_id }
+ }
+
+ fn arg(&self, db: &dyn db::AstDatabase) -> Option<SyntaxNode> {
+ match self {
+ MacroCallKind::FnLike { ast_id, .. } => {
+ Some(ast_id.to_node(db).token_tree()?.syntax().clone())
+ }
+ MacroCallKind::Derive { ast_id, .. } => Some(ast_id.to_node(db).syntax().clone()),
+ MacroCallKind::Attr { ast_id, .. } => Some(ast_id.to_node(db).syntax().clone()),
+ }
+ }
+
+ fn expand_to(&self) -> ExpandTo {
+ match self {
+ MacroCallKind::FnLike { expand_to, .. } => *expand_to,
+ MacroCallKind::Derive { .. } => ExpandTo::Items,
+ MacroCallKind::Attr { is_derive: true, .. } => ExpandTo::Statements,
+ MacroCallKind::Attr { .. } => ExpandTo::Items, // is this always correct?
+ }
+ }
+}
+
+impl MacroCallId {
+ pub fn as_file(self) -> HirFileId {
+ MacroFile { macro_call_id: self }.into()
+ }
+}
+
/// ExpansionInfo mainly describes how to map text range between src and expanded macro
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExpansionInfo {
    // The syntax tree produced by the expansion.
    expanded: InFile<SyntaxNode>,
    /// The argument TokenTree or item for attributes
    arg: InFile<SyntaxNode>,
    /// The `macro_rules!` or attribute input.
    attr_input_or_mac_def: Option<InFile<ast::TokenTree>>,

    macro_def: Arc<TokenExpander>,
    // Lowered macro argument: token tree, its range<->id map, and fixup undo info.
    macro_arg: Arc<(tt::Subtree, mbe::TokenMap, fixup::SyntaxFixupUndoInfo)>,
    /// A shift built from `macro_arg`'s subtree, relevant for attributes as the item is the macro arg
    /// and as such we need to shift tokens if they are part of an attributes input instead of their item.
    macro_arg_shift: mbe::Shift,
    // Range<->id map for the expansion output.
    exp_map: Arc<mbe::TokenMap>,
}
+
impl ExpansionInfo {
    /// The expanded syntax tree.
    pub fn expanded(&self) -> InFile<SyntaxNode> {
        self.expanded.clone()
    }

    /// The node invoking the macro, i.e. the parent of the macro's input argument.
    pub fn call_node(&self) -> Option<InFile<SyntaxNode>> {
        Some(self.arg.with_value(self.arg.value.parent()?))
    }

    /// Map a token down from macro input into the macro expansion.
    ///
    /// The inner workings of this function differ slightly depending on the type of macro we are dealing with:
    /// - declarative:
    ///   For declarative macros, we need to accommodate for the macro definition site (which acts as a
    ///   second unchanging input), as tokens can be mapped in and out of it.
    ///   To do this we shift all ids in the expansion by the maximum id of the definition site giving us an easy
    ///   way to map all the tokens.
    /// - attribute:
    ///   Attributes have two different inputs, the input tokentree in the attribute node and the item
    ///   the attribute is annotating. Similarly as for declarative macros we need to do a shift here
    ///   as well. Currently this is done by shifting the attribute input by the maximum id of the item.
    /// - function-like and derives:
    ///   Both of these only have one simple call site input so no special handling is required here.
    pub fn map_token_down(
        &self,
        db: &dyn db::AstDatabase,
        item: Option<ast::Item>,
        token: InFile<&SyntaxToken>,
    ) -> Option<impl Iterator<Item = InFile<SyntaxToken>> + '_> {
        assert_eq!(token.file_id, self.arg.file_id);
        let token_id_in_attr_input = if let Some(item) = item {
            // check if we are mapping down in an attribute input
            // this is a special case as attributes can have two inputs
            let call_id = self.expanded.file_id.macro_file()?.macro_call_id;
            let loc = db.lookup_intern_macro_call(call_id);

            let token_range = token.value.text_range();
            match &loc.kind {
                MacroCallKind::Attr { attr_args, invoc_attr_index, is_derive, .. } => {
                    // Locate the invoking attribute on the item.
                    let attr = item
                        .doc_comments_and_attrs()
                        .nth(*invoc_attr_index as usize)
                        .and_then(Either::left)?;
                    match attr.token_tree() {
                        Some(token_tree)
                            if token_tree.syntax().text_range().contains_range(token_range) =>
                        {
                            let attr_input_start =
                                token_tree.left_delimiter_token()?.text_range().start();
                            let relative_range =
                                token.value.text_range().checked_sub(attr_input_start)?;
                            // shift by the item's tree's max id
                            let token_id = attr_args.1.token_by_range(relative_range)?;
                            let token_id = if *is_derive {
                                // we do not shift for `#[derive]`, as we only need to downmap the derive attribute tokens
                                token_id
                            } else {
                                self.macro_arg_shift.shift(token_id)
                            };
                            Some(token_id)
                        }
                        _ => None,
                    }
                }
                _ => None,
            }
        } else {
            None
        };

        let token_id = match token_id_in_attr_input {
            Some(token_id) => token_id,
            // the token is not inside an attribute's input so do the lookup in the macro_arg as usual
            None => {
                let relative_range =
                    token.value.text_range().checked_sub(self.arg.value.text_range().start())?;
                let token_id = self.macro_arg.1.token_by_range(relative_range)?;
                // conditionally shift the id by a declarative macro definition
                self.macro_def.map_id_down(token_id)
            }
        };

        // Resolve the id to all matching tokens in the expansion output.
        let tokens = self
            .exp_map
            .ranges_by_token(token_id, token.value.kind())
            .flat_map(move |range| self.expanded.value.covering_element(range).into_token());

        Some(tokens.map(move |token| self.expanded.with_value(token)))
    }

    /// Map a token up out of the expansion it resides in into the arguments of the macro call of the expansion.
    pub fn map_token_up(
        &self,
        db: &dyn db::AstDatabase,
        token: InFile<&SyntaxToken>,
    ) -> Option<(InFile<SyntaxToken>, Origin)> {
        // Fetch the id through its text range,
        let token_id = self.exp_map.token_by_range(token.value.text_range())?;
        // conditionally unshifting the id to accommodate for macro-rules def site
        let (mut token_id, origin) = self.macro_def.map_id_up(token_id);

        let call_id = self.expanded.file_id.macro_file()?.macro_call_id;
        let loc = db.lookup_intern_macro_call(call_id);

        // Attributes are a bit special for us, they have two inputs, the input tokentree and the annotated item.
        let (token_map, tt) = match &loc.kind {
            MacroCallKind::Attr { attr_args, is_derive: true, .. } => {
                (&attr_args.1, self.attr_input_or_mac_def.clone()?.syntax().cloned())
            }
            MacroCallKind::Attr { attr_args, .. } => {
                // try unshifting the token id, if unshifting fails, the token resides in the non-item attribute input
                // note that the `TokenExpander::map_id_up` earlier only unshifts for declarative macros, so we don't double unshift with this
                match self.macro_arg_shift.unshift(token_id) {
                    Some(unshifted) => {
                        token_id = unshifted;
                        (&attr_args.1, self.attr_input_or_mac_def.clone()?.syntax().cloned())
                    }
                    None => (&self.macro_arg.1, self.arg.clone()),
                }
            }
            _ => match origin {
                mbe::Origin::Call => (&self.macro_arg.1, self.arg.clone()),
                mbe::Origin::Def => match (&*self.macro_def, &self.attr_input_or_mac_def) {
                    (TokenExpander::DeclarativeMacro { def_site_token_map, .. }, Some(tt)) => {
                        (def_site_token_map, tt.syntax().cloned())
                    }
                    _ => panic!("`Origin::Def` used with non-`macro_rules!` macro"),
                },
            },
        };

        // Map the id back to a range in the chosen input and pick the covering token.
        let range = token_map.first_range_by_token(token_id, token.value.kind())?;
        let token =
            tt.value.covering_element(range + tt.value.text_range().start()).into_token()?;
        Some((tt.with_value(token), origin))
    }
}
+
+/// `AstId` points to an AST node in any file.
+///
+/// It is stable across reparses, and can be used as salsa key/value.
+pub type AstId<N> = InFile<FileAstId<N>>;
+
+impl<N: AstNode> AstId<N> {
+ pub fn to_node(&self, db: &dyn db::AstDatabase) -> N {
+ let root = db.parse_or_expand(self.file_id).unwrap();
+ db.ast_id_map(self.file_id).get(self.value).to_node(&root)
+ }
+}
+
/// `InFile<T>` stores a value of `T` inside a particular file/syntax tree.
///
/// Typical usages are:
///
/// * `InFile<SyntaxNode>` -- syntax node in a file
/// * `InFile<ast::FnDef>` -- ast node in a file
/// * `InFile<TextSize>` -- offset in a file
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub struct InFile<T> {
    pub file_id: HirFileId,
    pub value: T,
}
+
+impl<T> InFile<T> {
+ pub fn new(file_id: HirFileId, value: T) -> InFile<T> {
+ InFile { file_id, value }
+ }
+
+ pub fn with_value<U>(&self, value: U) -> InFile<U> {
+ InFile::new(self.file_id, value)
+ }
+
+ pub fn map<F: FnOnce(T) -> U, U>(self, f: F) -> InFile<U> {
+ InFile::new(self.file_id, f(self.value))
+ }
+
+ pub fn as_ref(&self) -> InFile<&T> {
+ self.with_value(&self.value)
+ }
+
+ pub fn file_syntax(&self, db: &dyn db::AstDatabase) -> SyntaxNode {
+ db.parse_or_expand(self.file_id).expect("source created from invalid file")
+ }
+}
+
+impl<T: Clone> InFile<&T> {
+ pub fn cloned(&self) -> InFile<T> {
+ self.with_value(self.value.clone())
+ }
+}
+
+impl<T> InFile<Option<T>> {
+ pub fn transpose(self) -> Option<InFile<T>> {
+ let value = self.value?;
+ Some(InFile::new(self.file_id, value))
+ }
+}
+
impl<'a> InFile<&'a SyntaxNode> {
    /// Walks up the ancestor chain, crossing into the macro call site whenever
    /// the current file is a macro expansion.
    pub fn ancestors_with_macros(
        self,
        db: &dyn db::AstDatabase,
    ) -> impl Iterator<Item = InFile<SyntaxNode>> + Clone + '_ {
        iter::successors(Some(self.cloned()), move |node| match node.value.parent() {
            Some(parent) => Some(node.with_value(parent)),
            None => node.file_id.call_node(db),
        })
    }

    /// Skips the attributed item that caused the macro invocation we are climbing up
    pub fn ancestors_with_macros_skip_attr_item(
        self,
        db: &dyn db::AstDatabase,
    ) -> impl Iterator<Item = InFile<SyntaxNode>> + '_ {
        let succ = move |node: &InFile<SyntaxNode>| match node.value.parent() {
            Some(parent) => Some(node.with_value(parent)),
            None => {
                let parent_node = node.file_id.call_node(db)?;
                if node.file_id.is_attr_macro(db) {
                    // macro call was an attributed item, skip it
                    // FIXME: does this fail if this is a direct expansion of another macro?
                    parent_node.map(|node| node.parent()).transpose()
                } else {
                    Some(parent_node)
                }
            }
        };
        iter::successors(succ(&self.cloned()), succ)
    }

    /// Falls back to the macro call range if the node cannot be mapped up fully.
    ///
    /// For attributes and derives, this will point back to the attribute only.
    /// For the entire item, use `InFile::original_file_range_full`.
    pub fn original_file_range(self, db: &dyn db::AstDatabase) -> FileRange {
        match self.file_id.0 {
            HirFileIdRepr::FileId(file_id) => FileRange { file_id, range: self.value.text_range() },
            HirFileIdRepr::MacroFile(mac_file) => {
                if let Some(res) = self.original_file_range_opt(db) {
                    return res;
                }
                // Fall back to whole macro call.
                let loc = db.lookup_intern_macro_call(mac_file.macro_call_id);
                loc.kind.original_call_range(db)
            }
        }
    }

    /// Attempts to map the syntax node back up its macro calls.
    pub fn original_file_range_opt(self, db: &dyn db::AstDatabase) -> Option<FileRange> {
        match ascend_node_border_tokens(db, self) {
            Some(InFile { file_id, value: (first, last) }) => {
                let original_file = file_id.original_file(db);
                let range = first.text_range().cover(last.text_range());
                // Border tokens only ascended partway; give up rather than report
                // a range in an intermediate macro file.
                if file_id != original_file.into() {
                    tracing::error!("Failed mapping up more for {:?}", range);
                    return None;
                }
                Some(FileRange { file_id: original_file, range })
            }
            _ if !self.file_id.is_macro() => Some(FileRange {
                file_id: self.file_id.original_file(db),
                range: self.value.text_range(),
            }),
            _ => None,
        }
    }
}
+
impl InFile<SyntaxToken> {
    /// Maps this token one expansion level up, into the macro call's input.
    pub fn upmap(self, db: &dyn db::AstDatabase) -> Option<InFile<SyntaxToken>> {
        let expansion = self.file_id.expansion_info(db)?;
        expansion.map_token_up(db, self.as_ref()).map(|(it, _)| it)
    }

    /// Falls back to the macro call range if the node cannot be mapped up fully.
    pub fn original_file_range(self, db: &dyn db::AstDatabase) -> FileRange {
        match self.file_id.0 {
            HirFileIdRepr::FileId(file_id) => FileRange { file_id, range: self.value.text_range() },
            HirFileIdRepr::MacroFile(mac_file) => {
                if let Some(res) = self.original_file_range_opt(db) {
                    return res;
                }
                // Fall back to whole macro call.
                let loc = db.lookup_intern_macro_call(mac_file.macro_call_id);
                loc.kind.original_call_range(db)
            }
        }
    }

    /// Attempts to map the syntax node back up its macro calls.
    pub fn original_file_range_opt(self, db: &dyn db::AstDatabase) -> Option<FileRange> {
        match self.file_id.0 {
            HirFileIdRepr::FileId(file_id) => {
                Some(FileRange { file_id, range: self.value.text_range() })
            }
            HirFileIdRepr::MacroFile(_) => {
                let expansion = self.file_id.expansion_info(db)?;
                let InFile { file_id, value } = ascend_call_token(db, &expansion, self)?;
                let original_file = file_id.original_file(db);
                // Only succeed if we made it all the way to the original file.
                if file_id != original_file.into() {
                    return None;
                }
                Some(FileRange { file_id: original_file, range: value.text_range() })
            }
        }
    }

    /// Ancestor nodes of this token's parent, crossing macro-call boundaries.
    pub fn ancestors_with_macros(
        self,
        db: &dyn db::AstDatabase,
    ) -> impl Iterator<Item = InFile<SyntaxNode>> + '_ {
        self.value.parent().into_iter().flat_map({
            let file_id = self.file_id;
            move |parent| InFile::new(file_id, &parent).ancestors_with_macros(db)
        })
    }
}
+
+fn ascend_node_border_tokens(
+ db: &dyn db::AstDatabase,
+ InFile { file_id, value: node }: InFile<&SyntaxNode>,
+) -> Option<InFile<(SyntaxToken, SyntaxToken)>> {
+ let expansion = file_id.expansion_info(db)?;
+
+ let first_token = |node: &SyntaxNode| skip_trivia_token(node.first_token()?, Direction::Next);
+ let last_token = |node: &SyntaxNode| skip_trivia_token(node.last_token()?, Direction::Prev);
+
+ let first = first_token(node)?;
+ let last = last_token(node)?;
+ let first = ascend_call_token(db, &expansion, InFile::new(file_id, first))?;
+ let last = ascend_call_token(db, &expansion, InFile::new(file_id, last))?;
+ (first.file_id == last.file_id).then(|| InFile::new(first.file_id, (first.value, last.value)))
+}
+
/// Repeatedly maps `token` up one expansion level for as long as it originates
/// from a macro *call site*; returns the token once it reaches a real file.
fn ascend_call_token(
    db: &dyn db::AstDatabase,
    expansion: &ExpansionInfo,
    token: InFile<SyntaxToken>,
) -> Option<InFile<SyntaxToken>> {
    let mut mapping = expansion.map_token_up(db, token.as_ref())?;
    while let (mapped, Origin::Call) = mapping {
        match mapped.file_id.expansion_info(db) {
            // Still inside a macro file: keep ascending.
            Some(info) => mapping = info.map_token_up(db, mapped.as_ref())?,
            // Reached a non-macro file: done.
            None => return Some(mapped),
        }
    }
    // The token stems from the macro definition site, not the call site.
    None
}
+
impl<N: AstNode> InFile<N> {
    /// All descendants of the wrapped node that can be cast to `T`, each paired
    /// with this node's file id.
    pub fn descendants<T: AstNode>(self) -> impl Iterator<Item = InFile<T>> {
        self.value.syntax().descendants().filter_map(T::cast).map(move |n| self.with_value(n))
    }

    /// Tries to map this node back to an equivalent node in the macro input.
    pub fn original_ast_node(self, db: &dyn db::AstDatabase) -> Option<InFile<N>> {
        // This kind of upmapping can only be achieved in attribute expanded files,
        // as we don't have node inputs otherwise and therefore can't find an `N` node in the input
        if !self.file_id.is_macro() {
            return Some(self);
        } else if !self.file_id.is_attr_macro(db) {
            return None;
        }

        if let Some(InFile { file_id, value: (first, last) }) =
            ascend_node_border_tokens(db, self.syntax())
        {
            if file_id.is_macro() {
                let range = first.text_range().cover(last.text_range());
                tracing::error!("Failed mapping out of macro file for {:?}", range);
                return None;
            }
            // FIXME: This heuristic is brittle and with the right macro may select completely unrelated nodes
            let anc = algo::least_common_ancestor(&first.parent()?, &last.parent()?)?;
            let value = anc.ancestors().find_map(N::cast)?;
            return Some(InFile::new(file_id, value));
        }
        None
    }

    /// The wrapped node as an `InFile<&SyntaxNode>`.
    pub fn syntax(&self) -> InFile<&SyntaxNode> {
        self.with_value(self.value.syntax())
    }
}
+
/// In Rust, macros expand token trees to token trees. When we want to turn a
/// token tree into an AST node, we need to figure out what kind of AST node we
/// want: something like `foo` can be a type, an expression, or a pattern.
///
/// Naively, one would think that "what this expands to" is a property of a
/// particular macro: macro `m1` returns an item, while macro `m2` returns an
/// expression, etc. That's not the case -- macros are polymorphic in the
/// result, and can expand to any type of the AST node.
///
/// What defines the actual AST node is the syntactic context of the macro
/// invocation. As a contrived example, in `let T![*] = T![*];` the first `T`
/// expands to a pattern, while the second one expands to an expression.
///
/// `ExpandTo` captures this bit of information about a particular macro call
/// site.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ExpandTo {
    /// Parse the expansion as a sequence of statements.
    Statements,
    /// Parse the expansion as a sequence of items.
    Items,
    /// Parse the expansion as a pattern.
    Pattern,
    /// Parse the expansion as a type.
    Type,
    /// Parse the expansion as an expression.
    Expr,
}
+
+impl ExpandTo {
+ pub fn from_call_site(call: &ast::MacroCall) -> ExpandTo {
+ use syntax::SyntaxKind::*;
+
+ let syn = call.syntax();
+
+ let parent = match syn.parent() {
+ Some(it) => it,
+ None => return ExpandTo::Statements,
+ };
+
+ // FIXME: macros in statement position are treated as expression statements, they should
+ // probably be their own statement kind. The *grand*parent indicates what's valid.
+ if parent.kind() == MACRO_EXPR
+ && parent
+ .parent()
+ .map_or(true, |p| matches!(p.kind(), EXPR_STMT | STMT_LIST | MACRO_STMTS))
+ {
+ return ExpandTo::Statements;
+ }
+
+ match parent.kind() {
+ MACRO_ITEMS | SOURCE_FILE | ITEM_LIST => ExpandTo::Items,
+ MACRO_STMTS | EXPR_STMT | STMT_LIST => ExpandTo::Statements,
+ MACRO_PAT => ExpandTo::Pattern,
+ MACRO_TYPE => ExpandTo::Type,
+
+ ARG_LIST | ARRAY_EXPR | AWAIT_EXPR | BIN_EXPR | BREAK_EXPR | CALL_EXPR | CAST_EXPR
+ | CLOSURE_EXPR | FIELD_EXPR | FOR_EXPR | IF_EXPR | INDEX_EXPR | LET_EXPR
+ | MATCH_ARM | MATCH_EXPR | MATCH_GUARD | METHOD_CALL_EXPR | PAREN_EXPR | PATH_EXPR
+ | PREFIX_EXPR | RANGE_EXPR | RECORD_EXPR_FIELD | REF_EXPR | RETURN_EXPR | TRY_EXPR
+ | TUPLE_EXPR | WHILE_EXPR | MACRO_EXPR => ExpandTo::Expr,
+ _ => {
+ // Unknown , Just guess it is `Items`
+ ExpandTo::Items
+ }
+ }
+ }
+}
+
/// Error value for a macro call whose path failed to resolve.
#[derive(Debug)]
pub struct UnresolvedMacro {
    pub path: ModPath,
}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/mod_path.rs b/src/tools/rust-analyzer/crates/hir-expand/src/mod_path.rs
new file mode 100644
index 000000000..fea09521e
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/mod_path.rs
@@ -0,0 +1,276 @@
+//! A lowering for `use`-paths (more generally, paths without angle-bracketed segments).
+
+use std::{
+ fmt::{self, Display},
+ iter,
+};
+
+use crate::{
+ db::AstDatabase,
+ hygiene::Hygiene,
+ name::{known, Name},
+};
+use base_db::CrateId;
+use either::Either;
+use smallvec::SmallVec;
+use syntax::{ast, AstNode};
+
/// A use-like path: a leading [`PathKind`] qualifier followed by plain name
/// segments, with no angle-bracketed (generic) arguments.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ModPath {
    pub kind: PathKind,
    // Private: mutate only through `push_segment`/`pop_segment`.
    segments: SmallVec<[Name; 1]>,
}

/// Displays the wrapped [`ModPath`] with raw-identifier (`r#`) escaping applied
/// to its segments.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct EscapedModPath<'a>(&'a ModPath);

/// The leading qualifier of a [`ModPath`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PathKind {
    Plain,
    /// `self::` is `Super(0)`
    Super(u8),
    Crate,
    /// Absolute path (::foo)
    Abs,
    /// `$crate` from macro expansion
    DollarCrate(CrateId),
}
+
impl ModPath {
    /// Lowers an `ast::Path` to a `ModPath`, resolving `$crate` through `hygiene`.
    /// Returns `None` for paths that are not use-like (e.g. ones with type arguments).
    pub fn from_src(db: &dyn AstDatabase, path: ast::Path, hygiene: &Hygiene) -> Option<ModPath> {
        convert_path(db, None, path, hygiene)
    }

    /// Builds a path of the given kind from an iterator of segments.
    pub fn from_segments(kind: PathKind, segments: impl IntoIterator<Item = Name>) -> ModPath {
        let segments = segments.into_iter().collect();
        ModPath { kind, segments }
    }

    /// Creates a `ModPath` from a `PathKind`, with no extra path segments.
    pub const fn from_kind(kind: PathKind) -> ModPath {
        ModPath { kind, segments: SmallVec::new_const() }
    }

    /// The name segments, excluding the leading qualifier (`crate`, `super`, ...).
    pub fn segments(&self) -> &[Name] {
        &self.segments
    }

    /// Appends a segment to the end of the path.
    pub fn push_segment(&mut self, segment: Name) {
        self.segments.push(segment);
    }

    /// Removes and returns the last segment, if any.
    pub fn pop_segment(&mut self) -> Option<Name> {
        self.segments.pop()
    }

    /// Returns the number of segments in the path (counting special segments like `$crate` and
    /// `super`).
    pub fn len(&self) -> usize {
        self.segments.len()
            + match self.kind {
                PathKind::Plain => 0,
                PathKind::Super(i) => i as usize,
                PathKind::Crate => 1,
                PathKind::Abs => 0,
                PathKind::DollarCrate(_) => 1,
            }
    }

    /// Returns `true` if this path is a single plain identifier.
    pub fn is_ident(&self) -> bool {
        self.as_ident().is_some()
    }

    /// Returns `true` for the bare `self` path.
    pub fn is_self(&self) -> bool {
        self.kind == PathKind::Super(0) && self.segments.is_empty()
    }

    /// Returns `true` for the bare `Self` path.
    #[allow(non_snake_case)]
    pub fn is_Self(&self) -> bool {
        self.kind == PathKind::Plain
            && matches!(&*self.segments, [name] if *name == known::SELF_TYPE)
    }

    /// If this path is a single identifier, like `foo`, return its name.
    pub fn as_ident(&self) -> Option<&Name> {
        if self.kind != PathKind::Plain {
            return None;
        }

        match &*self.segments {
            [name] => Some(name),
            _ => None,
        }
    }

    /// Wraps the path for display with raw-identifier (`r#`) escaping.
    pub fn escaped(&self) -> EscapedModPath<'_> {
        EscapedModPath(self)
    }

    // Shared `Display` implementation; `escaped` toggles `r#` escaping of segments.
    fn _fmt(&self, f: &mut fmt::Formatter<'_>, escaped: bool) -> fmt::Result {
        // `first_segment` tracks whether a `::` separator is needed; it is shared
        // between the qualifier emission below and the segment loop after it.
        let mut first_segment = true;
        let mut add_segment = |s| -> fmt::Result {
            if !first_segment {
                f.write_str("::")?;
            }
            first_segment = false;
            f.write_str(s)?;
            Ok(())
        };
        match self.kind {
            PathKind::Plain => {}
            PathKind::Super(0) => add_segment("self")?,
            PathKind::Super(n) => {
                for _ in 0..n {
                    add_segment("super")?;
                }
            }
            PathKind::Crate => add_segment("crate")?,
            // The empty segment makes the following segment start with `::`.
            PathKind::Abs => add_segment("")?,
            PathKind::DollarCrate(_) => add_segment("$crate")?,
        }
        for segment in &self.segments {
            if !first_segment {
                f.write_str("::")?;
            }
            first_segment = false;
            if escaped {
                segment.escaped().fmt(f)?
            } else {
                segment.fmt(f)?
            };
        }
        Ok(())
    }
}
+
+impl Display for ModPath {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self._fmt(f, false)
+ }
+}
+
+impl<'a> Display for EscapedModPath<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0._fmt(f, true)
+ }
+}
+
+impl From<Name> for ModPath {
+ fn from(name: Name) -> ModPath {
+ ModPath::from_segments(PathKind::Plain, iter::once(name))
+ }
+}
+
// Recursively lowers `path` into a `ModPath`, qualifier-first, accumulating
// segments in `prefix`. Returns `None` for anything that is not a valid
// use-like path.
fn convert_path(
    db: &dyn AstDatabase,
    prefix: Option<ModPath>,
    path: ast::Path,
    hygiene: &Hygiene,
) -> Option<ModPath> {
    // Lower the qualifier first so segments are collected left-to-right.
    let prefix = match path.qualifier() {
        Some(qual) => Some(convert_path(db, prefix, qual, hygiene)?),
        None => prefix,
    };

    let segment = path.segment()?;
    let mut mod_path = match segment.kind()? {
        ast::PathSegmentKind::Name(name_ref) => {
            match hygiene.name_ref_to_name(db, name_ref) {
                Either::Left(name) => {
                    // no type args in use
                    let mut res = prefix.unwrap_or_else(|| {
                        // A leading `::` makes the path absolute.
                        ModPath::from_kind(
                            segment.coloncolon_token().map_or(PathKind::Plain, |_| PathKind::Abs),
                        )
                    });
                    res.segments.push(name);
                    res
                }
                Either::Right(crate_id) => {
                    // Hygiene resolved this name to `$crate`.
                    return Some(ModPath::from_segments(
                        PathKind::DollarCrate(crate_id),
                        iter::empty(),
                    ))
                }
            }
        }
        ast::PathSegmentKind::SelfTypeKw => {
            // `Self`, `crate`, `self` are only valid as the sole leading segment.
            if prefix.is_some() {
                return None;
            }
            ModPath::from_segments(PathKind::Plain, Some(known::SELF_TYPE))
        }
        ast::PathSegmentKind::CrateKw => {
            if prefix.is_some() {
                return None;
            }
            ModPath::from_segments(PathKind::Crate, iter::empty())
        }
        ast::PathSegmentKind::SelfKw => {
            if prefix.is_some() {
                return None;
            }
            ModPath::from_segments(PathKind::Super(0), iter::empty())
        }
        ast::PathSegmentKind::SuperKw => {
            // `super` may only follow other `super` segments.
            let nested_super_count = match prefix.map(|p| p.kind) {
                Some(PathKind::Super(n)) => n,
                Some(_) => return None,
                None => 0,
            };

            ModPath::from_segments(PathKind::Super(nested_super_count + 1), iter::empty())
        }
        ast::PathSegmentKind::Type { .. } => {
            // not allowed in imports
            return None;
        }
    };

    // handle local_inner_macros :
    // Basically, even in rustc it is quite hacky:
    // https://github.com/rust-lang/rust/blob/614f273e9388ddd7804d5cbc80b8865068a3744e/src/librustc_resolve/macros.rs#L456
    // We follow what it did anyway :)
    if mod_path.segments.len() == 1 && mod_path.kind == PathKind::Plain {
        if let Some(_macro_call) = path.syntax().parent().and_then(ast::MacroCall::cast) {
            if let Some(crate_id) = hygiene.local_inner_macros(db, path) {
                mod_path.kind = PathKind::DollarCrate(crate_id);
            }
        }
    }

    Some(mod_path)
}
+
+pub use crate::name as __name;
+
/// Compile-time allow-list used by `__path!`: each known path expands to
/// nothing, while any unregistered path triggers a `compile_error!` asking the
/// author to register it here.
#[macro_export]
macro_rules! __known_path {
    (core::iter::IntoIterator) => {};
    (core::iter::Iterator) => {};
    (core::result::Result) => {};
    (core::option::Option) => {};
    (core::ops::Range) => {};
    (core::ops::RangeFrom) => {};
    (core::ops::RangeFull) => {};
    (core::ops::RangeTo) => {};
    (core::ops::RangeToInclusive) => {};
    (core::ops::RangeInclusive) => {};
    (core::future::Future) => {};
    (core::ops::Try) => {};
    ($path:path) => {
        compile_error!("Please register your known path in the path module")
    };
}
+
/// Builds an absolute [`ModPath`](crate::mod_path::ModPath) from `::`-separated
/// identifiers, checking the path against `__known_path!` at compile time.
#[macro_export]
macro_rules! __path {
    ($start:ident $(:: $seg:ident)*) => ({
        $crate::__known_path!($start $(:: $seg)*);
        $crate::mod_path::ModPath::from_segments($crate::mod_path::PathKind::Abs, vec![
            $crate::mod_path::__name![$start], $($crate::mod_path::__name![$seg],)*
        ])
    });
}
+
+pub use crate::__path as path;
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/name.rs b/src/tools/rust-analyzer/crates/hir-expand/src/name.rs
new file mode 100644
index 000000000..85b0a7735
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/name.rs
@@ -0,0 +1,433 @@
+//! See [`Name`].
+
+use std::fmt;
+
+use syntax::{ast, SmolStr, SyntaxKind};
+
/// `Name` is a wrapper around string, which is used in hir for both references
/// and declarations. In theory, names should also carry hygiene info, but we are
/// not there yet!
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Name(Repr);

/// `EscapedName` will add a prefix "r#" to the wrapped `Name` when it is a raw identifier
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct EscapedName<'a>(&'a Name);

// Internal representation: either ordinary text or a numeric tuple-field index.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
enum Repr {
    Text(SmolStr),
    TupleField(usize),
}
+
+impl fmt::Display for Name {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self.0 {
+ Repr::Text(text) => fmt::Display::fmt(&text, f),
+ Repr::TupleField(idx) => fmt::Display::fmt(&idx, f),
+ }
+ }
+}
+
+fn is_raw_identifier(name: &str) -> bool {
+ let is_keyword = SyntaxKind::from_keyword(name).is_some();
+ is_keyword && !matches!(name, "self" | "crate" | "super" | "Self")
+}
+
+impl<'a> fmt::Display for EscapedName<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self.0 .0 {
+ Repr::Text(text) => {
+ if is_raw_identifier(text) {
+ write!(f, "r#{}", &text)
+ } else {
+ fmt::Display::fmt(&text, f)
+ }
+ }
+ Repr::TupleField(idx) => fmt::Display::fmt(&idx, f),
+ }
+ }
+}
+
+impl<'a> EscapedName<'a> {
+ pub fn is_escaped(&self) -> bool {
+ match &self.0 .0 {
+ Repr::Text(it) => is_raw_identifier(&it),
+ Repr::TupleField(_) => false,
+ }
+ }
+
+ /// Returns the textual representation of this name as a [`SmolStr`].
+ /// Prefer using this over [`ToString::to_string`] if possible as this conversion is cheaper in
+ /// the general case.
+ pub fn to_smol_str(&self) -> SmolStr {
+ match &self.0 .0 {
+ Repr::Text(it) => {
+ if is_raw_identifier(&it) {
+ SmolStr::from_iter(["r#", &it])
+ } else {
+ it.clone()
+ }
+ }
+ Repr::TupleField(it) => SmolStr::new(&it.to_string()),
+ }
+ }
+}
+
impl Name {
    /// Note: this is private to make creating name from random string hard.
    /// Hopefully, this should allow us to integrate hygiene cleaner in the
    /// future, and to switch to interned representation of names.
    const fn new_text(text: SmolStr) -> Name {
        Name(Repr::Text(text))
    }

    /// Creates a name for the tuple field with the given index (e.g. the `0` in `x.0`).
    pub fn new_tuple_field(idx: usize) -> Name {
        Name(Repr::TupleField(idx))
    }

    /// Creates a name from the text of a lifetime.
    pub fn new_lifetime(lt: &ast::Lifetime) -> Name {
        Self::new_text(lt.text().into())
    }

    /// Shortcut to create inline plain text name
    const fn new_inline(text: &str) -> Name {
        Name::new_text(SmolStr::new_inline(text))
    }

    /// Resolve a name from the text of token.
    fn resolve(raw_text: &str) -> Name {
        // Strip the `r#` prefix so `foo` and `r#foo` resolve to the same name.
        match raw_text.strip_prefix("r#") {
            Some(text) => Name::new_text(SmolStr::new(text)),
            None => Name::new_text(raw_text.into()),
        }
    }

    /// A fake name for things missing in the source code.
    ///
    /// For example, `impl Foo for {}` should be treated as a trait impl for a
    /// type with a missing name. Similarly, `struct S { : u32 }` should have a
    /// single field with a missing name.
    ///
    /// Ideally, we want a `gensym` semantics for missing names -- each missing
    /// name is equal only to itself. It's not clear how to implement this in
    /// salsa though, so we punt on that bit for a moment.
    pub const fn missing() -> Name {
        Name::new_inline("[missing name]")
    }

    /// Returns the tuple index this name represents if it is a tuple field.
    pub fn as_tuple_index(&self) -> Option<usize> {
        match self.0 {
            Repr::TupleField(idx) => Some(idx),
            _ => None,
        }
    }

    /// Returns the text this name represents if it isn't a tuple field.
    pub fn as_text(&self) -> Option<SmolStr> {
        match &self.0 {
            Repr::Text(it) => Some(it.clone()),
            _ => None,
        }
    }

    /// Returns the textual representation of this name as a [`SmolStr`].
    /// Prefer using this over [`ToString::to_string`] if possible as this conversion is cheaper in
    /// the general case.
    pub fn to_smol_str(&self) -> SmolStr {
        match &self.0 {
            Repr::Text(it) => it.clone(),
            Repr::TupleField(it) => SmolStr::new(&it.to_string()),
        }
    }

    /// Wraps the name for display with raw-identifier (`r#`) escaping.
    pub fn escaped(&self) -> EscapedName<'_> {
        EscapedName(self)
    }
}
+
/// Conversion of a syntax or token structure into a hir [`Name`].
pub trait AsName {
    fn as_name(&self) -> Name;
}
+
+impl AsName for ast::NameRef {
+ fn as_name(&self) -> Name {
+ match self.as_tuple_field() {
+ Some(idx) => Name::new_tuple_field(idx),
+ None => Name::resolve(&self.text()),
+ }
+ }
+}
+
impl AsName for ast::Name {
    // Declared names are always plain text (possibly raw identifiers).
    fn as_name(&self) -> Name {
        Name::resolve(&self.text())
    }
}
+
+impl AsName for ast::NameOrNameRef {
+ fn as_name(&self) -> Name {
+ match self {
+ ast::NameOrNameRef::Name(it) => it.as_name(),
+ ast::NameOrNameRef::NameRef(it) => it.as_name(),
+ }
+ }
+}
+
impl AsName for tt::Ident {
    // Token-tree identifiers resolve like any other textual name.
    fn as_name(&self) -> Name {
        Name::resolve(&self.text)
    }
}
+
+impl AsName for ast::FieldKind {
+ fn as_name(&self) -> Name {
+ match self {
+ ast::FieldKind::Name(nr) => nr.as_name(),
+ ast::FieldKind::Index(idx) => {
+ let idx = idx.text().parse::<usize>().unwrap_or(0);
+ Name::new_tuple_field(idx)
+ }
+ }
+ }
+}
+
impl AsName for base_db::Dependency {
    // A crate dependency's name as it is referred to in source.
    fn as_name(&self) -> Name {
        Name::new_text(SmolStr::new(&*self.name))
    }
}
+
/// Pre-interned copies of commonly used names, usable in `const` contexts
/// via the `name!` macro.
pub mod known {
    // Generates an inline `Name` constant per identifier.
    macro_rules! known_names {
        ($($ident:ident),* $(,)?) => {
            $(
                #[allow(bad_style)]
                pub const $ident: super::Name =
                    super::Name::new_inline(stringify!($ident));
            )*
        };
    }

    known_names!(
        // Primitives
        isize,
        i8,
        i16,
        i32,
        i64,
        i128,
        usize,
        u8,
        u16,
        u32,
        u64,
        u128,
        f32,
        f64,
        bool,
        char,
        str,
        // Special names
        macro_rules,
        doc,
        cfg,
        cfg_attr,
        register_attr,
        register_tool,
        // Components of known path (value or mod name)
        std,
        core,
        alloc,
        iter,
        ops,
        future,
        result,
        boxed,
        option,
        prelude,
        rust_2015,
        rust_2018,
        rust_2021,
        v1,
        // Components of known path (type name)
        Iterator,
        IntoIterator,
        Item,
        Try,
        Ok,
        Future,
        Result,
        Option,
        Output,
        Target,
        Box,
        RangeFrom,
        RangeFull,
        RangeInclusive,
        RangeToInclusive,
        RangeTo,
        Range,
        Neg,
        Not,
        None,
        Index,
        // Components of known path (function name)
        filter_map,
        next,
        iter_mut,
        len,
        is_empty,
        new,
        // Builtin macros
        asm,
        assert,
        column,
        compile_error,
        concat_idents,
        concat_bytes,
        concat,
        const_format_args,
        core_panic,
        env,
        file,
        format_args_nl,
        format_args,
        global_asm,
        include_bytes,
        include_str,
        include,
        line,
        llvm_asm,
        log_syntax,
        module_path,
        option_env,
        std_panic,
        stringify,
        trace_macros,
        unreachable,
        // Builtin derives
        Copy,
        Clone,
        Default,
        Debug,
        Hash,
        Ord,
        PartialOrd,
        Eq,
        PartialEq,
        // Builtin attributes
        bench,
        cfg_accessible,
        cfg_eval,
        crate_type,
        derive,
        global_allocator,
        test,
        test_case,
        recursion_limit,
        // Safe intrinsics
        abort,
        add_with_overflow,
        black_box,
        bitreverse,
        bswap,
        caller_location,
        ctlz,
        ctpop,
        cttz,
        discriminant_value,
        forget,
        likely,
        maxnumf32,
        maxnumf64,
        min_align_of_val,
        min_align_of,
        minnumf32,
        minnumf64,
        mul_with_overflow,
        needs_drop,
        ptr_guaranteed_eq,
        ptr_guaranteed_ne,
        rotate_left,
        rotate_right,
        rustc_peek,
        saturating_add,
        saturating_sub,
        size_of_val,
        size_of,
        sub_with_overflow,
        type_id,
        type_name,
        unlikely,
        variant_count,
        wrapping_add,
        wrapping_mul,
        wrapping_sub,
        // known methods of lang items
        eq,
        ne,
        ge,
        gt,
        le,
        lt,
        // lang items
        add_assign,
        add,
        bitand_assign,
        bitand,
        bitor_assign,
        bitor,
        bitxor_assign,
        bitxor,
        deref_mut,
        deref,
        div_assign,
        div,
        fn_mut,
        fn_once,
        future_trait,
        index,
        index_mut,
        mul_assign,
        mul,
        neg,
        not,
        owned_box,
        partial_ord,
        r#fn,
        rem_assign,
        rem,
        shl_assign,
        shl,
        shr_assign,
        shr,
        sub_assign,
        sub,
    );

    // self/Self cannot be used as an identifier
    pub const SELF_PARAM: super::Name = super::Name::new_inline("self");
    pub const SELF_TYPE: super::Name = super::Name::new_inline("Self");

    pub const STATIC_LIFETIME: super::Name = super::Name::new_inline("'static");

    /// Refers to a known name as a `const Name`, e.g. `name![usize]`.
    #[macro_export]
    macro_rules! name {
        (self) => {
            $crate::name::known::SELF_PARAM
        };
        (Self) => {
            $crate::name::known::SELF_TYPE
        };
        ('static) => {
            $crate::name::known::STATIC_LIFETIME
        };
        ($ident:ident) => {
            $crate::name::known::$ident
        };
    }
}
+
+pub use crate::name;
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/proc_macro.rs b/src/tools/rust-analyzer/crates/hir-expand/src/proc_macro.rs
new file mode 100644
index 000000000..5afdcc0e6
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/proc_macro.rs
@@ -0,0 +1,81 @@
+//! Proc Macro Expander stub
+
+use base_db::{CrateId, ProcMacroExpansionError, ProcMacroId, ProcMacroKind};
+use stdx::never;
+
+use crate::{db::AstDatabase, ExpandError, ExpandResult};
+
+/// Expander for a single procedural macro defined by crate `krate`.
+///
+/// A "dummy" expander (see [`ProcMacroExpander::dummy`]) has no macro id and
+/// always fails to expand with `ExpandError::UnresolvedProcMacro`.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub struct ProcMacroExpander {
+ // Crate that defines the proc macro; used to index the crate graph.
+ krate: CrateId,
+ // `None` marks a dummy expander for an unresolved proc macro.
+ proc_macro_id: Option<ProcMacroId>,
+}
+
+impl ProcMacroExpander {
+ /// Creates an expander for a successfully resolved proc macro.
+ pub fn new(krate: CrateId, proc_macro_id: ProcMacroId) -> Self {
+ Self { krate, proc_macro_id: Some(proc_macro_id) }
+ }
+
+ /// Creates a placeholder expander for an unresolved proc macro; calling
+ /// [`Self::expand`] on it always yields `ExpandError::UnresolvedProcMacro`.
+ pub fn dummy(krate: CrateId) -> Self {
+ // FIXME: Should store the name for better errors
+ Self { krate, proc_macro_id: None }
+ }
+
+ /// Returns `true` if this is a dummy (unresolved) expander.
+ pub fn is_dummy(&self) -> bool {
+ self.proc_macro_id.is_none()
+ }
+
+ /// Runs the proc macro on `tt`.
+ ///
+ /// `attr_arg` is forwarded to the expander alongside `tt` (presumably the
+ /// attribute's own argument tokens for attribute macros — confirm against
+ /// callers). `calling_crate` supplies the environment the macro may read.
+ /// Lookup failures are downgraded to `never!` + an "Internal error" result
+ /// instead of panicking.
+ pub fn expand(
+ self,
+ db: &dyn AstDatabase,
+ calling_crate: CrateId,
+ tt: &tt::Subtree,
+ attr_arg: Option<&tt::Subtree>,
+ ) -> ExpandResult<tt::Subtree> {
+ match self.proc_macro_id {
+ Some(id) => {
+ let krate_graph = db.crate_graph();
+ // Invariant: a non-dummy expander implies the defining crate
+ // actually loaded its proc macros.
+ let proc_macros = match &krate_graph[self.krate].proc_macro {
+ Ok(proc_macros) => proc_macros,
+ Err(_) => {
+ never!("Non-dummy expander even though there are no proc macros");
+ return ExpandResult::only_err(ExpandError::Other("Internal error".into()));
+ }
+ };
+ let proc_macro = match proc_macros.get(id.0 as usize) {
+ Some(proc_macro) => proc_macro,
+ None => {
+ never!(
+ "Proc macro index out of bounds: the length is {} but the index is {}",
+ proc_macros.len(),
+ id.0
+ );
+ return ExpandResult::only_err(ExpandError::Other("Internal error".into()));
+ }
+ };
+
+ // Proc macros have access to the environment variables of the invoking crate.
+ let env = &krate_graph[calling_crate].env;
+ match proc_macro.expander.expand(tt, attr_arg, env) {
+ Ok(t) => ExpandResult::ok(t),
+ Err(err) => match err {
+ // Don't discard the item in case something unexpected happened while expanding attributes
+ ProcMacroExpansionError::System(text)
+ if proc_macro.kind == ProcMacroKind::Attr =>
+ {
+ // Keep the original tokens as the value so the
+ // annotated item survives, but still record the error.
+ ExpandResult {
+ value: tt.clone(),
+ err: Some(ExpandError::Other(text.into())),
+ }
+ }
+ ProcMacroExpansionError::System(text)
+ | ProcMacroExpansionError::Panic(text) => {
+ ExpandResult::only_err(ExpandError::Other(text.into()))
+ }
+ },
+ }
+ }
+ None => ExpandResult::only_err(ExpandError::UnresolvedProcMacro(self.krate)),
+ }
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/hir-expand/src/quote.rs b/src/tools/rust-analyzer/crates/hir-expand/src/quote.rs
new file mode 100644
index 000000000..82f410ecd
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-expand/src/quote.rs
@@ -0,0 +1,284 @@
+//! A simplified version of quote-crate like quasi quote macro
+
+// A helper macro quote macro
+// FIXME:
+// 1. Not all puncts are handled
+// 2. #()* pattern repetition not supported now
+// * But we can do it manually, see `test_quote_derive_copy_hack`
+// Recursive token-by-token expansion: each arm yields either a
+// `Vec<tt::TokenTree>` or a `tt::Subtree`; `IntoTt` normalizes between them.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __quote {
+ // Empty input: no tokens.
+ () => {
+ Vec::<tt::TokenTree>::new()
+ };
+
+ // Internal rule: wrap the quoted tokens in a subtree with the given delimiter.
+ ( @SUBTREE $delim:ident $($tt:tt)* ) => {
+ {
+ let children = $crate::__quote!($($tt)*);
+ tt::Subtree {
+ delimiter: Some(tt::Delimiter {
+ kind: tt::DelimiterKind::$delim,
+ id: tt::TokenId::unspecified(),
+ }),
+ token_trees: $crate::quote::IntoTt::to_tokens(children),
+ }
+ }
+ };
+
+ // Internal rule: a single stand-alone punctuation token.
+ ( @PUNCT $first:literal ) => {
+ {
+ vec![
+ tt::Leaf::Punct(tt::Punct {
+ char: $first,
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ }).into()
+ ]
+ }
+ };
+
+ // Internal rule: a two-character punct; the first char is Joint so the
+ // pair prints as one operator (e.g. `::`, `->`).
+ ( @PUNCT $first:literal, $sec:literal ) => {
+ {
+ vec![
+ tt::Leaf::Punct(tt::Punct {
+ char: $first,
+ spacing: tt::Spacing::Joint,
+ id: tt::TokenId::unspecified(),
+ }).into(),
+ tt::Leaf::Punct(tt::Punct {
+ char: $sec,
+ spacing: tt::Spacing::Alone,
+ id: tt::TokenId::unspecified(),
+ }).into()
+ ]
+ }
+ };
+
+ // hash variable: `#x` interpolates a single `ToTokenTree` value.
+ ( # $first:ident $($tail:tt)* ) => {
+ {
+ let token = $crate::quote::ToTokenTree::to_token($first);
+ let mut tokens = vec![token.into()];
+ let mut tail_tokens = $crate::quote::IntoTt::to_tokens($crate::__quote!($($tail)*));
+ tokens.append(&mut tail_tokens);
+ tokens
+ }
+ };
+
+ // `##x` splices an iterator of `ToTokenTree` values.
+ ( ## $first:ident $($tail:tt)* ) => {
+ {
+ let mut tokens = $first.into_iter().map($crate::quote::ToTokenTree::to_token).collect::<Vec<tt::TokenTree>>();
+ let mut tail_tokens = $crate::quote::IntoTt::to_tokens($crate::__quote!($($tail)*));
+ tokens.append(&mut tail_tokens);
+ tokens
+ }
+ };
+
+ // Brace
+ ( { $($tt:tt)* } ) => { $crate::__quote!(@SUBTREE Brace $($tt)*) };
+ // Bracket
+ ( [ $($tt:tt)* ] ) => { $crate::__quote!(@SUBTREE Bracket $($tt)*) };
+ // Parenthesis
+ ( ( $($tt:tt)* ) ) => { $crate::__quote!(@SUBTREE Parenthesis $($tt)*) };
+
+ // Literal
+ ( $tt:literal ) => { vec![$crate::quote::ToTokenTree::to_token($tt).into()] };
+ // Ident
+ ( $tt:ident ) => {
+ vec![ {
+ tt::Leaf::Ident(tt::Ident {
+ text: stringify!($tt).into(),
+ id: tt::TokenId::unspecified(),
+ }).into()
+ }]
+ };
+
+ // Puncts
+ // FIXME: Not all puncts are handled
+ ( -> ) => {$crate::__quote!(@PUNCT '-', '>')};
+ ( & ) => {$crate::__quote!(@PUNCT '&')};
+ ( , ) => {$crate::__quote!(@PUNCT ',')};
+ ( : ) => {$crate::__quote!(@PUNCT ':')};
+ ( ; ) => {$crate::__quote!(@PUNCT ';')};
+ ( :: ) => {$crate::__quote!(@PUNCT ':', ':')};
+ ( . ) => {$crate::__quote!(@PUNCT '.')};
+ ( < ) => {$crate::__quote!(@PUNCT '<')};
+ ( > ) => {$crate::__quote!(@PUNCT '>')};
+ ( ! ) => {$crate::__quote!(@PUNCT '!')};
+
+ // Fallback: quote the head token, recurse on the tail, concatenate.
+ ( $first:tt $($tail:tt)+ ) => {
+ {
+ let mut tokens = $crate::quote::IntoTt::to_tokens($crate::__quote!($first));
+ let mut tail_tokens = $crate::quote::IntoTt::to_tokens($crate::__quote!($($tail)*));
+
+ tokens.append(&mut tail_tokens);
+ tokens
+ }
+ };
+}
+
+/// Quasi-quotes the given tokens into a [`tt::Subtree`].
+///
+/// FIXME: this should probably be implemented as a proc macro rather than
+/// with `macro_rules!`.
+#[macro_export]
+macro_rules! quote {
+ ( $($tt:tt)* ) => {
+ $crate::quote::IntoTt::to_subtree($crate::__quote!($($tt)*))
+ }
+}
+
+/// Normalizes `__quote!` output (either a token list or a subtree) into the
+/// shape the caller needs.
+pub(crate) trait IntoTt {
+ /// Converts into a single subtree.
+ fn to_subtree(self) -> tt::Subtree;
+ /// Converts into a flat list of token trees.
+ fn to_tokens(self) -> Vec<tt::TokenTree>;
+}
+
+impl IntoTt for Vec<tt::TokenTree> {
+ fn to_subtree(self) -> tt::Subtree {
+ // A bare token list becomes an undelimited subtree.
+ tt::Subtree { delimiter: None, token_trees: self }
+ }
+
+ fn to_tokens(self) -> Vec<tt::TokenTree> {
+ self
+ }
+}
+
+impl IntoTt for tt::Subtree {
+ fn to_subtree(self) -> tt::Subtree {
+ self
+ }
+
+ fn to_tokens(self) -> Vec<tt::TokenTree> {
+ // A subtree is a single token tree, so it becomes a one-element list.
+ vec![tt::TokenTree::Subtree(self)]
+ }
+}
+
+/// Values that `#var` interpolation in `quote!` can turn into a token tree.
+pub(crate) trait ToTokenTree {
+ fn to_token(self) -> tt::TokenTree;
+}
+
+// A token tree interpolates as itself.
+impl ToTokenTree for tt::TokenTree {
+ fn to_token(self) -> tt::TokenTree {
+ self
+ }
+}
+
+// A subtree converts via the `TokenTree: From<Subtree>` impl.
+impl ToTokenTree for tt::Subtree {
+ fn to_token(self) -> tt::TokenTree {
+ self.into()
+ }
+}
+
+/// Implements [`ToTokenTree`] for each `$ty` and for `&$ty`: `$im` is an
+/// expression block (with `self` bound as `$this`) that evaluates to
+/// something convertible into a `tt::Leaf`. The reference impl clones the
+/// block's result first.
+macro_rules! impl_to_to_tokentrees {
+ ($($ty:ty => $this:ident $im:block);*) => {
+ $(
+ impl ToTokenTree for $ty {
+ fn to_token($this) -> tt::TokenTree {
+ let leaf: tt::Leaf = $im.into();
+ leaf.into()
+ }
+ }
+
+ impl ToTokenTree for &$ty {
+ fn to_token($this) -> tt::TokenTree {
+ let leaf: tt::Leaf = $im.clone().into();
+ leaf.into()
+ }
+ }
+ )*
+ }
+}
+
+// Numbers become literal leaves; `bool` becomes a `true`/`false` ident;
+// `&str`/`String` become quoted string literals (escaped via
+// `str::escape_debug`); `tt` leaf types pass through unchanged.
+impl_to_to_tokentrees! {
+ u32 => self { tt::Literal{text: self.to_string().into(), id: tt::TokenId::unspecified()} };
+ usize => self { tt::Literal{text: self.to_string().into(), id: tt::TokenId::unspecified()} };
+ i32 => self { tt::Literal{text: self.to_string().into(), id: tt::TokenId::unspecified()} };
+ bool => self { tt::Ident{text: self.to_string().into(), id: tt::TokenId::unspecified()} };
+ tt::Leaf => self { self };
+ tt::Literal => self { self };
+ tt::Ident => self { self };
+ tt::Punct => self { self };
+ &str => self { tt::Literal{text: format!("\"{}\"", self.escape_debug()).into(), id: tt::TokenId::unspecified()}};
+ String => self { tt::Literal{text: format!("\"{}\"", self.escape_debug()).into(), id: tt::TokenId::unspecified()}}
+}
+
+#[cfg(test)]
+mod tests {
+ // Each delimiter arm produces the matching empty delimited subtree.
+ #[test]
+ fn test_quote_delimiters() {
+ assert_eq!(quote!({}).to_string(), "{}");
+ assert_eq!(quote!(()).to_string(), "()");
+ assert_eq!(quote!([]).to_string(), "[]");
+ }
+
+ // Literals and (keyword) idents round-trip through quoting.
+ #[test]
+ fn test_quote_idents() {
+ assert_eq!(quote!(32).to_string(), "32");
+ assert_eq!(quote!(struct).to_string(), "struct");
+ }
+
+ // `#var` interpolation of primitive values; strings come out quoted.
+ #[test]
+ fn test_quote_hash_simple_literal() {
+ let a = 20;
+ assert_eq!(quote!(#a).to_string(), "20");
+ let s: String = "hello".into();
+ assert_eq!(quote!(#s).to_string(), "\"hello\"");
+ }
+
+ // Helper: an identifier leaf with an unspecified token id.
+ fn mk_ident(name: &str) -> tt::Ident {
+ tt::Ident { text: name.into(), id: tt::TokenId::unspecified() }
+ }
+
+ // `#var` interpolation of a token-tree value; the debug dump shows
+ // 4294967295, the rendering of `TokenId::unspecified()` here.
+ #[test]
+ fn test_quote_hash_token_tree() {
+ let a = mk_ident("hello");
+
+ let quoted = quote!(#a);
+ assert_eq!(quoted.to_string(), "hello");
+ let t = format!("{:?}", quoted);
+ assert_eq!(t, "SUBTREE $\n IDENT hello 4294967295");
+ }
+
+ // A realistic multi-token quote: a whole (empty-body) Clone impl.
+ #[test]
+ fn test_quote_simple_derive_copy() {
+ let name = mk_ident("Foo");
+
+ let quoted = quote! {
+ impl Clone for #name {
+ fn clone(&self) -> Self {
+ Self {}
+ }
+ }
+ };
+
+ assert_eq!(quoted.to_string(), "impl Clone for Foo {fn clone (& self) -> Self {Self {}}}");
+ }
+
+ // Manual work-around for the unsupported `#()*` repetition: build the
+ // repeated field initializers by flat-mapping per-field quotes.
+ #[test]
+ fn test_quote_derive_copy_hack() {
+ // Assume the given struct is:
+ // struct Foo {
+ // name: String,
+ // id: u32,
+ // }
+ let struct_name = mk_ident("Foo");
+ let fields = [mk_ident("name"), mk_ident("id")];
+ let fields = fields.iter().flat_map(|it| quote!(#it: self.#it.clone(), ).token_trees);
+
+ let list = tt::Subtree {
+ delimiter: Some(tt::Delimiter {
+ kind: tt::DelimiterKind::Brace,
+ id: tt::TokenId::unspecified(),
+ }),
+ token_trees: fields.collect(),
+ };
+
+ let quoted = quote! {
+ impl Clone for #struct_name {
+ fn clone(&self) -> Self {
+ Self #list
+ }
+ }
+ };
+
+ assert_eq!(quoted.to_string(), "impl Clone for Foo {fn clone (& self) -> Self {Self {name : self . name . clone () , id : self . id . clone () ,}}}");
+ }
+}