Diffstat (limited to 'src/tools/rust-analyzer/crates/ide-db')
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/Cargo.toml | 39
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs | 78
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/apply_change.rs | 163
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/assists.rs | 137
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/defs.rs | 545
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/famous_defs.rs | 185
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/generated/lints.rs | 7682
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/helpers.rs | 105
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/imports/import_assets.rs | 674
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use.rs | 446
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use/tests.rs | 1084
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/imports/merge_imports.rs | 295
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/items_locator.rs | 151
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/label.rs | 48
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/lib.rs | 246
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/line_index.rs | 300
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs | 287
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/rename.rs | 540
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/rust_doc.rs | 34
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/search.rs | 785
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/source_change.rs | 99
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs | 429
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/format_string.rs | 308
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/insert_whitespace_into_node.rs | 136
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/node_ext.rs | 460
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/test_data/test_symbol_index_collection.txt | 533
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/tests/sourcegen_lints.rs | 284
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/traits.rs | 273
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/ty_filter.rs | 86
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/use_trivial_contructor.rs | 34
30 files changed, 16466 insertions, 0 deletions
diff --git a/src/tools/rust-analyzer/crates/ide-db/Cargo.toml b/src/tools/rust-analyzer/crates/ide-db/Cargo.toml
new file mode 100644
index 000000000..a1b0bd6cb
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "ide-db"
+version = "0.0.0"
+description = "TBD"
+license = "MIT OR Apache-2.0"
+edition = "2021"
+rust-version = "1.57"
+
+[lib]
+doctest = false
+
+[dependencies]
+cov-mark = "2.0.0-pre.1"
+tracing = "0.1.35"
+rayon = "1.5.3"
+fst = { version = "0.4.7", default-features = false }
+rustc-hash = "1.1.0"
+once_cell = "1.12.0"
+either = "1.7.0"
+itertools = "0.10.3"
+arrayvec = "0.7.2"
+indexmap = "1.9.1"
+
+stdx = { path = "../stdx", version = "0.0.0" }
+parser = { path = "../parser", version = "0.0.0" }
+syntax = { path = "../syntax", version = "0.0.0" }
+text-edit = { path = "../text-edit", version = "0.0.0" }
+base-db = { path = "../base-db", version = "0.0.0" }
+profile = { path = "../profile", version = "0.0.0" }
+# ide should depend only on the top-level `hir` package. if you need
+# something from some `hir-xxx` subpackage, reexport the API via `hir`.
+hir = { path = "../hir", version = "0.0.0" }
+limit = { path = "../limit", version = "0.0.0" }
+
+[dev-dependencies]
+test-utils = { path = "../test-utils" }
+sourcegen = { path = "../sourcegen" }
+xshell = "0.2.2"
+expect-test = "1.4.0"
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs b/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs
new file mode 100644
index 000000000..7303ef8b7
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs
@@ -0,0 +1,78 @@
+//! This module provides functionality for querying callable information about a token.
+
+use either::Either;
+use hir::{Semantics, Type};
+use syntax::{
+ ast::{self, HasArgList, HasName},
+ AstNode, SyntaxToken,
+};
+
+use crate::RootDatabase;
+
+#[derive(Debug)]
+pub struct ActiveParameter {
+ pub ty: Type,
+ pub pat: Either<ast::SelfParam, ast::Pat>,
+}
+
+impl ActiveParameter {
+ /// Returns information about the call argument this token is part of.
+ pub fn at_token(sema: &Semantics<'_, RootDatabase>, token: SyntaxToken) -> Option<Self> {
+ let (signature, active_parameter) = callable_for_token(sema, token)?;
+
+ let idx = active_parameter?;
+ let mut params = signature.params(sema.db);
+ if !(idx < params.len()) {
+ cov_mark::hit!(too_many_arguments);
+ return None;
+ }
+ let (pat, ty) = params.swap_remove(idx);
+ pat.map(|pat| ActiveParameter { ty, pat })
+ }
+
+ pub fn ident(&self) -> Option<ast::Name> {
+ self.pat.as_ref().right().and_then(|param| match param {
+ ast::Pat::IdentPat(ident) => ident.name(),
+ _ => None,
+ })
+ }
+}
+
+/// Returns the [`hir::Callable`] this token is a part of and, if the token lies within the argument list, the index of the argument it belongs to.
+pub fn callable_for_token(
+ sema: &Semantics<'_, RootDatabase>,
+ token: SyntaxToken,
+) -> Option<(hir::Callable, Option<usize>)> {
+ // Find the calling expression and its NameRef
+ let parent = token.parent()?;
+ let calling_node = parent.ancestors().filter_map(ast::CallableExpr::cast).find(|it| {
+ it.arg_list()
+ .map_or(false, |it| it.syntax().text_range().contains(token.text_range().start()))
+ })?;
+
+ callable_for_node(sema, &calling_node, &token)
+}
+
+pub fn callable_for_node(
+ sema: &Semantics<'_, RootDatabase>,
+ calling_node: &ast::CallableExpr,
+ token: &SyntaxToken,
+) -> Option<(hir::Callable, Option<usize>)> {
+ let callable = match &calling_node {
+ ast::CallableExpr::Call(call) => {
+ let expr = call.expr()?;
+ sema.type_of_expr(&expr)?.adjusted().as_callable(sema.db)
+ }
+ ast::CallableExpr::MethodCall(call) => sema.resolve_method_call_as_callable(call),
+ }?;
+ let active_param = if let Some(arg_list) = calling_node.arg_list() {
+ let param = arg_list
+ .args()
+ .take_while(|arg| arg.syntax().text_range().end() <= token.text_range().start())
+ .count();
+ Some(param)
+ } else {
+ None
+ };
+ Some((callable, active_param))
+}
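For orientation, here is a minimal caller-side sketch of how `ActiveParameter::at_token` might be consumed by a feature such as signature help. The helper name `describe_active_parameter` is hypothetical and not part of this patch; it assumes the crate is available as `ide_db`.

use hir::Semantics;
use ide_db::{active_parameter::ActiveParameter, RootDatabase};
use syntax::{AstNode, SyntaxToken};

// Illustrative sketch, not from the patch above: given a token inside a call's
// argument list, describe the parameter the cursor is currently on.
fn describe_active_parameter(
    sema: &Semantics<'_, RootDatabase>,
    token: SyntaxToken,
) -> Option<String> {
    let param = ActiveParameter::at_token(sema, token)?;
    // `ident()` is `None` for non-identifier patterns (e.g. `_` or tuple patterns).
    let name = param
        .ident()
        .map(|name| name.syntax().text().to_string())
        .unwrap_or_else(|| "_".to_owned());
    Some(format!("{}: {}", name, param.ty.display(sema.db)))
}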
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/apply_change.rs b/src/tools/rust-analyzer/crates/ide-db/src/apply_change.rs
new file mode 100644
index 000000000..98b0e9c94
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/apply_change.rs
@@ -0,0 +1,163 @@
+//! Applies changes to the IDE state transactionally.
+
+use std::sync::Arc;
+
+use base_db::{
+ salsa::{Database, Durability},
+ Change, SourceRootId,
+};
+use profile::{memory_usage, Bytes};
+use rustc_hash::FxHashSet;
+
+use crate::{symbol_index::SymbolsDatabase, RootDatabase};
+
+impl RootDatabase {
+ pub fn request_cancellation(&mut self) {
+ let _p = profile::span("RootDatabase::request_cancellation");
+ self.salsa_runtime_mut().synthetic_write(Durability::LOW);
+ }
+
+ pub fn apply_change(&mut self, change: Change) {
+ let _p = profile::span("RootDatabase::apply_change");
+ self.request_cancellation();
+ tracing::info!("apply_change {:?}", change);
+ if let Some(roots) = &change.roots {
+ let mut local_roots = FxHashSet::default();
+ let mut library_roots = FxHashSet::default();
+ for (idx, root) in roots.iter().enumerate() {
+ let root_id = SourceRootId(idx as u32);
+ if root.is_library {
+ library_roots.insert(root_id);
+ } else {
+ local_roots.insert(root_id);
+ }
+ }
+ self.set_local_roots_with_durability(Arc::new(local_roots), Durability::HIGH);
+ self.set_library_roots_with_durability(Arc::new(library_roots), Durability::HIGH);
+ }
+ change.apply(self);
+ }
+
+ // Feature: Memory Usage
+ //
+ // Clears rust-analyzer's internal database and prints memory usage statistics.
+ //
+ // |===
+ // | Editor | Action Name
+ //
+ // | VS Code | **Rust Analyzer: Memory Usage (Clears Database)**
+ // |===
+ // image::https://user-images.githubusercontent.com/48062697/113065592-08559f00-91b1-11eb-8c96-64b88068ec02.gif[]
+ pub fn per_query_memory_usage(&mut self) -> Vec<(String, Bytes)> {
+ let mut acc: Vec<(String, Bytes)> = vec![];
+ macro_rules! purge_each_query {
+ ($($q:path)*) => {$(
+ let before = memory_usage().allocated;
+ $q.in_db(self).purge();
+ let after = memory_usage().allocated;
+ let q: $q = Default::default();
+ let name = format!("{:?}", q);
+ acc.push((name, before - after));
+ )*}
+ }
+ purge_each_query![
+ // SourceDatabase
+ base_db::ParseQuery
+ base_db::CrateGraphQuery
+
+ // SourceDatabaseExt
+ base_db::FileTextQuery
+ base_db::FileSourceRootQuery
+ base_db::SourceRootQuery
+ base_db::SourceRootCratesQuery
+
+ // AstDatabase
+ hir::db::AstIdMapQuery
+ hir::db::MacroArgTextQuery
+ hir::db::MacroDefQuery
+ hir::db::ParseMacroExpansionQuery
+ hir::db::MacroExpandQuery
+ hir::db::HygieneFrameQuery
+ hir::db::InternMacroCallQuery
+
+ // DefDatabase
+ hir::db::FileItemTreeQuery
+ hir::db::BlockDefMapQuery
+ hir::db::CrateDefMapQueryQuery
+ hir::db::FieldsAttrsQuery
+ hir::db::VariantsAttrsQuery
+ hir::db::FieldsAttrsSourceMapQuery
+ hir::db::VariantsAttrsSourceMapQuery
+ hir::db::StructDataQuery
+ hir::db::UnionDataQuery
+ hir::db::EnumDataQuery
+ hir::db::ImplDataQuery
+ hir::db::TraitDataQuery
+ hir::db::TypeAliasDataQuery
+ hir::db::FunctionDataQuery
+ hir::db::ConstDataQuery
+ hir::db::StaticDataQuery
+ hir::db::BodyWithSourceMapQuery
+ hir::db::BodyQuery
+ hir::db::ExprScopesQuery
+ hir::db::GenericParamsQuery
+ hir::db::AttrsQuery
+ hir::db::CrateLangItemsQuery
+ hir::db::LangItemQuery
+ hir::db::ImportMapQuery
+
+ // HirDatabase
+ hir::db::InferQueryQuery
+ hir::db::TyQuery
+ hir::db::ValueTyQuery
+ hir::db::ImplSelfTyQuery
+ hir::db::ImplTraitQuery
+ hir::db::FieldTypesQuery
+ hir::db::CallableItemSignatureQuery
+ hir::db::GenericPredicatesForParamQuery
+ hir::db::GenericPredicatesQuery
+ hir::db::GenericDefaultsQuery
+ hir::db::InherentImplsInCrateQuery
+ hir::db::TraitEnvironmentQuery
+ hir::db::TraitImplsInCrateQuery
+ hir::db::TraitImplsInDepsQuery
+ hir::db::AssociatedTyDataQuery
+ hir::db::AssociatedTyDataQuery
+ hir::db::TraitDatumQuery
+ hir::db::StructDatumQuery
+ hir::db::ImplDatumQuery
+ hir::db::FnDefDatumQuery
+ hir::db::ReturnTypeImplTraitsQuery
+ hir::db::InternCallableDefQuery
+ hir::db::InternTypeOrConstParamIdQuery
+ hir::db::InternImplTraitIdQuery
+ hir::db::InternClosureQuery
+ hir::db::AssociatedTyValueQuery
+ hir::db::TraitSolveQueryQuery
+ hir::db::InternTypeOrConstParamIdQuery
+
+ // SymbolsDatabase
+ crate::symbol_index::ModuleSymbolsQuery
+ crate::symbol_index::LibrarySymbolsQuery
+ crate::symbol_index::LocalRootsQuery
+ crate::symbol_index::LibraryRootsQuery
+
+ // LineIndexDatabase
+ crate::LineIndexQuery
+
+ // InternDatabase
+ hir::db::InternFunctionQuery
+ hir::db::InternStructQuery
+ hir::db::InternUnionQuery
+ hir::db::InternEnumQuery
+ hir::db::InternConstQuery
+ hir::db::InternStaticQuery
+ hir::db::InternTraitQuery
+ hir::db::InternTypeAliasQuery
+ hir::db::InternImplQuery
+ ];
+
+ acc.sort_by_key(|it| std::cmp::Reverse(it.1));
+ acc
+ }
+}
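The intended call pattern, sketched below under assumptions (the helper `update_and_report` is illustrative and not part of this patch): edits are batched into a `Change`, applied transactionally, and `per_query_memory_usage` can then report the heaviest queries.

use std::sync::Arc;

use base_db::{Change, FileId};
use ide_db::RootDatabase;

// Illustrative sketch, not from the patch above: apply new text for one file,
// then print the five queries holding the most memory.
fn update_and_report(db: &mut RootDatabase, file_id: FileId, new_text: String) {
    let mut change = Change::new();
    change.change_file(file_id, Some(Arc::new(new_text)));
    db.apply_change(change);
    for (query, bytes) in db.per_query_memory_usage().into_iter().take(5) {
        eprintln!("{} {}", bytes, query);
    }
}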
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/assists.rs b/src/tools/rust-analyzer/crates/ide-db/src/assists.rs
new file mode 100644
index 000000000..da23763dc
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/assists.rs
@@ -0,0 +1,137 @@
+//! This module defines the `Assist` data structure. The actual assists live in
+//! the `ide_assists` downstream crate. We want to define the data structures in
+//! this low-level crate though, because `ide_diagnostics` also needs them
+//! (fixits for diagnostics and assists are the same thing under the hood). We
+//! want to compile `ide_assists` and `ide_diagnostics` in parallel though, so
+//! we pull the common definitions upstream, to this crate.
+
+use std::str::FromStr;
+
+use syntax::TextRange;
+
+use crate::{label::Label, source_change::SourceChange};
+
+#[derive(Debug, Clone)]
+pub struct Assist {
+ pub id: AssistId,
+ /// Short description of the assist, as shown in the UI.
+ pub label: Label,
+ pub group: Option<GroupLabel>,
+ /// Target ranges are used to sort assists: the smaller the target range,
+    /// the more specific the assist is, and so it should be sorted first.
+ pub target: TextRange,
+    /// Computing the source change is sometimes much more costly than computing the
+    /// other fields. Additionally, the actual change is not required to show
+    /// the lightbulb UI; it is only needed when the user tries to apply an
+    /// assist. So, we compute it lazily: the API allows requesting assists with
+    /// or without the source change. We could (and in fact, used to) distinguish
+ /// between resolved and unresolved assists at the type level, but this is
+ /// cumbersome, especially if you want to embed an assist into another data
+ /// structure, such as a diagnostic.
+ pub source_change: Option<SourceChange>,
+ pub trigger_signature_help: bool,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum AssistKind {
+ // FIXME: does the None variant make sense? Probably not.
+ None,
+
+ QuickFix,
+ Generate,
+ Refactor,
+ RefactorExtract,
+ RefactorInline,
+ RefactorRewrite,
+}
+
+impl AssistKind {
+ pub fn contains(self, other: AssistKind) -> bool {
+ if self == other {
+ return true;
+ }
+
+ match self {
+ AssistKind::None | AssistKind::Generate => true,
+ AssistKind::Refactor => matches!(
+ other,
+ AssistKind::RefactorExtract
+ | AssistKind::RefactorInline
+ | AssistKind::RefactorRewrite
+ ),
+ _ => false,
+ }
+ }
+
+ pub fn name(&self) -> &str {
+ match self {
+ AssistKind::None => "None",
+ AssistKind::QuickFix => "QuickFix",
+ AssistKind::Generate => "Generate",
+ AssistKind::Refactor => "Refactor",
+ AssistKind::RefactorExtract => "RefactorExtract",
+ AssistKind::RefactorInline => "RefactorInline",
+ AssistKind::RefactorRewrite => "RefactorRewrite",
+ }
+ }
+}
+
+impl FromStr for AssistKind {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "None" => Ok(AssistKind::None),
+ "QuickFix" => Ok(AssistKind::QuickFix),
+ "Generate" => Ok(AssistKind::Generate),
+ "Refactor" => Ok(AssistKind::Refactor),
+ "RefactorExtract" => Ok(AssistKind::RefactorExtract),
+ "RefactorInline" => Ok(AssistKind::RefactorInline),
+ "RefactorRewrite" => Ok(AssistKind::RefactorRewrite),
+ unknown => Err(format!("Unknown AssistKind: '{}'", unknown)),
+ }
+ }
+}
+
+/// Unique identifier of the assist, should not be shown to the user
+/// directly.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct AssistId(pub &'static str, pub AssistKind);
+
+/// A way to control how many assists to resolve during the assist resolution.
+/// When an assist is resolved, its edits are calculated, which might be too costly to always do by default.
+#[derive(Debug)]
+pub enum AssistResolveStrategy {
+ /// No assists should be resolved.
+ None,
+ /// All assists should be resolved.
+ All,
+ /// Only a certain assist should be resolved.
+ Single(SingleResolve),
+}
+
+/// Holds the [`AssistId`] data of a certain assist to resolve.
+/// The original id object cannot be used due to a `'static` lifetime
+/// and the requirement to construct this struct dynamically during the resolve handling.
+#[derive(Debug)]
+pub struct SingleResolve {
+ /// The id of the assist.
+ pub assist_id: String,
+    /// The kind of the assist.
+ pub assist_kind: AssistKind,
+}
+
+impl AssistResolveStrategy {
+ pub fn should_resolve(&self, id: &AssistId) -> bool {
+ match self {
+ AssistResolveStrategy::None => false,
+ AssistResolveStrategy::All => true,
+ AssistResolveStrategy::Single(single_resolve) => {
+ single_resolve.assist_id == id.0 && single_resolve.assist_kind == id.1
+ }
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct GroupLabel(pub String);
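A small usage sketch (illustrative test, not part of this patch): the resolve strategy decides whether a particular assist's edits should actually be computed. The assist ids used here are only examples.

use ide_db::assists::{AssistId, AssistKind, AssistResolveStrategy, SingleResolve};

#[test]
fn resolves_only_the_requested_assist() {
    // The client asked to resolve exactly one assist, identified by id and kind.
    let strategy = AssistResolveStrategy::Single(SingleResolve {
        assist_id: "convert_to_guarded_return".to_string(),
        assist_kind: AssistKind::RefactorRewrite,
    });
    assert!(strategy.should_resolve(&AssistId("convert_to_guarded_return", AssistKind::RefactorRewrite)));
    assert!(!strategy.should_resolve(&AssistId("generate_impl", AssistKind::Generate)));
}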
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/defs.rs b/src/tools/rust-analyzer/crates/ide-db/src/defs.rs
new file mode 100644
index 000000000..aeaca00ec
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/defs.rs
@@ -0,0 +1,545 @@
+//! `NameDefinition` keeps information about the element we want to search references for.
+//! The element is represented by `NameKind`. It's located inside some `container` and
+//! has a `visibility`, which defines a search scope.
+//! Note that reference search is not possible for all of the classified items.
+
+// FIXME: this badly needs rename/rewrite (matklad, 2020-02-06).
+
+use arrayvec::ArrayVec;
+use hir::{
+ Adt, AsAssocItem, AssocItem, BuiltinAttr, BuiltinType, Const, Crate, DeriveHelper, Field,
+ Function, GenericParam, HasVisibility, Impl, ItemInNs, Label, Local, Macro, Module, ModuleDef,
+ Name, PathResolution, Semantics, Static, ToolModule, Trait, TypeAlias, Variant, Visibility,
+};
+use stdx::impl_from;
+use syntax::{
+ ast::{self, AstNode},
+ match_ast, SyntaxKind, SyntaxNode, SyntaxToken,
+};
+
+use crate::RootDatabase;
+
+// FIXME: a more precise name would probably be `Symbol`?
+#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
+pub enum Definition {
+ Macro(Macro),
+ Field(Field),
+ Module(Module),
+ Function(Function),
+ Adt(Adt),
+ Variant(Variant),
+ Const(Const),
+ Static(Static),
+ Trait(Trait),
+ TypeAlias(TypeAlias),
+ BuiltinType(BuiltinType),
+ SelfType(Impl),
+ Local(Local),
+ GenericParam(GenericParam),
+ Label(Label),
+ DeriveHelper(DeriveHelper),
+ BuiltinAttr(BuiltinAttr),
+ ToolModule(ToolModule),
+}
+
+impl Definition {
+ pub fn canonical_module_path(&self, db: &RootDatabase) -> Option<impl Iterator<Item = Module>> {
+ self.module(db).map(|it| it.path_to_root(db).into_iter().rev())
+ }
+
+ pub fn krate(&self, db: &RootDatabase) -> Option<Crate> {
+ Some(match self {
+ Definition::Module(m) => m.krate(),
+ _ => self.module(db)?.krate(),
+ })
+ }
+
+ pub fn module(&self, db: &RootDatabase) -> Option<Module> {
+ let module = match self {
+ Definition::Macro(it) => it.module(db),
+ Definition::Module(it) => it.parent(db)?,
+ Definition::Field(it) => it.parent_def(db).module(db),
+ Definition::Function(it) => it.module(db),
+ Definition::Adt(it) => it.module(db),
+ Definition::Const(it) => it.module(db),
+ Definition::Static(it) => it.module(db),
+ Definition::Trait(it) => it.module(db),
+ Definition::TypeAlias(it) => it.module(db),
+ Definition::Variant(it) => it.module(db),
+ Definition::SelfType(it) => it.module(db),
+ Definition::Local(it) => it.module(db),
+ Definition::GenericParam(it) => it.module(db),
+ Definition::Label(it) => it.module(db),
+ Definition::DeriveHelper(it) => it.derive().module(db),
+ Definition::BuiltinAttr(_) | Definition::BuiltinType(_) | Definition::ToolModule(_) => {
+ return None
+ }
+ };
+ Some(module)
+ }
+
+ pub fn visibility(&self, db: &RootDatabase) -> Option<Visibility> {
+ let vis = match self {
+ Definition::Field(sf) => sf.visibility(db),
+ Definition::Module(it) => it.visibility(db),
+ Definition::Function(it) => it.visibility(db),
+ Definition::Adt(it) => it.visibility(db),
+ Definition::Const(it) => it.visibility(db),
+ Definition::Static(it) => it.visibility(db),
+ Definition::Trait(it) => it.visibility(db),
+ Definition::TypeAlias(it) => it.visibility(db),
+ Definition::Variant(it) => it.visibility(db),
+ Definition::BuiltinType(_) => Visibility::Public,
+ Definition::Macro(_) => return None,
+ Definition::BuiltinAttr(_)
+ | Definition::ToolModule(_)
+ | Definition::SelfType(_)
+ | Definition::Local(_)
+ | Definition::GenericParam(_)
+ | Definition::Label(_)
+ | Definition::DeriveHelper(_) => return None,
+ };
+ Some(vis)
+ }
+
+ pub fn name(&self, db: &RootDatabase) -> Option<Name> {
+ let name = match self {
+ Definition::Macro(it) => it.name(db),
+ Definition::Field(it) => it.name(db),
+ Definition::Module(it) => it.name(db)?,
+ Definition::Function(it) => it.name(db),
+ Definition::Adt(it) => it.name(db),
+ Definition::Variant(it) => it.name(db),
+ Definition::Const(it) => it.name(db)?,
+ Definition::Static(it) => it.name(db),
+ Definition::Trait(it) => it.name(db),
+ Definition::TypeAlias(it) => it.name(db),
+ Definition::BuiltinType(it) => it.name(),
+ Definition::SelfType(_) => return None,
+ Definition::Local(it) => it.name(db),
+ Definition::GenericParam(it) => it.name(db),
+ Definition::Label(it) => it.name(db),
+ Definition::BuiltinAttr(_) => return None, // FIXME
+ Definition::ToolModule(_) => return None, // FIXME
+ Definition::DeriveHelper(it) => it.name(db),
+ };
+ Some(name)
+ }
+}
+
+#[derive(Debug)]
+pub enum IdentClass {
+ NameClass(NameClass),
+ NameRefClass(NameRefClass),
+}
+
+impl IdentClass {
+ pub fn classify_node(
+ sema: &Semantics<'_, RootDatabase>,
+ node: &SyntaxNode,
+ ) -> Option<IdentClass> {
+ match_ast! {
+ match node {
+ ast::Name(name) => NameClass::classify(sema, &name).map(IdentClass::NameClass),
+ ast::NameRef(name_ref) => NameRefClass::classify(sema, &name_ref).map(IdentClass::NameRefClass),
+ ast::Lifetime(lifetime) => {
+ NameClass::classify_lifetime(sema, &lifetime)
+ .map(IdentClass::NameClass)
+ .or_else(|| NameRefClass::classify_lifetime(sema, &lifetime).map(IdentClass::NameRefClass))
+ },
+ _ => None,
+ }
+ }
+ }
+
+ pub fn classify_token(
+ sema: &Semantics<'_, RootDatabase>,
+ token: &SyntaxToken,
+ ) -> Option<IdentClass> {
+ let parent = token.parent()?;
+ Self::classify_node(sema, &parent)
+ }
+
+ pub fn classify_lifetime(
+ sema: &Semantics<'_, RootDatabase>,
+ lifetime: &ast::Lifetime,
+ ) -> Option<IdentClass> {
+ NameRefClass::classify_lifetime(sema, lifetime)
+ .map(IdentClass::NameRefClass)
+ .or_else(|| NameClass::classify_lifetime(sema, lifetime).map(IdentClass::NameClass))
+ }
+
+ pub fn definitions(self) -> ArrayVec<Definition, 2> {
+ let mut res = ArrayVec::new();
+ match self {
+ IdentClass::NameClass(NameClass::Definition(it) | NameClass::ConstReference(it)) => {
+ res.push(it)
+ }
+ IdentClass::NameClass(NameClass::PatFieldShorthand { local_def, field_ref }) => {
+ res.push(Definition::Local(local_def));
+ res.push(Definition::Field(field_ref));
+ }
+ IdentClass::NameRefClass(NameRefClass::Definition(it)) => res.push(it),
+ IdentClass::NameRefClass(NameRefClass::FieldShorthand { local_ref, field_ref }) => {
+ res.push(Definition::Local(local_ref));
+ res.push(Definition::Field(field_ref));
+ }
+ }
+ res
+ }
+}
+
+/// At first blush, a single `ast::Name` defines a single definition at some
+/// scope. That is, by just looking at the syntactic category, we can
+/// unambiguously determine the semantic category.
+///
+/// Sadly, that's not 100% true; there are special cases. To make sure that
+/// callers handle all the special cases correctly via exhaustive matching, we
+/// add a [`NameClass`] enum which lists all of them!
+///
+/// A model special case is the `None` constant in a pattern.
+#[derive(Debug)]
+pub enum NameClass {
+ Definition(Definition),
+ /// `None` in `if let None = Some(82) {}`.
+ /// Syntactically, it is a name, but semantically it is a reference.
+ ConstReference(Definition),
+ /// `field` in `if let Foo { field } = foo`. Here, `ast::Name` both introduces
+ /// a definition into a local scope, and refers to an existing definition.
+ PatFieldShorthand {
+ local_def: Local,
+ field_ref: Field,
+ },
+}
+
+impl NameClass {
+ /// `Definition` defined by this name.
+ pub fn defined(self) -> Option<Definition> {
+ let res = match self {
+ NameClass::Definition(it) => it,
+ NameClass::ConstReference(_) => return None,
+ NameClass::PatFieldShorthand { local_def, field_ref: _ } => {
+ Definition::Local(local_def)
+ }
+ };
+ Some(res)
+ }
+
+ pub fn classify(sema: &Semantics<'_, RootDatabase>, name: &ast::Name) -> Option<NameClass> {
+ let _p = profile::span("classify_name");
+
+ let parent = name.syntax().parent()?;
+
+ let definition = match_ast! {
+ match parent {
+ ast::Item(it) => classify_item(sema, it)?,
+ ast::IdentPat(it) => return classify_ident_pat(sema, it),
+ ast::Rename(it) => classify_rename(sema, it)?,
+ ast::SelfParam(it) => Definition::Local(sema.to_def(&it)?),
+ ast::RecordField(it) => Definition::Field(sema.to_def(&it)?),
+ ast::Variant(it) => Definition::Variant(sema.to_def(&it)?),
+ ast::TypeParam(it) => Definition::GenericParam(sema.to_def(&it)?.into()),
+ ast::ConstParam(it) => Definition::GenericParam(sema.to_def(&it)?.into()),
+ _ => return None,
+ }
+ };
+ return Some(NameClass::Definition(definition));
+
+ fn classify_item(
+ sema: &Semantics<'_, RootDatabase>,
+ item: ast::Item,
+ ) -> Option<Definition> {
+ let definition = match item {
+ ast::Item::MacroRules(it) => {
+ Definition::Macro(sema.to_def(&ast::Macro::MacroRules(it))?)
+ }
+ ast::Item::MacroDef(it) => {
+ Definition::Macro(sema.to_def(&ast::Macro::MacroDef(it))?)
+ }
+ ast::Item::Const(it) => Definition::Const(sema.to_def(&it)?),
+ ast::Item::Fn(it) => {
+ let def = sema.to_def(&it)?;
+ def.as_proc_macro(sema.db)
+ .map(Definition::Macro)
+ .unwrap_or(Definition::Function(def))
+ }
+ ast::Item::Module(it) => Definition::Module(sema.to_def(&it)?),
+ ast::Item::Static(it) => Definition::Static(sema.to_def(&it)?),
+ ast::Item::Trait(it) => Definition::Trait(sema.to_def(&it)?),
+ ast::Item::TypeAlias(it) => Definition::TypeAlias(sema.to_def(&it)?),
+ ast::Item::Enum(it) => Definition::Adt(hir::Adt::Enum(sema.to_def(&it)?)),
+ ast::Item::Struct(it) => Definition::Adt(hir::Adt::Struct(sema.to_def(&it)?)),
+ ast::Item::Union(it) => Definition::Adt(hir::Adt::Union(sema.to_def(&it)?)),
+ _ => return None,
+ };
+ Some(definition)
+ }
+
+ fn classify_ident_pat(
+ sema: &Semantics<'_, RootDatabase>,
+ ident_pat: ast::IdentPat,
+ ) -> Option<NameClass> {
+ if let Some(def) = sema.resolve_bind_pat_to_const(&ident_pat) {
+ return Some(NameClass::ConstReference(Definition::from(def)));
+ }
+
+ let local = sema.to_def(&ident_pat)?;
+ let pat_parent = ident_pat.syntax().parent();
+ if let Some(record_pat_field) = pat_parent.and_then(ast::RecordPatField::cast) {
+ if record_pat_field.name_ref().is_none() {
+ if let Some(field) = sema.resolve_record_pat_field(&record_pat_field) {
+ return Some(NameClass::PatFieldShorthand {
+ local_def: local,
+ field_ref: field,
+ });
+ }
+ }
+ }
+ Some(NameClass::Definition(Definition::Local(local)))
+ }
+
+ fn classify_rename(
+ sema: &Semantics<'_, RootDatabase>,
+ rename: ast::Rename,
+ ) -> Option<Definition> {
+ if let Some(use_tree) = rename.syntax().parent().and_then(ast::UseTree::cast) {
+ let path = use_tree.path()?;
+ sema.resolve_path(&path).map(Definition::from)
+ } else {
+ let extern_crate = rename.syntax().parent().and_then(ast::ExternCrate::cast)?;
+ let krate = sema.resolve_extern_crate(&extern_crate)?;
+ let root_module = krate.root_module(sema.db);
+ Some(Definition::Module(root_module))
+ }
+ }
+ }
+
+ pub fn classify_lifetime(
+ sema: &Semantics<'_, RootDatabase>,
+ lifetime: &ast::Lifetime,
+ ) -> Option<NameClass> {
+ let _p = profile::span("classify_lifetime").detail(|| lifetime.to_string());
+ let parent = lifetime.syntax().parent()?;
+
+ if let Some(it) = ast::LifetimeParam::cast(parent.clone()) {
+ sema.to_def(&it).map(Into::into).map(Definition::GenericParam)
+ } else if let Some(it) = ast::Label::cast(parent) {
+ sema.to_def(&it).map(Definition::Label)
+ } else {
+ None
+ }
+ .map(NameClass::Definition)
+ }
+}
+
+/// This is similar to [`NameClass`], but works for [`ast::NameRef`] rather than
+/// for [`ast::Name`]. Similarly, what looks like a reference in syntax is a
+/// reference most of the time, but there are a couple of annoying exceptions.
+///
+/// A model special case is field shorthand syntax, which uses a single
+/// reference to point to two different defs.
+#[derive(Debug)]
+pub enum NameRefClass {
+ Definition(Definition),
+ FieldShorthand { local_ref: Local, field_ref: Field },
+}
+
+impl NameRefClass {
+ // Note: we don't have unit-tests for this rather important function.
+ // It is primarily exercised via goto definition tests in `ide`.
+ pub fn classify(
+ sema: &Semantics<'_, RootDatabase>,
+ name_ref: &ast::NameRef,
+ ) -> Option<NameRefClass> {
+ let _p = profile::span("classify_name_ref").detail(|| name_ref.to_string());
+
+ let parent = name_ref.syntax().parent()?;
+
+ if let Some(record_field) = ast::RecordExprField::for_field_name(name_ref) {
+ if let Some((field, local, _)) = sema.resolve_record_field(&record_field) {
+ let res = match local {
+ None => NameRefClass::Definition(Definition::Field(field)),
+ Some(local) => {
+ NameRefClass::FieldShorthand { field_ref: field, local_ref: local }
+ }
+ };
+ return Some(res);
+ }
+ }
+
+ if let Some(path) = ast::PathSegment::cast(parent.clone()).map(|it| it.parent_path()) {
+ if path.parent_path().is_none() {
+ if let Some(macro_call) = path.syntax().parent().and_then(ast::MacroCall::cast) {
+ // Only use this to resolve to macro calls for last segments as qualifiers resolve
+ // to modules below.
+ if let Some(macro_def) = sema.resolve_macro_call(&macro_call) {
+ return Some(NameRefClass::Definition(Definition::Macro(macro_def)));
+ }
+ }
+ }
+ return sema.resolve_path(&path).map(Into::into).map(NameRefClass::Definition);
+ }
+
+ match_ast! {
+ match parent {
+ ast::MethodCallExpr(method_call) => {
+ sema.resolve_method_call(&method_call)
+ .map(Definition::Function)
+ .map(NameRefClass::Definition)
+ },
+ ast::FieldExpr(field_expr) => {
+ sema.resolve_field(&field_expr)
+ .map(Definition::Field)
+ .map(NameRefClass::Definition)
+ },
+ ast::RecordPatField(record_pat_field) => {
+ sema.resolve_record_pat_field(&record_pat_field)
+ .map(Definition::Field)
+ .map(NameRefClass::Definition)
+ },
+ ast::AssocTypeArg(_) => {
+ // `Trait<Assoc = Ty>`
+ // ^^^^^
+ let containing_path = name_ref.syntax().ancestors().find_map(ast::Path::cast)?;
+ let resolved = sema.resolve_path(&containing_path)?;
+ if let PathResolution::Def(ModuleDef::Trait(tr)) = resolved {
+ if let Some(ty) = tr
+ .items_with_supertraits(sema.db)
+ .iter()
+ .filter_map(|&assoc| match assoc {
+ hir::AssocItem::TypeAlias(it) => Some(it),
+ _ => None,
+ })
+ .find(|alias| alias.name(sema.db).to_smol_str() == name_ref.text().as_str())
+ {
+ return Some(NameRefClass::Definition(Definition::TypeAlias(ty)));
+ }
+ }
+ None
+ },
+ ast::ExternCrate(extern_crate) => {
+ let krate = sema.resolve_extern_crate(&extern_crate)?;
+ let root_module = krate.root_module(sema.db);
+ Some(NameRefClass::Definition(Definition::Module(root_module)))
+ },
+ _ => None
+ }
+ }
+ }
+
+ pub fn classify_lifetime(
+ sema: &Semantics<'_, RootDatabase>,
+ lifetime: &ast::Lifetime,
+ ) -> Option<NameRefClass> {
+ let _p = profile::span("classify_lifetime_ref").detail(|| lifetime.to_string());
+ let parent = lifetime.syntax().parent()?;
+ match parent.kind() {
+ SyntaxKind::BREAK_EXPR | SyntaxKind::CONTINUE_EXPR => {
+ sema.resolve_label(lifetime).map(Definition::Label).map(NameRefClass::Definition)
+ }
+ SyntaxKind::LIFETIME_ARG
+ | SyntaxKind::SELF_PARAM
+ | SyntaxKind::TYPE_BOUND
+ | SyntaxKind::WHERE_PRED
+ | SyntaxKind::REF_TYPE => sema
+ .resolve_lifetime_param(lifetime)
+ .map(GenericParam::LifetimeParam)
+ .map(Definition::GenericParam)
+ .map(NameRefClass::Definition),
+            // Lifetime bounds, as in the `'b` in `'a: 'b`, aren't wrapped in TypeBound nodes, so we have to check
+ // if our lifetime is in a LifetimeParam without being the constrained lifetime
+ _ if ast::LifetimeParam::cast(parent).and_then(|param| param.lifetime()).as_ref()
+ != Some(lifetime) =>
+ {
+ sema.resolve_lifetime_param(lifetime)
+ .map(GenericParam::LifetimeParam)
+ .map(Definition::GenericParam)
+ .map(NameRefClass::Definition)
+ }
+ _ => None,
+ }
+ }
+}
+
+impl_from!(
+ Field, Module, Function, Adt, Variant, Const, Static, Trait, TypeAlias, BuiltinType, Local,
+ GenericParam, Label, Macro
+ for Definition
+);
+
+impl From<Impl> for Definition {
+ fn from(impl_: Impl) -> Self {
+ Definition::SelfType(impl_)
+ }
+}
+
+impl AsAssocItem for Definition {
+ fn as_assoc_item(self, db: &dyn hir::db::HirDatabase) -> Option<AssocItem> {
+ match self {
+ Definition::Function(it) => it.as_assoc_item(db),
+ Definition::Const(it) => it.as_assoc_item(db),
+ Definition::TypeAlias(it) => it.as_assoc_item(db),
+ _ => None,
+ }
+ }
+}
+
+impl From<AssocItem> for Definition {
+ fn from(assoc_item: AssocItem) -> Self {
+ match assoc_item {
+ AssocItem::Function(it) => Definition::Function(it),
+ AssocItem::Const(it) => Definition::Const(it),
+ AssocItem::TypeAlias(it) => Definition::TypeAlias(it),
+ }
+ }
+}
+
+impl From<PathResolution> for Definition {
+ fn from(path_resolution: PathResolution) -> Self {
+ match path_resolution {
+ PathResolution::Def(def) => def.into(),
+ PathResolution::Local(local) => Definition::Local(local),
+ PathResolution::TypeParam(par) => Definition::GenericParam(par.into()),
+ PathResolution::ConstParam(par) => Definition::GenericParam(par.into()),
+ PathResolution::SelfType(impl_def) => Definition::SelfType(impl_def),
+ PathResolution::BuiltinAttr(attr) => Definition::BuiltinAttr(attr),
+ PathResolution::ToolModule(tool) => Definition::ToolModule(tool),
+ PathResolution::DeriveHelper(helper) => Definition::DeriveHelper(helper),
+ }
+ }
+}
+
+impl From<ModuleDef> for Definition {
+ fn from(def: ModuleDef) -> Self {
+ match def {
+ ModuleDef::Module(it) => Definition::Module(it),
+ ModuleDef::Function(it) => Definition::Function(it),
+ ModuleDef::Adt(it) => Definition::Adt(it),
+ ModuleDef::Variant(it) => Definition::Variant(it),
+ ModuleDef::Const(it) => Definition::Const(it),
+ ModuleDef::Static(it) => Definition::Static(it),
+ ModuleDef::Trait(it) => Definition::Trait(it),
+ ModuleDef::TypeAlias(it) => Definition::TypeAlias(it),
+ ModuleDef::Macro(it) => Definition::Macro(it),
+ ModuleDef::BuiltinType(it) => Definition::BuiltinType(it),
+ }
+ }
+}
+
+impl From<Definition> for Option<ItemInNs> {
+ fn from(def: Definition) -> Self {
+ let item = match def {
+ Definition::Module(it) => ModuleDef::Module(it),
+ Definition::Function(it) => ModuleDef::Function(it),
+ Definition::Adt(it) => ModuleDef::Adt(it),
+ Definition::Variant(it) => ModuleDef::Variant(it),
+ Definition::Const(it) => ModuleDef::Const(it),
+ Definition::Static(it) => ModuleDef::Static(it),
+ Definition::Trait(it) => ModuleDef::Trait(it),
+ Definition::TypeAlias(it) => ModuleDef::TypeAlias(it),
+ Definition::BuiltinType(it) => ModuleDef::BuiltinType(it),
+ _ => return None,
+ };
+ Some(ItemInNs::from(item))
+ }
+}
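A hedged sketch of how these classifiers are typically consumed (the helper `names_under_cursor` is made up for illustration, not part of this patch): a feature goes from the token under the cursor to the `Definition`s it names, then asks each one for its name.

use hir::Semantics;
use ide_db::{defs::IdentClass, RootDatabase};
use syntax::SyntaxToken;

// Illustrative sketch, not from the patch above: collect the display names of
// whatever the token defines or references (at most two, for shorthands).
fn names_under_cursor(sema: &Semantics<'_, RootDatabase>, token: &SyntaxToken) -> Vec<String> {
    IdentClass::classify_token(sema, token)
        .map(IdentClass::definitions)
        .into_iter()
        .flatten()
        .filter_map(|def| def.name(sema.db))
        .map(|name| name.to_string())
        .collect()
}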
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/famous_defs.rs b/src/tools/rust-analyzer/crates/ide-db/src/famous_defs.rs
new file mode 100644
index 000000000..c8341fed1
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/famous_defs.rs
@@ -0,0 +1,185 @@
+//! See [`FamousDefs`].
+
+use base_db::{CrateOrigin, LangCrateOrigin, SourceDatabase};
+use hir::{Crate, Enum, Macro, Module, ScopeDef, Semantics, Trait};
+
+use crate::RootDatabase;
+
+/// Helps with finding well-known things inside the standard library. This is
+/// somewhat similar to the known-paths infra inside hir, but it is different: we
+/// want to make sure that IDE-specific paths don't become interesting inside
+/// the compiler itself as well.
+///
+/// Note that, by default, rust-analyzer tests **do not** include core or std
+/// libraries. If you are writing tests for functionality using [`FamousDefs`],
+/// you'll want to include a minicore (see `test_utils::MiniCore`) declaration at
+/// the start of your tests:
+///
+/// ```
+/// //- minicore: iterator, ord, derive
+/// ```
+pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Crate);
+
+#[allow(non_snake_case)]
+impl FamousDefs<'_, '_> {
+ pub fn std(&self) -> Option<Crate> {
+ self.find_lang_crate(LangCrateOrigin::Std)
+ }
+
+ pub fn core(&self) -> Option<Crate> {
+ self.find_lang_crate(LangCrateOrigin::Core)
+ }
+
+ pub fn alloc(&self) -> Option<Crate> {
+ self.find_lang_crate(LangCrateOrigin::Alloc)
+ }
+
+ pub fn test(&self) -> Option<Crate> {
+ self.find_lang_crate(LangCrateOrigin::Test)
+ }
+
+ pub fn proc_macro(&self) -> Option<Crate> {
+ self.find_lang_crate(LangCrateOrigin::ProcMacro)
+ }
+
+ pub fn core_cmp_Ord(&self) -> Option<Trait> {
+ self.find_trait("core:cmp:Ord")
+ }
+
+ pub fn core_convert_From(&self) -> Option<Trait> {
+ self.find_trait("core:convert:From")
+ }
+
+ pub fn core_convert_Into(&self) -> Option<Trait> {
+ self.find_trait("core:convert:Into")
+ }
+
+ pub fn core_option_Option(&self) -> Option<Enum> {
+ self.find_enum("core:option:Option")
+ }
+
+ pub fn core_result_Result(&self) -> Option<Enum> {
+ self.find_enum("core:result:Result")
+ }
+
+ pub fn core_default_Default(&self) -> Option<Trait> {
+ self.find_trait("core:default:Default")
+ }
+
+ pub fn core_iter_Iterator(&self) -> Option<Trait> {
+ self.find_trait("core:iter:traits:iterator:Iterator")
+ }
+
+ pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
+ self.find_trait("core:iter:traits:collect:IntoIterator")
+ }
+
+ pub fn core_iter(&self) -> Option<Module> {
+ self.find_module("core:iter")
+ }
+
+ pub fn core_ops_Deref(&self) -> Option<Trait> {
+ self.find_trait("core:ops:Deref")
+ }
+
+ pub fn core_ops_DerefMut(&self) -> Option<Trait> {
+ self.find_trait("core:ops:DerefMut")
+ }
+
+ pub fn core_convert_AsRef(&self) -> Option<Trait> {
+ self.find_trait("core:convert:AsRef")
+ }
+
+ pub fn core_ops_ControlFlow(&self) -> Option<Enum> {
+ self.find_enum("core:ops:ControlFlow")
+ }
+
+ pub fn core_ops_Drop(&self) -> Option<Trait> {
+ self.find_trait("core:ops:Drop")
+ }
+
+ pub fn core_marker_Copy(&self) -> Option<Trait> {
+ self.find_trait("core:marker:Copy")
+ }
+
+ pub fn core_macros_builtin_derive(&self) -> Option<Macro> {
+ self.find_macro("core:macros:builtin:derive")
+ }
+
+ pub fn builtin_crates(&self) -> impl Iterator<Item = Crate> {
+ IntoIterator::into_iter([
+ self.std(),
+ self.core(),
+ self.alloc(),
+ self.test(),
+ self.proc_macro(),
+ ])
+ .flatten()
+ }
+
+ fn find_trait(&self, path: &str) -> Option<Trait> {
+ match self.find_def(path)? {
+ hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
+ _ => None,
+ }
+ }
+
+ fn find_macro(&self, path: &str) -> Option<Macro> {
+ match self.find_def(path)? {
+ hir::ScopeDef::ModuleDef(hir::ModuleDef::Macro(it)) => Some(it),
+ _ => None,
+ }
+ }
+
+ fn find_enum(&self, path: &str) -> Option<Enum> {
+ match self.find_def(path)? {
+ hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
+ _ => None,
+ }
+ }
+
+ fn find_module(&self, path: &str) -> Option<Module> {
+ match self.find_def(path)? {
+ hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
+ _ => None,
+ }
+ }
+
+ fn find_lang_crate(&self, origin: LangCrateOrigin) -> Option<Crate> {
+ let krate = self.1;
+ let db = self.0.db;
+ let crate_graph = self.0.db.crate_graph();
+ let res = krate
+ .dependencies(db)
+ .into_iter()
+ .find(|dep| crate_graph[dep.krate.into()].origin == CrateOrigin::Lang(origin))?
+ .krate;
+ Some(res)
+ }
+
+ fn find_def(&self, path: &str) -> Option<ScopeDef> {
+ let db = self.0.db;
+ let mut path = path.split(':');
+ let trait_ = path.next_back()?;
+ let lang_crate = path.next()?;
+ let lang_crate = match LangCrateOrigin::from(lang_crate) {
+ LangCrateOrigin::Other => return None,
+ lang_crate => lang_crate,
+ };
+ let std_crate = self.find_lang_crate(lang_crate)?;
+ let mut module = std_crate.root_module(db);
+ for segment in path {
+ module = module.children(db).find_map(|child| {
+ let name = child.name(db)?;
+ if name.to_smol_str() == segment {
+ Some(child)
+ } else {
+ None
+ }
+ })?;
+ }
+ let def =
+ module.scope(db, None).into_iter().find(|(name, _def)| name.to_smol_str() == trait_)?.1;
+ Some(def)
+ }
+}
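A minimal usage sketch under assumptions (`is_core_option` is a hypothetical helper, not part of this patch; `sema` and `krate` come from the calling feature): well-known items are looked up through `FamousDefs` and compared against whatever was resolved from user code.

use hir::Semantics;
use ide_db::{famous_defs::FamousDefs, RootDatabase};

// Illustrative sketch, not from the patch above: is `candidate` the
// `core::option::Option` enum as seen from the crate under analysis?
fn is_core_option(
    sema: &Semantics<'_, RootDatabase>,
    krate: hir::Crate,
    candidate: hir::Enum,
) -> bool {
    FamousDefs(sema, krate).core_option_Option() == Some(candidate)
}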
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/generated/lints.rs b/src/tools/rust-analyzer/crates/ide-db/src/generated/lints.rs
new file mode 100644
index 000000000..64dd2bb5f
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/generated/lints.rs
@@ -0,0 +1,7682 @@
+//! Generated by `sourcegen_lints`, do not edit by hand.
+
+#[derive(Clone)]
+pub struct Lint {
+ pub label: &'static str,
+ pub description: &'static str,
+}
+pub struct LintGroup {
+ pub lint: Lint,
+ pub children: &'static [&'static str],
+}
+pub const DEFAULT_LINTS: &[Lint] = &[
+ Lint {
+ label: "absolute_paths_not_starting_with_crate",
+ description: r##"fully qualified paths that start with a module name instead of `crate`, `self`, or an extern crate name"##,
+ },
+ Lint { label: "ambiguous_associated_items", description: r##"ambiguous associated items"## },
+ Lint { label: "anonymous_parameters", description: r##"detects anonymous parameters"## },
+ Lint { label: "arithmetic_overflow", description: r##"arithmetic operation overflows"## },
+ Lint {
+ label: "array_into_iter",
+ description: r##"detects calling `into_iter` on arrays in Rust 2015 and 2018"##,
+ },
+ Lint {
+ label: "asm_sub_register",
+ description: r##"using only a subset of a register for inline asm inputs"##,
+ },
+ Lint { label: "bad_asm_style", description: r##"incorrect use of inline assembly"## },
+ Lint {
+ label: "bare_trait_objects",
+ description: r##"suggest using `dyn Trait` for trait objects"##,
+ },
+ Lint {
+ label: "bindings_with_variant_name",
+ description: r##"detects pattern bindings with the same name as one of the matched variants"##,
+ },
+ Lint { label: "box_pointers", description: r##"use of owned (Box type) heap memory"## },
+ Lint {
+ label: "break_with_label_and_loop",
+ description: r##"`break` expression with label and unlabeled loop as value expression"##,
+ },
+ Lint {
+ label: "cenum_impl_drop_cast",
+ description: r##"a C-like enum implementing Drop is cast"##,
+ },
+ Lint {
+ label: "clashing_extern_declarations",
+ description: r##"detects when an extern fn has been declared with the same name but different types"##,
+ },
+ Lint {
+ label: "coherence_leak_check",
+ description: r##"distinct impls distinguished only by the leak-check code"##,
+ },
+ Lint {
+ label: "conflicting_repr_hints",
+ description: r##"conflicts between `#[repr(..)]` hints that were previously accepted and used in practice"##,
+ },
+ Lint {
+ label: "confusable_idents",
+ description: r##"detects visually confusable pairs between identifiers"##,
+ },
+ Lint {
+ label: "const_err",
+ description: r##"constant evaluation encountered erroneous expression"##,
+ },
+ Lint {
+ label: "const_evaluatable_unchecked",
+        description: r##"detects a generic constant is used in a type without emitting a warning"##,
+ },
+ Lint {
+ label: "const_item_mutation",
+ description: r##"detects attempts to mutate a `const` item"##,
+ },
+ Lint { label: "dead_code", description: r##"detect unused, unexported items"## },
+ Lint { label: "deprecated", description: r##"detects use of deprecated items"## },
+ Lint {
+ label: "deprecated_in_future",
+ description: r##"detects use of items that will be deprecated in a future version"##,
+ },
+ Lint {
+ label: "deref_into_dyn_supertrait",
+ description: r##"`Deref` implementation usage with a supertrait trait object for output might be shadowed in the future"##,
+ },
+ Lint {
+ label: "deref_nullptr",
+        description: r##"detects when a null pointer is dereferenced"##,
+ },
+ Lint {
+ label: "drop_bounds",
+ description: r##"bounds of the form `T: Drop` are most likely incorrect"##,
+ },
+ Lint {
+ label: "dyn_drop",
+ description: r##"trait objects of the form `dyn Drop` are useless"##,
+ },
+ Lint {
+ label: "elided_lifetimes_in_paths",
+ description: r##"hidden lifetime parameters in types are deprecated"##,
+ },
+ Lint {
+ label: "ellipsis_inclusive_range_patterns",
+ description: r##"`...` range patterns are deprecated"##,
+ },
+ Lint {
+ label: "enum_intrinsics_non_enums",
+ description: r##"detects calls to `core::mem::discriminant` and `core::mem::variant_count` with non-enum types"##,
+ },
+ Lint {
+ label: "explicit_outlives_requirements",
+ description: r##"outlives requirements can be inferred"##,
+ },
+ Lint {
+ label: "exported_private_dependencies",
+ description: r##"public interface leaks type from a private dependency"##,
+ },
+ Lint { label: "forbidden_lint_groups", description: r##"applying forbid to lint-groups"## },
+ Lint {
+ label: "function_item_references",
+ description: r##"suggest casting to a function pointer when attempting to take references to function items"##,
+ },
+ Lint {
+ label: "future_incompatible",
+ description: r##"lint group for: forbidden-lint-groups, illegal-floating-point-literal-pattern, private-in-public, pub-use-of-private-extern-crate, invalid-type-param-default, const-err, unaligned-references, patterns-in-fns-without-body, missing-fragment-specifier, late-bound-lifetime-arguments, order-dependent-trait-objects, coherence-leak-check, unstable-name-collisions, where-clauses-object-safety, proc-macro-derive-resolution-fallback, macro-expanded-macro-exports-accessed-by-absolute-paths, ill-formed-attribute-input, conflicting-repr-hints, ambiguous-associated-items, mutable-borrow-reservation-conflict, indirect-structural-match, pointer-structural-match, nontrivial-structural-match, soft-unstable, cenum-impl-drop-cast, const-evaluatable-unchecked, uninhabited-static, unsupported-naked-functions, invalid-doc-attributes, semicolon-in-expressions-from-macros, legacy-derive-helpers, proc-macro-back-compat, unsupported-calling-conventions, deref-into-dyn-supertrait"##,
+ },
+ Lint {
+ label: "ill_formed_attribute_input",
+ description: r##"ill-formed attribute inputs that were previously accepted and used in practice"##,
+ },
+ Lint {
+ label: "illegal_floating_point_literal_pattern",
+ description: r##"floating-point literals cannot be used in patterns"##,
+ },
+ Lint {
+ label: "improper_ctypes",
+ description: r##"proper use of libc types in foreign modules"##,
+ },
+ Lint {
+ label: "improper_ctypes_definitions",
+ description: r##"proper use of libc types in foreign item definitions"##,
+ },
+ Lint {
+ label: "incomplete_features",
+ description: r##"incomplete features that may function improperly in some or all cases"##,
+ },
+ Lint { label: "incomplete_include", description: r##"trailing content in included file"## },
+ Lint {
+ label: "indirect_structural_match",
+ description: r##"constant used in pattern contains value of non-structural-match type in a field or a variant"##,
+ },
+ Lint {
+ label: "ineffective_unstable_trait_impl",
+ description: r##"detects `#[unstable]` on stable trait implementations for stable types"##,
+ },
+ Lint {
+ label: "inline_no_sanitize",
+ description: r##"detects incompatible use of `#[inline(always)]` and `#[no_sanitize(...)]`"##,
+ },
+ Lint {
+ label: "invalid_atomic_ordering",
+ description: r##"usage of invalid atomic ordering in atomic operations and memory fences"##,
+ },
+ Lint {
+ label: "invalid_doc_attributes",
+ description: r##"detects invalid `#[doc(...)]` attributes"##,
+ },
+ Lint {
+ label: "invalid_type_param_default",
+ description: r##"type parameter default erroneously allowed in invalid location"##,
+ },
+ Lint {
+ label: "invalid_value",
+ description: r##"an invalid value is being created (such as a null reference)"##,
+ },
+ Lint {
+ label: "irrefutable_let_patterns",
+ description: r##"detects irrefutable patterns in `if let` and `while let` statements"##,
+ },
+ Lint {
+ label: "keyword_idents",
+ description: r##"detects edition keywords being used as an identifier"##,
+ },
+ Lint { label: "large_assignments", description: r##"detects large moves or copies"## },
+ Lint {
+ label: "late_bound_lifetime_arguments",
+ description: r##"detects generic lifetime arguments in path segments with late bound lifetime parameters"##,
+ },
+ Lint {
+ label: "legacy_derive_helpers",
+ description: r##"detects derive helper attributes that are used before they are introduced"##,
+ },
+ Lint {
+ label: "macro_expanded_macro_exports_accessed_by_absolute_paths",
+ description: r##"macro-expanded `macro_export` macros from the current crate cannot be referred to by absolute paths"##,
+ },
+ Lint {
+ label: "macro_use_extern_crate",
+ description: r##"the `#[macro_use]` attribute is now deprecated in favor of using macros via the module system"##,
+ },
+ Lint {
+ label: "meta_variable_misuse",
+ description: r##"possible meta-variable misuse at macro definition"##,
+ },
+ Lint { label: "missing_abi", description: r##"No declared ABI for extern declaration"## },
+ Lint {
+ label: "missing_copy_implementations",
+ description: r##"detects potentially-forgotten implementations of `Copy`"##,
+ },
+ Lint {
+ label: "missing_debug_implementations",
+ description: r##"detects missing implementations of Debug"##,
+ },
+ Lint {
+ label: "missing_docs",
+ description: r##"detects missing documentation for public members"##,
+ },
+ Lint {
+ label: "missing_fragment_specifier",
+ description: r##"detects missing fragment specifiers in unused `macro_rules!` patterns"##,
+ },
+ Lint {
+ label: "mixed_script_confusables",
+ description: r##"detects Unicode scripts whose mixed script confusables codepoints are solely used"##,
+ },
+ Lint {
+ label: "must_not_suspend",
+ description: r##"use of a `#[must_not_suspend]` value across a yield point"##,
+ },
+ Lint {
+ label: "mutable_borrow_reservation_conflict",
+ description: r##"reservation of a two-phased borrow conflicts with other shared borrows"##,
+ },
+ Lint {
+ label: "mutable_transmutes",
+ description: r##"mutating transmuted &mut T from &T may cause undefined behavior"##,
+ },
+ Lint { label: "named_asm_labels", description: r##"named labels in inline assembly"## },
+ Lint {
+ label: "no_mangle_const_items",
+ description: r##"const items will not have their symbols exported"##,
+ },
+ Lint { label: "no_mangle_generic_items", description: r##"generic items must be mangled"## },
+ Lint { label: "non_ascii_idents", description: r##"detects non-ASCII identifiers"## },
+ Lint {
+ label: "non_camel_case_types",
+ description: r##"types, variants, traits and type parameters should have camel case names"##,
+ },
+ Lint {
+ label: "non_exhaustive_omitted_patterns",
+ description: r##"detect when patterns of types marked `non_exhaustive` are missed"##,
+ },
+ Lint {
+ label: "non_fmt_panics",
+ description: r##"detect single-argument panic!() invocations in which the argument is not a format string"##,
+ },
+ Lint {
+ label: "non_shorthand_field_patterns",
+ description: r##"using `Struct { x: x }` instead of `Struct { x }` in a pattern"##,
+ },
+ Lint {
+ label: "non_snake_case",
+ description: r##"variables, methods, functions, lifetime parameters and modules should have snake case names"##,
+ },
+ Lint {
+ label: "non_upper_case_globals",
+ description: r##"static constants should have uppercase identifiers"##,
+ },
+ Lint {
+ label: "nonstandard_style",
+ description: r##"lint group for: non-camel-case-types, non-snake-case, non-upper-case-globals"##,
+ },
+ Lint {
+ label: "nontrivial_structural_match",
+ description: r##"constant used in pattern of non-structural-match type and the constant's initializer expression contains values of non-structural-match types"##,
+ },
+ Lint {
+ label: "noop_method_call",
+ description: r##"detects the use of well-known noop methods"##,
+ },
+ Lint {
+ label: "order_dependent_trait_objects",
+ description: r##"trait-object types were treated as different depending on marker-trait order"##,
+ },
+ Lint { label: "overflowing_literals", description: r##"literal out of range for its type"## },
+ Lint {
+ label: "overlapping_range_endpoints",
+ description: r##"detects range patterns with overlapping endpoints"##,
+ },
+ Lint { label: "path_statements", description: r##"path statements with no effect"## },
+ Lint {
+ label: "patterns_in_fns_without_body",
+ description: r##"patterns in functions without body were erroneously allowed"##,
+ },
+ Lint {
+ label: "pointer_structural_match",
+ description: r##"pointers are not structural-match"##,
+ },
+ Lint {
+ label: "private_in_public",
+ description: r##"detect private items in public interfaces not caught by the old implementation"##,
+ },
+ Lint {
+ label: "proc_macro_back_compat",
+ description: r##"detects usage of old versions of certain proc-macro crates"##,
+ },
+ Lint {
+ label: "proc_macro_derive_resolution_fallback",
+ description: r##"detects proc macro derives using inaccessible names from parent modules"##,
+ },
+ Lint {
+ label: "pub_use_of_private_extern_crate",
+ description: r##"detect public re-exports of private extern crates"##,
+ },
+ Lint {
+ label: "redundant_semicolons",
+ description: r##"detects unnecessary trailing semicolons"##,
+ },
+ Lint {
+ label: "renamed_and_removed_lints",
+ description: r##"lints that have been renamed or removed"##,
+ },
+ Lint {
+ label: "rust_2018_compatibility",
+ description: r##"lint group for: keyword-idents, anonymous-parameters, tyvar-behind-raw-pointer, absolute-paths-not-starting-with-crate"##,
+ },
+ Lint {
+ label: "rust_2018_idioms",
+ description: r##"lint group for: bare-trait-objects, unused-extern-crates, ellipsis-inclusive-range-patterns, elided-lifetimes-in-paths, explicit-outlives-requirements"##,
+ },
+ Lint {
+ label: "rust_2021_compatibility",
+ description: r##"lint group for: ellipsis-inclusive-range-patterns, bare-trait-objects, rust-2021-incompatible-closure-captures, rust-2021-incompatible-or-patterns, rust-2021-prelude-collisions, rust-2021-prefixes-incompatible-syntax, array-into-iter, non-fmt-panics"##,
+ },
+ Lint {
+ label: "rust_2021_incompatible_closure_captures",
+ description: r##"detects closures affected by Rust 2021 changes"##,
+ },
+ Lint {
+ label: "rust_2021_incompatible_or_patterns",
+ description: r##"detects usage of old versions of or-patterns"##,
+ },
+ Lint {
+ label: "rust_2021_prefixes_incompatible_syntax",
+ description: r##"identifiers that will be parsed as a prefix in Rust 2021"##,
+ },
+ Lint {
+ label: "rust_2021_prelude_collisions",
+ description: r##"detects the usage of trait methods which are ambiguous with traits added to the prelude in future editions"##,
+ },
+ Lint {
+ label: "semicolon_in_expressions_from_macros",
+ description: r##"trailing semicolon in macro body used as expression"##,
+ },
+ Lint {
+ label: "single_use_lifetimes",
+ description: r##"detects lifetime parameters that are only used once"##,
+ },
+ Lint {
+ label: "soft_unstable",
+ description: r##"a feature gate that doesn't break dependent crates"##,
+ },
+ Lint {
+ label: "stable_features",
+ description: r##"stable features found in `#[feature]` directive"##,
+ },
+ Lint {
+ label: "temporary_cstring_as_ptr",
+ description: r##"detects getting the inner pointer of a temporary `CString`"##,
+ },
+ Lint {
+ label: "text_direction_codepoint_in_comment",
+ description: r##"invisible directionality-changing codepoints in comment"##,
+ },
+ Lint {
+ label: "text_direction_codepoint_in_literal",
+ description: r##"detect special Unicode codepoints that affect the visual representation of text on screen, changing the direction in which text flows"##,
+ },
+ Lint {
+ label: "trivial_bounds",
+        description: r##"these bounds don't depend on any type parameters"##,
+ },
+ Lint {
+ label: "trivial_casts",
+ description: r##"detects trivial casts which could be removed"##,
+ },
+ Lint {
+ label: "trivial_numeric_casts",
+ description: r##"detects trivial casts of numeric types which could be removed"##,
+ },
+ Lint {
+ label: "type_alias_bounds",
+ description: r##"bounds in type aliases are not enforced"##,
+ },
+ Lint {
+ label: "tyvar_behind_raw_pointer",
+ description: r##"raw pointer to an inference variable"##,
+ },
+ Lint {
+ label: "unaligned_references",
+ description: r##"detects unaligned references to fields of packed structs"##,
+ },
+ Lint {
+ label: "uncommon_codepoints",
+ description: r##"detects uncommon Unicode codepoints in identifiers"##,
+ },
+ Lint {
+ label: "unconditional_panic",
+ description: r##"operation will cause a panic at runtime"##,
+ },
+ Lint {
+ label: "unconditional_recursion",
+ description: r##"functions that cannot return without calling themselves"##,
+ },
+ Lint { label: "uninhabited_static", description: r##"uninhabited static"## },
+ Lint {
+ label: "unknown_crate_types",
+ description: r##"unknown crate type found in `#[crate_type]` directive"##,
+ },
+ Lint { label: "unknown_lints", description: r##"unrecognized lint attribute"## },
+ Lint {
+ label: "unnameable_test_items",
+ description: r##"detects an item that cannot be named being marked as `#[test_case]`"##,
+ },
+ Lint { label: "unreachable_code", description: r##"detects unreachable code paths"## },
+ Lint { label: "unreachable_patterns", description: r##"detects unreachable patterns"## },
+ Lint {
+ label: "unreachable_pub",
+ description: r##"`pub` items not reachable from crate root"##,
+ },
+ Lint { label: "unsafe_code", description: r##"usage of `unsafe` code"## },
+ Lint {
+ label: "unsafe_op_in_unsafe_fn",
+ description: r##"unsafe operations in unsafe functions without an explicit unsafe block are deprecated"##,
+ },
+ Lint {
+ label: "unstable_features",
+ description: r##"enabling unstable features (deprecated. do not use)"##,
+ },
+ Lint {
+ label: "unstable_name_collisions",
+ description: r##"detects name collision with an existing but unstable method"##,
+ },
+ Lint {
+ label: "unsupported_calling_conventions",
+ description: r##"use of unsupported calling convention"##,
+ },
+ Lint {
+ label: "unsupported_naked_functions",
+ description: r##"unsupported naked function definitions"##,
+ },
+ Lint {
+ label: "unused",
+ description: r##"lint group for: unused-imports, unused-variables, unused-assignments, dead-code, unused-mut, unreachable-code, unreachable-patterns, unused-must-use, unused-unsafe, path-statements, unused-attributes, unused-macros, unused-allocation, unused-doc-comments, unused-extern-crates, unused-features, unused-labels, unused-parens, unused-braces, redundant-semicolons"##,
+ },
+ Lint {
+ label: "unused_allocation",
+ description: r##"detects unnecessary allocations that can be eliminated"##,
+ },
+ Lint {
+ label: "unused_assignments",
+ description: r##"detect assignments that will never be read"##,
+ },
+ Lint {
+ label: "unused_attributes",
+ description: r##"detects attributes that were not used by the compiler"##,
+ },
+ Lint { label: "unused_braces", description: r##"unnecessary braces around an expression"## },
+ Lint {
+ label: "unused_comparisons",
+ description: r##"comparisons made useless by limits of the types involved"##,
+ },
+ Lint {
+ label: "unused_crate_dependencies",
+ description: r##"crate dependencies that are never used"##,
+ },
+ Lint {
+ label: "unused_doc_comments",
+ description: r##"detects doc comments that aren't used by rustdoc"##,
+ },
+ Lint { label: "unused_extern_crates", description: r##"extern crates that are never used"## },
+ Lint {
+ label: "unused_features",
+ description: r##"unused features found in crate-level `#[feature]` directives"##,
+ },
+ Lint {
+ label: "unused_import_braces",
+ description: r##"unnecessary braces around an imported item"##,
+ },
+ Lint { label: "unused_imports", description: r##"imports that are never used"## },
+ Lint { label: "unused_labels", description: r##"detects labels that are never used"## },
+ Lint {
+ label: "unused_lifetimes",
+ description: r##"detects lifetime parameters that are never used"##,
+ },
+ Lint { label: "unused_macros", description: r##"detects macros that were not used"## },
+ Lint {
+ label: "unused_must_use",
+ description: r##"unused result of a type flagged as `#[must_use]`"##,
+ },
+ Lint {
+ label: "unused_mut",
+ description: r##"detect mut variables which don't need to be mutable"##,
+ },
+ Lint {
+ label: "unused_parens",
+ description: r##"`if`, `match`, `while` and `return` do not need parentheses"##,
+ },
+ Lint {
+ label: "unused_qualifications",
+ description: r##"detects unnecessarily qualified names"##,
+ },
+ Lint {
+ label: "unused_results",
+ description: r##"unused result of an expression in a statement"##,
+ },
+ Lint { label: "unused_unsafe", description: r##"unnecessary use of an `unsafe` block"## },
+ Lint {
+ label: "unused_variables",
+ description: r##"detect variables which are not used in any way"##,
+ },
+ Lint {
+ label: "useless_deprecated",
+ description: r##"detects deprecation attributes with no effect"##,
+ },
+ Lint {
+ label: "variant_size_differences",
+ description: r##"detects enums with widely varying variant sizes"##,
+ },
+ Lint {
+ label: "warnings",
+ description: r##"mass-change the level for lints which produce warnings"##,
+ },
+ Lint {
+ label: "warnings",
+ description: r##"lint group for: all lints that are set to issue warnings"##,
+ },
+ Lint {
+ label: "where_clauses_object_safety",
+ description: r##"checks the object safety of where clauses"##,
+ },
+ Lint {
+ label: "while_true",
+ description: r##"suggest using `loop { }` instead of `while true { }`"##,
+ },
+];
+pub const DEFAULT_LINT_GROUPS: &[LintGroup] = &[
+ LintGroup {
+ lint: Lint {
+ label: "future_incompatible",
+ description: r##"lint group for: forbidden-lint-groups, illegal-floating-point-literal-pattern, private-in-public, pub-use-of-private-extern-crate, invalid-type-param-default, const-err, unaligned-references, patterns-in-fns-without-body, missing-fragment-specifier, late-bound-lifetime-arguments, order-dependent-trait-objects, coherence-leak-check, unstable-name-collisions, where-clauses-object-safety, proc-macro-derive-resolution-fallback, macro-expanded-macro-exports-accessed-by-absolute-paths, ill-formed-attribute-input, conflicting-repr-hints, ambiguous-associated-items, mutable-borrow-reservation-conflict, indirect-structural-match, pointer-structural-match, nontrivial-structural-match, soft-unstable, cenum-impl-drop-cast, const-evaluatable-unchecked, uninhabited-static, unsupported-naked-functions, invalid-doc-attributes, semicolon-in-expressions-from-macros, legacy-derive-helpers, proc-macro-back-compat, unsupported-calling-conventions, deref-into-dyn-supertrait"##,
+ },
+ children: &[
+ "forbidden_lint_groups",
+ "illegal_floating_point_literal_pattern",
+ "private_in_public",
+ "pub_use_of_private_extern_crate",
+ "invalid_type_param_default",
+ "const_err",
+ "unaligned_references",
+ "patterns_in_fns_without_body",
+ "missing_fragment_specifier",
+ "late_bound_lifetime_arguments",
+ "order_dependent_trait_objects",
+ "coherence_leak_check",
+ "unstable_name_collisions",
+ "where_clauses_object_safety",
+ "proc_macro_derive_resolution_fallback",
+ "macro_expanded_macro_exports_accessed_by_absolute_paths",
+ "ill_formed_attribute_input",
+ "conflicting_repr_hints",
+ "ambiguous_associated_items",
+ "mutable_borrow_reservation_conflict",
+ "indirect_structural_match",
+ "pointer_structural_match",
+ "nontrivial_structural_match",
+ "soft_unstable",
+ "cenum_impl_drop_cast",
+ "const_evaluatable_unchecked",
+ "uninhabited_static",
+ "unsupported_naked_functions",
+ "invalid_doc_attributes",
+ "semicolon_in_expressions_from_macros",
+ "legacy_derive_helpers",
+ "proc_macro_back_compat",
+ "unsupported_calling_conventions",
+ "deref_into_dyn_supertrait",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "nonstandard_style",
+ description: r##"lint group for: non-camel-case-types, non-snake-case, non-upper-case-globals"##,
+ },
+ children: &["non_camel_case_types", "non_snake_case", "non_upper_case_globals"],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "rust_2018_compatibility",
+ description: r##"lint group for: keyword-idents, anonymous-parameters, tyvar-behind-raw-pointer, absolute-paths-not-starting-with-crate"##,
+ },
+ children: &[
+ "keyword_idents",
+ "anonymous_parameters",
+ "tyvar_behind_raw_pointer",
+ "absolute_paths_not_starting_with_crate",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "rust_2018_idioms",
+ description: r##"lint group for: bare-trait-objects, unused-extern-crates, ellipsis-inclusive-range-patterns, elided-lifetimes-in-paths, explicit-outlives-requirements"##,
+ },
+ children: &[
+ "bare_trait_objects",
+ "unused_extern_crates",
+ "ellipsis_inclusive_range_patterns",
+ "elided_lifetimes_in_paths",
+ "explicit_outlives_requirements",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "rust_2021_compatibility",
+ description: r##"lint group for: ellipsis-inclusive-range-patterns, bare-trait-objects, rust-2021-incompatible-closure-captures, rust-2021-incompatible-or-patterns, rust-2021-prelude-collisions, rust-2021-prefixes-incompatible-syntax, array-into-iter, non-fmt-panics"##,
+ },
+ children: &[
+ "ellipsis_inclusive_range_patterns",
+ "bare_trait_objects",
+ "rust_2021_incompatible_closure_captures",
+ "rust_2021_incompatible_or_patterns",
+ "rust_2021_prelude_collisions",
+ "rust_2021_prefixes_incompatible_syntax",
+ "array_into_iter",
+ "non_fmt_panics",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "unused",
+ description: r##"lint group for: unused-imports, unused-variables, unused-assignments, dead-code, unused-mut, unreachable-code, unreachable-patterns, unused-must-use, unused-unsafe, path-statements, unused-attributes, unused-macros, unused-allocation, unused-doc-comments, unused-extern-crates, unused-features, unused-labels, unused-parens, unused-braces, redundant-semicolons"##,
+ },
+ children: &[
+ "unused_imports",
+ "unused_variables",
+ "unused_assignments",
+ "dead_code",
+ "unused_mut",
+ "unreachable_code",
+ "unreachable_patterns",
+ "unused_must_use",
+ "unused_unsafe",
+ "path_statements",
+ "unused_attributes",
+ "unused_macros",
+ "unused_allocation",
+ "unused_doc_comments",
+ "unused_extern_crates",
+ "unused_features",
+ "unused_labels",
+ "unused_parens",
+ "unused_braces",
+ "redundant_semicolons",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "warnings",
+ description: r##"lint group for: all lints that are set to issue warnings"##,
+ },
+ children: &[],
+ },
+];
+
+pub const RUSTDOC_LINTS: &[Lint] = &[
+ Lint {
+ label: "rustdoc::all",
+ description: r##"lint group for: rustdoc::broken-intra-doc-links, rustdoc::private-intra-doc-links, rustdoc::missing-doc-code-examples, rustdoc::private-doc-tests, rustdoc::invalid-codeblock-attributes, rustdoc::invalid-rust-codeblocks, rustdoc::invalid-html-tags, rustdoc::bare-urls, rustdoc::missing-crate-level-docs"##,
+ },
+ Lint { label: "rustdoc::bare_urls", description: r##"detects URLs that are not hyperlinks"## },
+ Lint {
+ label: "rustdoc::broken_intra_doc_links",
+ description: r##"failures in resolving intra-doc link targets"##,
+ },
+ Lint {
+ label: "rustdoc::invalid_codeblock_attributes",
+ description: r##"codeblock attribute looks a lot like a known one"##,
+ },
+ Lint {
+ label: "rustdoc::invalid_html_tags",
+ description: r##"detects invalid HTML tags in doc comments"##,
+ },
+ Lint {
+ label: "rustdoc::invalid_rust_codeblocks",
+ description: r##"codeblock could not be parsed as valid Rust or is empty"##,
+ },
+ Lint {
+ label: "rustdoc::missing_crate_level_docs",
+ description: r##"detects crates with no crate-level documentation"##,
+ },
+ Lint {
+ label: "rustdoc::missing_doc_code_examples",
+ description: r##"detects publicly-exported items without code samples in their documentation"##,
+ },
+ Lint {
+ label: "rustdoc::private_doc_tests",
+ description: r##"detects code samples in docs of private items not documented by rustdoc"##,
+ },
+ Lint {
+ label: "rustdoc::private_intra_doc_links",
+ description: r##"linking from a public item to a private one"##,
+ },
+];
+pub const RUSTDOC_LINT_GROUPS: &[LintGroup] = &[LintGroup {
+ lint: Lint {
+ label: "rustdoc::all",
+ description: r##"lint group for: rustdoc::broken-intra-doc-links, rustdoc::private-intra-doc-links, rustdoc::missing-doc-code-examples, rustdoc::private-doc-tests, rustdoc::invalid-codeblock-attributes, rustdoc::invalid-rust-codeblocks, rustdoc::invalid-html-tags, rustdoc::bare-urls, rustdoc::missing-crate-level-docs"##,
+ },
+ children: &[
+ "rustdoc::broken_intra_doc_links",
+ "rustdoc::private_intra_doc_links",
+ "rustdoc::missing_doc_code_examples",
+ "rustdoc::private_doc_tests",
+ "rustdoc::invalid_codeblock_attributes",
+ "rustdoc::invalid_rust_codeblocks",
+ "rustdoc::invalid_html_tags",
+ "rustdoc::bare_urls",
+ "rustdoc::missing_crate_level_docs",
+ ],
+}];
+
+pub const FEATURES: &[Lint] = &[
+ Lint {
+ label: "abi_c_cmse_nonsecure_call",
+ description: r##"# `abi_c_cmse_nonsecure_call`
+
+The tracking issue for this feature is: [#81391]
+
+[#81391]: https://github.com/rust-lang/rust/issues/81391
+
+------------------------
+
+The [TrustZone-M
+feature](https://developer.arm.com/documentation/100690/latest/) is available
+for targets with the Armv8-M architecture profile (`thumbv8m` in their target
+name).
+LLVM, the Rust compiler and the linker are providing
+[support](https://developer.arm.com/documentation/ecm0359818/latest/) for the
+TrustZone-M feature.
+
+One of the things provided, with this unstable feature, is the
+`C-cmse-nonsecure-call` function ABI. This ABI is used on function pointers to
+non-secure code to mark a non-secure function call (see [section
+5.5](https://developer.arm.com/documentation/ecm0359818/latest/) for details).
+
+With this ABI, the compiler will do the following to perform the call:
+* save registers needed after the call to Secure memory
+* clear all registers that might contain confidential information
+* clear the Least Significant Bit of the function address
+* branch using the BLXNS instruction
+
+To avoid using the non-secure stack, the compiler will constrain the number and
+type of parameters/return value.
+
+The `extern "C-cmse-nonsecure-call"` ABI is otherwise equivalent to the
+`extern "C"` ABI.
+
+<!-- NOTE(ignore) this example is specific to thumbv8m targets -->
+
+``` rust,ignore
+#![no_std]
+#![feature(abi_c_cmse_nonsecure_call)]
+
+#[no_mangle]
+pub fn call_nonsecure_function(addr: usize) -> u32 {
+ let non_secure_function =
+ unsafe { core::mem::transmute::<usize, extern "C-cmse-nonsecure-call" fn() -> u32>(addr) };
+ non_secure_function()
+}
+```
+
+``` text
+$ rustc --emit asm --crate-type lib --target thumbv8m.main-none-eabi function.rs
+
+call_nonsecure_function:
+ .fnstart
+ .save {r7, lr}
+ push {r7, lr}
+ .setfp r7, sp
+ mov r7, sp
+ .pad #16
+ sub sp, #16
+ str r0, [sp, #12]
+ ldr r0, [sp, #12]
+ str r0, [sp, #8]
+ b .LBB0_1
+.LBB0_1:
+ ldr r0, [sp, #8]
+ push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+ bic r0, r0, #1
+ mov r1, r0
+ mov r2, r0
+ mov r3, r0
+ mov r4, r0
+ mov r5, r0
+ mov r6, r0
+ mov r7, r0
+ mov r8, r0
+ mov r9, r0
+ mov r10, r0
+ mov r11, r0
+ mov r12, r0
+ msr apsr_nzcvq, r0
+ blxns r0
+ pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+ str r0, [sp, #4]
+ b .LBB0_2
+.LBB0_2:
+ ldr r0, [sp, #4]
+ add sp, #16
+ pop {r7, pc}
+```
+"##,
+ },
+ Lint {
+ label: "abi_msp430_interrupt",
+ description: r##"# `abi_msp430_interrupt`
+
+The tracking issue for this feature is: [#38487]
+
+[#38487]: https://github.com/rust-lang/rust/issues/38487
+
+------------------------
+
+In the MSP430 architecture, interrupt handlers have a special calling
+convention. You can use the `"msp430-interrupt"` ABI to make the compiler apply
+the right calling convention to the interrupt handlers you define.
+
+<!-- NOTE(ignore) this example is specific to the msp430 target -->
+
+``` rust,ignore
+#![feature(abi_msp430_interrupt)]
+#![no_std]
+
+// Place the interrupt handler at the appropriate memory address
+// (Alternatively, you can use `#[used]` and remove `pub` and `#[no_mangle]`)
+#[link_section = "__interrupt_vector_10"]
+#[no_mangle]
+pub static TIM0_VECTOR: extern "msp430-interrupt" fn() = tim0;
+
+// The interrupt handler
+extern "msp430-interrupt" fn tim0() {
+ // ..
+}
+```
+
+``` text
+$ msp430-elf-objdump -CD ./target/msp430/release/app
+Disassembly of section __interrupt_vector_10:
+
+0000fff2 <TIM0_VECTOR>:
+ fff2: 00 c0 interrupt service routine at 0xc000
+
+Disassembly of section .text:
+
+0000c000 <int::tim0>:
+ c000: 00 13 reti
+```
+"##,
+ },
+ Lint {
+ label: "abi_ptx",
+ description: r##"# `abi_ptx`
+
+The tracking issue for this feature is: [#38788]
+
+[#38788]: https://github.com/rust-lang/rust/issues/38788
+
+------------------------
+
+When emitting PTX code, all vanilla Rust functions (`fn`) get translated to
+"device" functions. These functions are *not* callable from the host via the
+CUDA API so a crate with only device functions is not too useful!
+
+OTOH, "global" functions *can* be called by the host; you can think of them
+as the real public API of your crate. To produce a global function use the
+`"ptx-kernel"` ABI.
+
+<!-- NOTE(ignore) this example is specific to the nvptx targets -->
+
+``` rust,ignore
+#![feature(abi_ptx)]
+#![no_std]
+
+pub unsafe extern "ptx-kernel" fn global_function() {
+ device_function();
+}
+
+pub fn device_function() {
+ // ..
+}
+```
+
+``` text
+$ xargo rustc --target nvptx64-nvidia-cuda --release -- --emit=asm
+
+$ cat $(find -name '*.s')
+//
+// Generated by LLVM NVPTX Back-End
+//
+
+.version 3.2
+.target sm_20
+.address_size 64
+
+ // .globl _ZN6kernel15global_function17h46111ebe6516b382E
+
+.visible .entry _ZN6kernel15global_function17h46111ebe6516b382E()
+{
+
+
+ ret;
+}
+
+ // .globl _ZN6kernel15device_function17hd6a0e4993bbf3f78E
+.visible .func _ZN6kernel15device_function17hd6a0e4993bbf3f78E()
+{
+
+
+ ret;
+}
+```
+"##,
+ },
+ Lint {
+ label: "abi_thiscall",
+ description: r##"# `abi_thiscall`
+
+The tracking issue for this feature is: [#42202]
+
+[#42202]: https://github.com/rust-lang/rust/issues/42202
+
+------------------------
+
+The MSVC ABI on x86 Windows uses the `thiscall` calling convention for C++
+instance methods by default; it is identical to the usual (C) calling
+convention on x86 Windows except that the first parameter of the method,
+the `this` pointer, is passed in the ECX register.
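+
+A minimal, hypothetical sketch of declaring such a method through FFI (the
+function and its signature are made up for illustration; this is specific to
+x86 Windows targets):
+
+``` rust,ignore
+#![feature(abi_thiscall)]
+
+extern "thiscall" {
+    // The first argument is the `this` pointer, passed in the ECX register.
+    fn method(this: *mut std::os::raw::c_void, arg: u32) -> u32;
+}
+```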
+"##,
+ },
+ Lint {
+ label: "allocator_api",
+ description: r##"# `allocator_api`
+
+The tracking issue for this feature is [#32838]
+
+[#32838]: https://github.com/rust-lang/rust/issues/32838
+
+------------------------
+
+Sometimes you want the memory for one collection to use a different
+allocator than the memory for another collection. In this case,
+replacing the global allocator is not a workable option. Instead,
+you need to pass in an instance of an `AllocRef` to each collection
+for which you want a custom allocator.
+
+TBD
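+
+A brief sketch of the kind of API this enables (not part of the book text; it
+uses the unstable allocator-aware `Vec::new_in`, and the exact trait names
+have changed over time):
+
+```rust
+#![feature(allocator_api)]
+
+use std::alloc::System;
+
+fn main() {
+    // `Vec::new_in` takes an explicit allocator instead of the global one;
+    // `System` stands in here for a custom allocator.
+    let mut v: Vec<u32, System> = Vec::new_in(System);
+    v.push(1);
+    assert_eq!(v.len(), 1);
+}
+```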
+"##,
+ },
+ Lint {
+ label: "allocator_internals",
+ description: r##"# `allocator_internals`
+
+This feature does not have a tracking issue, it is an unstable implementation
+detail of the `global_allocator` feature not intended for use outside the
+compiler.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "arbitrary_enum_discriminant",
+ description: r##"# `arbitrary_enum_discriminant`
+
+The tracking issue for this feature is: [#60553]
+
+[#60553]: https://github.com/rust-lang/rust/issues/60553
+
+------------------------
+
+The `arbitrary_enum_discriminant` feature permits tuple-like and
+struct-like enum variants with `#[repr(<int-type>)]` to have explicit discriminants.
+
+## Examples
+
+```rust
+#![feature(arbitrary_enum_discriminant)]
+
+#[allow(dead_code)]
+#[repr(u8)]
+enum Enum {
+ Unit = 3,
+ Tuple(u16) = 2,
+ Struct {
+ a: u8,
+ b: u16,
+ } = 1,
+}
+
+impl Enum {
+ fn tag(&self) -> u8 {
+ unsafe { *(self as *const Self as *const u8) }
+ }
+}
+
+assert_eq!(3, Enum::Unit.tag());
+assert_eq!(2, Enum::Tuple(5).tag());
+assert_eq!(1, Enum::Struct{a: 7, b: 11}.tag());
+```
+"##,
+ },
+ Lint {
+ label: "asm_const",
+ description: r##"# `asm_const`
+
+The tracking issue for this feature is: [#72016]
+
+[#72016]: https://github.com/rust-lang/rust/issues/72016
+
+------------------------
+
+This feature adds a `const <expr>` operand type to `asm!` and `global_asm!`.
+- `<expr>` must be an integer constant expression.
+- The value of the expression is formatted as a string and substituted directly into the asm template string.
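+
+A minimal sketch (not from the tracking issue; specific to x86_64 targets):
+
+``` rust,ignore
+#![feature(asm_const)]
+
+use std::arch::asm;
+
+const N: u64 = 5;
+
+fn main() {
+    let x: u64;
+    unsafe {
+        // `const N` is formatted as the string "5" and pasted into the
+        // template, producing `mov {0}, 5`.
+        asm!("mov {0}, {1}", out(reg) x, const N);
+    }
+    assert_eq!(x, N);
+}
+```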
+"##,
+ },
+ Lint {
+ label: "asm_experimental_arch",
+ description: r##"# `asm_experimental_arch`
+
+The tracking issue for this feature is: [#72016]
+
+[#72016]: https://github.com/rust-lang/rust/issues/72016
+
+------------------------
+
+This feature tracks `asm!` and `global_asm!` support for the following architectures:
+- NVPTX
+- PowerPC
+- Hexagon
+- MIPS32r2 and MIPS64r2
+- wasm32
+- BPF
+- SPIR-V
+- AVR
+
+## Register classes
+
+| Architecture | Register class | Registers | LLVM constraint code |
+| ------------ | -------------- | ---------------------------------- | -------------------- |
+| MIPS | `reg` | `$[2-25]` | `r` |
+| MIPS | `freg` | `$f[0-31]` | `f` |
+| NVPTX | `reg16` | None\* | `h` |
+| NVPTX | `reg32` | None\* | `r` |
+| NVPTX | `reg64` | None\* | `l` |
+| Hexagon | `reg` | `r[0-28]` | `r` |
+| PowerPC | `reg` | `r[0-31]` | `r` |
+| PowerPC | `reg_nonzero` | `r[1-31]` | `b` |
+| PowerPC | `freg` | `f[0-31]` | `f` |
+| PowerPC | `cr` | `cr[0-7]`, `cr` | Only clobbers |
+| PowerPC | `xer` | `xer` | Only clobbers |
+| wasm32 | `local` | None\* | `r` |
+| BPF | `reg` | `r[0-10]` | `r` |
+| BPF | `wreg` | `w[0-10]` | `w` |
+| AVR | `reg` | `r[2-25]`, `XH`, `XL`, `ZH`, `ZL` | `r` |
+| AVR | `reg_upper` | `r[16-25]`, `XH`, `XL`, `ZH`, `ZL` | `d` |
+| AVR | `reg_pair` | `r3r2` .. `r25r24`, `X`, `Z` | `r` |
+| AVR | `reg_iw` | `r25r24`, `X`, `Z` | `w` |
+| AVR | `reg_ptr` | `X`, `Z` | `e` |
+
+> **Notes**:
+> - NVPTX doesn't have a fixed register set, so named registers are not supported.
+>
+> - WebAssembly doesn't have registers, so named registers are not supported.
+
+## Register class supported types
+
+| Architecture | Register class | Target feature | Allowed types |
+| ------------ | ------------------------------- | -------------- | --------------------------------------- |
+| MIPS32 | `reg` | None | `i8`, `i16`, `i32`, `f32` |
+| MIPS32 | `freg` | None | `f32`, `f64` |
+| MIPS64 | `reg` | None | `i8`, `i16`, `i32`, `i64`, `f32`, `f64` |
+| MIPS64 | `freg` | None | `f32`, `f64` |
+| NVPTX | `reg16` | None | `i8`, `i16` |
+| NVPTX | `reg32` | None | `i8`, `i16`, `i32`, `f32` |
+| NVPTX | `reg64` | None | `i8`, `i16`, `i32`, `f32`, `i64`, `f64` |
+| Hexagon | `reg` | None | `i8`, `i16`, `i32`, `f32` |
+| PowerPC | `reg` | None | `i8`, `i16`, `i32` |
+| PowerPC | `reg_nonzero` | None | `i8`, `i16`, `i32` |
+| PowerPC | `freg` | None | `f32`, `f64` |
+| PowerPC | `cr` | N/A | Only clobbers |
+| PowerPC | `xer` | N/A | Only clobbers |
+| wasm32 | `local` | None | `i8` `i16` `i32` `i64` `f32` `f64` |
+| BPF | `reg` | None | `i8` `i16` `i32` `i64` |
+| BPF | `wreg` | `alu32` | `i8` `i16` `i32` |
+| AVR | `reg`, `reg_upper` | None | `i8` |
+| AVR | `reg_pair`, `reg_iw`, `reg_ptr` | None | `i16` |
+
+## Register aliases
+
+| Architecture | Base register | Aliases |
+| ------------ | ------------- | --------- |
+| Hexagon | `r29` | `sp` |
+| Hexagon | `r30` | `fr` |
+| Hexagon | `r31` | `lr` |
+| BPF | `r[0-10]` | `w[0-10]` |
+| AVR | `XH` | `r27` |
+| AVR | `XL` | `r26` |
+| AVR | `ZH` | `r31` |
+| AVR | `ZL` | `r30` |
+
+## Unsupported registers
+
+| Architecture | Unsupported register | Reason |
+| ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| All | `sp` | The stack pointer must be restored to its original value at the end of an asm code block. |
+| All | `fr` (Hexagon), `$fp` (MIPS), `Y` (AVR) | The frame pointer cannot be used as an input or output. |
+| All | `r19` (Hexagon) | This is used internally by LLVM as a "base pointer" for functions with complex stack frames. |
+| MIPS | `$0` or `$zero` | This is a constant zero register which can't be modified. |
+| MIPS | `$1` or `$at` | Reserved for assembler. |
+| MIPS | `$26`/`$k0`, `$27`/`$k1` | OS-reserved registers. |
+| MIPS | `$28`/`$gp` | Global pointer cannot be used as inputs or outputs. |
+| MIPS | `$ra` | Return address cannot be used as inputs or outputs. |
+| Hexagon | `lr` | This is the link register which cannot be used as an input or output. |
+| AVR | `r0`, `r1`, `r1r0` | Due to an issue in LLVM, the `r0` and `r1` registers cannot be used as inputs or outputs. If modified, they must be restored to their original values before the end of the block. |
+
+## Template modifiers
+
+| Architecture | Register class | Modifier | Example output | LLVM modifier |
+| ------------ | -------------- | -------- | -------------- | ------------- |
+| MIPS | `reg` | None | `$2` | None |
+| MIPS | `freg` | None | `$f0` | None |
+| NVPTX | `reg16` | None | `rs0` | None |
+| NVPTX | `reg32` | None | `r0` | None |
+| NVPTX | `reg64` | None | `rd0` | None |
+| Hexagon | `reg` | None | `r0` | None |
+| PowerPC | `reg` | None | `0` | None |
+| PowerPC | `reg_nonzero` | None | `3` | `b` |
+| PowerPC | `freg` | None | `0` | None |
+
+## Flags covered by `preserves_flags`
+
+These flags registers must be restored upon exiting the asm block if the `preserves_flags` option is set:
+- AVR
+ - The status register `SREG`.
+"##,
+ },
+ Lint {
+ label: "asm_sym",
+ description: r##"# `asm_sym`
+
+The tracking issue for this feature is: [#72016]
+
+[#72016]: https://github.com/rust-lang/rust/issues/72016
+
+------------------------
+
+This feature adds a `sym <path>` operand type to `asm!` and `global_asm!`.
+- `<path>` must refer to a `fn` or `static`.
+- A mangled symbol name referring to the item is substituted into the asm template string.
+- The substituted string does not include any modifiers (e.g. GOT, PLT, relocations, etc).
+- `<path>` is allowed to point to a `#[thread_local]` static, in which case the asm code can combine the symbol with relocations (e.g. `@plt`, `@TPOFF`) to read from thread-local data.
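+
+A minimal sketch (not from the tracking issue; specific to x86_64 targets, and
+the function name is arbitrary):
+
+``` rust,ignore
+#![feature(asm_sym)]
+
+use std::arch::asm;
+
+extern "C" fn callee() {}
+
+fn caller() {
+    unsafe {
+        // `sym callee` substitutes the mangled symbol name of `callee` into
+        // the template.
+        asm!("call {}", sym callee, clobber_abi("C"));
+    }
+}
+```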
+"##,
+ },
+ Lint {
+ label: "asm_unwind",
+ description: r##"# `asm_unwind`
+
+The tracking issue for this feature is: [#72016]
+
+[#72016]: https://github.com/rust-lang/rust/issues/72016
+
+------------------------
+
+This feature adds a `may_unwind` option to `asm!` which allows an `asm` block to unwind stack and be part of the stack unwinding process. This option is only supported by the LLVM backend right now.
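+
+A minimal sketch of enabling the option (the empty asm body is purely
+illustrative):
+
+```rust
+#![feature(asm_unwind)]
+
+use std::arch::asm;
+
+fn may_unwind_block() {
+    unsafe {
+        // With `may_unwind`, unwinding out of the asm block is allowed to
+        // propagate through the surrounding Rust frames.
+        asm!("", options(may_unwind));
+    }
+}
+```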
+"##,
+ },
+ Lint {
+ label: "auto_traits",
+ description: r##"# `auto_traits`
+
+The tracking issue for this feature is [#13231]
+
+[#13231]: https://github.com/rust-lang/rust/issues/13231
+
+----
+
+The `auto_traits` feature gate allows you to define auto traits.
+
+Auto traits, like [`Send`] or [`Sync`] in the standard library, are marker traits
+that are automatically implemented for every type, unless the type, or a type it contains,
+has explicitly opted out via a negative impl. (Negative impls are separately controlled
+by the `negative_impls` feature.)
+
+[`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html
+[`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
+
+```rust,ignore (partial-example)
+impl !Trait for Type {}
+```
+
+Example:
+
+```rust
+#![feature(negative_impls)]
+#![feature(auto_traits)]
+
+auto trait Valid {}
+
+struct True;
+struct False;
+
+impl !Valid for False {}
+
+struct MaybeValid<T>(T);
+
+fn must_be_valid<T: Valid>(_t: T) { }
+
+fn main() {
+ // works
+ must_be_valid( MaybeValid(True) );
+
+ // compiler error - trait bound not satisfied
+ // must_be_valid( MaybeValid(False) );
+}
+```
+
+## Automatic trait implementations
+
+When a type is declared as an `auto trait`, we will automatically
+create impls for every struct/enum/union, unless an explicit impl is
+provided. These automatic impls contain a where clause for each field
+of the form `T: AutoTrait`, where `T` is the type of the field and
+`AutoTrait` is the auto trait in question. As an example, consider the
+struct `List` and the auto trait `Send`:
+
+```rust
+struct List<T> {
+ data: T,
+ next: Option<Box<List<T>>>,
+}
+```
+
+Presuming that there is no explicit impl of `Send` for `List`, the
+compiler will supply an automatic impl of the form:
+
+```rust
+struct List<T> {
+ data: T,
+ next: Option<Box<List<T>>>,
+}
+
+unsafe impl<T> Send for List<T>
+where
+ T: Send, // from the field `data`
+ Option<Box<List<T>>>: Send, // from the field `next`
+{ }
+```
+
+Explicit impls may be either positive or negative. They take the form:
+
+```rust,ignore (partial-example)
+impl<...> AutoTrait for StructName<..> { }
+impl<...> !AutoTrait for StructName<..> { }
+```
+
+## Coinduction: Auto traits permit cyclic matching
+
+Unlike ordinary trait matching, auto traits are **coinductive**. This
+means, in short, that cycles which occur in trait matching are
+considered ok. As an example, consider the recursive struct `List`
+introduced in the previous section. In attempting to determine whether
+`List: Send`, we would wind up in a cycle: to apply the impl, we must
+show that `Option<Box<List>>: Send`, which will in turn require
+`Box<List>: Send` and then finally `List: Send` again. Under ordinary
+trait matching, this cycle would be an error, but for an auto trait it
+is considered a successful match.
+
+## Items
+
+Auto traits cannot have any trait items, such as methods or associated types. This ensures that we can generate default implementations.
+
+## Supertraits
+
+Auto traits cannot have supertraits. This is for soundness reasons, as the interaction of coinduction with implied bounds is difficult to reconcile.
+"##,
+ },
+ Lint {
+ label: "box_patterns",
+ description: r##"# `box_patterns`
+
+The tracking issue for this feature is: [#29641]
+
+[#29641]: https://github.com/rust-lang/rust/issues/29641
+
+See also [`box_syntax`](box-syntax.md)
+
+------------------------
+
+Box patterns let you match on `Box<T>`s:
+
+
+```rust
+#![feature(box_patterns)]
+
+fn main() {
+ let b = Some(Box::new(5));
+ match b {
+ Some(box n) if n < 0 => {
+ println!("Box contains negative number {}", n);
+ },
+ Some(box n) if n >= 0 => {
+ println!("Box contains non-negative number {}", n);
+ },
+ None => {
+ println!("No box");
+ },
+ _ => unreachable!()
+ }
+}
+```
+"##,
+ },
+ Lint {
+ label: "box_syntax",
+ description: r##"# `box_syntax`
+
+The tracking issue for this feature is: [#49733]
+
+[#49733]: https://github.com/rust-lang/rust/issues/49733
+
+See also [`box_patterns`](box-patterns.md)
+
+------------------------
+
+Currently the only stable way to create a `Box` is via the `Box::new` method.
+Also it is not possible in stable Rust to destructure a `Box` in a match
+pattern. The unstable `box` keyword can be used to create a `Box`. An example
+usage would be:
+
+```rust
+#![feature(box_syntax)]
+
+fn main() {
+ let b = box 5;
+}
+```
+"##,
+ },
+ Lint {
+ label: "c_unwind",
+ description: r##"# `c_unwind`
+
+The tracking issue for this feature is: [#74990]
+
+[#74990]: https://github.com/rust-lang/rust/issues/74990
+
+------------------------
+
+Introduces four new ABI strings: "C-unwind", "stdcall-unwind",
+"thiscall-unwind", and "system-unwind". These enable unwinding from other
+languages (such as C++) into Rust frames and from Rust into other languages.
+
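+A hypothetical sketch (the foreign function name is made up for illustration):
+
+``` rust,ignore
+#![feature(c_unwind)]
+
+extern "C-unwind" {
+    // A C++ function that may raise an exception; with "C-unwind" the unwind
+    // may propagate through the calling Rust frame instead of aborting.
+    fn cpp_function_that_may_throw();
+}
+
+fn call_it() {
+    unsafe { cpp_function_that_may_throw() }
+}
+```
+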
+See [RFC 2945] for more information.
+
+[RFC 2945]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
+"##,
+ },
+ Lint {
+ label: "c_variadic",
+ description: r##"# `c_variadic`
+
+The tracking issue for this feature is: [#44930]
+
+[#44930]: https://github.com/rust-lang/rust/issues/44930
+
+------------------------
+
+The `c_variadic` language feature enables C-variadic functions to be
+defined in Rust. They may be called both from within Rust and via FFI.
+
+## Examples
+
+```rust
+#![feature(c_variadic)]
+
+pub unsafe extern "C" fn add(n: usize, mut args: ...) -> usize {
+ let mut sum = 0;
+ for _ in 0..n {
+ sum += args.arg::<usize>();
+ }
+ sum
+}
+```
+"##,
+ },
+ Lint {
+ label: "c_variadic",
+ description: r##"# `c_variadic`
+
+The tracking issue for this feature is: [#44930]
+
+[#44930]: https://github.com/rust-lang/rust/issues/44930
+
+------------------------
+
+The `c_variadic` library feature exposes the `VaList` structure,
+Rust's analogue of C's `va_list` type.
+
+## Examples
+
+```rust
+#![feature(c_variadic)]
+
+use std::ffi::VaList;
+
+pub unsafe extern "C" fn vadd(n: usize, mut args: VaList) -> usize {
+ let mut sum = 0;
+ for _ in 0..n {
+ sum += args.arg::<usize>();
+ }
+ sum
+}
+```
+"##,
+ },
+ Lint {
+ label: "c_void_variant",
+ description: r##"# `c_void_variant`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "cfg_panic",
+ description: r##"# `cfg_panic`
+
+The tracking issue for this feature is: [#77443]
+
+[#77443]: https://github.com/rust-lang/rust/issues/77443
+
+------------------------
+
+The `cfg_panic` feature makes it possible to execute different code
+depending on the panic strategy.
+
+Possible values at the moment are `"unwind"` or `"abort"`, although
+it is possible that new panic strategies may be added to Rust in the
+future.
+
+## Examples
+
+```rust
+#![feature(cfg_panic)]
+
+#[cfg(panic = "unwind")]
+fn a() {
+ // ...
+}
+
+#[cfg(not(panic = "unwind"))]
+fn a() {
+ // ...
+}
+
+fn b() {
+ if cfg!(panic = "abort") {
+ // ...
+ } else {
+ // ...
+ }
+}
+```
+"##,
+ },
+ Lint {
+ label: "cfg_sanitize",
+ description: r##"# `cfg_sanitize`
+
+The tracking issue for this feature is: [#39699]
+
+[#39699]: https://github.com/rust-lang/rust/issues/39699
+
+------------------------
+
+The `cfg_sanitize` feature makes it possible to execute different code
+depending on whether a particular sanitizer is enabled or not.
+
+## Examples
+
+```rust
+#![feature(cfg_sanitize)]
+
+#[cfg(sanitize = "thread")]
+fn a() {
+ // ...
+}
+
+#[cfg(not(sanitize = "thread"))]
+fn a() {
+ // ...
+}
+
+fn b() {
+ if cfg!(sanitize = "leak") {
+ // ...
+ } else {
+ // ...
+ }
+}
+```
+"##,
+ },
+ Lint {
+ label: "cfg_version",
+ description: r##"# `cfg_version`
+
+The tracking issue for this feature is: [#64796]
+
+[#64796]: https://github.com/rust-lang/rust/issues/64796
+
+------------------------
+
+The `cfg_version` feature makes it possible to execute different code
+depending on the compiler version. It will return true if the compiler
+version is greater than or equal to the specified version.
+
+## Examples
+
+```rust
+#![feature(cfg_version)]
+
+#[cfg(version("1.42"))] // 1.42 and above
+fn a() {
+ // ...
+}
+
+#[cfg(not(version("1.42")))] // 1.41 and below
+fn a() {
+ // ...
+}
+
+fn b() {
+ if cfg!(version("1.42")) {
+ // ...
+ } else {
+ // ...
+ }
+}
+```
+"##,
+ },
+ Lint {
+ label: "char_error_internals",
+ description: r##"# `char_error_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "closure_track_caller",
+ description: r##"# `closure_track_caller`
+
+The tracking issue for this feature is: [#87417]
+
+[#87417]: https://github.com/rust-lang/rust/issues/87417
+
+------------------------
+
+Allows using the `#[track_caller]` attribute on closures and generators.
+Calls made to the closure or generator will have caller information
+available through `std::panic::Location::caller()`, just like using
+`#[track_caller]` on a function.
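+
+A minimal sketch (not from the tracking issue; on current nightlies placing an
+attribute on a closure expression also needs the separate
+`stmt_expr_attributes` feature):
+
+```rust
+#![feature(closure_track_caller)]
+#![feature(stmt_expr_attributes)]
+
+fn main() {
+    // The location reported inside the closure is that of its caller.
+    let whereami = #[track_caller] || std::panic::Location::caller();
+    println!("called from {}", whereami());
+}
+```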
+"##,
+ },
+ Lint {
+ label: "cmse_nonsecure_entry",
+ description: r##"# `cmse_nonsecure_entry`
+
+The tracking issue for this feature is: [#75835]
+
+[#75835]: https://github.com/rust-lang/rust/issues/75835
+
+------------------------
+
+The [TrustZone-M
+feature](https://developer.arm.com/documentation/100690/latest/) is available
+for targets with the Armv8-M architecture profile (`thumbv8m` in their target
+name).
+LLVM, the Rust compiler and the linker are providing
+[support](https://developer.arm.com/documentation/ecm0359818/latest/) for the
+TrustZone-M feature.
+
+One of the things provided, with this unstable feature, is the
+`cmse_nonsecure_entry` attribute. This attribute marks a Secure function as an
+entry function (see [section
+5.4](https://developer.arm.com/documentation/ecm0359818/latest/) for details).
+With this attribute, the compiler will do the following:
+* add a special symbol on the function which is the `__acle_se_` prefix and the
+ standard function name
+* constrain the number of parameters to avoid using the Non-Secure stack
+* before returning from the function, clear registers that might contain Secure
+ information
+* use the `BXNS` instruction to return
+
+Because the stack cannot be used to pass parameters, there will be compilation
+errors if:
+* the total size of all parameters is too big (for example more than four
+  32-bit integers)
+* the entry function is not using a C ABI
+
+The special symbol `__acle_se_` will be used by the linker to generate a secure
+gateway veneer.
+
+<!-- NOTE(ignore) this example is specific to thumbv8m targets -->
+
+``` rust,ignore
+#![feature(cmse_nonsecure_entry)]
+
+#[no_mangle]
+#[cmse_nonsecure_entry]
+pub extern "C" fn entry_function(input: u32) -> u32 {
+ input + 6
+}
+```
+
+``` text
+$ rustc --emit obj --crate-type lib --target thumbv8m.main-none-eabi function.rs
+$ arm-none-eabi-objdump -D function.o
+
+00000000 <entry_function>:
+ 0: b580 push {r7, lr}
+ 2: 466f mov r7, sp
+ 4: b082 sub sp, #8
+ 6: 9001 str r0, [sp, #4]
+ 8: 1d81 adds r1, r0, #6
+ a: 460a mov r2, r1
+ c: 4281 cmp r1, r0
+ e: 9200 str r2, [sp, #0]
+ 10: d30b bcc.n 2a <entry_function+0x2a>
+ 12: e7ff b.n 14 <entry_function+0x14>
+ 14: 9800 ldr r0, [sp, #0]
+ 16: b002 add sp, #8
+ 18: e8bd 4080 ldmia.w sp!, {r7, lr}
+ 1c: 4671 mov r1, lr
+ 1e: 4672 mov r2, lr
+ 20: 4673 mov r3, lr
+ 22: 46f4 mov ip, lr
+ 24: f38e 8800 msr CPSR_f, lr
+ 28: 4774 bxns lr
+ 2a: f240 0000 movw r0, #0
+ 2e: f2c0 0000 movt r0, #0
+ 32: f240 0200 movw r2, #0
+ 36: f2c0 0200 movt r2, #0
+ 3a: 211c movs r1, #28
+ 3c: f7ff fffe bl 0 <_ZN4core9panicking5panic17h5c028258ca2fb3f5E>
+ 40: defe udf #254 ; 0xfe
+```
+"##,
+ },
+ Lint {
+ label: "compiler_builtins",
+ description: r##"# `compiler_builtins`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "concat_idents",
+ description: r##"# `concat_idents`
+
+The tracking issue for this feature is: [#29599]
+
+[#29599]: https://github.com/rust-lang/rust/issues/29599
+
+------------------------
+
+The `concat_idents` feature adds a macro for concatenating multiple identifiers
+into one identifier.
+
+## Examples
+
+```rust
+#![feature(concat_idents)]
+
+fn main() {
+ fn foobar() -> u32 { 23 }
+ let f = concat_idents!(foo, bar);
+ assert_eq!(f(), 23);
+}
+```
+"##,
+ },
+ Lint {
+ label: "const_eval_limit",
+ description: r##"# `const_eval_limit`
+
+The tracking issue for this feature is: [#67217]
+
+[#67217]: https://github.com/rust-lang/rust/issues/67217
+
+The `const_eval_limit` attribute allows limiting the number of evaluation steps CTFE takes to evaluate a `const fn`.
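+
+A minimal sketch (the limit value here is arbitrary):
+
+```rust
+#![feature(const_eval_limit)]
+#![const_eval_limit = "1000000"]
+
+const fn spin(mut n: u32) -> u32 {
+    // Every loop iteration consumes evaluation steps counted against the limit.
+    while n > 0 {
+        n -= 1;
+    }
+    n
+}
+
+const ZERO: u32 = spin(100_000);
+```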
+"##,
+ },
+ Lint {
+ label: "core_intrinsics",
+ description: r##"# `core_intrinsics`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "core_panic",
+ description: r##"# `core_panic`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "core_private_bignum",
+ description: r##"# `core_private_bignum`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "core_private_diy_float",
+ description: r##"# `core_private_diy_float`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "crate_visibility_modifier",
+ description: r##"# `crate_visibility_modifier`
+
+The tracking issue for this feature is: [#53120]
+
+[#53120]: https://github.com/rust-lang/rust/issues/53120
+
+-----
+
+The `crate_visibility_modifier` feature allows the `crate` keyword to be used
+as a visibility modifier synonymous to `pub(crate)`, indicating that a type
+(function, _&c._) is to be visible to the entire enclosing crate, but not to
+other crates.
+
+```rust
+#![feature(crate_visibility_modifier)]
+
+crate struct Foo {
+ bar: usize,
+}
+```
+"##,
+ },
+ Lint {
+ label: "custom_test_frameworks",
+ description: r##"# `custom_test_frameworks`
+
+The tracking issue for this feature is: [#50297]
+
+[#50297]: https://github.com/rust-lang/rust/issues/50297
+
+------------------------
+
+The `custom_test_frameworks` feature allows the use of `#[test_case]` and `#![test_runner]`.
+Any function, const, or static can be annotated with `#[test_case]` causing it to be aggregated (like `#[test]`)
+and be passed to the test runner determined by the `#![test_runner]` crate attribute.
+
+```rust
+#![feature(custom_test_frameworks)]
+#![test_runner(my_runner)]
+
+fn my_runner(tests: &[&i32]) {
+ for t in tests {
+ if **t == 0 {
+ println!("PASSED");
+ } else {
+ println!("FAILED");
+ }
+ }
+}
+
+#[test_case]
+const WILL_PASS: i32 = 0;
+
+#[test_case]
+const WILL_FAIL: i32 = 4;
+```
+"##,
+ },
+ Lint {
+ label: "dec2flt",
+ description: r##"# `dec2flt`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "default_free_fn",
+ description: r##"# `default_free_fn`
+
+The tracking issue for this feature is: [#73014]
+
+[#73014]: https://github.com/rust-lang/rust/issues/73014
+
+------------------------
+
+Adds a free `default()` function to the `std::default` module. This function
+just forwards to [`Default::default()`], but may remove repetition of the word
+"default" from the call site.
+
+[`Default::default()`]: https://doc.rust-lang.org/nightly/std/default/trait.Default.html#tymethod.default
+
+Here is an example:
+
+```rust
+#![feature(default_free_fn)]
+use std::default::default;
+
+#[derive(Default)]
+struct AppConfig {
+ foo: FooConfig,
+ bar: BarConfig,
+}
+
+#[derive(Default)]
+struct FooConfig {
+ foo: i32,
+}
+
+#[derive(Default)]
+struct BarConfig {
+ bar: f32,
+ baz: u8,
+}
+
+fn main() {
+ let options = AppConfig {
+ foo: default(),
+ bar: BarConfig {
+ bar: 10.1,
+ ..default()
+ },
+ };
+}
+```
+"##,
+ },
+ Lint {
+ label: "derive_clone_copy",
+ description: r##"# `derive_clone_copy`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "derive_eq",
+ description: r##"# `derive_eq`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "doc_cfg",
+ description: r##"# `doc_cfg`
+
+The tracking issue for this feature is: [#43781]
+
+------
+
+The `doc_cfg` feature allows an API to be documented as only available on some specific platforms.
+This attribute has two effects:
+
+1. In the annotated item's documentation, there will be a message saying "This is supported on
+ (platform) only".
+
+2. The item's doc-tests will only run on the specific platform.
+
+In addition to allowing the use of the `#[doc(cfg)]` attribute, this feature enables the use of a
+special conditional compilation flag, `#[cfg(doc)]`, set whenever building documentation on your
+crate.
+
+This feature was introduced as part of PR [#43348] to allow the platform-specific parts of the
+standard library to be documented.
+
+```rust
+#![feature(doc_cfg)]
+
+#[cfg(any(windows, doc))]
+#[doc(cfg(windows))]
+/// The application's icon in the notification area (a.k.a. system tray).
+///
+/// # Examples
+///
+/// ```no_run
+/// extern crate my_awesome_ui_library;
+/// use my_awesome_ui_library::current_app;
+/// use my_awesome_ui_library::windows::notification;
+///
+/// let icon = current_app().get::<notification::Icon>();
+/// icon.show();
+/// icon.show_message("Hello");
+/// ```
+pub struct Icon {
+ // ...
+}
+```
+
+[#43781]: https://github.com/rust-lang/rust/issues/43781
+[#43348]: https://github.com/rust-lang/rust/issues/43348
+"##,
+ },
+ Lint {
+ label: "doc_masked",
+ description: r##"# `doc_masked`
+
+The tracking issue for this feature is: [#44027]
+
+-----
+
+The `doc_masked` feature allows a crate to exclude types from a given crate from appearing in lists
+of trait implementations. The specifics of the feature are as follows:
+
+1. When rustdoc encounters an `extern crate` statement annotated with a `#[doc(masked)]` attribute,
+ it marks the crate as being masked.
+
+2. When listing traits a given type implements, rustdoc ensures that traits from masked crates are
+ not emitted into the documentation.
+
+3. When listing types that implement a given trait, rustdoc ensures that types from masked crates
+ are not emitted into the documentation.
+
+This feature was introduced in PR [#44026] to ensure that compiler-internal and
+implementation-specific types and traits were not included in the standard library's documentation.
+Such types would introduce broken links into the documentation.
+
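+A short, hypothetical sketch (`some_internal_crate` is a placeholder crate
+name):
+
+``` rust,ignore
+#![feature(doc_masked)]
+
+// Trait implementations involving types from this crate will not appear in
+// the generated documentation.
+#[doc(masked)]
+extern crate some_internal_crate;
+```
+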
+[#44026]: https://github.com/rust-lang/rust/pull/44026
+[#44027]: https://github.com/rust-lang/rust/pull/44027
+"##,
+ },
+ Lint {
+ label: "doc_notable_trait",
+ description: r##"# `doc_notable_trait`
+
+The tracking issue for this feature is: [#45040]
+
+The `doc_notable_trait` feature allows the use of the `#[doc(notable_trait)]`
+attribute, which will display the trait in a "Notable traits" dialog for
+functions returning types that implement the trait. For example, this attribute
+is applied to the `Iterator`, `Future`, `io::Read`, and `io::Write` traits in
+the standard library.
+
+You can do this on your own traits like so:
+
+```
+#![feature(doc_notable_trait)]
+
+#[doc(notable_trait)]
+pub trait MyTrait {}
+
+pub struct MyStruct;
+impl MyTrait for MyStruct {}
+
+/// The docs for this function will have a button that displays a dialog about
+/// `MyStruct` implementing `MyTrait`.
+pub fn my_fn() -> MyStruct { MyStruct }
+```
+
+This feature was originally implemented in PR [#45039].
+
+See also its documentation in [the rustdoc book][rustdoc-book-notable_trait].
+
+[#45040]: https://github.com/rust-lang/rust/issues/45040
+[#45039]: https://github.com/rust-lang/rust/pull/45039
+[rustdoc-book-notable_trait]: ../../rustdoc/unstable-features.html#adding-your-trait-to-the-notable-traits-dialog
+"##,
+ },
+ Lint {
+ label: "exclusive_range_pattern",
+ description: r##"# `exclusive_range_pattern`
+
+The tracking issue for this feature is: [#37854].
+
+
+[#67264]: https://github.com/rust-lang/rust/issues/67264
+[#37854]: https://github.com/rust-lang/rust/issues/37854
+-----
+
+The `exclusive_range_pattern` feature allows non-inclusive range
+patterns (`0..10`) to be used in appropriate pattern matching
+contexts. It can also be combined with `#![feature(half_open_range_patterns)]`
+to be able to use RangeTo patterns (`..10`).
+
+It also enabled RangeFrom patterns but that has since been
+stabilized.
+
+```rust
+#![feature(exclusive_range_pattern)]
+ let x = 5;
+ match x {
+ 0..10 => println!("single digit"),
+ 10 => println!("ten isn't part of the above range"),
+ _ => println!("nor is everything else.")
+ }
+```
+"##,
+ },
+ Lint {
+ label: "explicit_generic_args_with_impl_trait",
+ description: r##"# `explicit_generic_args_with_impl_trait`
+
+The tracking issue for this feature is: [#83701]
+
+[#83701]: https://github.com/rust-lang/rust/issues/83701
+
+------------------------
+
+The `explicit_generic_args_with_impl_trait` feature gate lets you specify generic arguments even
+when `impl Trait` is used in argument position.
+
+A simple example is:
+
+```rust
+#![feature(explicit_generic_args_with_impl_trait)]
+
+fn foo<T: ?Sized>(_f: impl AsRef<T>) {}
+
+fn main() {
+ foo::<str>("".to_string());
+}
+```
+
+This is currently rejected:
+
+```text
+error[E0632]: cannot provide explicit generic arguments when `impl Trait` is used in argument position
+ --> src/main.rs:6:11
+ |
+6 | foo::<str>("".to_string());
+ | ^^^ explicit generic argument not allowed
+
+```
+
+However it would compile if `explicit_generic_args_with_impl_trait` is enabled.
+
+Note that the synthetic type parameters from `impl Trait` are still implicit and you
+cannot explicitly specify these:
+
+```rust,compile_fail
+#![feature(explicit_generic_args_with_impl_trait)]
+
+fn foo<T: ?Sized>(_f: impl AsRef<T>) {}
+fn bar<T: ?Sized, F: AsRef<T>>(_f: F) {}
+
+fn main() {
+ bar::<str, _>("".to_string()); // Okay
+ bar::<str, String>("".to_string()); // Okay
+
+ foo::<str>("".to_string()); // Okay
+ foo::<str, String>("".to_string()); // Error, you cannot specify `impl Trait` explicitly
+}
+```
+"##,
+ },
+ Lint {
+ label: "fd",
+ description: r##"# `fd`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "fd_read",
+ description: r##"# `fd_read`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "ffi_const",
+ description: r##"# `ffi_const`
+
+The tracking issue for this feature is: [#58328]
+
+------
+
+The `#[ffi_const]` attribute applies clang's `const` attribute to foreign
+function declarations.
+
+That is, `#[ffi_const]` functions shall have no effects except for their return
+value, which can only depend on the values of the function parameters, and is
+not affected by changes to the observable state of the program.
+
+Applying the `#[ffi_const]` attribute to a function that violates these
+requirements is undefined behaviour.
+
+This attribute enables Rust to perform common optimizations, like sub-expression
+elimination, and it can avoid emitting some calls in repeated invocations of the
+function with the same argument values regardless of other operations being
+performed in between these function calls (as opposed to `#[ffi_pure]`
+functions).
+
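+A short sketch of applying the attribute to a C function whose result depends
+only on its argument (assuming the platform's C library provides `abs`):
+
+```rust
+#![feature(ffi_const)]
+
+extern "C" {
+    // `abs` computes its result from its argument alone and reads no
+    // observable program state, so it fits the `const` contract.
+    #[ffi_const]
+    fn abs(x: i32) -> i32;
+}
+```
+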
+## Pitfalls
+
+A `#[ffi_const]` function can only read global memory that would not affect
+its return value for the whole execution of the program (e.g. immutable global
+memory). `#[ffi_const]` functions are referentially-transparent and therefore
+more strict than `#[ffi_pure]` functions.
+
+A common pitfall involves applying the `#[ffi_const]` attribute to a
+function that reads memory through pointer arguments which do not necessarily
+point to immutable global memory.
+
+A `#[ffi_const]` function that returns unit has no effect on the abstract
+machine's state, and a `#[ffi_const]` function cannot be `#[ffi_pure]`.
+
+A `#[ffi_const]` function must not diverge, neither via a side effect (e.g. a
+call to `abort`) nor by infinite loops.
+
+When translating C headers to Rust FFI, it is worth verifying for which targets
+the `const` attribute is enabled in those headers, and using the appropriate
+`cfg` macros in the Rust side to match those definitions. While the semantics of
+`const` are implemented identically by many C and C++ compilers, e.g., clang,
+[GCC], [ARM C/C++ compiler], [IBM ILE C/C++], etc. they are not necessarily
+implemented in this way on all of them. It is therefore also worth verifying
+that the semantics of the C toolchain used to compile the binary being linked
+against are compatible with those of the `#[ffi_const]`.
+
+[#58328]: https://github.com/rust-lang/rust/issues/58328
+[ARM C/C++ compiler]: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0491c/Cacgigch.html
+[GCC]: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+[IBM ILE C/C++]: https://www.ibm.com/support/knowledgecenter/fr/ssw_ibm_i_71/rzarg/fn_attrib_const.htm
+"##,
+ },
+ Lint {
+ label: "ffi_pure",
+ description: r##"# `ffi_pure`
+
+The tracking issue for this feature is: [#58329]
+
+------
+
+The `#[ffi_pure]` attribute applies clang's `pure` attribute to foreign
+function declarations.
+
+That is, `#[ffi_pure]` functions shall have no effects except for their return
+value, which shall not change across two consecutive function calls with
+the same parameters.
+
+Applying the `#[ffi_pure]` attribute to a function that violates these
+requirements is undefined behavior.
+
+This attribute enables Rust to perform common optimizations, like sub-expression
+elimination and loop optimizations. Some common examples of pure functions are
+`strlen` or `memcmp`.
+
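+A short sketch using one of those examples (assuming the platform's C library
+provides `strlen`):
+
+```rust
+#![feature(ffi_pure)]
+
+extern "C" {
+    // `strlen` reads memory through its argument but has no side effects,
+    // which matches the `pure` contract.
+    #[ffi_pure]
+    fn strlen(s: *const std::os::raw::c_char) -> usize;
+}
+```
+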
+These optimizations are only applicable when the compiler can prove that no
+program state observable by the `#[ffi_pure]` function has changed between calls
+of the function, which could alter the result. See also the `#[ffi_const]`
+attribute, which provides stronger guarantees regarding the allowable behavior
+of a function, enabling further optimization.
+
+## Pitfalls
+
+A `#[ffi_pure]` function can read global memory through the function
+parameters (e.g. pointers), globals, etc. `#[ffi_pure]` functions are not
+referentially-transparent, and are therefore more relaxed than `#[ffi_const]`
+functions.
+
+However, accessing global memory through volatile or atomic reads can violate the
+requirement that two consecutive function calls shall return the same value.
+
+A `pure` function that returns unit has no effect on the abstract machine's
+state.
+
+A `#[ffi_pure]` function must not diverge, neither via a side effect (e.g. a
+call to `abort`) nor by infinite loops.
+
+When translating C headers to Rust FFI, it is worth verifying for which targets
+the `pure` attribute is enabled in those headers, and using the appropriate
+`cfg` macros in the Rust side to match those definitions. While the semantics of
+`pure` are implemented identically by many C and C++ compilers, e.g., clang,
+[GCC], [ARM C/C++ compiler], [IBM ILE C/C++], etc. they are not necessarily
+implemented in this way on all of them. It is therefore also worth verifying
+that the semantics of the C toolchain used to compile the binary being linked
+against are compatible with those of the `#[ffi_pure]`.
+
+
+[#58329]: https://github.com/rust-lang/rust/issues/58329
+[ARM C/C++ compiler]: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0491c/Cacigdac.html
+[GCC]: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
+[IBM ILE C/C++]: https://www.ibm.com/support/knowledgecenter/fr/ssw_ibm_i_71/rzarg/fn_attrib_pure.htm
+"##,
+ },
+ Lint {
+ label: "flt2dec",
+ description: r##"# `flt2dec`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "fmt_internals",
+ description: r##"# `fmt_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "fn_traits",
+ description: r##"# `fn_traits`
+
+The tracking issue for this feature is [#29625]
+
+See Also: [`unboxed_closures`](../language-features/unboxed-closures.md)
+
+[#29625]: https://github.com/rust-lang/rust/issues/29625
+
+----
+
+The `fn_traits` feature allows for implementation of the [`Fn*`] traits
+for creating custom closure-like types.
+
+[`Fn*`]: https://doc.rust-lang.org/std/ops/trait.Fn.html
+
+```rust
+#![feature(unboxed_closures)]
+#![feature(fn_traits)]
+
+struct Adder {
+ a: u32
+}
+
+impl FnOnce<(u32, )> for Adder {
+ type Output = u32;
+ extern "rust-call" fn call_once(self, b: (u32, )) -> Self::Output {
+ self.a + b.0
+ }
+}
+
+fn main() {
+ let adder = Adder { a: 3 };
+ assert_eq!(adder(2), 5);
+}
+```
+"##,
+ },
+ Lint {
+ label: "generators",
+ description: r##"# `generators`
+
+The tracking issue for this feature is: [#43122]
+
+[#43122]: https://github.com/rust-lang/rust/issues/43122
+
+------------------------
+
+The `generators` feature gate in Rust allows you to define generator or
+coroutine literals. A generator is a "resumable function" that syntactically
+resembles a closure but compiles to much different semantics in the compiler
+itself. The primary feature of a generator is that it can be suspended during
+execution to be resumed at a later date. Generators use the `yield` keyword to
+"return", and then the caller can `resume` a generator to resume execution just
+after the `yield` keyword.
+
+Generators are an extra-unstable feature in the compiler right now. Added in
+[RFC 2033], they're mostly intended right now as an information/constraint
+gathering phase. The intent is that experimentation can happen on the nightly
+compiler before actual stabilization. A further RFC will be required to
+stabilize generators/coroutines and will likely contain at least a few small
+tweaks to the overall design.
+
+[RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
+
+A syntactical example of a generator is:
+
+```rust
+#![feature(generators, generator_trait)]
+
+use std::ops::{Generator, GeneratorState};
+use std::pin::Pin;
+
+fn main() {
+ let mut generator = || {
+ yield 1;
+ return "foo"
+ };
+
+ match Pin::new(&mut generator).resume(()) {
+ GeneratorState::Yielded(1) => {}
+ _ => panic!("unexpected value from resume"),
+ }
+ match Pin::new(&mut generator).resume(()) {
+ GeneratorState::Complete("foo") => {}
+ _ => panic!("unexpected value from resume"),
+ }
+}
+```
+
+Generators are closure-like literals which can contain a `yield` statement. The
+`yield` statement takes an optional expression of a value to yield out of the
+generator. All generator literals implement the `Generator` trait in the
+`std::ops` module. The `Generator` trait has one main method, `resume`, which
+resumes execution of the generator at the previous suspension point.
+
+As an example of the control flow of generators, the following program prints
+all numbers in order:
+
+```rust
+#![feature(generators, generator_trait)]
+
+use std::ops::Generator;
+use std::pin::Pin;
+
+fn main() {
+ let mut generator = || {
+ println!("2");
+ yield;
+ println!("4");
+ };
+
+ println!("1");
+ Pin::new(&mut generator).resume(());
+ println!("3");
+ Pin::new(&mut generator).resume(());
+ println!("5");
+}
+```
+
+At this time the main intended use case of generators is an implementation
+primitive for async/await syntax, but generators will likely be extended to
+ergonomic implementations of iterators and other primitives in the future.
+Feedback on the design and usage is always appreciated!
+
+### The `Generator` trait
+
+The `Generator` trait in `std::ops` currently looks like:
+
+```rust
+# #![feature(arbitrary_self_types, generator_trait)]
+# use std::ops::GeneratorState;
+# use std::pin::Pin;
+
+pub trait Generator<R = ()> {
+ type Yield;
+ type Return;
+ fn resume(self: Pin<&mut Self>, resume: R) -> GeneratorState<Self::Yield, Self::Return>;
+}
+```
+
+The `Generator::Yield` type is the type of values that can be yielded with the
+`yield` statement. The `Generator::Return` type is the returned type of the
+generator. This is typically the last expression in a generator's definition or
+any value passed to `return` in a generator. The `resume` function is the entry
+point for executing the `Generator` itself.
+
+The return value of `resume`, `GeneratorState`, looks like:
+
+```rust
+pub enum GeneratorState<Y, R> {
+ Yielded(Y),
+ Complete(R),
+}
+```
+
+The `Yielded` variant indicates that the generator can later be resumed. This
+corresponds to a `yield` point in a generator. The `Complete` variant indicates
+that the generator is complete and cannot be resumed again. Calling `resume`
+after a generator has returned `Complete` will likely result in a panic of the
+program.
+
+### Closure-like semantics
+
+The closure-like syntax for generators alludes to the fact that they also have
+closure-like semantics. Namely:
+
+* When created, a generator executes no code. A closure literal does not
+ actually execute any of the closure's code on construction, and similarly a
+ generator literal does not execute any code inside the generator when
+ constructed.
+
+* Generators can capture outer variables by reference or by move, and this can
+  be tweaked with the `move` keyword at the beginning of the closure. Like
+  closures, all generators have an implicit environment which is inferred by
+ the compiler. Outer variables can be moved into a generator for use as the
+ generator progresses.
+
+* Generator literals produce a value with a unique type which implements the
+ `std::ops::Generator` trait. This allows actual execution of the generator
+ through the `Generator::resume` method as well as also naming it in return
+ types and such.
+
+* Traits like `Send` and `Sync` are automatically implemented for a `Generator`
+ depending on the captured variables of the environment. Unlike closures,
+ generators also depend on variables live across suspension points. This means
+ that although the ambient environment may be `Send` or `Sync`, the generator
+ itself may not be due to internal variables live across `yield` points being
+ not-`Send` or not-`Sync`. Note that generators do
+ not implement traits like `Copy` or `Clone` automatically.
+
+* Whenever a generator is dropped it will drop all captured environment
+ variables.
+
+### Generators as state machines
+
+In the compiler, generators are currently compiled as state machines. Each
+`yield` expression will correspond to a different state that stores all live
+variables over that suspension point. Resumption of a generator will dispatch on
+the current state and then execute internally until a `yield` is reached, at
+which point all state is saved off in the generator and a value is returned.
+
+Let's take a look at an example to see what's going on here:
+
+```rust
+#![feature(generators, generator_trait)]
+
+use std::ops::Generator;
+use std::pin::Pin;
+
+fn main() {
+ let ret = "foo";
+ let mut generator = move || {
+ yield 1;
+ return ret
+ };
+
+ Pin::new(&mut generator).resume(());
+ Pin::new(&mut generator).resume(());
+}
+```
+
+This generator literal will compile down to something similar to:
+
+```rust
+#![feature(arbitrary_self_types, generators, generator_trait)]
+
+use std::ops::{Generator, GeneratorState};
+use std::pin::Pin;
+
+fn main() {
+ let ret = "foo";
+ let mut generator = {
+ enum __Generator {
+ Start(&'static str),
+ Yield1(&'static str),
+ Done,
+ }
+
+ impl Generator for __Generator {
+ type Yield = i32;
+ type Return = &'static str;
+
+ fn resume(mut self: Pin<&mut Self>, resume: ()) -> GeneratorState<i32, &'static str> {
+ use std::mem;
+ match mem::replace(&mut *self, __Generator::Done) {
+ __Generator::Start(s) => {
+ *self = __Generator::Yield1(s);
+ GeneratorState::Yielded(1)
+ }
+
+ __Generator::Yield1(s) => {
+ *self = __Generator::Done;
+ GeneratorState::Complete(s)
+ }
+
+ __Generator::Done => {
+ panic!("generator resumed after completion")
+ }
+ }
+ }
+ }
+
+ __Generator::Start(ret)
+ };
+
+ Pin::new(&mut generator).resume(());
+ Pin::new(&mut generator).resume(());
+}
+```
+
+Notably here we can see that the compiler is generating a fresh type,
+`__Generator` in this case. This type has a number of states (represented here
+as an `enum`) corresponding to each of the conceptual states of the generator.
+At the beginning we're closing over our outer variable `ret` and then that
+variable is also live over the `yield` point, so it's stored in both states.
+
+When the generator starts it'll immediately yield 1, but it saves off its state
+just before it does so indicating that it has reached the yield point. Upon
+resuming again we'll execute the `return ret` which returns the `Complete`
+state.
+
+Here we can also note that the `Done` state, if resumed, panics immediately as
+it's invalid to resume a completed generator. It's also worth noting that this
+is just a rough desugaring, not a normative specification for what the compiler
+does.
+"##,
+ },
+ Lint {
+ label: "half_open_range_patterns",
+ description: r##"# `half_open_range_patterns`
+
+The tracking issue for this feature is: [#67264]
+It is part of the `exclusive_range_pattern` feature,
+tracked at [#37854].
+
+[#67264]: https://github.com/rust-lang/rust/issues/67264
+[#37854]: https://github.com/rust-lang/rust/issues/37854
+
+-----
+
+The `half_open_range_patterns` feature allows RangeTo patterns
+(`..10`) to be used in appropriate pattern matching contexts.
+This requires also enabling the `exclusive_range_pattern` feature.
+
+It also enabled RangeFrom patterns, but those have since been
+stabilized.
+
+```rust
+#![feature(half_open_range_patterns)]
+#![feature(exclusive_range_pattern)]
+ let x = 5;
+ match x {
+ ..0 => println!("negative!"), // "RangeTo" pattern. Unstable.
+ 0 => println!("zero!"),
+ 1.. => println!("positive!"), // "RangeFrom" pattern. Stable.
+ }
+```
+"##,
+ },
+ Lint {
+ label: "infer_static_outlives_requirements",
+ description: r##"# `infer_static_outlives_requirements`
+
+The tracking issue for this feature is: [#54185]
+
+[#54185]: https://github.com/rust-lang/rust/issues/54185
+
+------------------------
+The `infer_static_outlives_requirements` feature indicates that certain
+`'static` outlives requirements can be inferred by the compiler rather than
+stating them explicitly.
+
+Note: It is an accompanying feature to `infer_outlives_requirements`,
+which must be enabled to infer outlives requirements.
+
+For example, generic struct definitions that contain references
+currently require where-clauses of the form `T: 'static`. With
+this feature the outlives predicates will be inferred, although
+they may still be written explicitly.
+
+```rust,ignore (pseudo-Rust)
+struct Foo<U> where U: 'static { // <-- currently required
+ bar: Bar<U>
+}
+struct Bar<T: 'static> {
+ x: T,
+}
+```
+
+## Examples
+
+```rust,ignore (pseudo-Rust)
+#![feature(infer_outlives_requirements)]
+#![feature(infer_static_outlives_requirements)]
+
+#[rustc_outlives]
+// Implicitly infer U: 'static
+struct Foo<U> {
+ bar: Bar<U>
+}
+struct Bar<T: 'static> {
+ x: T,
+}
+```
+"##,
+ },
+ Lint {
+ label: "inline_const",
+ description: r##"# `inline_const`
+
+The tracking issue for this feature is: [#76001]
+
+See also [`inline_const_pat`](inline-const-pat.md)
+
+------
+
+This feature allows you to use inline constant expressions. For example, you can
+turn this code:
+
+```rust
+# fn add_one(x: i32) -> i32 { x + 1 }
+const MY_COMPUTATION: i32 = 1 + 2 * 3 / 4;
+
+fn main() {
+ let x = add_one(MY_COMPUTATION);
+}
+```
+
+into this code:
+
+```rust
+#![feature(inline_const)]
+
+# fn add_one(x: i32) -> i32 { x + 1 }
+fn main() {
+ let x = add_one(const { 1 + 2 * 3 / 4 });
+}
+```
+
+[#76001]: https://github.com/rust-lang/rust/issues/76001
+"##,
+ },
+ Lint {
+ label: "inline_const_pat",
+ description: r##"# `inline_const_pat`
+
+The tracking issue for this feature is: [#76001]
+
+See also [`inline_const`](inline-const.md)
+
+------
+
+This feature allows you to use inline constant expressions in pattern position:
+
+```rust
+#![feature(inline_const_pat)]
+
+const fn one() -> i32 { 1 }
+
+let some_int = 3;
+match some_int {
+ const { 1 + 2 } => println!("Matched 1 + 2"),
+ const { one() } => println!("Matched const fn returning 1"),
+ _ => println!("Didn't match anything :("),
+}
+```
+
+[#76001]: https://github.com/rust-lang/rust/issues/76001
+"##,
+ },
+ Lint {
+ label: "int_error_internals",
+ description: r##"# `int_error_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "internal_output_capture",
+ description: r##"# `internal_output_capture`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "intra_doc_pointers",
+ description: r##"# `intra-doc-pointers`
+
+The tracking issue for this feature is: [#80896]
+
+[#80896]: https://github.com/rust-lang/rust/issues/80896
+
+------------------------
+
+Rustdoc does not currently allow disambiguating between `*const` and `*mut`, and
+raw pointers in intra-doc links are unstable until it does.
+
+```rust
+#![feature(intra_doc_pointers)]
+//! [pointer::add]
+```
+"##,
+ },
+ Lint {
+ label: "intrinsics",
+ description: r##"# `intrinsics`
+
+The tracking issue for this feature is: None.
+
+Intrinsics are never intended to be stable directly, but they are often
+exported in some sort of stable manner. Prefer using the stable interfaces
+over calling the intrinsics directly when you can.
+
+------------------------
+
+
+These are imported as if they were FFI functions, with the special
+`rust-intrinsic` ABI. For example, if one were in a freestanding
+context, but wished to be able to `transmute` between types, and
+perform efficient pointer arithmetic, one would import those functions
+via a declaration like
+
+```rust
+#![feature(intrinsics)]
+# fn main() {}
+
+extern "rust-intrinsic" {
+ fn transmute<T, U>(x: T) -> U;
+
+ fn offset<T>(dst: *const T, offset: isize) -> *const T;
+}
+```
+
+As with any other FFI functions, these are always `unsafe` to call.
+"##,
+ },
+ Lint {
+ label: "is_sorted",
+ description: r##"# `is_sorted`
+
+The tracking issue for this feature is: [#53485]
+
+[#53485]: https://github.com/rust-lang/rust/issues/53485
+
+------------------------
+
+Add the methods `is_sorted`, `is_sorted_by` and `is_sorted_by_key` to `[T]`;
+add the methods `is_sorted`, `is_sorted_by` and `is_sorted_by_key` to
+`Iterator`.
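+
+A minimal sketch of how these methods might be used:
+
+```rust
+#![feature(is_sorted)]
+
+fn main() {
+    // On slices:
+    assert!([1, 2, 2, 9].is_sorted());
+    assert!(![1, 3, 2].is_sorted());
+    assert!([-2_i32, 3, -4].is_sorted_by_key(|x| x.abs()));
+
+    // On iterators:
+    assert!("abcde".chars().is_sorted());
+}
+```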
+"##,
+ },
+ Lint {
+ label: "lang_items",
+ description: r##"# `lang_items`
+
+The tracking issue for this feature is: None.
+
+------------------------
+
+The `rustc` compiler has certain pluggable operations, that is,
+functionality that isn't hard-coded into the language, but is
+implemented in libraries, with a special marker to tell the compiler
+it exists. The marker is the attribute `#[lang = "..."]`, and there are
+various values of `...`, i.e. various 'lang items'.
+
+For example, `Box` pointers require two lang items, one for allocation
+and one for deallocation. A freestanding program that uses the `Box`
+sugar for dynamic allocations via `malloc` and `free`:
+
+```rust,ignore (libc-is-finicky)
+#![feature(lang_items, box_syntax, start, libc, core_intrinsics, rustc_private)]
+#![no_std]
+use core::intrinsics;
+use core::panic::PanicInfo;
+
+extern crate libc;
+
+#[lang = "owned_box"]
+pub struct Box<T>(*mut T);
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+ let p = libc::malloc(size as libc::size_t) as *mut u8;
+
+ // Check if `malloc` failed:
+ if p as usize == 0 {
+ intrinsics::abort();
+ }
+
+ p
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+ libc::free(ptr as *mut libc::c_void)
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let _x = box 1;
+
+ 0
+}
+
+#[lang = "eh_personality"] extern fn rust_eh_personality() {}
+#[lang = "panic_impl"] extern fn rust_begin_panic(info: &PanicInfo) -> ! { unsafe { intrinsics::abort() } }
+#[no_mangle] pub extern fn rust_eh_register_frames () {}
+#[no_mangle] pub extern fn rust_eh_unregister_frames () {}
+```
+
+Note the use of `abort`: the `exchange_malloc` lang item is assumed to
+return a valid pointer, and so needs to do the check internally.
+
+Other features provided by lang items include:
+
+- overloadable operators via traits: the traits corresponding to the
+ `==`, `<`, dereferencing (`*`) and `+` (etc.) operators are all
+ marked with lang items; those specific four are `eq`, `ord`,
+ `deref`, and `add` respectively.
+- stack unwinding and general failure; the `eh_personality`,
+ `panic` and `panic_bounds_check` lang items.
+- the traits in `std::marker` used to indicate types of
+ various kinds; lang items `send`, `sync` and `copy`.
+- the marker types and variance indicators found in
+ `std::marker`; lang items `covariant_type`,
+ `contravariant_lifetime`, etc.
+
+Lang items are loaded lazily by the compiler; e.g. if one never uses
+`Box` then there is no need to define functions for `exchange_malloc`
+and `box_free`. `rustc` will emit an error when an item is needed
+but not found in the current crate or any that it depends on.
+
+Most lang items are defined by `libcore`, but if you're trying to build
+an executable without the standard library, you'll run into the need
+for lang items. The rest of this page focuses on this use-case, even though
+lang items are a bit broader than that.
+
+### Using libc
+
+In order to build a `#[no_std]` executable we will need libc as a dependency.
+We can specify this using our `Cargo.toml` file:
+
+```toml
+[dependencies]
+libc = { version = "0.2.14", default-features = false }
+```
+
+Note that the default features have been disabled. This is a critical step -
+**the default features of libc include the standard library and so must be
+disabled.**
+
+### Writing an executable without stdlib
+
+Controlling the entry point is possible in two ways: the `#[start]` attribute,
+or overriding the default shim for the C `main` function with your own.
+
+The function marked `#[start]` is passed the command line parameters
+in the same format as C:
+
+```rust,ignore (libc-is-finicky)
+#![feature(lang_items, core_intrinsics, rustc_private)]
+#![feature(start)]
+#![no_std]
+use core::intrinsics;
+use core::panic::PanicInfo;
+
+// Pull in the system libc library for what crt0.o likely requires.
+extern crate libc;
+
+// Entry point for this program.
+#[start]
+fn start(_argc: isize, _argv: *const *const u8) -> isize {
+ 0
+}
+
+// These functions are used by the compiler, but not
+// for a bare-bones hello world. These are normally
+// provided by libstd.
+#[lang = "eh_personality"]
+#[no_mangle]
+pub extern fn rust_eh_personality() {
+}
+
+#[lang = "panic_impl"]
+#[no_mangle]
+pub extern fn rust_begin_panic(info: &PanicInfo) -> ! {
+ unsafe { intrinsics::abort() }
+}
+```
+
+To override the compiler-inserted `main` shim, one has to disable it
+with `#![no_main]` and then create the appropriate symbol with the
+correct ABI and the correct name, which requires overriding the
+compiler's name mangling too:
+
+```rust,ignore (libc-is-finicky)
+#![feature(lang_items, core_intrinsics, rustc_private)]
+#![feature(start)]
+#![no_std]
+#![no_main]
+use core::intrinsics;
+use core::panic::PanicInfo;
+
+// Pull in the system libc library for what crt0.o likely requires.
+extern crate libc;
+
+// Entry point for this program.
+#[no_mangle] // ensure that this symbol is called `main` in the output
+pub extern fn main(_argc: i32, _argv: *const *const u8) -> i32 {
+ 0
+}
+
+// These functions are used by the compiler, but not
+// for a bare-bones hello world. These are normally
+// provided by libstd.
+#[lang = "eh_personality"]
+#[no_mangle]
+pub extern fn rust_eh_personality() {
+}
+
+#[lang = "panic_impl"]
+#[no_mangle]
+pub extern fn rust_begin_panic(info: &PanicInfo) -> ! {
+ unsafe { intrinsics::abort() }
+}
+```
+
+In many cases, you may need to manually link to the `compiler_builtins` crate
+when building a `no_std` binary. You may observe this via linker error messages
+such as "```undefined reference to `__rust_probestack'```".
+
+## More about the language items
+
+The compiler currently makes a few assumptions about symbols which are
+available in the executable to call. Normally these functions are provided by
+the standard library, but without it you must define your own. These symbols
+are called "language items", and they each have an internal name, and then a
+signature that an implementation must conform to.
+
+The first of these functions, `rust_eh_personality`, is used by the failure
+mechanisms of the compiler. This is often mapped to GCC's personality function
+(see the [libstd implementation][unwind] for more information), but crates
+which do not trigger a panic can be assured that this function is never
+called. The language item's name is `eh_personality`.
+
+[unwind]: https://github.com/rust-lang/rust/blob/master/library/panic_unwind/src/gcc.rs
+
+The second function, `rust_begin_panic`, is also used by the failure mechanisms of the
+compiler. When a panic happens, this controls the message that's displayed on
+the screen. While the language item's name is `panic_impl`, the symbol name is
+`rust_begin_panic`.
+
+Finally, an `eh_catch_typeinfo` static is needed for certain targets which
+implement Rust panics on top of C++ exceptions.
+
+## List of all language items
+
+This is a list of all language items in Rust along with where they are located in
+the source code.
+
+- Primitives
+ - `i8`: `libcore/num/mod.rs`
+ - `i16`: `libcore/num/mod.rs`
+ - `i32`: `libcore/num/mod.rs`
+ - `i64`: `libcore/num/mod.rs`
+ - `i128`: `libcore/num/mod.rs`
+ - `isize`: `libcore/num/mod.rs`
+ - `u8`: `libcore/num/mod.rs`
+ - `u16`: `libcore/num/mod.rs`
+ - `u32`: `libcore/num/mod.rs`
+ - `u64`: `libcore/num/mod.rs`
+ - `u128`: `libcore/num/mod.rs`
+ - `usize`: `libcore/num/mod.rs`
+ - `f32`: `libstd/f32.rs`
+ - `f64`: `libstd/f64.rs`
+ - `char`: `libcore/char.rs`
+ - `slice`: `liballoc/slice.rs`
+ - `str`: `liballoc/str.rs`
+ - `const_ptr`: `libcore/ptr.rs`
+ - `mut_ptr`: `libcore/ptr.rs`
+ - `unsafe_cell`: `libcore/cell.rs`
+- Runtime
+ - `start`: `libstd/rt.rs`
+ - `eh_personality`: `libpanic_unwind/emcc.rs` (EMCC)
+ - `eh_personality`: `libpanic_unwind/gcc.rs` (GNU)
+ - `eh_personality`: `libpanic_unwind/seh.rs` (SEH)
+ - `eh_catch_typeinfo`: `libpanic_unwind/emcc.rs` (EMCC)
+ - `panic`: `libcore/panicking.rs`
+ - `panic_bounds_check`: `libcore/panicking.rs`
+ - `panic_impl`: `libcore/panicking.rs`
+ - `panic_impl`: `libstd/panicking.rs`
+- Allocations
+ - `owned_box`: `liballoc/boxed.rs`
+ - `exchange_malloc`: `liballoc/heap.rs`
+ - `box_free`: `liballoc/heap.rs`
+- Operands
+ - `not`: `libcore/ops/bit.rs`
+ - `bitand`: `libcore/ops/bit.rs`
+ - `bitor`: `libcore/ops/bit.rs`
+ - `bitxor`: `libcore/ops/bit.rs`
+ - `shl`: `libcore/ops/bit.rs`
+ - `shr`: `libcore/ops/bit.rs`
+ - `bitand_assign`: `libcore/ops/bit.rs`
+ - `bitor_assign`: `libcore/ops/bit.rs`
+ - `bitxor_assign`: `libcore/ops/bit.rs`
+ - `shl_assign`: `libcore/ops/bit.rs`
+ - `shr_assign`: `libcore/ops/bit.rs`
+ - `deref`: `libcore/ops/deref.rs`
+ - `deref_mut`: `libcore/ops/deref.rs`
+ - `index`: `libcore/ops/index.rs`
+ - `index_mut`: `libcore/ops/index.rs`
+ - `add`: `libcore/ops/arith.rs`
+ - `sub`: `libcore/ops/arith.rs`
+ - `mul`: `libcore/ops/arith.rs`
+ - `div`: `libcore/ops/arith.rs`
+ - `rem`: `libcore/ops/arith.rs`
+ - `neg`: `libcore/ops/arith.rs`
+ - `add_assign`: `libcore/ops/arith.rs`
+ - `sub_assign`: `libcore/ops/arith.rs`
+ - `mul_assign`: `libcore/ops/arith.rs`
+ - `div_assign`: `libcore/ops/arith.rs`
+ - `rem_assign`: `libcore/ops/arith.rs`
+ - `eq`: `libcore/cmp.rs`
+ - `ord`: `libcore/cmp.rs`
+- Functions
+ - `fn`: `libcore/ops/function.rs`
+ - `fn_mut`: `libcore/ops/function.rs`
+ - `fn_once`: `libcore/ops/function.rs`
+ - `generator_state`: `libcore/ops/generator.rs`
+ - `generator`: `libcore/ops/generator.rs`
+- Other
+ - `coerce_unsized`: `libcore/ops/unsize.rs`
+ - `drop`: `libcore/ops/drop.rs`
+ - `drop_in_place`: `libcore/ptr.rs`
+ - `clone`: `libcore/clone.rs`
+ - `copy`: `libcore/marker.rs`
+ - `send`: `libcore/marker.rs`
+ - `sized`: `libcore/marker.rs`
+ - `unsize`: `libcore/marker.rs`
+ - `sync`: `libcore/marker.rs`
+ - `phantom_data`: `libcore/marker.rs`
+ - `discriminant_kind`: `libcore/marker.rs`
+ - `freeze`: `libcore/marker.rs`
+ - `debug_trait`: `libcore/fmt/mod.rs`
+ - `non_zero`: `libcore/nonzero.rs`
+ - `arc`: `liballoc/sync.rs`
+ - `rc`: `liballoc/rc.rs`
+"##,
+ },
+ Lint {
+ label: "libstd_sys_internals",
+ description: r##"# `libstd_sys_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "libstd_thread_internals",
+ description: r##"# `libstd_thread_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "link_cfg",
+ description: r##"# `link_cfg`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "llvm_asm",
+ description: r##"# `llvm_asm`
+
+The tracking issue for this feature is: [#70173]
+
+[#70173]: https://github.com/rust-lang/rust/issues/70173
+
+------------------------
+
+For extremely low-level manipulations and performance reasons, one
+might wish to control the CPU directly. Rust supports using inline
+assembly to do this via the `llvm_asm!` macro.
+
+```rust,ignore (pseudo-code)
+llvm_asm!(assembly template
+ : output operands
+ : input operands
+ : clobbers
+ : options
+ );
+```
+
+Any use of `llvm_asm` is feature gated (requires `#![feature(llvm_asm)]` on the
+crate to allow) and of course requires an `unsafe` block.
+
+> **Note**: the examples here are given in x86/x86-64 assembly, but
+> all platforms are supported.
+
+## Assembly template
+
+The `assembly template` is the only required parameter and must be a
+literal string (i.e. `""`).
+
+```rust
+#![feature(llvm_asm)]
+
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn foo() {
+ unsafe {
+ llvm_asm!("NOP");
+ }
+}
+
+// Other platforms:
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+fn foo() { /* ... */ }
+
+fn main() {
+ // ...
+ foo();
+ // ...
+}
+```
+
+(The `feature(llvm_asm)` and `#[cfg]`s are omitted from now on.)
+
+Output operands, input operands, clobbers and options are all optional
+but you must add the right number of `:` if you skip them:
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+# fn main() { unsafe {
+llvm_asm!("xor %eax, %eax"
+ :
+ :
+ : "eax"
+ );
+# } }
+# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+# fn main() {}
+```
+
+Whitespace also doesn't matter:
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+# fn main() { unsafe {
+llvm_asm!("xor %eax, %eax" ::: "eax");
+# } }
+# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+# fn main() {}
+```
+
+## Operands
+
+Input and output operands follow the same format: `:
+"constraints1"(expr1), "constraints2"(expr2), ...`. Output operand
+expressions must be mutable places, or not yet assigned:
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn add(a: i32, b: i32) -> i32 {
+ let c: i32;
+ unsafe {
+ llvm_asm!("add $2, $0"
+ : "=r"(c)
+ : "0"(a), "r"(b)
+ );
+ }
+ c
+}
+# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+# fn add(a: i32, b: i32) -> i32 { a + b }
+
+fn main() {
+ assert_eq!(add(3, 14159), 14162)
+}
+```
+
+If you would like to use real operands in this position, however,
+you are required to put curly braces `{}` around the register that
+you want, and you are required to put the specific size of the
+operand. This is useful for very low level programming, where
+the specific register you use is important:
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+# unsafe fn read_byte_in(port: u16) -> u8 {
+let result: u8;
+llvm_asm!("in %dx, %al" : "={al}"(result) : "{dx}"(port));
+result
+# }
+```
+
+## Clobbers
+
+Some instructions modify registers which might otherwise have held
+different values, so we use the clobbers list to indicate to the
+compiler that it must not assume any values loaded into those
+registers will stay valid.
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+# fn main() { unsafe {
+// Put the value 0x200 in eax:
+llvm_asm!("mov $$0x200, %eax" : /* no outputs */ : /* no inputs */ : "eax");
+# } }
+# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+# fn main() {}
+```
+
+Input and output registers need not be listed since that information
+is already communicated by the given constraints. Otherwise, any other
+registers used either implicitly or explicitly should be listed.
+
+If the assembly changes the condition code register, `cc` should be
+specified as one of the clobbers. Similarly, if the assembly modifies
+memory, `memory` should also be specified.
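+
+For example, a small sketch (reusing the operand syntax shown above) in which
+an `add` updates the condition codes, so `cc` is listed as a clobber:
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+# fn main() { unsafe {
+let x: i32;
+// `add` modifies the flags register, hence the `cc` clobber.
+llvm_asm!("add $$1, $0" : "=r"(x) : "0"(41) : "cc");
+assert_eq!(x, 42);
+# } }
+# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+# fn main() {}
+```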
+
+## Options
+
+The last section, `options`, is specific to Rust. The format is
+comma-separated literal strings (i.e. `:"foo", "bar", "baz"`). It's
+used to specify some extra info about the inline assembly:
+
+Current valid options are:
+
+1. `volatile` - specifying this is analogous to
+ `__asm__ __volatile__ (...)` in gcc/clang.
+2. `alignstack` - certain instructions expect the stack to be
+   aligned a certain way (e.g. SSE) and specifying this indicates to
+   the compiler to insert its usual stack alignment code.
+3. `intel` - use Intel syntax instead of the default AT&T.
+
+```rust
+# #![feature(llvm_asm)]
+# #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+# fn main() {
+let result: i32;
+unsafe {
+ llvm_asm!("mov eax, 2" : "={eax}"(result) : : : "intel")
+}
+println!("eax is currently {}", result);
+# }
+# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+# fn main() {}
+```
+
+## More Information
+
+The current implementation of the `llvm_asm!` macro is a direct binding to [LLVM's
+inline assembler expressions][llvm-docs], so be sure to check out [their
+documentation as well][llvm-docs] for more information about clobbers,
+constraints, etc.
+
+[llvm-docs]: http://llvm.org/docs/LangRef.html#inline-assembler-expressions
+"##,
+ },
+ Lint {
+ label: "marker_trait_attr",
+ description: r##"# `marker_trait_attr`
+
+The tracking issue for this feature is: [#29864]
+
+[#29864]: https://github.com/rust-lang/rust/issues/29864
+
+------------------------
+
+Normally, Rust keeps you from adding trait implementations that could
+overlap with each other, as it would be ambiguous which to use. This
+feature, however, carves out an exception to that rule: a trait can
+opt-in to having overlapping implementations, at the cost that those
+implementations are not allowed to override anything (and thus the
+trait itself cannot have any associated items, as they're pointless
+when they'd need to do the same thing for every type anyway).
+
+```rust
+#![feature(marker_trait_attr)]
+
+#[marker] trait CheapToClone: Clone {}
+
+impl<T: Copy> CheapToClone for T {}
+
+// These could potentially overlap with the blanket implementation above,
+// so are only allowed because CheapToClone is a marker trait.
+impl<T: CheapToClone, U: CheapToClone> CheapToClone for (T, U) {}
+impl<T: CheapToClone> CheapToClone for std::ops::Range<T> {}
+
+fn cheap_clone<T: CheapToClone>(t: T) -> T {
+ t.clone()
+}
+```
+
+This is expected to replace the unstable `overlapping_marker_traits`
+feature, which applied to all empty traits (without needing an opt-in).
+"##,
+ },
+ Lint {
+ label: "more_qualified_paths",
+ description: r##"# `more_qualified_paths`
+
+The `more_qualified_paths` feature enables the use of qualified paths
+in patterns.
+
+## Example
+
+```rust
+#![feature(more_qualified_paths)]
+
+fn main() {
+ // destructure through a qualified path
+ let <Foo as A>::Assoc { br } = StructStruct { br: 2 };
+}
+
+struct StructStruct {
+ br: i8,
+}
+
+struct Foo;
+
+trait A {
+ type Assoc;
+}
+
+impl A for Foo {
+ type Assoc = StructStruct;
+}
+```
+"##,
+ },
+ Lint {
+ label: "native_link_modifiers",
+ description: r##"# `native_link_modifiers`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers` feature allows you to use the `modifiers` syntax with the `#[link(..)]` attribute.
+
+Modifiers are specified as a comma-delimited string with each modifier prefixed with either a `+` or `-` to indicate that the modifier is enabled or disabled, respectively. The last boolean value specified for a given modifier wins.
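+
+As a rough sketch, the attribute syntax looks like this (the library name `mylib` and the function `mylib_init` are made up, and each individual modifier used below is additionally gated behind its own feature):
+
+```rust,ignore (requires an external native library)
+#![feature(native_link_modifiers)]
+#![feature(native_link_modifiers_whole_archive)]
+#![feature(native_link_modifiers_bundle)]
+
+// Link `mylib` statically, as a whole archive, without bundling it into the rlib.
+#[link(name = "mylib", kind = "static", modifiers = "+whole-archive,-bundle")]
+extern "C" {
+    fn mylib_init();
+}
+
+fn main() {
+    unsafe { mylib_init() };
+}
+```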
+"##,
+ },
+ Lint {
+ label: "native_link_modifiers_as_needed",
+ description: r##"# `native_link_modifiers_as_needed`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_as_needed` feature allows you to use the `as-needed` modifier.
+
+`as-needed` is only compatible with the `dynamic` and `framework` linking kinds. Using any other kind will result in a compiler error.
+
+`+as-needed` means that the library will actually be linked only if it satisfies some undefined symbols at the point at which it is specified on the command line, making it similar to static libraries in this regard.
+
+This modifier translates to `--as-needed` for ld-like linkers, and to `-dead_strip_dylibs` / `-needed_library` / `-needed_framework` for ld64.
+The modifier does nothing for linkers that don't support it (e.g. `link.exe`).
+
+The default for this modifier is unclear: some targets currently specify it as `+as-needed`, some do not. We may want to try making `+as-needed` the default for all targets.
+"##,
+ },
+ Lint {
+ label: "native_link_modifiers_bundle",
+ description: r##"# `native_link_modifiers_bundle`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_bundle` feature allows you to use the `bundle` modifier.
+
+Only compatible with the `static` linking kind. Using any other kind will result in a compiler error.
+
+`+bundle` means objects from the static library are bundled into the produced crate (a rlib, for example) and are used from this crate later during linking of the final binary.
+
+`-bundle` means the static library is included into the produced rlib "by name", and object files from it are included only during linking of the final binary; the search for the file by that name is also performed during final linking.
+
+This modifier is supposed to supersede the `static-nobundle` linking kind defined by [RFC 1717](https://github.com/rust-lang/rfcs/pull/1717).
+
+The default for this modifier is currently `+bundle`, but it could be changed later on some future edition boundary.
+"##,
+ },
+ Lint {
+ label: "native_link_modifiers_verbatim",
+ description: r##"# `native_link_modifiers_verbatim`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_verbatim` feature allows you to use the `verbatim` modifier.
+
+`+verbatim` means that rustc itself won't add any target-specified library prefixes or suffixes (like `lib` or `.a`) to the library name, and will try its best to ask for the same thing from the linker.
+
+For `ld`-like linkers rustc will use the `-l:filename` syntax (note the colon) when passing the library, so the linker won't add any prefixes or suffixes either.
+See [`-l namespec`](https://sourceware.org/binutils/docs/ld/Options.html) in ld documentation for more details.
+For linkers not supporting any verbatim modifiers (e.g. `link.exe` or `ld64`) the library name will be passed as is.
+
+The default for this modifier is `-verbatim`.
+
+This RFC changes the behavior of the `raw-dylib` linking kind specified by [RFC 2627](https://github.com/rust-lang/rfcs/pull/2627). The `.dll` suffix (or other target-specified suffixes for other targets) is now added automatically.
+If your DLL doesn't have the `.dll` suffix, it can be specified with `+verbatim`.
+"##,
+ },
+ Lint {
+ label: "native_link_modifiers_whole_archive",
+ description: r##"# `native_link_modifiers_whole_archive`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_whole_archive` feature allows you to use the `whole-archive` modifier.
+
+Only compatible with the `static` linking kind. Using any other kind will result in a compiler error.
+
+`+whole-archive` means that the static library is linked as a whole archive without throwing any object files away.
+
+This modifier translates to `--whole-archive` for `ld`-like linkers, to `/WHOLEARCHIVE` for `link.exe`, and to `-force_load` for `ld64`.
+The modifier does nothing for linkers that don't support it.
+
+The default for this modifier is `-whole-archive`.
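+
+A hypothetical sketch of enabling it for a static library named `ctors` (a made-up name) whose object files register themselves via static constructors and are therefore never referenced directly from Rust code:
+
+```rust,ignore (requires an external native library)
+#![feature(native_link_modifiers)]
+#![feature(native_link_modifiers_whole_archive)]
+
+// Keep every object file from `libctors.a`, even those no symbol pulls in.
+#[link(name = "ctors", kind = "static", modifiers = "+whole-archive")]
+extern "C" {}
+
+fn main() {}
+```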
+"##,
+ },
+ Lint {
+ label: "negative_impls",
+ description: r##"# `negative_impls`
+
+The tracking issue for this feature is [#68318].
+
+[#68318]: https://github.com/rust-lang/rust/issues/68318
+
+----
+
+With the feature gate `negative_impls`, you can write negative impls as well as positive ones:
+
+```rust
+#![feature(negative_impls)]
+trait DerefMut { }
+impl<T: ?Sized> !DerefMut for &T { }
+```
+
+Negative impls indicate a semver guarantee that the given trait will not be implemented for the given types. Negative impls play an additional purpose for auto traits, described below.
+
+Negative impls have the following characteristics:
+
+* They do not have any items.
+* They must obey the orphan rules as if they were a positive impl.
+* They cannot "overlap" with any positive impls.
+
+## Semver interaction
+
+It is a breaking change to remove a negative impl. Negative impls are a commitment not to implement the given trait for the named types.
+
+## Orphan and overlap rules
+
+Negative impls must obey the same orphan rules as a positive impl. This implies you cannot add a negative impl for types defined in upstream crates and so forth.
+
+Similarly, negative impls cannot overlap with positive impls, again using the same "overlap" check that we ordinarily use to determine if two impls overlap. (Note that positive impls typically cannot overlap with one another either, except as permitted by specialization.)
+
+## Interaction with auto traits
+
+Declaring a negative impl `impl !SomeAutoTrait for SomeType` for an
+auto-trait serves two purposes:
+
+* as with any trait, it declares that `SomeType` will never implement `SomeAutoTrait`;
+* it disables the automatic `SomeType: SomeAutoTrait` impl that would otherwise have been generated.
+
+Note that, at present, there is no way to indicate that a given type
+does not implement an auto trait *but that it may do so in the
+future*. For ordinary types, this is done by simply not declaring any
+impl at all, but that is not an option for auto traits. A workaround
+is that one could embed a marker type as one of the fields, where the
+marker type is `!AutoTrait`.
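+
+As a small sketch, opting a type out of the automatic `Send` impl (the type and
+its purpose are made up for illustration):
+
+```rust
+#![feature(negative_impls)]
+
+// A token that is tied to the thread that created it.
+struct ThreadToken(u8);
+
+// Even though all of its fields are `Send`, the token itself must never be
+// sent to another thread, so we opt out of the automatic impl.
+impl !Send for ThreadToken {}
+```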
+
+## Immediate uses
+
+Negative impls are used to declare that `&T: !DerefMut` and `&mut T: !Clone`, as required to fix the soundness of `Pin` described in [#66544](https://github.com/rust-lang/rust/issues/66544).
+
+This serves two purposes:
+
+* For proving the correctness of unsafe code, we can use that impl as evidence that no `DerefMut` or `Clone` impl exists.
+* It prevents downstream crates from creating such impls.
+"##,
+ },
+ Lint {
+ label: "no_coverage",
+ description: r##"# `no_coverage`
+
+The tracking issue for this feature is: [#84605]
+
+[#84605]: https://github.com/rust-lang/rust/issues/84605
+
+---
+
+The `no_coverage` attribute can be used to selectively disable coverage
+instrumentation in an annotated function. This might be useful to:
+
+- Avoid instrumentation overhead in a performance critical function
+- Avoid generating coverage for a function that is not meant to be executed,
+ but still target 100% coverage for the rest of the program.
+
+## Example
+
+```rust
+#![feature(no_coverage)]
+
+// `foo()` will get coverage instrumentation (by default)
+fn foo() {
+ // ...
+}
+
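+// `bar()` will be excluded from coverage instrumentation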
+#[no_coverage]
+fn bar() {
+ // ...
+}
+```
+"##,
+ },
+ Lint {
+ label: "no_sanitize",
+ description: r##"# `no_sanitize`
+
+The tracking issue for this feature is: [#39699]
+
+[#39699]: https://github.com/rust-lang/rust/issues/39699
+
+------------------------
+
+The `no_sanitize` attribute can be used to selectively disable sanitizer
+instrumentation in an annotated function. This might be useful to avoid
+instrumentation overhead in a performance-critical function, or to avoid
+instrumenting code that contains constructs unsupported by a given sanitizer.
+
+The precise effect of this annotation depends on the particular sanitizer in use.
+For example, with `no_sanitize(thread)`, the thread sanitizer will no longer
+instrument non-atomic store / load operations, but it will instrument atomic
+operations to avoid reporting false positives and to provide meaningful stack
+traces.
+
+## Examples
+
+```rust
+#![feature(no_sanitize)]
+
+#[no_sanitize(address)]
+fn foo() {
+ // ...
+}
+```
+"##,
+ },
+ Lint {
+ label: "plugin",
+ description: r##"# `plugin`
+
+The tracking issue for this feature is: [#29597]
+
+[#29597]: https://github.com/rust-lang/rust/issues/29597
+
+
+This feature is part of "compiler plugins." It will often be used with the
+`rustc_private` feature.
+
+------------------------
+
+`rustc` can load compiler plugins, which are user-provided libraries that
+extend the compiler's behavior with new lint checks, etc.
+
+A plugin is a dynamic library crate with a designated *registrar* function that
+registers extensions with `rustc`. Other crates can load these extensions using
+the crate attribute `#![plugin(...)]`. See the
+`rustc_driver::plugin` documentation for more about the
+mechanics of defining and loading a plugin.
+
+In the vast majority of cases, a plugin should *only* be used through
+`#![plugin]` and not through an `extern crate` item. Linking a plugin would
+pull in all of librustc_ast and librustc as dependencies of your crate. This is
+generally unwanted unless you are building another plugin.
+
+The usual practice is to put compiler plugins in their own crate, separate from
+any `macro_rules!` macros or ordinary Rust code meant to be used by consumers
+of a library.
+
+# Lint plugins
+
+Plugins can extend [Rust's lint
+infrastructure](../../reference/attributes/diagnostics.md#lint-check-attributes) with
+additional checks for code style, safety, etc. Now let's write a plugin
+[`lint-plugin-test.rs`](https://github.com/rust-lang/rust/blob/master/src/test/ui-fulldeps/auxiliary/lint-plugin-test.rs)
+that warns about any item named `lintme`.
+
+```rust,ignore (requires-stage-2)
+#![feature(box_syntax, rustc_private)]
+
+extern crate rustc_ast;
+
+// Load rustc as a plugin to get macros
+extern crate rustc_driver;
+#[macro_use]
+extern crate rustc_lint;
+#[macro_use]
+extern crate rustc_session;
+
+use rustc_driver::plugin::Registry;
+use rustc_lint::{EarlyContext, EarlyLintPass, LintArray, LintContext, LintPass};
+use rustc_ast::ast;
+declare_lint!(TEST_LINT, Warn, "Warn about items named 'lintme'");
+
+declare_lint_pass!(Pass => [TEST_LINT]);
+
+impl EarlyLintPass for Pass {
+ fn check_item(&mut self, cx: &EarlyContext, it: &ast::Item) {
+ if it.ident.name.as_str() == "lintme" {
+ cx.lint(TEST_LINT, |lint| {
+ lint.build("item is named 'lintme'").set_span(it.span).emit()
+ });
+ }
+ }
+}
+
+#[no_mangle]
+fn __rustc_plugin_registrar(reg: &mut Registry) {
+ reg.lint_store.register_lints(&[&TEST_LINT]);
+ reg.lint_store.register_early_pass(|| box Pass);
+}
+```
+
+Then code like
+
+```rust,ignore (requires-plugin)
+#![feature(plugin)]
+#![plugin(lint_plugin_test)]
+
+fn lintme() { }
+```
+
+will produce a compiler warning:
+
+```txt
+foo.rs:4:1: 4:16 warning: item is named 'lintme', #[warn(test_lint)] on by default
+foo.rs:4 fn lintme() { }
+ ^~~~~~~~~~~~~~~
+```
+
+The components of a lint plugin are:
+
+* one or more `declare_lint!` invocations, which define static `Lint` structs;
+
+* a struct holding any state needed by the lint pass (here, none);
+
+* a `LintPass`
+ implementation defining how to check each syntax element. A single
+ `LintPass` may call `span_lint` for several different `Lint`s, but should
+ register them all through the `get_lints` method.
+
+Lint passes are syntax traversals, but they run at a late stage of compilation
+where type information is available. `rustc`'s [built-in
+lints](https://github.com/rust-lang/rust/blob/master/src/librustc_session/lint/builtin.rs)
+mostly use the same infrastructure as lint plugins, and provide examples of how
+to access type information.
+
+Lints defined by plugins are controlled by the usual [attributes and compiler
+flags](../../reference/attributes/diagnostics.md#lint-check-attributes), e.g.
+`#[allow(test_lint)]` or `-A test-lint`. These identifiers are derived from the
+first argument to `declare_lint!`, with appropriate case and punctuation
+conversion.
+
+You can run `rustc -W help foo.rs` to see a list of lints known to `rustc`,
+including those provided by plugins loaded by `foo.rs`.
+"##,
+ },
+ Lint {
+ label: "print_internals",
+ description: r##"# `print_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "profiler_runtime",
+ description: r##"# `profiler_runtime`
+
+The tracking issue for this feature is: [#42524](https://github.com/rust-lang/rust/issues/42524).
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "profiler_runtime_lib",
+ description: r##"# `profiler_runtime_lib`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "raw_dylib",
+ description: r##"# `raw_dylib`
+
+The tracking issue for this feature is: [#58713]
+
+[#58713]: https://github.com/rust-lang/rust/issues/58713
+
+------------------------
+
+The `raw_dylib` feature allows you, on Windows, to link against the implementations of functions in an `extern`
+block without linking against an import library.
+
+```rust,ignore (partial-example)
+#![feature(raw_dylib)]
+
+#[link(name="library", kind="raw-dylib")]
+extern {
+ fn extern_function(x: i32);
+}
+
+fn main() {
+ unsafe {
+ extern_function(14);
+ }
+}
+```
+
+## Limitations
+
+Currently, this feature is only supported on `-windows-msvc` targets. Non-Windows platforms don't have import
+libraries, and an incompatibility between LLVM and the BFD linker means that it is not currently supported on
+`-windows-gnu` targets.
+
+On the `i686-pc-windows-msvc` target, this feature supports only the `cdecl`, `stdcall`, `system`, and `fastcall`
+calling conventions.
+"##,
+ },
+ Lint {
+ label: "repr128",
+ description: r##"# `repr128`
+
+The tracking issue for this feature is: [#56071]
+
+[#56071]: https://github.com/rust-lang/rust/issues/56071
+
+------------------------
+
+The `repr128` feature adds support for `#[repr(u128)]` on `enum`s.
+
+```rust
+#![feature(repr128)]
+
+#[repr(u128)]
+enum Foo {
+ Bar(u64),
+}
+```
+"##,
+ },
+ Lint {
+ label: "rt",
+ description: r##"# `rt`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "rustc_attrs",
+ description: r##"# `rustc_attrs`
+
+This feature has no tracking issue, and is therefore internal to
+the compiler, not being intended for general use.
+
+Note: `rustc_attrs` enables many rustc-internal attributes and this page
+only discusses a few of them.
+
+------------------------
+
+The `rustc_attrs` feature allows debugging rustc type layouts by using
+`#[rustc_layout(...)]` to debug layout at compile time (it even works
+with `cargo check`) as an alternative to `rustc -Z print-type-sizes`
+that is way more verbose.
+
+Options provided by `#[rustc_layout(...)]` are `debug`, `size`, `align`,
+`abi`. Note that it only works on sized types without generics.
+
+## Examples
+
+```rust,compile_fail
+#![feature(rustc_attrs)]
+
+#[rustc_layout(abi, size)]
+pub enum X {
+ Y(u8, u8, u8),
+ Z(isize),
+}
+```
+
+When that is compiled, the compiler will error with something like
+
+```text
+error: abi: Aggregate { sized: true }
+ --> src/lib.rs:4:1
+ |
+4 | / pub enum X {
+5 | | Y(u8, u8, u8),
+6 | | Z(isize),
+7 | | }
+ | |_^
+
+error: size: Size { raw: 16 }
+ --> src/lib.rs:4:1
+ |
+4 | / pub enum X {
+5 | | Y(u8, u8, u8),
+6 | | Z(isize),
+7 | | }
+ | |_^
+
+error: aborting due to 2 previous errors
+```
+"##,
+ },
+ Lint {
+ label: "sort_internals",
+ description: r##"# `sort_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "str_internals",
+ description: r##"# `str_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "test",
+ description: r##"# `test`
+
+The tracking issue for this feature is: None.
+
+------------------------
+
+The internals of the `test` crate are unstable, behind the `test` flag. The
+most widely used part of the `test` crate is benchmark tests, which can test
+the performance of your code. Let's make our `src/lib.rs` look like this
+(comments elided):
+
+```rust,no_run
+#![feature(test)]
+
+extern crate test;
+
+pub fn add_two(a: i32) -> i32 {
+ a + 2
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use test::Bencher;
+
+ #[test]
+ fn it_works() {
+ assert_eq!(4, add_two(2));
+ }
+
+ #[bench]
+ fn bench_add_two(b: &mut Bencher) {
+ b.iter(|| add_two(2));
+ }
+}
+```
+
+Note the `test` feature gate, which enables this unstable feature.
+
+We've imported the `test` crate, which contains our benchmarking support.
+We have a new function as well, with the `bench` attribute. Unlike regular
+tests, which take no arguments, benchmark tests take a `&mut Bencher`. This
+`Bencher` provides an `iter` method, which takes a closure. This closure
+contains the code we'd like to benchmark.
+
+We can run benchmark tests with `cargo bench`:
+
+```bash
+$ cargo bench
+ Compiling adder v0.0.1 (file:///home/steve/tmp/adder)
+ Running target/release/adder-91b3e234d4ed382a
+
+running 2 tests
+test tests::it_works ... ignored
+test tests::bench_add_two ... bench: 1 ns/iter (+/- 0)
+
+test result: ok. 0 passed; 0 failed; 1 ignored; 1 measured
+```
+
+Our non-benchmark test was ignored. You may have noticed that `cargo bench`
+takes a bit longer than `cargo test`. This is because Rust runs our benchmark
+a number of times, and then takes the average. Because we're doing so little
+work in this example, we have a `1 ns/iter (+/- 0)`, but this would show
+the variance if there were one.
+
+Advice on writing benchmarks (a short sketch follows this list):
+
+* Move setup code outside the `iter` loop; only put the part you want to measure inside
+* Make the code do "the same thing" on each iteration; do not accumulate or change state
+* Make the outer function idempotent too; the benchmark runner is likely to run
+ it many times
+* Make the inner `iter` loop short and fast so benchmark runs are fast and the
+ calibrator can adjust the run-length at fine resolution
+* Make the code in the `iter` loop do something simple, to assist in pinpointing
+ performance improvements (or regressions)
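+
+For instance, a minimal sketch applying the first two points, with the setup
+kept outside the measured closure:
+
+```rust,no_run
+#![feature(test)]
+
+extern crate test;
+use test::Bencher;
+
+#[bench]
+fn bench_sum_1000(b: &mut Bencher) {
+    // Setup runs once, outside the measured closure.
+    let data: Vec<u64> = (0..1000).collect();
+    b.iter(|| {
+        // Only the summation itself is measured; returning the result keeps
+        // the optimizer from discarding it.
+        data.iter().sum::<u64>()
+    });
+}
+```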
+
+## Gotcha: optimizations
+
+There's another tricky part to writing benchmarks: benchmarks compiled with
+optimizations activated can be dramatically changed by the optimizer so that
+the benchmark is no longer benchmarking what one expects. For example, the
+compiler might recognize that some calculation has no external effects and
+remove it entirely.
+
+```rust,no_run
+#![feature(test)]
+
+extern crate test;
+use test::Bencher;
+
+#[bench]
+fn bench_xor_1000_ints(b: &mut Bencher) {
+ b.iter(|| {
+ (0..1000).fold(0, |old, new| old ^ new);
+ });
+}
+```
+
+gives the following results
+
+```text
+running 1 test
+test bench_xor_1000_ints ... bench: 0 ns/iter (+/- 0)
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured
+```
+
+The benchmarking runner offers two ways to avoid this. Either the closure that
+the `iter` method receives can return an arbitrary value, which forces the
+optimizer to consider the result used and ensures it cannot remove the
+computation entirely. This could be done for the example above by adjusting the
+`b.iter` call to
+
+```rust
+# struct X;
+# impl X { fn iter<T, F>(&self, _: F) where F: FnMut() -> T {} } let b = X;
+b.iter(|| {
+ // Note lack of `;` (could also use an explicit `return`).
+ (0..1000).fold(0, |old, new| old ^ new)
+});
+```
+
+Or, the other option is to call the generic `test::black_box` function, which
+is an opaque "black box" to the optimizer and so forces it to consider any
+argument as used.
+
+```rust
+#![feature(test)]
+
+extern crate test;
+
+# fn main() {
+# struct X;
+# impl X { fn iter<T, F>(&self, _: F) where F: FnMut() -> T {} } let b = X;
+b.iter(|| {
+ let n = test::black_box(1000);
+
+ (0..n).fold(0, |a, b| a ^ b)
+})
+# }
+```
+
+Neither of these reads or modifies the value, and both are very cheap for
+small values.
+Larger values can be passed indirectly to reduce overhead (e.g.
+`black_box(&huge_struct)`).
+
+Performing either of the above changes gives the following benchmarking results
+
+```text
+running 1 test
+test bench_xor_1000_ints ... bench: 131 ns/iter (+/- 3)
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured
+```
+
+However, the optimizer can still modify a testcase in an undesirable manner
+even when using either of the above.
+"##,
+ },
+ Lint {
+ label: "thread_local_internals",
+ description: r##"# `thread_local_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "trace_macros",
+ description: r##"# `trace_macros`
+
+The tracking issue for this feature is [#29598].
+
+[#29598]: https://github.com/rust-lang/rust/issues/29598
+
+------------------------
+
+With `trace_macros` you can trace the expansion of macros in your code.
+
+## Examples
+
+```rust
+#![feature(trace_macros)]
+
+fn main() {
+ trace_macros!(true);
+ println!("Hello, Rust!");
+ trace_macros!(false);
+}
+```
+
+The `cargo build` output:
+
+```txt
+note: trace_macro
+ --> src/main.rs:5:5
+ |
+5 | println!("Hello, Rust!");
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: expanding `println! { "Hello, Rust!" }`
+ = note: to `print ! ( concat ! ( "Hello, Rust!" , "\n" ) )`
+ = note: expanding `print! { concat ! ( "Hello, Rust!" , "\n" ) }`
+ = note: to `$crate :: io :: _print ( format_args ! ( concat ! ( "Hello, Rust!" , "\n" ) )
+ )`
+
+ Finished dev [unoptimized + debuginfo] target(s) in 0.60 secs
+```
+"##,
+ },
+ Lint {
+ label: "trait_alias",
+ description: r##"# `trait_alias`
+
+The tracking issue for this feature is: [#41517]
+
+[#41517]: https://github.com/rust-lang/rust/issues/41517
+
+------------------------
+
+The `trait_alias` feature adds support for trait aliases. These allow aliases
+to be created for one or more traits (currently just a single regular trait plus
+any number of auto-traits), and used wherever traits would normally be used as
+either bounds or trait objects.
+
+```rust
+#![feature(trait_alias)]
+
+trait Foo = std::fmt::Debug + Send;
+trait Bar = Foo + Sync;
+
+// Use trait alias as bound on type parameter.
+fn foo<T: Foo>(v: &T) {
+ println!("{:?}", v);
+}
+
+pub fn main() {
+ foo(&1);
+
+ // Use trait alias for trait objects.
+ let a: &Bar = &123;
+ println!("{:?}", a);
+ let b = Box::new(456) as Box<dyn Foo>;
+ println!("{:?}", b);
+}
+```
+"##,
+ },
+ Lint {
+ label: "trait_upcasting",
+ description: r##"# `trait_upcasting`
+
+The tracking issue for this feature is: [#65991]
+
+[#65991]: https://github.com/rust-lang/rust/issues/65991
+
+------------------------
+
+The `trait_upcasting` feature adds support for trait upcasting coercion. This allows a
+trait object of type `dyn Bar` to be cast to a trait object of type `dyn Foo`
+so long as `Bar: Foo`.
+
+```rust,edition2018
+#![feature(trait_upcasting)]
+#![allow(incomplete_features)]
+
+trait Foo {}
+
+trait Bar: Foo {}
+
+impl Foo for i32 {}
+
+impl<T: Foo + ?Sized> Bar for T {}
+
+let bar: &dyn Bar = &123;
+let foo: &dyn Foo = bar;
+```
+"##,
+ },
+ Lint {
+ label: "transparent_unions",
+ description: r##"# `transparent_unions`
+
+The tracking issue for this feature is [#60405]
+
+[#60405]: https://github.com/rust-lang/rust/issues/60405
+
+----
+
+The `transparent_unions` feature allows you to mark `union`s as
+`#[repr(transparent)]`. A `union` may be `#[repr(transparent)]` in exactly the
+same conditions in which a `struct` may be `#[repr(transparent)]` (generally,
+this means the `union` must have exactly one non-zero-sized field). Some
+concrete illustrations follow.
+
+```rust
+#![feature(transparent_unions)]
+
+// This union has the same representation as `f32`.
+#[repr(transparent)]
+union SingleFieldUnion {
+ field: f32,
+}
+
+// This union has the same representation as `usize`.
+#[repr(transparent)]
+union MultiFieldUnion {
+ field: usize,
+ nothing: (),
+}
+```
+
+For consistency with transparent `struct`s, `union`s must have exactly one
+non-zero-sized field. If all fields are zero-sized, the `union` must not be
+`#[repr(transparent)]`:
+
+```rust
+#![feature(transparent_unions)]
+
+// This (non-transparent) union is already valid in stable Rust:
+pub union GoodUnion {
+ pub nothing: (),
+}
+
+// Error: transparent union needs exactly one non-zero-sized field, but has 0
+// #[repr(transparent)]
+// pub union BadUnion {
+// pub nothing: (),
+// }
+```
+
+The one exception is that if the `union` is generic over `T` and has a field of
+type `T`, it may be `#[repr(transparent)]` even if `T` is a zero-sized type:
+
+```rust
+#![feature(transparent_unions)]
+
+// This union has the same representation as `T`.
+#[repr(transparent)]
+pub union GenericUnion<T: Copy> { // Unions with non-`Copy` fields are unstable.
+ pub field: T,
+ pub nothing: (),
+}
+
+// This is okay even though `()` is a zero-sized type.
+pub const THIS_IS_OKAY: GenericUnion<()> = GenericUnion { field: () };
+```
+
+Like transparent `struct`s, a transparent `union` of type `U` has the same
+layout, size, and ABI as its single non-ZST field. If it is generic over a type
+`T`, and all its fields are ZSTs except for exactly one field of type `T`, then
+it has the same layout and ABI as `T` (even if `T` is a ZST when monomorphized).
+
+Like transparent `struct`s, transparent `union`s are FFI-safe if and only if
+their underlying representation type is also FFI-safe.
+
+A `union` may not be eligible for the same nonnull-style optimizations that a
+`struct` or `enum` (with the same fields) is eligible for. Adding
+`#[repr(transparent)]` to a `union` does not change this. To give a more concrete
+example, it is unspecified whether `size_of::<T>()` is equal to
+`size_of::<Option<T>>()`, where `T` is a `union` (regardless of whether or not
+it is transparent). The Rust compiler is free to perform this optimization if
+possible, but is not required to, and different compiler versions may differ in
+their application of these optimizations.
+"##,
+ },
+ Lint {
+ label: "try_blocks",
+ description: r##"# `try_blocks`
+
+The tracking issue for this feature is: [#31436]
+
+[#31436]: https://github.com/rust-lang/rust/issues/31436
+
+------------------------
+
+The `try_blocks` feature adds support for `try` blocks. A `try`
+block creates a new scope one can use the `?` operator in.
+
+```rust,edition2018
+#![feature(try_blocks)]
+
+use std::num::ParseIntError;
+
+let result: Result<i32, ParseIntError> = try {
+ "1".parse::<i32>()?
+ + "2".parse::<i32>()?
+ + "3".parse::<i32>()?
+};
+assert_eq!(result, Ok(6));
+
+let result: Result<i32, ParseIntError> = try {
+ "1".parse::<i32>()?
+ + "foo".parse::<i32>()?
+ + "3".parse::<i32>()?
+};
+assert!(result.is_err());
+```
+"##,
+ },
+ Lint {
+ label: "type_changing_struct_update",
+ description: r##"# `type_changing_struct_update`
+
+The tracking issue for this feature is: [#86555]
+
+[#86555]: https://github.com/rust-lang/rust/issues/86555
+
+------------------------
+
+This implements [RFC2528]. When turned on, you can create instances of the same struct
+that have different generic type or lifetime parameters.
+
+[RFC2528]: https://github.com/rust-lang/rfcs/blob/master/text/2528-type-changing-struct-update-syntax.md
+
+```rust
+#![allow(unused_variables, dead_code)]
+#![feature(type_changing_struct_update)]
+
+fn main () {
+ struct Foo<T, U> {
+ field1: T,
+ field2: U,
+ }
+
+ let base: Foo<String, i32> = Foo {
+ field1: String::from("hello"),
+ field2: 1234,
+ };
+ let updated: Foo<f64, i32> = Foo {
+ field1: 3.14,
+ ..base
+ };
+}
+```
+"##,
+ },
+ Lint {
+ label: "unboxed_closures",
+ description: r##"# `unboxed_closures`
+
+The tracking issue for this feature is [#29625]
+
+See Also: [`fn_traits`](../library-features/fn-traits.md)
+
+[#29625]: https://github.com/rust-lang/rust/issues/29625
+
+----
+
+The `unboxed_closures` feature allows you to write functions using the `"rust-call"` ABI,
+required for implementing the [`Fn*`] family of traits. `"rust-call"` functions must have
+exactly one (non self) argument, a tuple representing the argument list.
+
+[`Fn*`]: https://doc.rust-lang.org/std/ops/trait.Fn.html
+
+```rust
+#![feature(unboxed_closures)]
+
+extern "rust-call" fn add_args(args: (u32, u32)) -> u32 {
+ args.0 + args.1
+}
+
+fn main() {}
+```
+"##,
+ },
+ Lint {
+ label: "unsized_locals",
+ description: r##"# `unsized_locals`
+
+The tracking issue for this feature is: [#48055]
+
+[#48055]: https://github.com/rust-lang/rust/issues/48055
+
+------------------------
+
+This implements [RFC1909]. When turned on, you can have unsized arguments and locals:
+
+[RFC1909]: https://github.com/rust-lang/rfcs/blob/master/text/1909-unsized-rvalues.md
+
+```rust
+#![allow(incomplete_features)]
+#![feature(unsized_locals, unsized_fn_params)]
+
+use std::any::Any;
+
+fn main() {
+ let x: Box<dyn Any> = Box::new(42);
+ let x: dyn Any = *x;
+ // ^ unsized local variable
+ // ^^ unsized temporary
+ foo(x);
+}
+
+fn foo(_: dyn Any) {}
+// ^^^^^^ unsized argument
+```
+
+The RFC still forbids the following unsized expressions:
+
+```rust,compile_fail
+#![feature(unsized_locals)]
+
+use std::any::Any;
+
+struct MyStruct<T: ?Sized> {
+ content: T,
+}
+
+struct MyTupleStruct<T: ?Sized>(T);
+
+fn answer() -> Box<dyn Any> {
+ Box::new(42)
+}
+
+fn main() {
+ // You CANNOT have unsized statics.
+ static X: dyn Any = *answer(); // ERROR
+ const Y: dyn Any = *answer(); // ERROR
+
+ // You CANNOT have struct initialized unsized.
+ MyStruct { content: *answer() }; // ERROR
+ MyTupleStruct(*answer()); // ERROR
+ (42, *answer()); // ERROR
+
+ // You CANNOT have unsized return types.
+ fn my_function() -> dyn Any { *answer() } // ERROR
+
+ // You CAN have unsized local variables...
+ let mut x: dyn Any = *answer(); // OK
+ // ...but you CANNOT reassign to them.
+ x = *answer(); // ERROR
+
+ // You CANNOT even initialize them separately.
+ let y: dyn Any; // OK
+ y = *answer(); // ERROR
+
+ // Not mentioned in the RFC, but by-move captured variables are also Sized.
+ let x: dyn Any = *answer();
+ (move || { // ERROR
+ let y = x;
+ })();
+
+ // You CAN create a closure with unsized arguments,
+ // but you CANNOT call it.
+ // This is an implementation detail and may be changed in the future.
+ let f = |x: dyn Any| {};
+ f(*answer()); // ERROR
+}
+```
+
+## By-value trait objects
+
+With this feature, you can have by-value `self` arguments without `Self: Sized` bounds.
+
+```rust
+#![feature(unsized_fn_params)]
+
+trait Foo {
+ fn foo(self) {}
+}
+
+impl<T: ?Sized> Foo for T {}
+
+fn main() {
+ let slice: Box<[i32]> = Box::new([1, 2, 3]);
+ <[i32] as Foo>::foo(*slice);
+}
+```
+
+And `Foo` will also be object-safe.
+
+```rust
+#![feature(unsized_fn_params)]
+
+trait Foo {
+ fn foo(self) {}
+}
+
+impl<T: ?Sized> Foo for T {}
+
+fn main () {
+ let slice: Box<dyn Foo> = Box::new([1, 2, 3]);
+ // doesn't compile yet
+ <dyn Foo as Foo>::foo(*slice);
+}
+```
+
+One of the objectives of this feature is to allow `Box<dyn FnOnce>`.
+
+## Variable length arrays
+
+The RFC also describes an extension to the array literal syntax: `[e; dyn n]`. In the syntax, `n` isn't necessarily a constant expression. The array is dynamically allocated on the stack and has the type of `[T]`, instead of `[T; n]`.
+
+```rust,ignore (not-yet-implemented)
+#![feature(unsized_locals)]
+
+fn mergesort<T: Ord>(a: &mut [T]) {
+ let mut tmp = [T; dyn a.len()];
+ // ...
+}
+
+fn main() {
+ let mut a = [3, 1, 5, 6];
+ mergesort(&mut a);
+ assert_eq!(a, [1, 3, 5, 6]);
+}
+```
+
+VLAs are not implemented yet. The syntax isn't final, either. We may need an alternative syntax for Rust 2015 because, in Rust 2015, expressions like `[e; dyn(1)]` would be ambiguous. One possible alternative proposed in the RFC is `[e; n]`: if `n` captures one or more local variables, then it is considered as `[e; dyn n]`.
+
+## Advisory on stack usage
+
+It's advised not to casually use the `#![feature(unsized_locals)]` feature. Typical use-cases are:
+
+- When you need by-value trait objects.
+- When you really need a fast allocation of small temporary arrays.
+
+Another pitfall is repetitive allocation and temporaries. Currently the compiler simply extends the stack frame every time it encounters an unsized assignment. So for example, the code
+
+```rust
+#![feature(unsized_locals)]
+
+fn main() {
+ let x: Box<[i32]> = Box::new([1, 2, 3, 4, 5]);
+ let _x = {{{{{{{{{{*x}}}}}}}}}};
+}
+```
+
+and the code
+
+```rust
+#![feature(unsized_locals)]
+
+fn main() {
+ for _ in 0..10 {
+ let x: Box<[i32]> = Box::new([1, 2, 3, 4, 5]);
+ let _x = *x;
+ }
+}
+```
+
+will unnecessarily extend the stack frame.
+"##,
+ },
+ Lint {
+ label: "unsized_tuple_coercion",
+ description: r##"# `unsized_tuple_coercion`
+
+The tracking issue for this feature is: [#42877]
+
+[#42877]: https://github.com/rust-lang/rust/issues/42877
+
+------------------------
+
+This is a part of [RFC0401]. According to the RFC, there should be an implementation like this:
+
+```rust,ignore (partial-example)
+impl<..., T, U: ?Sized> Unsized<(..., U)> for (..., T) where T: Unsized<U> {}
+```
+
+This implementation is currently gated behind `#[feature(unsized_tuple_coercion)]` to avoid insta-stability. Therefore you can use it like this:
+
+```rust
+#![feature(unsized_tuple_coercion)]
+
+fn main() {
+ let x : ([i32; 3], [i32; 3]) = ([1, 2, 3], [4, 5, 6]);
+ let y : &([i32; 3], [i32]) = &x;
+ assert_eq!(y.1[0], 4);
+}
+```
+
+[RFC0401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md
+"##,
+ },
+ Lint {
+ label: "update_panic_count",
+ description: r##"# `update_panic_count`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "windows_c",
+ description: r##"# `windows_c`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "windows_handle",
+ description: r##"# `windows_handle`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "windows_net",
+ description: r##"# `windows_net`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+ Lint {
+ label: "windows_stdio",
+ description: r##"# `windows_stdio`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+"##,
+ },
+];
+
+pub const CLIPPY_LINTS: &[Lint] = &[
+ Lint {
+ label: "clippy::absurd_extreme_comparisons",
+ description: r##"Checks for comparisons where one side of the relation is
+either the minimum or maximum value for its type and warns if it involves a
+case that is always true or always false. Only integer and boolean types are
+checked."##,
+ },
+ Lint {
+ label: "clippy::almost_swapped",
+ description: r##"Checks for `foo = bar; bar = foo` sequences."##,
+ },
+ Lint {
+ label: "clippy::approx_constant",
+ description: r##"Checks for floating point literals that approximate
+constants which are defined in
+[`std::f32::consts`](https://doc.rust-lang.org/stable/std/f32/consts/#constants)
+or
+[`std::f64::consts`](https://doc.rust-lang.org/stable/std/f64/consts/#constants),
+respectively, suggesting to use the predefined constant."##,
+ },
+ Lint {
+ label: "clippy::as_conversions",
+ description: r##"Checks for usage of `as` conversions.
+
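+As a rough sketch of what gets flagged and one possible alternative (the values here are
+placeholders):
+
+```rust
+use std::convert::TryFrom;
+
+fn main() {
+    let x: u64 = 300;
+    // Flagged: `as` silently truncates when the value does not fit.
+    let _a = x as u8;
+    // One alternative that makes the failure case explicit:
+    let _b = u8::try_from(x); // Err(..), since 300 > u8::MAX
+}
+```
+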
+Note that this lint is specialized in linting *every single* use of `as`
+regardless of whether good alternatives exist or not.
+If you want more precise lints for `as`, please consider using these separate lints:
+`unnecessary_cast`, `cast_lossless/possible_truncation/possible_wrap/precision_loss/sign_loss`,
+`fn_to_numeric_cast(_with_truncation)`, `char_lit_as_u8`, `ref_to_mut` and `ptr_as_ptr`.
+There is a good explanation of the reason why this lint should work in this way and how it is useful
+[in this issue](https://github.com/rust-lang/rust-clippy/issues/5122)."##,
+ },
+ Lint {
+ label: "clippy::assertions_on_constants",
+ description: r##"Checks for `assert!(true)` and `assert!(false)` calls."##,
+ },
+ Lint {
+ label: "clippy::assign_op_pattern",
+ description: r##"Checks for `a = a op b` or `a = b commutative_op a`
+patterns."##,
+ },
+ Lint {
+ label: "clippy::assign_ops",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::async_yields_async",
+ description: r##"Checks for async blocks that yield values of types
+that can themselves be awaited."##,
+ },
+ Lint {
+ label: "clippy::await_holding_lock",
+ description: r##"Checks for calls to await while holding a
+non-async-aware MutexGuard."##,
+ },
+ Lint {
+ label: "clippy::await_holding_refcell_ref",
+ description: r##"Checks for calls to await while holding a
+`RefCell` `Ref` or `RefMut`."##,
+ },
+ Lint {
+ label: "clippy::bad_bit_mask",
+ description: r##"Checks for incompatible bit masks in comparisons.
+
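+A small sketch of the kind of comparison this catches:
+
+```rust
+fn main() {
+    let x = 5_u32;
+    // `x & 2` can only ever be 0 or 2, so this comparison is always false.
+    if x & 2 == 3 {
+        unreachable!();
+    }
+}
+```
+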
+The formula for detecting if an expression of the type `_ <bit_op> m
+<cmp_op> c` (where `<bit_op>` is one of {`&`, `|`} and `<cmp_op>` is one of
+{`==`, `!=`, `<`, `>=`, `>`, `<=`}) can be determined from the following
+table:
+
+|Comparison |Bit Op|Example |is always|Formula |
+|------------|------|------------|---------|----------------------|
+|`==` or `!=`| `&` |`x & 2 == 3`|`false` |`c & m != c` |
+|`<` or `>=`| `&` |`x & 2 < 3` |`true` |`m < c` |
+|`>` or `<=`| `&` |`x & 1 > 1` |`false` |`m <= c` |
+|`==` or `!=`| `|` |`x | 1 == 0`|`false` |`c | m != c` |
+|`<` or `>=`| `|` |`x | 1 < 1` |`false` |`m >= c` |
+|`<=` or `>` | `|` |`x | 1 > 0` |`true` |`m > c` |"##,
+ },
+ Lint {
+ label: "clippy::bind_instead_of_map",
+ description: r##"Checks for usage of `_.and_then(|x| Some(y))`, `_.and_then(|x| Ok(y))` or
+`_.or_else(|x| Err(y))`."##,
+ },
+ Lint {
+ label: "clippy::blacklisted_name",
+ description: r##"Checks for usage of blacklisted names for variables, such
+as `foo`."##,
+ },
+ Lint {
+ label: "clippy::blanket_clippy_restriction_lints",
+ description: r##"Checks for `warn`/`deny`/`forbid` attributes targeting the whole clippy::restriction category."##,
+ },
+ Lint {
+ label: "clippy::blocks_in_if_conditions",
+ description: r##"Checks for `if` conditions that use blocks containing an
+expression, statements or conditions that use closures with blocks."##,
+ },
+ Lint {
+ label: "clippy::bool_assert_comparison",
+ description: r##"This lint warns about boolean comparisons in assert-like macros."##,
+ },
+ Lint {
+ label: "clippy::bool_comparison",
+ description: r##"Checks for expressions of the form `x == true`,
+`x != true` and order comparisons such as `x < true` (or vice versa) and
+suggest using the variable directly."##,
+ },
+ Lint {
+ label: "clippy::borrow_interior_mutable_const",
+ description: r##"Checks if `const` items which is interior mutable (e.g.,
+contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.) has been borrowed directly."##,
+ },
+ Lint {
+ label: "clippy::borrowed_box",
+ description: r##"Checks for use of `&Box<T>` anywhere in the code.
+Check the [Box documentation](https://doc.rust-lang.org/std/boxed/index.html) for more information."##,
+ },
+ Lint {
+ label: "clippy::box_collection",
+ description: r##"Checks for use of `Box<T>` where T is a collection such as Vec anywhere in the code.
+Check the [Box documentation](https://doc.rust-lang.org/std/boxed/index.html) for more information."##,
+ },
+ Lint {
+ label: "clippy::boxed_local",
+ description: r##"Checks for usage of `Box<T>` where an unboxed `T` would
+work fine."##,
+ },
+ Lint {
+ label: "clippy::branches_sharing_code",
+ description: r##"Checks if the `if` and `else` block contain shared code that can be
+moved out of the blocks."##,
+ },
+ Lint {
+ label: "clippy::builtin_type_shadow",
+ description: r##"Warns if a generic shadows a built-in type."##,
+ },
+ Lint {
+ label: "clippy::bytes_nth",
+ description: r##"Checks for the use of `.bytes().nth()`."##,
+ },
+ Lint {
+ label: "clippy::cargo_common_metadata",
+ description: r##"Checks to see if all common metadata is defined in
+`Cargo.toml`. See: https://rust-lang-nursery.github.io/api-guidelines/documentation.html#cargotoml-includes-all-common-metadata-c-metadata"##,
+ },
+ Lint {
+ label: "clippy::case_sensitive_file_extension_comparisons",
+ description: r##"Checks for calls to `ends_with` with possible file extensions
+and suggests to use a case-insensitive approach instead."##,
+ },
+ Lint {
+ label: "clippy::cast_lossless",
+ description: r##"Checks for casts between numerical types that may
+be replaced by safe conversion functions."##,
+ },
+ Lint {
+ label: "clippy::cast_possible_truncation",
+ description: r##"Checks for casts between numerical types that may
+truncate large values. This is expected behavior, so the cast is `Allow` by
+default."##,
+ },
+ Lint {
+ label: "clippy::cast_possible_wrap",
+ description: r##"Checks for casts from an unsigned type to a signed type of
+the same size. Performing such a cast is a 'no-op' for the compiler,
+i.e., nothing is changed at the bit level, and the binary representation of
+the value is reinterpreted. This can cause wrapping if the value is too big
+for the target signed type. However, the cast works as defined, so this lint
+is `Allow` by default."##,
+ },
+ Lint {
+ label: "clippy::cast_precision_loss",
+ description: r##"Checks for casts from any numerical to a float type where
+the receiving type cannot store all values from the original type without
+rounding errors. This possible rounding is to be expected, so this lint is
+`Allow` by default.
+
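+A minimal sketch of the rounding involved:
+
+```rust
+fn main() {
+    // 2^24 + 1 is the first positive integer that `f32` cannot represent exactly.
+    let x: i32 = 16_777_217;
+    let y = x as f32; // flagged: this rounds to 16_777_216.0
+    assert_eq!(y as i32, 16_777_216);
+}
+```
+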
+Basically, this warns on casting any integer with 32 or more bits to `f32`
+or any 64-bit integer to `f64`."##,
+ },
+ Lint {
+ label: "clippy::cast_ptr_alignment",
+ description: r##"Checks for casts, using `as` or `pointer::cast`,
+from a less-strictly-aligned pointer to a more-strictly-aligned pointer"##,
+ },
+ Lint {
+ label: "clippy::cast_ref_to_mut",
+ description: r##"Checks for casts of `&T` to `&mut T` anywhere in the code."##,
+ },
+ Lint {
+ label: "clippy::cast_sign_loss",
+ description: r##"Checks for casts from a signed to an unsigned numerical
+type. In this case, negative values wrap around to large positive values,
+which can be quite surprising in practice. However, as the cast works as
+defined, this lint is `Allow` by default."##,
+ },
+ Lint {
+ label: "clippy::char_lit_as_u8",
+ description: r##"Checks for expressions where a character literal is cast
+to `u8` and suggests using a byte literal instead."##,
+ },
+ Lint {
+ label: "clippy::chars_last_cmp",
+ description: r##"Checks for usage of `_.chars().last()` or
+`_.chars().next_back()` on a `str` to check if it ends with a given char."##,
+ },
+ Lint {
+ label: "clippy::chars_next_cmp",
+ description: r##"Checks for usage of `.chars().next()` on a `str` to check
+if it starts with a given char."##,
+ },
+ Lint {
+ label: "clippy::checked_conversions",
+ description: r##"Checks for explicit bounds checking when casting."##,
+ },
+ Lint {
+ label: "clippy::clone_double_ref",
+ description: r##"Checks for usage of `.clone()` on an `&&T`."##,
+ },
+ Lint {
+ label: "clippy::clone_on_copy",
+ description: r##"Checks for usage of `.clone()` on a `Copy` type."##,
+ },
+ Lint {
+ label: "clippy::clone_on_ref_ptr",
+ description: r##"Checks for usage of `.clone()` on a ref-counted pointer,
+(`Rc`, `Arc`, `rc::Weak`, or `sync::Weak`), and suggests calling Clone via unified
+function syntax instead (e.g., `Rc::clone(foo)`)."##,
+ },
+ Lint {
+ label: "clippy::cloned_instead_of_copied",
+ description: r##"Checks for usages of `cloned()` on an `Iterator` or `Option` where
+`copied()` could be used instead."##,
+ },
+ Lint { label: "clippy::cmp_nan", description: r##"Checks for comparisons to NaN."## },
+ Lint {
+ label: "clippy::cmp_null",
+ description: r##"This lint checks for equality comparisons with `ptr::null`"##,
+ },
+ Lint {
+ label: "clippy::cmp_owned",
+ description: r##"Checks for conversions to owned values just for the sake
+of a comparison."##,
+ },
+ Lint {
+ label: "clippy::cognitive_complexity",
+ description: r##"Checks for methods with high cognitive complexity."##,
+ },
+ Lint {
+ label: "clippy::collapsible_else_if",
+ description: r##"Checks for collapsible `else { if ... }` expressions
+that can be collapsed to `else if ...`."##,
+ },
+ Lint {
+ label: "clippy::collapsible_if",
+ description: r##"Checks for nested `if` statements which can be collapsed
+by `&&`-combining their conditions."##,
+ },
+ Lint {
+ label: "clippy::collapsible_match",
+ description: r##"Finds nested `match` or `if let` expressions where the patterns may be collapsed together
+without adding any branches.
+
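+A sketch of the pattern (`res` is just a placeholder value):
+
+```rust
+fn main() {
+    let res: Option<Option<i32>> = Some(Some(1));
+    // Collapsible into `if let Some(Some(n)) = res { .. }`.
+    if let Some(inner) = res {
+        if let Some(n) = inner {
+            println!("{}", n);
+        }
+    }
+}
+```
+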
+Note that this lint is not intended to find _all_ cases where nested match patterns can be merged, but only
+cases where merging would most likely make the code more readable."##,
+ },
+ Lint {
+ label: "clippy::comparison_chain",
+ description: r##"Checks comparison chains written with `if` that can be
+rewritten with `match` and `cmp`."##,
+ },
+ Lint {
+ label: "clippy::comparison_to_empty",
+ description: r##"Checks for comparing to an empty slice such as `` or `[]`,
+and suggests using `.is_empty()` where applicable."##,
+ },
+ Lint {
+ label: "clippy::copy_iterator",
+ description: r##"Checks for types that implement `Copy` as well as
+`Iterator`."##,
+ },
+ Lint {
+ label: "clippy::create_dir",
+ description: r##"Checks usage of `std::fs::create_dir` and suggest using `std::fs::create_dir_all` instead."##,
+ },
+ Lint {
+ label: "clippy::crosspointer_transmute",
+ description: r##"Checks for transmutes between a type `T` and `*T`."##,
+ },
+ Lint { label: "clippy::dbg_macro", description: r##"Checks for usage of dbg!() macro."## },
+ Lint {
+ label: "clippy::debug_assert_with_mut_call",
+ description: r##"Checks for function/method calls with a mutable
+parameter in `debug_assert!`, `debug_assert_eq!` and `debug_assert_ne!` macros."##,
+ },
+ Lint {
+ label: "clippy::decimal_literal_representation",
+ description: r##"Warns if there is a better representation for a numeric literal."##,
+ },
+ Lint {
+ label: "clippy::declare_interior_mutable_const",
+ description: r##"Checks for declaration of `const` items which is interior
+mutable (e.g., contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.)."##,
+ },
+ Lint {
+ label: "clippy::default_numeric_fallback",
+ description: r##"Checks for usage of unconstrained numeric literals which may cause default numeric fallback in type
+inference.
+
+Default numeric fallback means that if numeric types have not yet been bound to concrete
+types at the end of type inference, then integer type is bound to `i32`, and similarly
+floating type is bound to `f64`.
+
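+For example, with no other constraints both literals below fall back to the default types:
+
+```rust
+fn main() {
+    let x = 22;  // falls back to `i32`
+    let y = 0.5; // falls back to `f64`
+    println!("{} {}", x, y);
+}
+```
+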
+See [RFC0212](https://github.com/rust-lang/rfcs/blob/master/text/0212-restore-int-fallback.md) for more information about the fallback."##,
+ },
+ Lint {
+ label: "clippy::default_trait_access",
+ description: r##"Checks for literal calls to `Default::default()`."##,
+ },
+ Lint {
+ label: "clippy::deprecated_cfg_attr",
+ description: r##"Checks for `#[cfg_attr(rustfmt, rustfmt_skip)]` and suggests to replace it
+with `#[rustfmt::skip]`."##,
+ },
+ Lint {
+ label: "clippy::deprecated_semver",
+ description: r##"Checks for `#[deprecated]` annotations with a `since`
+field that is not a valid semantic version."##,
+ },
+ Lint {
+ label: "clippy::deref_addrof",
+ description: r##"Checks for usage of `*&` and `*&mut` in expressions."##,
+ },
+ Lint {
+ label: "clippy::derivable_impls",
+ description: r##"Detects manual `std::default::Default` implementations that are identical to a derived implementation."##,
+ },
+ Lint {
+ label: "clippy::derive_hash_xor_eq",
+ description: r##"Checks for deriving `Hash` but implementing `PartialEq`
+explicitly or vice versa."##,
+ },
+ Lint {
+ label: "clippy::derive_ord_xor_partial_ord",
+ description: r##"Checks for deriving `Ord` but implementing `PartialOrd`
+explicitly or vice versa."##,
+ },
+ Lint {
+ label: "clippy::disallowed_methods",
+ description: r##"Denies the configured methods and functions in clippy.toml"##,
+ },
+ Lint {
+ label: "clippy::disallowed_script_idents",
+ description: r##"Checks for usage of unicode scripts other than those explicitly allowed
+by the lint config.
+
+This lint doesn't take into account non-text scripts such as `Unknown` and `Linear_A`.
+It also ignores the `Common` script type.
+While configuring, be sure to use official script name [aliases] from
+[the list of supported scripts][supported_scripts].
+
+See also: [`non_ascii_idents`].
+
+[aliases]: http://www.unicode.org/reports/tr24/tr24-31.html#Script_Value_Aliases
+[supported_scripts]: https://www.unicode.org/iso15924/iso15924-codes.html"##,
+ },
+ Lint {
+ label: "clippy::disallowed_types",
+ description: r##"Denies the configured types in clippy.toml."##,
+ },
+ Lint {
+ label: "clippy::diverging_sub_expression",
+ description: r##"Checks for diverging calls that are not match arms or
+statements."##,
+ },
+ Lint {
+ label: "clippy::doc_markdown",
+ description: r##"Checks for the presence of `_`, `::` or camel-case words
+outside ticks in documentation."##,
+ },
+ Lint {
+ label: "clippy::double_comparisons",
+ description: r##"Checks for double comparisons that could be simplified to a single expression."##,
+ },
+ Lint {
+ label: "clippy::double_must_use",
+ description: r##"Checks for a `#[must_use]` attribute without
+further information on functions and methods that return a type already
+marked as `#[must_use]`."##,
+ },
+ Lint {
+ label: "clippy::double_neg",
+ description: r##"Detects expressions of the form `--x`."##,
+ },
+ Lint {
+ label: "clippy::double_parens",
+ description: r##"Checks for unnecessary double parentheses."##,
+ },
+ Lint {
+ label: "clippy::drop_copy",
+ description: r##"Checks for calls to `std::mem::drop` with a value
+that derives the Copy trait"##,
+ },
+ Lint {
+ label: "clippy::drop_ref",
+ description: r##"Checks for calls to `std::mem::drop` with a reference
+instead of an owned value."##,
+ },
+ Lint {
+ label: "clippy::duplicate_underscore_argument",
+ description: r##"Checks for function arguments having the similar names
+differing by an underscore."##,
+ },
+ Lint {
+ label: "clippy::duration_subsec",
+ description: r##"Checks for calculation of subsecond microseconds or milliseconds
+from other `Duration` methods."##,
+ },
+ Lint {
+ label: "clippy::else_if_without_else",
+ description: r##"Checks for usage of if expressions with an `else if` branch,
+but without a final `else` branch."##,
+ },
+ Lint {
+ label: "clippy::empty_enum",
+ description: r##"Checks for `enum`s with no variants.
+
+As of this writing, the `never_type` is still a
+nightly-only experimental API. Therefore, this lint is only triggered
+if the `never_type` is enabled."##,
+ },
+ Lint {
+ label: "clippy::empty_line_after_outer_attr",
+ description: r##"Checks for empty lines after outer attributes"##,
+ },
+ Lint { label: "clippy::empty_loop", description: r##"Checks for empty `loop` expressions."## },
+ Lint {
+ label: "clippy::enum_clike_unportable_variant",
+ description: r##"Checks for C-like enumerations that are
+`repr(isize/usize)` and have values that don't fit into an `i32`."##,
+ },
+ Lint { label: "clippy::enum_glob_use", description: r##"Checks for `use Enum::*`."## },
+ Lint {
+ label: "clippy::enum_variant_names",
+ description: r##"Detects enumeration variants that are prefixed or suffixed
+by the same characters."##,
+ },
+ Lint {
+ label: "clippy::eq_op",
+ description: r##"Checks for equal operands to comparison, logical and
+bitwise, difference and division binary operators (`==`, `>`, etc., `&&`,
+`||`, `&`, `|`, `^`, `-` and `/`)."##,
+ },
+ Lint {
+ label: "clippy::equatable_if_let",
+ description: r##"Checks for pattern matchings that can be expressed using equality."##,
+ },
+ Lint {
+ label: "clippy::erasing_op",
+ description: r##"Checks for erasing operations, e.g., `x * 0`."##,
+ },
+ Lint {
+ label: "clippy::eval_order_dependence",
+ description: r##"Checks for a read and a write to the same variable where
+whether the read occurs before or after the write depends on the evaluation
+order of sub-expressions."##,
+ },
+ Lint {
+ label: "clippy::excessive_precision",
+ description: r##"Checks for float literals with a precision greater
+than that supported by the underlying type."##,
+ },
+ Lint {
+ label: "clippy::exhaustive_enums",
+ description: r##"Warns on any exported `enum`s that are not tagged `#[non_exhaustive]`"##,
+ },
+ Lint {
+ label: "clippy::exhaustive_structs",
+ description: r##"Warns on any exported `structs`s that are not tagged `#[non_exhaustive]`"##,
+ },
+ Lint {
+ label: "clippy::exit",
+ description: r##"`exit()` terminates the program and doesn't provide a
+stack trace."##,
+ },
+ Lint {
+ label: "clippy::expect_fun_call",
+ description: r##"Checks for calls to `.expect(&format!(...))`, `.expect(foo(..))`,
+etc., and suggests to use `unwrap_or_else` instead"##,
+ },
+ Lint {
+ label: "clippy::expect_used",
+ description: r##"Checks for `.expect()` calls on `Option`s and `Result`s."##,
+ },
+ Lint {
+ label: "clippy::expl_impl_clone_on_copy",
+ description: r##"Checks for explicit `Clone` implementations for `Copy`
+types."##,
+ },
+ Lint {
+ label: "clippy::explicit_counter_loop",
+ description: r##"Checks `for` loops over slices with an explicit counter
+and suggests the use of `.enumerate()`."##,
+ },
+ Lint {
+ label: "clippy::explicit_deref_methods",
+ description: r##"Checks for explicit `deref()` or `deref_mut()` method calls."##,
+ },
+ Lint {
+ label: "clippy::explicit_into_iter_loop",
+ description: r##"Checks for loops on `y.into_iter()` where `y` will do, and
+suggests the latter."##,
+ },
+ Lint {
+ label: "clippy::explicit_iter_loop",
+ description: r##"Checks for loops on `x.iter()` where `&x` will do, and
+suggests the latter."##,
+ },
+ Lint {
+ label: "clippy::explicit_write",
+ description: r##"Checks for usage of `write!()` / `writeln()!` which can be
+replaced with `(e)print!()` / `(e)println!()`"##,
+ },
+ Lint {
+ label: "clippy::extend_from_slice",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::extend_with_drain",
+ description: r##"Checks for occurrences where one vector gets extended instead of append"##,
+ },
+ Lint {
+ label: "clippy::extra_unused_lifetimes",
+ description: r##"Checks for lifetimes in generics that are never used
+anywhere else."##,
+ },
+ Lint {
+ label: "clippy::fallible_impl_from",
+ description: r##"Checks for impls of `From<..>` that contain `panic!()` or `unwrap()`"##,
+ },
+ Lint {
+ label: "clippy::field_reassign_with_default",
+ description: r##"Checks for immediate reassignment of fields initialized
+with Default::default()."##,
+ },
+ Lint {
+ label: "clippy::filetype_is_file",
+ description: r##"Checks for `FileType::is_file()`."##,
+ },
+ Lint {
+ label: "clippy::filter_map",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::filter_map_identity",
+ description: r##"Checks for usage of `filter_map(|x| x)`."##,
+ },
+ Lint {
+ label: "clippy::filter_map_next",
+ description: r##"Checks for usage of `_.filter_map(_).next()`."##,
+ },
+ Lint {
+ label: "clippy::filter_next",
+ description: r##"Checks for usage of `_.filter(_).next()`."##,
+ },
+ Lint { label: "clippy::find_map", description: r##"Nothing. This lint has been deprecated."## },
+ Lint {
+ label: "clippy::flat_map_identity",
+ description: r##"Checks for usage of `flat_map(|x| x)`."##,
+ },
+ Lint {
+ label: "clippy::flat_map_option",
+ description: r##"Checks for usages of `Iterator::flat_map()` where `filter_map()` could be
+used instead."##,
+ },
+ Lint { label: "clippy::float_arithmetic", description: r##"Checks for float arithmetic."## },
+ Lint {
+ label: "clippy::float_cmp",
+ description: r##"Checks for (in-)equality comparisons on floating-point
+values (apart from zero), except in functions called `*eq*` (which probably
+implement equality for a type involving floats)."##,
+ },
+ Lint {
+ label: "clippy::float_cmp_const",
+ description: r##"Checks for (in-)equality comparisons on floating-point
+value and constant, except in functions called `*eq*` (which probably
+implement equality for a type involving floats)."##,
+ },
+ Lint {
+ label: "clippy::float_equality_without_abs",
+ description: r##"Checks for statements of the form `(a - b) < f32::EPSILON` or
+`(a - b) < f64::EPSILON`. Notes the missing `.abs()`."##,
+ },
+ Lint {
+ label: "clippy::fn_address_comparisons",
+ description: r##"Checks for comparisons with an address of a function item."##,
+ },
+ Lint {
+ label: "clippy::fn_params_excessive_bools",
+ description: r##"Checks for excessive use of
+bools in function definitions."##,
+ },
+ Lint {
+ label: "clippy::fn_to_numeric_cast",
+ description: r##"Checks for casts of function pointers to something other than usize"##,
+ },
+ Lint {
+ label: "clippy::fn_to_numeric_cast_any",
+ description: r##"Checks for casts of a function pointer to any integer type."##,
+ },
+ Lint {
+ label: "clippy::fn_to_numeric_cast_with_truncation",
+ description: r##"Checks for casts of a function pointer to a numeric type not wide enough to
+store address."##,
+ },
+ Lint {
+ label: "clippy::for_kv_map",
+ description: r##"Checks for iterating a map (`HashMap` or `BTreeMap`) and
+ignoring either the keys or values."##,
+ },
+ Lint {
+ label: "clippy::for_loops_over_fallibles",
+ description: r##"Checks for `for` loops over `Option` or `Result` values."##,
+ },
+ Lint {
+ label: "clippy::forget_copy",
+ description: r##"Checks for calls to `std::mem::forget` with a value that
+derives the Copy trait"##,
+ },
+ Lint {
+ label: "clippy::forget_ref",
+ description: r##"Checks for calls to `std::mem::forget` with a reference
+instead of an owned value."##,
+ },
+ Lint {
+ label: "clippy::format_in_format_args",
+ description: r##"Detects `format!` within the arguments of another macro that does
+formatting such as `format!` itself, `write!` or `println!`. Suggests
+inlining the `format!` call."##,
+ },
+ Lint {
+ label: "clippy::from_iter_instead_of_collect",
+ description: r##"Checks for `from_iter()` function calls on types that implement the `FromIterator`
+trait."##,
+ },
+ Lint {
+ label: "clippy::from_over_into",
+ description: r##"Searches for implementations of the `Into<..>` trait and suggests to implement `From<..>` instead."##,
+ },
+ Lint {
+ label: "clippy::from_str_radix_10",
+ description: r##"Checks for function invocations of the form `primitive::from_str_radix(s, 10)`"##,
+ },
+ Lint {
+ label: "clippy::future_not_send",
+ description: r##"This lint requires Future implementations returned from
+functions and methods to implement the `Send` marker trait. It is mostly
+used by library authors (public and internal) that target an audience where
+multithreaded executors are likely to be used for running these Futures."##,
+ },
+ Lint {
+ label: "clippy::get_last_with_len",
+ description: r##"Checks for using `x.get(x.len() - 1)` instead of
+`x.last()`."##,
+ },
+ Lint {
+ label: "clippy::get_unwrap",
+ description: r##"Checks for use of `.get().unwrap()` (or
+`.get_mut().unwrap()`) on a standard library type which implements `Index`"##,
+ },
+ Lint {
+ label: "clippy::identity_op",
+ description: r##"Checks for identity operations, e.g., `x + 0`."##,
+ },
+ Lint {
+ label: "clippy::if_let_mutex",
+ description: r##"Checks for `Mutex::lock` calls in `if let` expression
+with lock calls in any of the else blocks."##,
+ },
+ Lint {
+ label: "clippy::if_let_redundant_pattern_matching",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::if_not_else",
+ description: r##"Checks for usage of `!` or `!=` in an if condition with an
+else branch."##,
+ },
+ Lint {
+ label: "clippy::if_same_then_else",
+ description: r##"Checks for `if/else` with the same body as the *then* part
+and the *else* part."##,
+ },
+ Lint {
+ label: "clippy::if_then_some_else_none",
+ description: r##"Checks for if-else that could be written to `bool::then`."##,
+ },
+ Lint {
+ label: "clippy::ifs_same_cond",
+ description: r##"Checks for consecutive `if`s with the same condition."##,
+ },
+ Lint {
+ label: "clippy::implicit_clone",
+ description: r##"Checks for the usage of `_.to_owned()`, `vec.to_vec()`, or similar when calling `_.clone()` would be clearer."##,
+ },
+ Lint {
+ label: "clippy::implicit_hasher",
+ description: r##"Checks for public `impl` or `fn` missing generalization
+over different hashers and implicitly defaulting to the default hashing
+algorithm (`SipHash`)."##,
+ },
+ Lint {
+ label: "clippy::implicit_return",
+ description: r##"Checks for missing return statements at the end of a block."##,
+ },
+ Lint {
+ label: "clippy::implicit_saturating_sub",
+ description: r##"Checks for implicit saturating subtraction."##,
+ },
+ Lint {
+ label: "clippy::imprecise_flops",
+ description: r##"Looks for floating-point expressions that
+can be expressed using built-in methods to improve accuracy
+at the cost of performance."##,
+ },
+ Lint {
+ label: "clippy::inconsistent_digit_grouping",
+ description: r##"Warns if an integral or floating-point constant is
+grouped inconsistently with underscores."##,
+ },
+ Lint {
+ label: "clippy::inconsistent_struct_constructor",
+ description: r##"Checks for struct constructors where all fields are shorthand and
+the order of the field init shorthand in the constructor is inconsistent
+with the order in the struct definition."##,
+ },
+ Lint {
+ label: "clippy::index_refutable_slice",
+ description: r##"The lint checks for slice bindings in patterns that are only used to
+access individual slice values."##,
+ },
+ Lint {
+ label: "clippy::indexing_slicing",
+ description: r##"Checks for usage of indexing or slicing. Arrays are special cases, this lint
+does not report on arrays if we can tell that slicing operations are in bounds and does not
+lint on constant `usize` indexing on arrays because that is handled by rustc's `const_err` lint."##,
+ },
+ Lint {
+ label: "clippy::ineffective_bit_mask",
+ description: r##"Checks for bit masks in comparisons which can be removed
+without changing the outcome. The basic structure can be seen in the
+following table:
+
+|Comparison| Bit Op |Example |equals |
+|----------|---------|-----------|-------|
+|`>` / `<=`|`|` / `^`|`x | 2 > 3`|`x > 3`|
+|`<` / `>=`|`|` / `^`|`x ^ 1 < 4`|`x < 4`|"##,
+ },
+ Lint {
+ label: "clippy::inefficient_to_string",
+ description: r##"Checks for usage of `.to_string()` on an `&&T` where
+`T` implements `ToString` directly (like `&&str` or `&&String`)."##,
+ },
+ Lint {
+ label: "clippy::infallible_destructuring_match",
+ description: r##"Checks for matches being used to destructure a single-variant enum
+or tuple struct where a `let` will suffice."##,
+ },
+ Lint {
+ label: "clippy::infinite_iter",
+ description: r##"Checks for iteration that is guaranteed to be infinite."##,
+ },
+ Lint {
+ label: "clippy::inherent_to_string",
+ description: r##"Checks for the definition of inherent methods with a signature of `to_string(&self) -> String`."##,
+ },
+ Lint {
+ label: "clippy::inherent_to_string_shadow_display",
+ description: r##"Checks for the definition of inherent methods with a signature of `to_string(&self) -> String` and if the type implementing this method also implements the `Display` trait."##,
+ },
+ Lint {
+ label: "clippy::inline_always",
+ description: r##"Checks for items annotated with `#[inline(always)]`,
+unless the annotated function is empty or simply panics."##,
+ },
+ Lint {
+ label: "clippy::inline_asm_x86_att_syntax",
+ description: r##"Checks for usage of AT&T x86 assembly syntax."##,
+ },
+ Lint {
+ label: "clippy::inline_asm_x86_intel_syntax",
+ description: r##"Checks for usage of Intel x86 assembly syntax."##,
+ },
+ Lint {
+ label: "clippy::inline_fn_without_body",
+ description: r##"Checks for `#[inline]` on trait methods without bodies"##,
+ },
+ Lint {
+ label: "clippy::inspect_for_each",
+ description: r##"Checks for usage of `inspect().for_each()`."##,
+ },
+ Lint {
+ label: "clippy::int_plus_one",
+ description: r##"Checks for usage of `x >= y + 1` or `x - 1 >= y` (and `<=`) in a block"##,
+ },
+ Lint {
+ label: "clippy::integer_arithmetic",
+ description: r##"Checks for integer arithmetic operations which could overflow or panic.
+
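+A small sketch of what is linted and an explicit alternative:
+
+```rust
+fn main() {
+    let a: i32 = 40;
+    let b: i32 = 2;
+    // Flagged: `+` could overflow for some inputs; no bounds analysis is attempted.
+    let _c = a + b;
+    // Not flagged: the overflow case is handled explicitly.
+    let _d = a.checked_add(b);
+}
+```
+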
+Specifically, checks for any operators (`+`, `-`, `*`, `<<`, etc) which are capable
+of overflowing according to the [Rust
+Reference](https://doc.rust-lang.org/reference/expressions/operator-expr.html#overflow),
+or which can panic (`/`, `%`). No bounds analysis or sophisticated reasoning is
+attempted."##,
+ },
+ Lint { label: "clippy::integer_division", description: r##"Checks for division of integers"## },
+ Lint {
+ label: "clippy::into_iter_on_ref",
+ description: r##"Checks for `into_iter` calls on references which should be replaced by `iter`
+or `iter_mut`."##,
+ },
+ Lint {
+ label: "clippy::invalid_null_ptr_usage",
+ description: r##"This lint checks for invalid usages of `ptr::null`."##,
+ },
+ Lint {
+ label: "clippy::invalid_regex",
+ description: r##"Checks [regex](https://crates.io/crates/regex) creation
+(with `Regex::new`, `RegexBuilder::new`, or `RegexSet::new`) for correct
+regex syntax."##,
+ },
+ Lint {
+ label: "clippy::invalid_upcast_comparisons",
+ description: r##"Checks for comparisons where the relation is always either
+true or false, but where one side has been upcast so that the comparison is
+necessary. Only integer types are checked."##,
+ },
+ Lint {
+ label: "clippy::invisible_characters",
+ description: r##"Checks for invisible Unicode characters in the code."##,
+ },
+ Lint {
+ label: "clippy::items_after_statements",
+ description: r##"Checks for items declared after some statement in a block."##,
+ },
+ Lint {
+ label: "clippy::iter_cloned_collect",
+ description: r##"Checks for the use of `.cloned().collect()` on slice to
+create a `Vec`."##,
+ },
+ Lint {
+ label: "clippy::iter_count",
+ description: r##"Checks for the use of `.iter().count()`."##,
+ },
+ Lint { label: "clippy::iter_next_loop", description: r##"Checks for loops on `x.next()`."## },
+ Lint {
+ label: "clippy::iter_next_slice",
+ description: r##"Checks for usage of `iter().next()` on a Slice or an Array"##,
+ },
+ Lint {
+ label: "clippy::iter_not_returning_iterator",
+ description: r##"Detects methods named `iter` or `iter_mut` that do not have a return type that implements `Iterator`."##,
+ },
+ Lint {
+ label: "clippy::iter_nth",
+ description: r##"Checks for use of `.iter().nth()` (and the related
+`.iter_mut().nth()`) on standard library types with *O*(1) element access."##,
+ },
+ Lint {
+ label: "clippy::iter_nth_zero",
+ description: r##"Checks for the use of `iter.nth(0)`."##,
+ },
+ Lint {
+ label: "clippy::iter_skip_next",
+ description: r##"Checks for use of `.skip(x).next()` on iterators."##,
+ },
+ Lint {
+ label: "clippy::iterator_step_by_zero",
+ description: r##"Checks for calling `.step_by(0)` on iterators which panics."##,
+ },
+ Lint {
+ label: "clippy::just_underscores_and_digits",
+ description: r##"Checks if you have variables whose name consists of just
+underscores and digits."##,
+ },
+ Lint {
+ label: "clippy::large_const_arrays",
+ description: r##"Checks for large `const` arrays that should
+be defined as `static` instead."##,
+ },
+ Lint {
+ label: "clippy::large_digit_groups",
+ description: r##"Warns if the digits of an integral or floating-point
+constant are grouped into groups that
+are too large."##,
+ },
+ Lint {
+ label: "clippy::large_enum_variant",
+ description: r##"Checks for large size differences between variants on
+`enum`s."##,
+ },
+ Lint {
+ label: "clippy::large_stack_arrays",
+ description: r##"Checks for local arrays that may be too large."##,
+ },
+ Lint {
+ label: "clippy::large_types_passed_by_value",
+ description: r##"Checks for functions taking arguments by value, where
+the argument type is `Copy` and large enough to be worth considering
+passing by reference. Does not trigger if the function is being exported,
+because that might induce API breakage, if the parameter is declared as mutable,
+or if the argument is a `self`."##,
+ },
+ Lint {
+ label: "clippy::len_without_is_empty",
+ description: r##"Checks for items that implement `.len()` but not
+`.is_empty()`."##,
+ },
+ Lint {
+ label: "clippy::len_zero",
+ description: r##"Checks for getting the length of something via `.len()`
+just to compare to zero, and suggests using `.is_empty()` where applicable."##,
+ },
+ Lint {
+ label: "clippy::let_and_return",
+ description: r##"Checks for `let`-bindings, which are subsequently
+returned."##,
+ },
+ Lint {
+ label: "clippy::let_underscore_drop",
+ description: r##"Checks for `let _ = <expr>`
+where expr has a type that implements `Drop`"##,
+ },
+ Lint {
+ label: "clippy::let_underscore_lock",
+ description: r##"Checks for `let _ = sync_lock`.
+This supports `mutex` and `rwlock` in `std::sync` and `parking_lot`."##,
+ },
+ Lint {
+ label: "clippy::let_underscore_must_use",
+ description: r##"Checks for `let _ = <expr>` where expr is `#[must_use]`"##,
+ },
+ Lint { label: "clippy::let_unit_value", description: r##"Checks for binding a unit value."## },
+ Lint {
+ label: "clippy::linkedlist",
+ description: r##"Checks for usage of any `LinkedList`, suggesting to use a
+`Vec` or a `VecDeque` (formerly called `RingBuf`)."##,
+ },
+ Lint {
+ label: "clippy::logic_bug",
+ description: r##"Checks for boolean expressions that contain terminals that
+can be eliminated."##,
+ },
+ Lint {
+ label: "clippy::lossy_float_literal",
+ description: r##"Checks for whole number float literals that
+cannot be represented as the underlying type without loss."##,
+ },
+ Lint {
+ label: "clippy::macro_use_imports",
+ description: r##"Checks for `#[macro_use] use...`."##,
+ },
+ Lint {
+ label: "clippy::main_recursion",
+ description: r##"Checks for recursion using the entrypoint."##,
+ },
+ Lint {
+ label: "clippy::manual_assert",
+ description: r##"Detects `if`-then-`panic!` that can be replaced with `assert!`."##,
+ },
+ Lint {
+ label: "clippy::manual_async_fn",
+ description: r##"It checks for manual implementations of `async` functions."##,
+ },
+ Lint {
+ label: "clippy::manual_filter_map",
+ description: r##"Checks for usage of `_.filter(_).map(_)` that can be written more simply
+as `filter_map(_)`."##,
+ },
+ Lint {
+ label: "clippy::manual_find_map",
+ description: r##"Checks for usage of `_.find(_).map(_)` that can be written more simply
+as `find_map(_)`."##,
+ },
+ Lint {
+ label: "clippy::manual_flatten",
+ description: r##"Check for unnecessary `if let` usage in a for loop
+where only the `Some` or `Ok` variant of the iterator element is used."##,
+ },
+ Lint {
+ label: "clippy::manual_map",
+ description: r##"Checks for usages of `match` which could be implemented using `map`"##,
+ },
+ Lint {
+ label: "clippy::manual_memcpy",
+ description: r##"Checks for for-loops that manually copy items between
+slices that could be optimized by having a memcpy."##,
+ },
+ Lint {
+ label: "clippy::manual_non_exhaustive",
+ description: r##"Checks for manual implementations of the non-exhaustive pattern."##,
+ },
+ Lint {
+ label: "clippy::manual_ok_or",
+ description: r##"Finds patterns that reimplement `Option::ok_or`."##,
+ },
+ Lint {
+ label: "clippy::manual_range_contains",
+ description: r##"Checks for expressions like `x >= 3 && x < 8` that could
+be more readably expressed as `(3..8).contains(x)`."##,
+ },
+ Lint {
+ label: "clippy::manual_saturating_arithmetic",
+ description: r##"Checks for `.checked_add/sub(x).unwrap_or(MAX/MIN)`."##,
+ },
+ Lint {
+ label: "clippy::manual_split_once",
+ description: r##"Checks for usages of `str::splitn(2, _)`"##,
+ },
+ Lint {
+ label: "clippy::manual_str_repeat",
+ description: r##"Checks for manual implementations of `str::repeat`"##,
+ },
+ Lint {
+ label: "clippy::manual_strip",
+ description: r##"Suggests using `strip_{prefix,suffix}` over `str::{starts,ends}_with` and slicing using
+the pattern's length."##,
+ },
+ Lint { label: "clippy::manual_swap", description: r##"Checks for manual swapping."## },
+ Lint {
+ label: "clippy::manual_unwrap_or",
+ description: r##"Finds patterns that reimplement `Option::unwrap_or` or `Result::unwrap_or`."##,
+ },
+ Lint {
+ label: "clippy::many_single_char_names",
+ description: r##"Checks for too many variables whose name consists of a
+single character."##,
+ },
+ Lint {
+ label: "clippy::map_clone",
+ description: r##"Checks for usage of `map(|x| x.clone())` or
+dereferencing closures for `Copy` types, on `Iterator` or `Option`,
+and suggests `cloned()` or `copied()` instead"##,
+ },
+ Lint {
+ label: "clippy::map_collect_result_unit",
+ description: r##"Checks for usage of `_.map(_).collect::<Result<(), _>()`."##,
+ },
+ Lint {
+ label: "clippy::map_entry",
+ description: r##"Checks for uses of `contains_key` + `insert` on `HashMap`
+or `BTreeMap`."##,
+ },
+ Lint {
+ label: "clippy::map_err_ignore",
+ description: r##"Checks for instances of `map_err(|_| Some::Enum)`"##,
+ },
+ Lint {
+ label: "clippy::map_flatten",
+ description: r##"Checks for usage of `_.map(_).flatten(_)` on `Iterator` and `Option`"##,
+ },
+ Lint {
+ label: "clippy::map_identity",
+ description: r##"Checks for instances of `map(f)` where `f` is the identity function."##,
+ },
+ Lint {
+ label: "clippy::map_unwrap_or",
+ description: r##"Checks for usage of `option.map(_).unwrap_or(_)` or `option.map(_).unwrap_or_else(_)` or
+`result.map(_).unwrap_or_else(_)`."##,
+ },
+ Lint {
+ label: "clippy::match_as_ref",
+ description: r##"Checks for match which is used to add a reference to an
+`Option` value."##,
+ },
+ Lint {
+ label: "clippy::match_bool",
+ description: r##"Checks for matches where match expression is a `bool`. It
+suggests to replace the expression with an `if...else` block."##,
+ },
+ Lint {
+ label: "clippy::match_like_matches_macro",
+ description: r##"Checks for `match` or `if let` expressions producing a
+`bool` that could be written using `matches!`"##,
+ },
+ Lint {
+ label: "clippy::match_on_vec_items",
+ description: r##"Checks for `match vec[idx]` or `match vec[n..m]`."##,
+ },
+ Lint {
+ label: "clippy::match_overlapping_arm",
+ description: r##"Checks for overlapping match arms."##,
+ },
+ Lint {
+ label: "clippy::match_ref_pats",
+ description: r##"Checks for matches where all arms match a reference,
+suggesting to remove the reference and deref the matched expression
+instead. It also checks for `if let &foo = bar` blocks."##,
+ },
+ Lint {
+ label: "clippy::match_result_ok",
+ description: r##"Checks for unnecessary `ok()` in `while let`."##,
+ },
+ Lint {
+ label: "clippy::match_same_arms",
+ description: r##"Checks for `match` with identical arm bodies."##,
+ },
+ Lint {
+ label: "clippy::match_single_binding",
+ description: r##"Checks for useless match that binds to only one value."##,
+ },
+ Lint {
+ label: "clippy::match_str_case_mismatch",
+ description: r##"Checks for `match` expressions modifying the case of a string with non-compliant arms"##,
+ },
+ Lint {
+ label: "clippy::match_wild_err_arm",
+ description: r##"Checks for arm which matches all errors with `Err(_)`
+and take drastic actions like `panic!`."##,
+ },
+ Lint {
+ label: "clippy::match_wildcard_for_single_variants",
+ description: r##"Checks for wildcard enum matches for a single variant."##,
+ },
+ Lint {
+ label: "clippy::maybe_infinite_iter",
+ description: r##"Checks for iteration that may be infinite."##,
+ },
+ Lint {
+ label: "clippy::mem_forget",
+ description: r##"Checks for usage of `std::mem::forget(t)` where `t` is
+`Drop`."##,
+ },
+ Lint {
+ label: "clippy::mem_replace_option_with_none",
+ description: r##"Checks for `mem::replace()` on an `Option` with
+`None`."##,
+ },
+ Lint {
+ label: "clippy::mem_replace_with_default",
+ description: r##"Checks for `std::mem::replace` on a value of type
+`T` with `T::default()`."##,
+ },
+ Lint {
+ label: "clippy::mem_replace_with_uninit",
+ description: r##"Checks for `mem::replace(&mut _, mem::uninitialized())`
+and `mem::replace(&mut _, mem::zeroed())`."##,
+ },
+ Lint {
+ label: "clippy::min_max",
+ description: r##"Checks for expressions where `std::cmp::min` and `max` are
+used to clamp values, but switched so that the result is constant."##,
+ },
+ Lint {
+ label: "clippy::misaligned_transmute",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::mismatched_target_os",
+ description: r##"Checks for cfg attributes having operating systems used in target family position."##,
+ },
+ Lint {
+ label: "clippy::misrefactored_assign_op",
+ description: r##"Checks for `a op= a op b` or `a op= b op a` patterns."##,
+ },
+ Lint {
+ label: "clippy::missing_const_for_fn",
+ description: r##"Suggests the use of `const` in functions and methods where possible."##,
+ },
+ Lint {
+ label: "clippy::missing_docs_in_private_items",
+ description: r##"Warns if there is missing doc for any documentable item
+(public or private)."##,
+ },
+ Lint {
+ label: "clippy::missing_enforced_import_renames",
+ description: r##"Checks for imports that do not rename the item as specified
+in the `enforce-import-renames` config option."##,
+ },
+ Lint {
+ label: "clippy::missing_errors_doc",
+ description: r##"Checks the doc comments of publicly visible functions that
+return a `Result` type and warns if there is no `# Errors` section."##,
+ },
+ Lint {
+ label: "clippy::missing_inline_in_public_items",
+ description: r##"It lints if an exported function, method, trait method with default impl,
+or trait method impl is not `#[inline]`."##,
+ },
+ Lint {
+ label: "clippy::missing_panics_doc",
+ description: r##"Checks the doc comments of publicly visible functions that
+may panic and warns if there is no `# Panics` section."##,
+ },
+ Lint {
+ label: "clippy::missing_safety_doc",
+ description: r##"Checks for the doc comments of publicly visible
+unsafe functions and warns if there is no `# Safety` section."##,
+ },
+ Lint {
+ label: "clippy::mistyped_literal_suffixes",
+ description: r##"Warns for mistyped suffix in literals"##,
+ },
+ Lint {
+ label: "clippy::mixed_case_hex_literals",
+ description: r##"Warns on hexadecimal literals with mixed-case letter
+digits."##,
+ },
+ Lint {
+ label: "clippy::mod_module_files",
+ description: r##"Checks that module layout uses only self named module files, bans mod.rs files."##,
+ },
+ Lint {
+ label: "clippy::module_inception",
+ description: r##"Checks for modules that have the same name as their
+parent module"##,
+ },
+ Lint {
+ label: "clippy::module_name_repetitions",
+ description: r##"Detects type names that are prefixed or suffixed by the
+containing module's name."##,
+ },
+ Lint { label: "clippy::modulo_arithmetic", description: r##"Checks for modulo arithmetic."## },
+ Lint {
+ label: "clippy::modulo_one",
+ description: r##"Checks for getting the remainder of a division by one or minus
+one."##,
+ },
+ Lint {
+ label: "clippy::multiple_crate_versions",
+ description: r##"Checks to see if multiple versions of a crate are being
+used."##,
+ },
+ Lint {
+ label: "clippy::multiple_inherent_impl",
+ description: r##"Checks for multiple inherent implementations of a struct"##,
+ },
+ Lint {
+ label: "clippy::must_use_candidate",
+ description: r##"Checks for public functions that have no
+`#[must_use]` attribute, but return something not already marked
+must-use, have no mutable arg and mutate no statics."##,
+ },
+ Lint {
+ label: "clippy::must_use_unit",
+ description: r##"Checks for a `#[must_use]` attribute on
+unit-returning functions and methods."##,
+ },
+ Lint {
+ label: "clippy::mut_from_ref",
+ description: r##"This lint checks for functions that take immutable
+references and return mutable ones."##,
+ },
+ Lint {
+ label: "clippy::mut_mut",
+ description: r##"Checks for instances of `mut mut` references."##,
+ },
+ Lint {
+ label: "clippy::mut_mutex_lock",
+ description: r##"Checks for `&mut Mutex::lock` calls"##,
+ },
+ Lint {
+ label: "clippy::mut_range_bound",
+ description: r##"Checks for loops which have a range bound that is a mutable variable"##,
+ },
+ Lint {
+ label: "clippy::mutable_key_type",
+ description: r##"Checks for sets/maps with mutable key types."##,
+ },
+ Lint {
+ label: "clippy::mutex_atomic",
+ description: r##"Checks for usages of `Mutex<X>` where an atomic will do."##,
+ },
+ Lint {
+ label: "clippy::mutex_integer",
+ description: r##"Checks for usages of `Mutex<X>` where `X` is an integral
+type."##,
+ },
+ Lint { label: "clippy::naive_bytecount", description: r##"Checks for naive byte counts"## },
+ Lint {
+ label: "clippy::needless_arbitrary_self_type",
+ description: r##"The lint checks for `self` in fn parameters that
+specify the `Self`-type explicitly"##,
+ },
+ Lint {
+ label: "clippy::needless_bitwise_bool",
+ description: r##"Checks for uses of bitwise and/or operators between booleans, where performance may be improved by using
+a lazy and."##,
+ },
+ Lint {
+ label: "clippy::needless_bool",
+ description: r##"Checks for expressions of the form `if c { true } else {
+false }` (or vice versa) and suggests using the condition directly."##,
+ },
+ Lint {
+ label: "clippy::needless_borrow",
+ description: r##"Checks for address of operations (`&`) that are going to
+be dereferenced immediately by the compiler."##,
+ },
+ Lint {
+ label: "clippy::needless_borrowed_reference",
+ description: r##"Checks for bindings that destructure a reference and borrow the inner
+value with `&ref`."##,
+ },
+ Lint {
+ label: "clippy::needless_collect",
+ description: r##"Checks for functions collecting an iterator when collect
+is not needed."##,
+ },
+ Lint {
+ label: "clippy::needless_continue",
+ description: r##"The lint checks for `if`-statements appearing in loops
+that contain a `continue` statement in either their main blocks or their
+`else`-blocks, when omitting the `else`-block possibly with some
+rearrangement of code can make the code easier to understand."##,
+ },
+ Lint {
+ label: "clippy::needless_doctest_main",
+ description: r##"Checks for `fn main() { .. }` in doctests"##,
+ },
+ Lint {
+ label: "clippy::needless_for_each",
+ description: r##"Checks for usage of `for_each` that would be more simply written as a
+`for` loop."##,
+ },
+ Lint {
+ label: "clippy::needless_late_init",
+ description: r##"Checks for late initializations that can be replaced by a `let` statement
+with an initializer."##,
+ },
+ Lint {
+ label: "clippy::needless_lifetimes",
+ description: r##"Checks for lifetime annotations which can be removed by
+relying on lifetime elision."##,
+ },
+ Lint {
+ label: "clippy::needless_option_as_deref",
+ description: r##"Checks for no-op uses of Option::{as_deref,as_deref_mut},
+for example, `Option<&T>::as_deref()` returns the same type."##,
+ },
+ Lint {
+ label: "clippy::needless_pass_by_value",
+ description: r##"Checks for functions taking arguments by value, but not
+consuming them in the function
+body."##,
+ },
+ Lint {
+ label: "clippy::needless_question_mark",
+ description: r##"Suggests alternatives for useless applications of `?` in terminating expressions"##,
+ },
+ Lint {
+ label: "clippy::needless_range_loop",
+ description: r##"Checks for looping over the range of `0..len` of some
+collection just to get the values by index."##,
+ },
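+ // A minimal sketch of the pattern this lint targets, assuming `v: Vec<i32>`:
+ //
+ //     for i in 0..v.len() { println!("{}", v[i]); } // flagged by clippy::needless_range_loop
+ //     for x in &v { println!("{}", x); }            // suggested form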
+ Lint {
+ label: "clippy::needless_return",
+ description: r##"Checks for return statements at the end of a block."##,
+ },
+ Lint {
+ label: "clippy::needless_splitn",
+ description: r##"Checks for usages of `str::splitn` (or `str::rsplitn`) where using `str::split` would be the same."##,
+ },
+ Lint {
+ label: "clippy::needless_update",
+ description: r##"Checks for needlessly including a base struct on update
+when all fields are changed anyway.
+
+This lint is not applied to structs marked with
+[non_exhaustive](https://doc.rust-lang.org/reference/attributes/type_system.html)."##,
+ },
+ Lint {
+ label: "clippy::neg_cmp_op_on_partial_ord",
+ description: r##"Checks for the usage of negated comparison operators on types which only implement
+`PartialOrd` (e.g., `f64`)."##,
+ },
+ Lint {
+ label: "clippy::neg_multiply",
+ description: r##"Checks for multiplication by -1 as a form of negation."##,
+ },
+ Lint {
+ label: "clippy::negative_feature_names",
+ description: r##"Checks for negative feature names with prefix `no-` or `not-`"##,
+ },
+ Lint {
+ label: "clippy::never_loop",
+ description: r##"Checks for loops that will always `break`, `return` or
+`continue` an outer loop."##,
+ },
+ Lint {
+ label: "clippy::new_ret_no_self",
+ description: r##"Checks for `new` not returning a type that contains `Self`."##,
+ },
+ Lint {
+ label: "clippy::new_without_default",
+ description: r##"Checks for types with a `fn new() -> Self` method and no
+implementation of
+[`Default`](https://doc.rust-lang.org/std/default/trait.Default.html)."##,
+ },
+ Lint {
+ label: "clippy::no_effect",
+ description: r##"Checks for statements which have no effect."##,
+ },
+ Lint {
+ label: "clippy::no_effect_underscore_binding",
+ description: r##"Checks for binding to underscore prefixed variable without side-effects."##,
+ },
+ Lint {
+ label: "clippy::non_ascii_literal",
+ description: r##"Checks for non-ASCII characters in string and char literals."##,
+ },
+ Lint {
+ label: "clippy::non_octal_unix_permissions",
+ description: r##"Checks for non-octal values used to set Unix file permissions."##,
+ },
+ Lint {
+ label: "clippy::non_send_fields_in_send_ty",
+ description: r##"This lint warns about a `Send` implementation for a type that
+contains fields that are not safe to be sent across threads.
+It tries to detect fields that can cause a soundness issue
+when sent to another thread (e.g., `Rc`) while allowing `!Send` fields
+that are expected to exist in a `Send` type, such as raw pointers."##,
+ },
+ Lint {
+ label: "clippy::nonminimal_bool",
+ description: r##"Checks for boolean expressions that can be written more
+concisely."##,
+ },
+ Lint {
+ label: "clippy::nonsensical_open_options",
+ description: r##"Checks for duplicate open options as well as combinations
+that make no sense."##,
+ },
+ Lint {
+ label: "clippy::nonstandard_macro_braces",
+ description: r##"Checks that common macros are used with consistent bracing."##,
+ },
+ Lint {
+ label: "clippy::not_unsafe_ptr_arg_deref",
+ description: r##"Checks for public functions that dereference raw pointer
+arguments but are not marked `unsafe`."##,
+ },
+ Lint {
+ label: "clippy::octal_escapes",
+ description: r##"Checks for `\\0` escapes in string and byte literals that look like octal
+character escapes in C."##,
+ },
+ Lint { label: "clippy::ok_expect", description: r##"Checks for usage of `ok().expect(..)`."## },
+ Lint {
+ label: "clippy::op_ref",
+ description: r##"Checks for arguments to `==` which have their address
+taken to satisfy a bound
+and suggests dereferencing the other argument instead."##,
+ },
+ Lint {
+ label: "clippy::option_as_ref_deref",
+ description: r##"Checks for usage of `_.as_ref().map(Deref::deref)` or it's aliases (such as String::as_str)."##,
+ },
+ Lint {
+ label: "clippy::option_env_unwrap",
+ description: r##"Checks for usage of `option_env!(...).unwrap()` and
+suggests usage of the `env!` macro."##,
+ },
+ Lint {
+ label: "clippy::option_filter_map",
+ description: r##"Checks for indirect collection of populated `Option`"##,
+ },
+ Lint {
+ label: "clippy::option_if_let_else",
+ description: r##"Lints usage of `if let Some(v) = ... { y } else { x }` which is more
+idiomatically done with `Option::map_or` (if the else bit is a pure
+expression) or `Option::map_or_else` (if the else bit is an impure
+expression)."##,
+ },
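+ // A minimal sketch of the rewrite this lint suggests, assuming `opt: Option<i32>`:
+ //
+ //     let x = if let Some(v) = opt { v + 1 } else { 5 }; // flagged by clippy::option_if_let_else
+ //     let x = opt.map_or(5, |v| v + 1);                  // suggested `map_or` form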
+ Lint {
+ label: "clippy::option_map_or_none",
+ description: r##"Checks for usage of `_.map_or(None, _)`."##,
+ },
+ Lint {
+ label: "clippy::option_map_unit_fn",
+ description: r##"Checks for usage of `option.map(f)` where f is a function
+or closure that returns the unit type `()`."##,
+ },
+ Lint {
+ label: "clippy::option_option",
+ description: r##"Checks for use of `Option<Option<_>>` in function signatures and type
+definitions"##,
+ },
+ Lint {
+ label: "clippy::or_fun_call",
+ description: r##"Checks for calls to `.or(foo(..))`, `.unwrap_or(foo(..))`,
+etc., and suggests to use `or_else`, `unwrap_or_else`, etc., or
+`unwrap_or_default` instead."##,
+ },
+ Lint {
+ label: "clippy::out_of_bounds_indexing",
+ description: r##"Checks for out of bounds array indexing with a constant
+index."##,
+ },
+ Lint {
+ label: "clippy::overflow_check_conditional",
+ description: r##"Detects classic underflow/overflow checks."##,
+ },
+ Lint { label: "clippy::panic", description: r##"Checks for usage of `panic!`."## },
+ Lint {
+ label: "clippy::panic_in_result_fn",
+ description: r##"Checks for usage of `panic!`, `unimplemented!`, `todo!`, `unreachable!` or assertions in a function of type result."##,
+ },
+ Lint {
+ label: "clippy::panicking_unwrap",
+ description: r##"Checks for calls of `unwrap[_err]()` that will always fail."##,
+ },
+ Lint {
+ label: "clippy::partialeq_ne_impl",
+ description: r##"Checks for manual re-implementations of `PartialEq::ne`."##,
+ },
+ Lint {
+ label: "clippy::path_buf_push_overwrite",
+ description: r##"* Checks for [push](https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push)
+calls on `PathBuf` that can cause overwrites."##,
+ },
+ Lint {
+ label: "clippy::pattern_type_mismatch",
+ description: r##"Checks for patterns that aren't exact representations of the types
+they are applied to.
+
+To satisfy this lint, you will have to adjust either the expression that is matched
+against or the pattern itself, as well as the bindings that are introduced by the
+adjusted patterns. For matching you will have to either dereference the expression
+with the `*` operator, or amend the patterns to explicitly match against `&<pattern>`
+or `&mut <pattern>` depending on the reference mutability. For the bindings you need
+to use the inverse. You can leave them as plain bindings if you wish for the value
+to be copied, but you must use `ref mut <variable>` or `ref <variable>` to construct
+a reference into the matched structure.
+
+If you are looking for a way to learn about ownership semantics in more detail, it
+is recommended to look at IDE options available to you to highlight types, lifetimes
+and reference semantics in your code. The available tooling would expose these things
+in a general way even outside of the various pattern matching mechanics. Of course
+this lint can still be used to highlight areas of interest and ensure a good understanding
+of ownership semantics."##,
+ },
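+ // A minimal sketch of the mismatch this lint describes, assuming `opt: &Option<i32>`:
+ //
+ //     match opt { Some(_) => {}, None => {} }   // flagged: non-reference patterns on a reference
+ //     match opt { &Some(_) => {}, &None => {} } // satisfies the lint (or dereference with `*opt`)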
+ Lint {
+ label: "clippy::possible_missing_comma",
+ description: r##"Checks for possible missing comma in an array. It lints if
+an array element is a binary operator expression and it lies on two lines."##,
+ },
+ Lint {
+ label: "clippy::precedence",
+ description: r##"Checks for operations where precedence may be unclear
+and suggests to add parentheses. Currently it catches the following:
+* mixed usage of arithmetic and bit shifting/combining operators without
+parentheses
+* a negative numeric literal (which is really a unary `-` followed by a
+numeric literal)
+ followed by a method call"##,
+ },
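+ // A minimal sketch of the mixed-precedence cases this lint flags (hypothetical literals):
+ //
+ //     let a = 1 << 2 + 3;      // flagged: parses as `1 << (2 + 3)`, easy to misread
+ //     let a = 1 << (2 + 3);    // suggested: make the grouping explicit
+ //     let b = -1.0f64.abs();   // flagged: parses as `-(1.0f64.abs())`
+ //     let b = -(1.0f64.abs()); // suggested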
+ Lint {
+ label: "clippy::print_literal",
+ description: r##"This lint warns about the use of literals as `print!`/`println!` args."##,
+ },
+ Lint {
+ label: "clippy::print_stderr",
+ description: r##"Checks for printing on *stderr*. The purpose of this lint
+is to catch debugging remnants."##,
+ },
+ Lint {
+ label: "clippy::print_stdout",
+ description: r##"Checks for printing on *stdout*. The purpose of this lint
+is to catch debugging remnants."##,
+ },
+ Lint {
+ label: "clippy::print_with_newline",
+ description: r##"This lint warns when you use `print!()` with a format
+string that ends in a newline."##,
+ },
+ Lint {
+ label: "clippy::println_empty_string",
+ description: r##"This lint warns when you use `println!()` to
+print a newline."##,
+ },
+ Lint {
+ label: "clippy::ptr_arg",
+ description: r##"This lint checks for function arguments of type `&String`
+or `&Vec` unless the references are mutable. It will also suggest you
+replace `.clone()` calls with the appropriate `.to_owned()`/`to_string()`
+calls."##,
+ },
+ Lint {
+ label: "clippy::ptr_as_ptr",
+ description: r##"Checks for `as` casts between raw pointers without changing its mutability,
+namely `*const T` to `*const U` and `*mut T` to `*mut U`."##,
+ },
+ Lint { label: "clippy::ptr_eq", description: r##"Use `std::ptr::eq` when applicable"## },
+ Lint {
+ label: "clippy::ptr_offset_with_cast",
+ description: r##"Checks for usage of the `offset` pointer method with a `usize` casted to an
+`isize`."##,
+ },
+ Lint {
+ label: "clippy::pub_enum_variant_names",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::question_mark",
+ description: r##"Checks for expressions that could be replaced by the question mark operator."##,
+ },
+ Lint {
+ label: "clippy::range_minus_one",
+ description: r##"Checks for inclusive ranges where 1 is subtracted from
+the upper bound, e.g., `x..=(y-1)`."##,
+ },
+ Lint {
+ label: "clippy::range_plus_one",
+ description: r##"Checks for exclusive ranges where 1 is added to the
+upper bound, e.g., `x..(y+1)`."##,
+ },
+ Lint {
+ label: "clippy::range_step_by_zero",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::range_zip_with_len",
+ description: r##"Checks for zipping a collection with the range of
+`0.._.len()`."##,
+ },
+ Lint {
+ label: "clippy::rc_buffer",
+ description: r##"Checks for `Rc<T>` and `Arc<T>` when `T` is a mutable buffer type such as `String` or `Vec`."##,
+ },
+ Lint { label: "clippy::rc_mutex", description: r##"Checks for `Rc<Mutex<T>>`."## },
+ Lint {
+ label: "clippy::redundant_allocation",
+ description: r##"Checks for use of redundant allocations anywhere in the code."##,
+ },
+ Lint {
+ label: "clippy::redundant_clone",
+ description: r##"Checks for a redundant `clone()` (and its relatives) which clones an owned
+value that is going to be dropped without further use."##,
+ },
+ Lint {
+ label: "clippy::redundant_closure",
+ description: r##"Checks for closures which just call another function where
+the function can be called directly. `unsafe` functions or calls where types
+get adjusted are ignored."##,
+ },
+ Lint {
+ label: "clippy::redundant_closure_call",
+ description: r##"Detects closures called in the same expression where they
+are defined."##,
+ },
+ Lint {
+ label: "clippy::redundant_closure_for_method_calls",
+ description: r##"Checks for closures which only invoke a method on the closure
+argument and can be replaced by referencing the method directly."##,
+ },
+ Lint {
+ label: "clippy::redundant_else",
+ description: r##"Checks for `else` blocks that can be removed without changing semantics."##,
+ },
+ Lint {
+ label: "clippy::redundant_feature_names",
+ description: r##"Checks for feature names with prefix `use-`, `with-` or suffix `-support`"##,
+ },
+ Lint {
+ label: "clippy::redundant_field_names",
+ description: r##"Checks for fields in struct literals where shorthands
+could be used."##,
+ },
+ Lint {
+ label: "clippy::redundant_pattern",
+ description: r##"Checks for patterns in the form `name @ _`."##,
+ },
+ Lint {
+ label: "clippy::redundant_pattern_matching",
+ description: r##"Lint for redundant pattern matching over `Result`, `Option`,
+`std::task::Poll` or `std::net::IpAddr`"##,
+ },
+ Lint {
+ label: "clippy::redundant_pub_crate",
+ description: r##"Checks for items declared `pub(crate)` that are not crate visible because they
+are inside a private module."##,
+ },
+ Lint {
+ label: "clippy::redundant_slicing",
+ description: r##"Checks for redundant slicing expressions which use the full range, and
+do not change the type."##,
+ },
+ Lint {
+ label: "clippy::redundant_static_lifetimes",
+ description: r##"Checks for constants and statics with an explicit `'static` lifetime."##,
+ },
+ Lint {
+ label: "clippy::ref_binding_to_reference",
+ description: r##"Checks for `ref` bindings which create a reference to a reference."##,
+ },
+ Lint {
+ label: "clippy::ref_in_deref",
+ description: r##"Checks for references in expressions that use
+auto dereference."##,
+ },
+ Lint {
+ label: "clippy::ref_option_ref",
+ description: r##"Checks for usage of `&Option<&T>`."##,
+ },
+ Lint {
+ label: "clippy::regex_macro",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::repeat_once",
+ description: r##"Checks for usage of `.repeat(1)` and suggest the following method for each types.
+- `.to_string()` for `str`
+- `.clone()` for `String`
+- `.to_vec()` for `slice`
+
+The lint will evaluate constant expressions and values as arguments of `.repeat(..)` and emit a message if
+they are equivalent to `1`. (Related discussion in [rust-clippy#7306](https://github.com/rust-lang/rust-clippy/issues/7306))"##,
+ },
+ Lint {
+ label: "clippy::replace_consts",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::rest_pat_in_fully_bound_structs",
+ description: r##"Checks for unnecessary '..' pattern binding on struct when all fields are explicitly matched."##,
+ },
+ Lint {
+ label: "clippy::result_map_or_into_option",
+ description: r##"Checks for usage of `_.map_or(None, Some)`."##,
+ },
+ Lint {
+ label: "clippy::result_map_unit_fn",
+ description: r##"Checks for usage of `result.map(f)` where f is a function
+or closure that returns the unit type `()`."##,
+ },
+ Lint {
+ label: "clippy::result_unit_err",
+ description: r##"Checks for public functions that return a `Result`
+with an `Err` type of `()`. It suggests using a custom type that
+implements `std::error::Error`."##,
+ },
+ Lint {
+ label: "clippy::return_self_not_must_use",
+ description: r##"This lint warns when a method returning `Self` doesn't have the `#[must_use]` attribute."##,
+ },
+ Lint {
+ label: "clippy::reversed_empty_ranges",
+ description: r##"Checks for range expressions `x..y` where both `x` and `y`
+are constant and `x` is greater or equal to `y`."##,
+ },
+ Lint {
+ label: "clippy::same_functions_in_if_condition",
+ description: r##"Checks for consecutive `if`s with the same function call."##,
+ },
+ Lint {
+ label: "clippy::same_item_push",
+ description: r##"Checks whether a for loop is being used to push a constant
+value into a Vec."##,
+ },
+ Lint {
+ label: "clippy::same_name_method",
+ description: r##"It lints if a struct has two methods with the same name:
+one from a trait and another not from a trait."##,
+ },
+ Lint {
+ label: "clippy::search_is_some",
+ description: r##"Checks for an iterator or string search (such as `find()`,
+`position()`, or `rposition()`) followed by a call to `is_some()` or `is_none()`."##,
+ },
+ Lint {
+ label: "clippy::self_assignment",
+ description: r##"Checks for explicit self-assignments."##,
+ },
+ Lint {
+ label: "clippy::self_named_constructors",
+ description: r##"Warns when constructors have the same name as their types."##,
+ },
+ Lint {
+ label: "clippy::self_named_module_files",
+ description: r##"Checks that module layout uses only mod.rs files."##,
+ },
+ Lint {
+ label: "clippy::semicolon_if_nothing_returned",
+ description: r##"Looks for blocks of expressions and fires if the last expression returns
+`()` but is not followed by a semicolon."##,
+ },
+ Lint {
+ label: "clippy::separated_literal_suffix",
+ description: r##"Warns if literal suffixes are separated by an underscore.
+To enforce separated literal suffix style,
+see the `unseparated_literal_suffix` lint."##,
+ },
+ Lint {
+ label: "clippy::serde_api_misuse",
+ description: r##"Checks for mis-uses of the serde API."##,
+ },
+ Lint {
+ label: "clippy::shadow_reuse",
+ description: r##"Checks for bindings that shadow other bindings already in
+scope, while reusing the original value."##,
+ },
+ Lint {
+ label: "clippy::shadow_same",
+ description: r##"Checks for bindings that shadow other bindings already in
+scope, while just changing reference level or mutability."##,
+ },
+ Lint {
+ label: "clippy::shadow_unrelated",
+ description: r##"Checks for bindings that shadow other bindings already in
+scope, either without an initialization or with one that does not even use
+the original value."##,
+ },
+ Lint {
+ label: "clippy::short_circuit_statement",
+ description: r##"Checks for the use of short circuit boolean conditions as
+a
+statement."##,
+ },
+ Lint {
+ label: "clippy::should_assert_eq",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::should_implement_trait",
+ description: r##"Checks for methods that should live in a trait
+implementation of a `std` trait (see [llogiq's blog
+post](http://llogiq.github.io/2015/07/30/traits.html) for further
+information) instead of an inherent implementation."##,
+ },
+ Lint {
+ label: "clippy::similar_names",
+ description: r##"Checks for names that are very similar and thus confusing."##,
+ },
+ Lint {
+ label: "clippy::single_char_add_str",
+ description: r##"Warns when using `push_str`/`insert_str` with a single-character string literal
+where `push`/`insert` with a `char` would work fine."##,
+ },
+ Lint {
+ label: "clippy::single_char_pattern",
+ description: r##"Checks for string methods that receive a single-character
+`str` as an argument, e.g., `_.split(x)`."##,
+ },
+ Lint {
+ label: "clippy::single_component_path_imports",
+ description: r##"Checking for imports with single component use path."##,
+ },
+ Lint {
+ label: "clippy::single_element_loop",
+ description: r##"Checks whether a for loop has a single element."##,
+ },
+ Lint {
+ label: "clippy::single_match",
+ description: r##"Checks for matches with a single arm where an `if let`
+will usually suffice."##,
+ },
+ Lint {
+ label: "clippy::single_match_else",
+ description: r##"Checks for matches with two arms where an `if let else` will
+usually suffice."##,
+ },
+ Lint {
+ label: "clippy::size_of_in_element_count",
+ description: r##"Detects expressions where
+`size_of::<T>` or `size_of_val::<T>` is used as a
+count of elements of type `T`"##,
+ },
+ Lint {
+ label: "clippy::skip_while_next",
+ description: r##"Checks for usage of `_.skip_while(condition).next()`."##,
+ },
+ Lint {
+ label: "clippy::slow_vector_initialization",
+ description: r##"Checks slow zero-filled vector initialization"##,
+ },
+ Lint {
+ label: "clippy::stable_sort_primitive",
+ description: r##"When sorting primitive values (integers, bools, chars, as well
+as arrays, slices, and tuples of such items), it is better to
+use an unstable sort than a stable sort."##,
+ },
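+ // A minimal sketch of the suggested change, assuming `v: Vec<u32>`:
+ //
+ //     v.sort();          // flagged by clippy::stable_sort_primitive
+ //     v.sort_unstable(); // suggested: stability is irrelevant when sorting primitives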
+ Lint {
+ label: "clippy::str_to_string",
+ description: r##"This lint checks for `.to_string()` method calls on values of type `&str`."##,
+ },
+ Lint {
+ label: "clippy::string_add",
+ description: r##"Checks for all instances of `x + _` where `x` is of type
+`String`, but only if [`string_add_assign`](#string_add_assign) does *not*
+match."##,
+ },
+ Lint {
+ label: "clippy::string_add_assign",
+ description: r##"Checks for string appends of the form `x = x + y` (without
+`let`!)."##,
+ },
+ Lint {
+ label: "clippy::string_extend_chars",
+ description: r##"Checks for the use of `.extend(s.chars())` where s is a
+`&str` or `String`."##,
+ },
+ Lint {
+ label: "clippy::string_from_utf8_as_bytes",
+ description: r##"Check if the string is transformed to byte array and casted back to string."##,
+ },
+ Lint {
+ label: "clippy::string_lit_as_bytes",
+ description: r##"Checks for the `as_bytes` method called on string literals
+that contain only ASCII characters."##,
+ },
+ Lint {
+ label: "clippy::string_slice",
+ description: r##"Checks for slice operations on strings"##,
+ },
+ Lint {
+ label: "clippy::string_to_string",
+ description: r##"This lint checks for `.to_string()` method calls on values of type `String`."##,
+ },
+ Lint {
+ label: "clippy::strlen_on_c_strings",
+ description: r##"Checks for usage of `libc::strlen` on a `CString` or `CStr` value,
+and suggests calling `as_bytes().len()` or `to_bytes().len()` respectively instead."##,
+ },
+ Lint {
+ label: "clippy::struct_excessive_bools",
+ description: r##"Checks for excessive
+use of bools in structs."##,
+ },
+ Lint {
+ label: "clippy::suboptimal_flops",
+ description: r##"Looks for floating-point expressions that
+can be expressed using built-in methods to improve both
+accuracy and performance."##,
+ },
+ Lint {
+ label: "clippy::suspicious_arithmetic_impl",
+ description: r##"Lints for suspicious operations in impls of arithmetic operators, e.g.
+subtracting elements in an Add impl."##,
+ },
+ Lint {
+ label: "clippy::suspicious_assignment_formatting",
+ description: r##"Checks for use of the non-existent `=*`, `=!` and `=-`
+operators."##,
+ },
+ Lint {
+ label: "clippy::suspicious_else_formatting",
+ description: r##"Checks for formatting of `else`. It lints if the `else`
+is followed immediately by a newline or the `else` seems to be missing."##,
+ },
+ Lint {
+ label: "clippy::suspicious_map",
+ description: r##"Checks for calls to `map` followed by a `count`."##,
+ },
+ Lint {
+ label: "clippy::suspicious_op_assign_impl",
+ description: r##"Lints for suspicious operations in impls of OpAssign, e.g.
+subtracting elements in an AddAssign impl."##,
+ },
+ Lint {
+ label: "clippy::suspicious_operation_groupings",
+ description: r##"Checks for unlikely usages of binary operators that are almost
+certainly typos and/or copy/paste errors, given the other usages
+of binary operators nearby."##,
+ },
+ Lint {
+ label: "clippy::suspicious_splitn",
+ description: r##"Checks for calls to [`splitn`]
+(https://doc.rust-lang.org/std/primitive.str.html#method.splitn) and
+related functions with either zero or one splits."##,
+ },
+ Lint {
+ label: "clippy::suspicious_unary_op_formatting",
+ description: r##"Checks the formatting of a unary operator on the right hand side
+of a binary operator. It lints if there is no space between the binary and unary operators,
+but there is a space between the unary and its operand."##,
+ },
+ Lint {
+ label: "clippy::tabs_in_doc_comments",
+ description: r##"Checks doc comments for usage of tab characters."##,
+ },
+ Lint {
+ label: "clippy::temporary_assignment",
+ description: r##"Checks for construction of a structure or tuple just to
+assign a value in it."##,
+ },
+ Lint {
+ label: "clippy::to_digit_is_some",
+ description: r##"Checks for `.to_digit(..).is_some()` on `char`s."##,
+ },
+ Lint {
+ label: "clippy::to_string_in_display",
+ description: r##"Checks for uses of `to_string()` in `Display` traits."##,
+ },
+ Lint {
+ label: "clippy::to_string_in_format_args",
+ description: r##"Checks for [`ToString::to_string`](https://doc.rust-lang.org/std/string/trait.ToString.html#tymethod.to_string)
+applied to a type that implements [`Display`](https://doc.rust-lang.org/std/fmt/trait.Display.html)
+in a macro that does formatting."##,
+ },
+ Lint { label: "clippy::todo", description: r##"Checks for usage of `todo!`."## },
+ Lint {
+ label: "clippy::too_many_arguments",
+ description: r##"Checks for functions with too many parameters."##,
+ },
+ Lint {
+ label: "clippy::too_many_lines",
+ description: r##"Checks for functions with a large amount of lines."##,
+ },
+ Lint {
+ label: "clippy::toplevel_ref_arg",
+ description: r##"Checks for function arguments and let bindings denoted as
+`ref`."##,
+ },
+ Lint {
+ label: "clippy::trailing_empty_array",
+ description: r##"Displays a warning when a struct with a trailing zero-sized array is declared without a `repr` attribute."##,
+ },
+ Lint {
+ label: "clippy::trait_duplication_in_bounds",
+ description: r##"Checks for cases where generics are being used and multiple
+syntax specifications for trait bounds are used simultaneously."##,
+ },
+ Lint {
+ label: "clippy::transmute_bytes_to_str",
+ description: r##"Checks for transmutes from a `&[u8]` to a `&str`."##,
+ },
+ Lint {
+ label: "clippy::transmute_float_to_int",
+ description: r##"Checks for transmutes from a float to an integer."##,
+ },
+ Lint {
+ label: "clippy::transmute_int_to_bool",
+ description: r##"Checks for transmutes from an integer to a `bool`."##,
+ },
+ Lint {
+ label: "clippy::transmute_int_to_char",
+ description: r##"Checks for transmutes from an integer to a `char`."##,
+ },
+ Lint {
+ label: "clippy::transmute_int_to_float",
+ description: r##"Checks for transmutes from an integer to a float."##,
+ },
+ Lint {
+ label: "clippy::transmute_num_to_bytes",
+ description: r##"Checks for transmutes from a number to an array of `u8`"##,
+ },
+ Lint {
+ label: "clippy::transmute_ptr_to_ptr",
+ description: r##"Checks for transmutes from a pointer to a pointer, or
+from a reference to a reference."##,
+ },
+ Lint {
+ label: "clippy::transmute_ptr_to_ref",
+ description: r##"Checks for transmutes from a pointer to a reference."##,
+ },
+ Lint {
+ label: "clippy::transmutes_expressible_as_ptr_casts",
+ description: r##"Checks for transmutes that could be a pointer cast."##,
+ },
+ Lint {
+ label: "clippy::transmuting_null",
+ description: r##"Checks for transmute calls which would receive a null pointer."##,
+ },
+ Lint {
+ label: "clippy::trivial_regex",
+ description: r##"Checks for trivial [regex](https://crates.io/crates/regex)
+creation (with `Regex::new`, `RegexBuilder::new`, or `RegexSet::new`)."##,
+ },
+ Lint {
+ label: "clippy::trivially_copy_pass_by_ref",
+ description: r##"Checks for functions taking arguments by reference, where
+the argument type is `Copy` and small enough to be more efficient to always
+pass by value."##,
+ },
+ Lint { label: "clippy::try_err", description: r##"Checks for usages of `Err(x)?`."## },
+ Lint {
+ label: "clippy::type_complexity",
+ description: r##"Checks for types used in structs, parameters and `let`
+declarations above a certain complexity threshold."##,
+ },
+ Lint {
+ label: "clippy::type_repetition_in_bounds",
+ description: r##"This lint warns about unnecessary type repetitions in trait bounds"##,
+ },
+ Lint {
+ label: "clippy::undocumented_unsafe_blocks",
+ description: r##"Checks for `unsafe` blocks without a `// Safety: ` comment
+explaining why the unsafe operations performed inside
+the block are safe."##,
+ },
+ Lint {
+ label: "clippy::undropped_manually_drops",
+ description: r##"Prevents the safe `std::mem::drop` function from being called on `std::mem::ManuallyDrop`."##,
+ },
+ Lint {
+ label: "clippy::unicode_not_nfc",
+ description: r##"Checks for string literals that contain Unicode in a form
+that is not equal to its
+[NFC-recomposition](http://www.unicode.org/reports/tr15/#Norm_Forms)."##,
+ },
+ Lint {
+ label: "clippy::unimplemented",
+ description: r##"Checks for usage of `unimplemented!`."##,
+ },
+ Lint {
+ label: "clippy::uninit_assumed_init",
+ description: r##"Checks for `MaybeUninit::uninit().assume_init()`."##,
+ },
+ Lint {
+ label: "clippy::uninit_vec",
+ description: r##"Checks for `set_len()` call that creates `Vec` with uninitialized elements.
+This is commonly caused by calling `set_len()` right after allocating or
+reserving a buffer with `new()`, `default()`, `with_capacity()`, or `reserve()`."##,
+ },
+ Lint {
+ label: "clippy::unit_arg",
+ description: r##"Checks for passing a unit value as an argument to a function without using a
+unit literal (`()`)."##,
+ },
+ Lint {
+ label: "clippy::unit_cmp",
+ description: r##"Checks for comparisons to unit. This includes all binary
+comparisons (like `==` and `<`) and asserts."##,
+ },
+ Lint { label: "clippy::unit_hash", description: r##"Detects `().hash(_)`."## },
+ Lint {
+ label: "clippy::unit_return_expecting_ord",
+ description: r##"Checks for functions that expect closures of type
+Fn(...) -> Ord where the implemented closure returns the unit type.
+The lint also suggests to remove the semi-colon at the end of the statement if present."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_cast",
+ description: r##"Checks for casts to the same type, casts of int literals to integer types
+and casts of float literals to float types."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_filter_map",
+ description: r##"Checks for `filter_map` calls which could be replaced by `filter` or `map`.
+More specifically it checks if the closure provided is only performing one of the
+filter or map operations and suggests the appropriate option."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_fold",
+ description: r##"Checks for using `fold` when a more succinct alternative exists.
+Specifically, this checks for `fold`s which could be replaced by `any`, `all`,
+`sum` or `product`."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_lazy_evaluations",
+ description: r##"As the counterpart to `or_fun_call`, this lint looks for unnecessary
+lazily evaluated closures on `Option` and `Result`.
+
+This lint suggests changing the following functions, when eager evaluation results in
+simpler code:
+ - `unwrap_or_else` to `unwrap_or`
+ - `and_then` to `and`
+ - `or_else` to `or`
+ - `get_or_insert_with` to `get_or_insert`
+ - `ok_or_else` to `ok_or`"##,
+ },
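+ // A minimal sketch of one of the listed rewrites, assuming `opt: Option<u32>`:
+ //
+ //     let x = opt.unwrap_or_else(|| 0); // flagged: the closure body is cheap and eagerly computable
+ //     let x = opt.unwrap_or(0);         // suggested eager form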
+ Lint {
+ label: "clippy::unnecessary_mut_passed",
+ description: r##"Detects passing a mutable reference to a function that only
+requires an immutable reference."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_operation",
+ description: r##"Checks for expression statements that can be reduced to a
+sub-expression."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_self_imports",
+ description: r##"Checks for imports ending in `::{self}`."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_sort_by",
+ description: r##"Detects uses of `Vec::sort_by` passing in a closure
+which compares the two arguments, either directly or indirectly."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_to_owned",
+ description: r##"Checks for unnecessary calls to [`ToOwned::to_owned`](https://doc.rust-lang.org/std/borrow/trait.ToOwned.html#tymethod.to_owned)
+and other `to_owned`-like functions."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_unwrap",
+ description: r##"Checks for calls of `unwrap[_err]()` that cannot fail."##,
+ },
+ Lint {
+ label: "clippy::unnecessary_wraps",
+ description: r##"Checks for private functions that only return `Ok` or `Some`."##,
+ },
+ Lint {
+ label: "clippy::unneeded_field_pattern",
+ description: r##"Checks for structure field patterns bound to wildcards."##,
+ },
+ Lint {
+ label: "clippy::unneeded_wildcard_pattern",
+ description: r##"Checks for tuple patterns with a wildcard
+pattern (`_`) is next to a rest pattern (`..`).
+
+_NOTE_: While `_, ..` means there is at least one element left, `..`
+means there are 0 or more elements left. This can make a difference
+when refactoring, but shouldn't result in errors in the refactored code,
+since the wildcard pattern isn't used anyway."##,
+ },
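+ // A minimal sketch of the pattern this lint targets, assuming `t: (i32, i32, i32)`:
+ //
+ //     if let (0, _, ..) = t {} // flagged: the `_` next to `..` adds nothing
+ //     if let (0, ..) = t {}    // suggested form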
+ Lint {
+ label: "clippy::unnested_or_patterns",
+ description: r##"Checks for unnested or-patterns, e.g., `Some(0) | Some(2)` and
+suggests replacing the pattern with a nested one, `Some(0 | 2)`.
+
+Another way to think of this is that it rewrites patterns in
+*disjunctive normal form (DNF)* into *conjunctive normal form (CNF)*."##,
+ },
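+ // A minimal sketch of the nesting this lint suggests, assuming `opt: Option<i32>`:
+ //
+ //     matches!(opt, Some(0) | Some(2)); // flagged by clippy::unnested_or_patterns
+ //     matches!(opt, Some(0 | 2));       // suggested nested or-pattern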
+ Lint { label: "clippy::unreachable", description: r##"Checks for usage of `unreachable!`."## },
+ Lint {
+ label: "clippy::unreadable_literal",
+ description: r##"Warns if a long integral or floating-point constant does
+not contain underscores."##,
+ },
+ Lint {
+ label: "clippy::unsafe_derive_deserialize",
+ description: r##"Checks for deriving `serde::Deserialize` on a type that
+has methods using `unsafe`."##,
+ },
+ Lint {
+ label: "clippy::unsafe_removed_from_name",
+ description: r##"Checks for imports that remove unsafe from an item's
+name."##,
+ },
+ Lint {
+ label: "clippy::unsafe_vector_initialization",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::unseparated_literal_suffix",
+ description: r##"Warns if literal suffixes are not separated by an
+underscore.
+To enforce unseparated literal suffix style,
+see the `separated_literal_suffix` lint."##,
+ },
+ Lint {
+ label: "clippy::unsound_collection_transmute",
+ description: r##"Checks for transmutes between collections whose
+types have different ABI, size or alignment."##,
+ },
+ Lint {
+ label: "clippy::unstable_as_mut_slice",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::unstable_as_slice",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::unused_async",
+ description: r##"Checks for functions that are declared `async` but have no `.await`s inside of them."##,
+ },
+ Lint {
+ label: "clippy::unused_collect",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::unused_io_amount",
+ description: r##"Checks for unused written/read amount."##,
+ },
+ Lint {
+ label: "clippy::unused_self",
+ description: r##"Checks methods that contain a `self` argument but don't use it"##,
+ },
+ Lint {
+ label: "clippy::unused_unit",
+ description: r##"Checks for unit (`()`) expressions that can be removed."##,
+ },
+ Lint {
+ label: "clippy::unusual_byte_groupings",
+ description: r##"Warns if hexadecimal or binary literals are not grouped
+by nibble or byte."##,
+ },
+ Lint {
+ label: "clippy::unwrap_in_result",
+ description: r##"Checks for functions of type `Result` that contain `expect()` or `unwrap()`"##,
+ },
+ Lint {
+ label: "clippy::unwrap_or_else_default",
+ description: r##"Checks for usages of `_.unwrap_or_else(Default::default)` on `Option` and
+`Result` values."##,
+ },
+ Lint {
+ label: "clippy::unwrap_used",
+ description: r##"Checks for `.unwrap()` calls on `Option`s and on `Result`s."##,
+ },
+ Lint {
+ label: "clippy::upper_case_acronyms",
+ description: r##"Checks for fully capitalized names and optionally names containing a capitalized acronym."##,
+ },
+ Lint {
+ label: "clippy::use_debug",
+ description: r##"Checks for use of `Debug` formatting. The purpose of this
+lint is to catch debugging remnants."##,
+ },
+ Lint {
+ label: "clippy::use_self",
+ description: r##"Checks for unnecessary repetition of structure name when a
+replacement with `Self` is applicable."##,
+ },
+ Lint {
+ label: "clippy::used_underscore_binding",
+ description: r##"Checks for the use of bindings with a single leading
+underscore."##,
+ },
+ Lint {
+ label: "clippy::useless_asref",
+ description: r##"Checks for usage of `.as_ref()` or `.as_mut()` where the
+types before and after the call are the same."##,
+ },
+ Lint {
+ label: "clippy::useless_attribute",
+ description: r##"Checks for `extern crate` and `use` items annotated with
+lint attributes.
+
+This lint permits `#[allow(unused_imports)]`, `#[allow(deprecated)]`,
+`#[allow(unreachable_pub)]`, `#[allow(clippy::wildcard_imports)]` and
+`#[allow(clippy::enum_glob_use)]` on `use` items and `#[allow(unused_imports)]` on
+`extern crate` items with a `#[macro_use]` attribute."##,
+ },
+ Lint {
+ label: "clippy::useless_conversion",
+ description: r##"Checks for `Into`, `TryInto`, `From`, `TryFrom`, or `IntoIter` calls
+which uselessly convert to the same type."##,
+ },
+ Lint {
+ label: "clippy::useless_format",
+ description: r##"Checks for the use of `format!(string literal with no
+argument)` and `format!({}, foo)` where `foo` is a string."##,
+ },
+ Lint {
+ label: "clippy::useless_let_if_seq",
+ description: r##"Checks for variable declarations immediately followed by a
+conditional assignment."##,
+ },
+ Lint {
+ label: "clippy::useless_transmute",
+ description: r##"Checks for transmutes to the original type of the object
+and transmutes that could be a cast."##,
+ },
+ Lint {
+ label: "clippy::useless_vec",
+ description: r##"Checks for usage of `&vec![..]` when using `&[..]` would
+be possible."##,
+ },
+ Lint {
+ label: "clippy::vec_box",
+ description: r##"Checks for use of `Vec<Box<T>>` where T: Sized anywhere in the code.
+Check the [Box documentation](https://doc.rust-lang.org/std/boxed/index.html) for more information."##,
+ },
+ Lint {
+ label: "clippy::vec_init_then_push",
+ description: r##"Checks for calls to `push` immediately after creating a new `Vec`."##,
+ },
+ Lint {
+ label: "clippy::vec_resize_to_zero",
+ description: r##"Finds occurrences of `Vec::resize(0, an_int)`"##,
+ },
+ Lint {
+ label: "clippy::verbose_bit_mask",
+ description: r##"Checks for bit masks that can be replaced by a call
+to `trailing_zeros`"##,
+ },
+ Lint {
+ label: "clippy::verbose_file_reads",
+ description: r##"Checks for use of File::read_to_end and File::read_to_string."##,
+ },
+ Lint {
+ label: "clippy::vtable_address_comparisons",
+ description: r##"Checks for comparisons with an address of a trait vtable."##,
+ },
+ Lint {
+ label: "clippy::while_immutable_condition",
+ description: r##"Checks whether variables used within while loop condition
+can be (and are) mutated in the body."##,
+ },
+ Lint {
+ label: "clippy::while_let_loop",
+ description: r##"Detects `loop + match` combinations that are easier
+written as a `while let` loop."##,
+ },
+ Lint {
+ label: "clippy::while_let_on_iterator",
+ description: r##"Checks for `while let` expressions on iterators."##,
+ },
+ Lint {
+ label: "clippy::wildcard_dependencies",
+ description: r##"Checks for wildcard dependencies in the `Cargo.toml`."##,
+ },
+ Lint {
+ label: "clippy::wildcard_enum_match_arm",
+ description: r##"Checks for wildcard enum matches using `_`."##,
+ },
+ Lint {
+ label: "clippy::wildcard_imports",
+ description: r##"Checks for wildcard imports `use _::*`."##,
+ },
+ Lint {
+ label: "clippy::wildcard_in_or_patterns",
+ description: r##"Checks for wildcard pattern used with others patterns in same match arm."##,
+ },
+ Lint {
+ label: "clippy::write_literal",
+ description: r##"This lint warns about the use of literals as `write!`/`writeln!` args."##,
+ },
+ Lint {
+ label: "clippy::write_with_newline",
+ description: r##"This lint warns when you use `write!()` with a format
+string that
+ends in a newline."##,
+ },
+ Lint {
+ label: "clippy::writeln_empty_string",
+ description: r##"This lint warns when you use `writeln!(buf, )` to
+print a newline."##,
+ },
+ Lint {
+ label: "clippy::wrong_pub_self_convention",
+ description: r##"Nothing. This lint has been deprecated."##,
+ },
+ Lint {
+ label: "clippy::wrong_self_convention",
+ description: r##"Checks for methods with certain name prefixes and which
+doesn't match how self is taken. The actual rules are:
+
+|Prefix |Postfix |`self` taken | `self` type |
+|-------|------------|-----------------------|--------------|
+|`as_` | none |`&self` or `&mut self` | any |
+|`from_`| none | none | any |
+|`into_`| none |`self` | any |
+|`is_` | none |`&self` or none | any |
+|`to_` | `_mut` |`&mut self` | any |
+|`to_` | not `_mut` |`self` | `Copy` |
+|`to_` | not `_mut` |`&self` | not `Copy` |
+
+Note: Clippy doesn't lint methods with a `to_` prefix in:
+- Trait definitions.
+Clippy cannot tell if a type that implements a trait is `Copy` or not.
+- Trait implementations, when `&self` is taken.
+The method signature is controlled by the trait and often `&self` is required for all types that implement the trait
+(see e.g. the `std::string::ToString` trait).
+
+Clippy allows `Pin<&Self>` and `Pin<&mut Self>` where `&self` or `&mut self` is required.
+
+Please find more info here:
+https://rust-lang.github.io/api-guidelines/naming.html#ad-hoc-conversions-follow-as_-to_-into_-conventions-c-conv"##,
+ },
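+ // A minimal sketch of one row of the table above, for a hypothetical newtype `S(u32)`:
+ //
+ //     fn as_id(self) -> u32 { self.0 }  // flagged: `as_` methods should take `&self` or `&mut self`
+ //     fn as_id(&self) -> u32 { self.0 } // matches the convention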
+ Lint {
+ label: "clippy::wrong_transmute",
+ description: r##"Checks for transmutes that can't ever be correct on any
+architecture."##,
+ },
+ Lint { label: "clippy::zero_divided_by_zero", description: r##"Checks for `0.0 / 0.0`."## },
+ Lint {
+ label: "clippy::zero_prefixed_literal",
+ description: r##"Warns if an integral constant literal starts with `0`."##,
+ },
+ Lint {
+ label: "clippy::zero_ptr",
+ description: r##"Catch casts from `0` to some pointer type"##,
+ },
+ Lint {
+ label: "clippy::zero_sized_map_values",
+ description: r##"Checks for maps with zero-sized value types anywhere in the code."##,
+ },
+ Lint {
+ label: "clippy::zst_offset",
+ description: r##"Checks for `offset(_)`, `wrapping_`{`add`, `sub`}, etc. on raw pointers to
+zero-sized types"##,
+ },
+];
+pub const CLIPPY_LINT_GROUPS: &[LintGroup] = &[
+ LintGroup {
+ lint: Lint {
+ label: "clippy::cargo",
+ description: r##"lint group for: clippy::cargo_common_metadata, clippy::multiple_crate_versions, clippy::negative_feature_names, clippy::redundant_feature_names, clippy::wildcard_dependencies"##,
+ },
+ children: &[
+ "clippy::cargo_common_metadata",
+ "clippy::multiple_crate_versions",
+ "clippy::negative_feature_names",
+ "clippy::redundant_feature_names",
+ "clippy::wildcard_dependencies",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::complexity",
+ description: r##"lint group for: clippy::bind_instead_of_map, clippy::bool_comparison, clippy::borrowed_box, clippy::char_lit_as_u8, clippy::clone_on_copy, clippy::crosspointer_transmute, clippy::deprecated_cfg_attr, clippy::deref_addrof, clippy::derivable_impls, clippy::diverging_sub_expression, clippy::double_comparisons, clippy::double_parens, clippy::duration_subsec, clippy::explicit_counter_loop, clippy::explicit_write, clippy::extra_unused_lifetimes, clippy::filter_map_identity, clippy::filter_next, clippy::flat_map_identity, clippy::get_last_with_len, clippy::identity_op, clippy::inspect_for_each, clippy::int_plus_one, clippy::iter_count, clippy::manual_filter_map, clippy::manual_find_map, clippy::manual_flatten, clippy::manual_split_once, clippy::manual_strip, clippy::manual_swap, clippy::manual_unwrap_or, clippy::map_flatten, clippy::map_identity, clippy::match_as_ref, clippy::match_single_binding, clippy::needless_arbitrary_self_type, clippy::needless_bool, clippy::needless_borrowed_reference, clippy::needless_lifetimes, clippy::needless_option_as_deref, clippy::needless_question_mark, clippy::needless_splitn, clippy::needless_update, clippy::neg_cmp_op_on_partial_ord, clippy::no_effect, clippy::nonminimal_bool, clippy::option_as_ref_deref, clippy::option_filter_map, clippy::option_map_unit_fn, clippy::overflow_check_conditional, clippy::partialeq_ne_impl, clippy::precedence, clippy::ptr_offset_with_cast, clippy::range_zip_with_len, clippy::redundant_closure_call, clippy::redundant_slicing, clippy::ref_in_deref, clippy::repeat_once, clippy::result_map_unit_fn, clippy::search_is_some, clippy::short_circuit_statement, clippy::single_element_loop, clippy::skip_while_next, clippy::string_from_utf8_as_bytes, clippy::strlen_on_c_strings, clippy::temporary_assignment, clippy::too_many_arguments, clippy::transmute_bytes_to_str, clippy::transmute_float_to_int, clippy::transmute_int_to_bool, clippy::transmute_int_to_char, clippy::transmute_int_to_float, clippy::transmute_num_to_bytes, clippy::transmute_ptr_to_ref, clippy::transmutes_expressible_as_ptr_casts, clippy::type_complexity, clippy::unit_arg, clippy::unnecessary_cast, clippy::unnecessary_filter_map, clippy::unnecessary_operation, clippy::unnecessary_sort_by, clippy::unnecessary_unwrap, clippy::unneeded_wildcard_pattern, clippy::useless_asref, clippy::useless_conversion, clippy::useless_format, clippy::vec_box, clippy::while_let_loop, clippy::wildcard_in_or_patterns, clippy::zero_divided_by_zero, clippy::zero_prefixed_literal"##,
+ },
+ children: &[
+ "clippy::bind_instead_of_map",
+ "clippy::bool_comparison",
+ "clippy::borrowed_box",
+ "clippy::char_lit_as_u8",
+ "clippy::clone_on_copy",
+ "clippy::crosspointer_transmute",
+ "clippy::deprecated_cfg_attr",
+ "clippy::deref_addrof",
+ "clippy::derivable_impls",
+ "clippy::diverging_sub_expression",
+ "clippy::double_comparisons",
+ "clippy::double_parens",
+ "clippy::duration_subsec",
+ "clippy::explicit_counter_loop",
+ "clippy::explicit_write",
+ "clippy::extra_unused_lifetimes",
+ "clippy::filter_map_identity",
+ "clippy::filter_next",
+ "clippy::flat_map_identity",
+ "clippy::get_last_with_len",
+ "clippy::identity_op",
+ "clippy::inspect_for_each",
+ "clippy::int_plus_one",
+ "clippy::iter_count",
+ "clippy::manual_filter_map",
+ "clippy::manual_find_map",
+ "clippy::manual_flatten",
+ "clippy::manual_split_once",
+ "clippy::manual_strip",
+ "clippy::manual_swap",
+ "clippy::manual_unwrap_or",
+ "clippy::map_flatten",
+ "clippy::map_identity",
+ "clippy::match_as_ref",
+ "clippy::match_single_binding",
+ "clippy::needless_arbitrary_self_type",
+ "clippy::needless_bool",
+ "clippy::needless_borrowed_reference",
+ "clippy::needless_lifetimes",
+ "clippy::needless_option_as_deref",
+ "clippy::needless_question_mark",
+ "clippy::needless_splitn",
+ "clippy::needless_update",
+ "clippy::neg_cmp_op_on_partial_ord",
+ "clippy::no_effect",
+ "clippy::nonminimal_bool",
+ "clippy::option_as_ref_deref",
+ "clippy::option_filter_map",
+ "clippy::option_map_unit_fn",
+ "clippy::overflow_check_conditional",
+ "clippy::partialeq_ne_impl",
+ "clippy::precedence",
+ "clippy::ptr_offset_with_cast",
+ "clippy::range_zip_with_len",
+ "clippy::redundant_closure_call",
+ "clippy::redundant_slicing",
+ "clippy::ref_in_deref",
+ "clippy::repeat_once",
+ "clippy::result_map_unit_fn",
+ "clippy::search_is_some",
+ "clippy::short_circuit_statement",
+ "clippy::single_element_loop",
+ "clippy::skip_while_next",
+ "clippy::string_from_utf8_as_bytes",
+ "clippy::strlen_on_c_strings",
+ "clippy::temporary_assignment",
+ "clippy::too_many_arguments",
+ "clippy::transmute_bytes_to_str",
+ "clippy::transmute_float_to_int",
+ "clippy::transmute_int_to_bool",
+ "clippy::transmute_int_to_char",
+ "clippy::transmute_int_to_float",
+ "clippy::transmute_num_to_bytes",
+ "clippy::transmute_ptr_to_ref",
+ "clippy::transmutes_expressible_as_ptr_casts",
+ "clippy::type_complexity",
+ "clippy::unit_arg",
+ "clippy::unnecessary_cast",
+ "clippy::unnecessary_filter_map",
+ "clippy::unnecessary_operation",
+ "clippy::unnecessary_sort_by",
+ "clippy::unnecessary_unwrap",
+ "clippy::unneeded_wildcard_pattern",
+ "clippy::useless_asref",
+ "clippy::useless_conversion",
+ "clippy::useless_format",
+ "clippy::vec_box",
+ "clippy::while_let_loop",
+ "clippy::wildcard_in_or_patterns",
+ "clippy::zero_divided_by_zero",
+ "clippy::zero_prefixed_literal",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::correctness",
+ description: r##"lint group for: clippy::absurd_extreme_comparisons, clippy::almost_swapped, clippy::approx_constant, clippy::async_yields_async, clippy::bad_bit_mask, clippy::cast_ref_to_mut, clippy::clone_double_ref, clippy::cmp_nan, clippy::deprecated_semver, clippy::derive_hash_xor_eq, clippy::derive_ord_xor_partial_ord, clippy::drop_copy, clippy::drop_ref, clippy::enum_clike_unportable_variant, clippy::eq_op, clippy::erasing_op, clippy::fn_address_comparisons, clippy::forget_copy, clippy::forget_ref, clippy::if_let_mutex, clippy::if_same_then_else, clippy::ifs_same_cond, clippy::ineffective_bit_mask, clippy::infinite_iter, clippy::inherent_to_string_shadow_display, clippy::inline_fn_without_body, clippy::invalid_null_ptr_usage, clippy::invalid_regex, clippy::invisible_characters, clippy::iter_next_loop, clippy::iterator_step_by_zero, clippy::let_underscore_lock, clippy::logic_bug, clippy::match_str_case_mismatch, clippy::mem_replace_with_uninit, clippy::min_max, clippy::mismatched_target_os, clippy::mistyped_literal_suffixes, clippy::modulo_one, clippy::mut_from_ref, clippy::never_loop, clippy::non_octal_unix_permissions, clippy::nonsensical_open_options, clippy::not_unsafe_ptr_arg_deref, clippy::option_env_unwrap, clippy::out_of_bounds_indexing, clippy::panicking_unwrap, clippy::possible_missing_comma, clippy::reversed_empty_ranges, clippy::self_assignment, clippy::serde_api_misuse, clippy::size_of_in_element_count, clippy::suspicious_splitn, clippy::to_string_in_display, clippy::transmuting_null, clippy::undropped_manually_drops, clippy::uninit_assumed_init, clippy::uninit_vec, clippy::unit_cmp, clippy::unit_hash, clippy::unit_return_expecting_ord, clippy::unsound_collection_transmute, clippy::unused_io_amount, clippy::useless_attribute, clippy::vec_resize_to_zero, clippy::vtable_address_comparisons, clippy::while_immutable_condition, clippy::wrong_transmute, clippy::zst_offset"##,
+ },
+ children: &[
+ "clippy::absurd_extreme_comparisons",
+ "clippy::almost_swapped",
+ "clippy::approx_constant",
+ "clippy::async_yields_async",
+ "clippy::bad_bit_mask",
+ "clippy::cast_ref_to_mut",
+ "clippy::clone_double_ref",
+ "clippy::cmp_nan",
+ "clippy::deprecated_semver",
+ "clippy::derive_hash_xor_eq",
+ "clippy::derive_ord_xor_partial_ord",
+ "clippy::drop_copy",
+ "clippy::drop_ref",
+ "clippy::enum_clike_unportable_variant",
+ "clippy::eq_op",
+ "clippy::erasing_op",
+ "clippy::fn_address_comparisons",
+ "clippy::forget_copy",
+ "clippy::forget_ref",
+ "clippy::if_let_mutex",
+ "clippy::if_same_then_else",
+ "clippy::ifs_same_cond",
+ "clippy::ineffective_bit_mask",
+ "clippy::infinite_iter",
+ "clippy::inherent_to_string_shadow_display",
+ "clippy::inline_fn_without_body",
+ "clippy::invalid_null_ptr_usage",
+ "clippy::invalid_regex",
+ "clippy::invisible_characters",
+ "clippy::iter_next_loop",
+ "clippy::iterator_step_by_zero",
+ "clippy::let_underscore_lock",
+ "clippy::logic_bug",
+ "clippy::match_str_case_mismatch",
+ "clippy::mem_replace_with_uninit",
+ "clippy::min_max",
+ "clippy::mismatched_target_os",
+ "clippy::mistyped_literal_suffixes",
+ "clippy::modulo_one",
+ "clippy::mut_from_ref",
+ "clippy::never_loop",
+ "clippy::non_octal_unix_permissions",
+ "clippy::nonsensical_open_options",
+ "clippy::not_unsafe_ptr_arg_deref",
+ "clippy::option_env_unwrap",
+ "clippy::out_of_bounds_indexing",
+ "clippy::panicking_unwrap",
+ "clippy::possible_missing_comma",
+ "clippy::reversed_empty_ranges",
+ "clippy::self_assignment",
+ "clippy::serde_api_misuse",
+ "clippy::size_of_in_element_count",
+ "clippy::suspicious_splitn",
+ "clippy::to_string_in_display",
+ "clippy::transmuting_null",
+ "clippy::undropped_manually_drops",
+ "clippy::uninit_assumed_init",
+ "clippy::uninit_vec",
+ "clippy::unit_cmp",
+ "clippy::unit_hash",
+ "clippy::unit_return_expecting_ord",
+ "clippy::unsound_collection_transmute",
+ "clippy::unused_io_amount",
+ "clippy::useless_attribute",
+ "clippy::vec_resize_to_zero",
+ "clippy::vtable_address_comparisons",
+ "clippy::while_immutable_condition",
+ "clippy::wrong_transmute",
+ "clippy::zst_offset",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::deprecated",
+ description: r##"lint group for: clippy::assign_ops, clippy::extend_from_slice, clippy::filter_map, clippy::find_map, clippy::if_let_redundant_pattern_matching, clippy::misaligned_transmute, clippy::pub_enum_variant_names, clippy::range_step_by_zero, clippy::regex_macro, clippy::replace_consts, clippy::should_assert_eq, clippy::unsafe_vector_initialization, clippy::unstable_as_mut_slice, clippy::unstable_as_slice, clippy::unused_collect, clippy::wrong_pub_self_convention"##,
+ },
+ children: &[
+ "clippy::assign_ops",
+ "clippy::extend_from_slice",
+ "clippy::filter_map",
+ "clippy::find_map",
+ "clippy::if_let_redundant_pattern_matching",
+ "clippy::misaligned_transmute",
+ "clippy::pub_enum_variant_names",
+ "clippy::range_step_by_zero",
+ "clippy::regex_macro",
+ "clippy::replace_consts",
+ "clippy::should_assert_eq",
+ "clippy::unsafe_vector_initialization",
+ "clippy::unstable_as_mut_slice",
+ "clippy::unstable_as_slice",
+ "clippy::unused_collect",
+ "clippy::wrong_pub_self_convention",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::nursery",
+ description: r##"lint group for: clippy::branches_sharing_code, clippy::cognitive_complexity, clippy::debug_assert_with_mut_call, clippy::disallowed_methods, clippy::disallowed_types, clippy::empty_line_after_outer_attr, clippy::equatable_if_let, clippy::fallible_impl_from, clippy::future_not_send, clippy::imprecise_flops, clippy::index_refutable_slice, clippy::missing_const_for_fn, clippy::mutex_integer, clippy::non_send_fields_in_send_ty, clippy::nonstandard_macro_braces, clippy::option_if_let_else, clippy::path_buf_push_overwrite, clippy::redundant_pub_crate, clippy::string_lit_as_bytes, clippy::suboptimal_flops, clippy::suspicious_operation_groupings, clippy::trailing_empty_array, clippy::trivial_regex, clippy::use_self, clippy::useless_let_if_seq, clippy::useless_transmute"##,
+ },
+ children: &[
+ "clippy::branches_sharing_code",
+ "clippy::cognitive_complexity",
+ "clippy::debug_assert_with_mut_call",
+ "clippy::disallowed_methods",
+ "clippy::disallowed_types",
+ "clippy::empty_line_after_outer_attr",
+ "clippy::equatable_if_let",
+ "clippy::fallible_impl_from",
+ "clippy::future_not_send",
+ "clippy::imprecise_flops",
+ "clippy::index_refutable_slice",
+ "clippy::missing_const_for_fn",
+ "clippy::mutex_integer",
+ "clippy::non_send_fields_in_send_ty",
+ "clippy::nonstandard_macro_braces",
+ "clippy::option_if_let_else",
+ "clippy::path_buf_push_overwrite",
+ "clippy::redundant_pub_crate",
+ "clippy::string_lit_as_bytes",
+ "clippy::suboptimal_flops",
+ "clippy::suspicious_operation_groupings",
+ "clippy::trailing_empty_array",
+ "clippy::trivial_regex",
+ "clippy::use_self",
+ "clippy::useless_let_if_seq",
+ "clippy::useless_transmute",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::pedantic",
+ description: r##"lint group for: clippy::await_holding_lock, clippy::await_holding_refcell_ref, clippy::case_sensitive_file_extension_comparisons, clippy::cast_lossless, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_ptr_alignment, clippy::cast_sign_loss, clippy::checked_conversions, clippy::cloned_instead_of_copied, clippy::copy_iterator, clippy::default_trait_access, clippy::doc_markdown, clippy::empty_enum, clippy::enum_glob_use, clippy::expl_impl_clone_on_copy, clippy::explicit_deref_methods, clippy::explicit_into_iter_loop, clippy::explicit_iter_loop, clippy::filter_map_next, clippy::flat_map_option, clippy::float_cmp, clippy::fn_params_excessive_bools, clippy::from_iter_instead_of_collect, clippy::if_not_else, clippy::implicit_clone, clippy::implicit_hasher, clippy::implicit_saturating_sub, clippy::inconsistent_struct_constructor, clippy::inefficient_to_string, clippy::inline_always, clippy::invalid_upcast_comparisons, clippy::items_after_statements, clippy::iter_not_returning_iterator, clippy::large_digit_groups, clippy::large_stack_arrays, clippy::large_types_passed_by_value, clippy::let_underscore_drop, clippy::let_unit_value, clippy::linkedlist, clippy::macro_use_imports, clippy::manual_assert, clippy::manual_ok_or, clippy::many_single_char_names, clippy::map_unwrap_or, clippy::match_bool, clippy::match_on_vec_items, clippy::match_same_arms, clippy::match_wild_err_arm, clippy::match_wildcard_for_single_variants, clippy::maybe_infinite_iter, clippy::missing_errors_doc, clippy::missing_panics_doc, clippy::module_name_repetitions, clippy::must_use_candidate, clippy::mut_mut, clippy::naive_bytecount, clippy::needless_bitwise_bool, clippy::needless_continue, clippy::needless_for_each, clippy::needless_pass_by_value, clippy::no_effect_underscore_binding, clippy::option_option, clippy::ptr_as_ptr, clippy::range_minus_one, clippy::range_plus_one, clippy::redundant_closure_for_method_calls, clippy::redundant_else, clippy::ref_binding_to_reference, clippy::ref_option_ref, clippy::same_functions_in_if_condition, clippy::semicolon_if_nothing_returned, clippy::similar_names, clippy::single_match_else, clippy::string_add_assign, clippy::struct_excessive_bools, clippy::too_many_lines, clippy::trait_duplication_in_bounds, clippy::transmute_ptr_to_ptr, clippy::trivially_copy_pass_by_ref, clippy::type_repetition_in_bounds, clippy::unicode_not_nfc, clippy::unnecessary_wraps, clippy::unnested_or_patterns, clippy::unreadable_literal, clippy::unsafe_derive_deserialize, clippy::unused_async, clippy::unused_self, clippy::used_underscore_binding, clippy::verbose_bit_mask, clippy::wildcard_imports, clippy::zero_sized_map_values"##,
+ },
+ children: &[
+ "clippy::await_holding_lock",
+ "clippy::await_holding_refcell_ref",
+ "clippy::case_sensitive_file_extension_comparisons",
+ "clippy::cast_lossless",
+ "clippy::cast_possible_truncation",
+ "clippy::cast_possible_wrap",
+ "clippy::cast_precision_loss",
+ "clippy::cast_ptr_alignment",
+ "clippy::cast_sign_loss",
+ "clippy::checked_conversions",
+ "clippy::cloned_instead_of_copied",
+ "clippy::copy_iterator",
+ "clippy::default_trait_access",
+ "clippy::doc_markdown",
+ "clippy::empty_enum",
+ "clippy::enum_glob_use",
+ "clippy::expl_impl_clone_on_copy",
+ "clippy::explicit_deref_methods",
+ "clippy::explicit_into_iter_loop",
+ "clippy::explicit_iter_loop",
+ "clippy::filter_map_next",
+ "clippy::flat_map_option",
+ "clippy::float_cmp",
+ "clippy::fn_params_excessive_bools",
+ "clippy::from_iter_instead_of_collect",
+ "clippy::if_not_else",
+ "clippy::implicit_clone",
+ "clippy::implicit_hasher",
+ "clippy::implicit_saturating_sub",
+ "clippy::inconsistent_struct_constructor",
+ "clippy::inefficient_to_string",
+ "clippy::inline_always",
+ "clippy::invalid_upcast_comparisons",
+ "clippy::items_after_statements",
+ "clippy::iter_not_returning_iterator",
+ "clippy::large_digit_groups",
+ "clippy::large_stack_arrays",
+ "clippy::large_types_passed_by_value",
+ "clippy::let_underscore_drop",
+ "clippy::let_unit_value",
+ "clippy::linkedlist",
+ "clippy::macro_use_imports",
+ "clippy::manual_assert",
+ "clippy::manual_ok_or",
+ "clippy::many_single_char_names",
+ "clippy::map_unwrap_or",
+ "clippy::match_bool",
+ "clippy::match_on_vec_items",
+ "clippy::match_same_arms",
+ "clippy::match_wild_err_arm",
+ "clippy::match_wildcard_for_single_variants",
+ "clippy::maybe_infinite_iter",
+ "clippy::missing_errors_doc",
+ "clippy::missing_panics_doc",
+ "clippy::module_name_repetitions",
+ "clippy::must_use_candidate",
+ "clippy::mut_mut",
+ "clippy::naive_bytecount",
+ "clippy::needless_bitwise_bool",
+ "clippy::needless_continue",
+ "clippy::needless_for_each",
+ "clippy::needless_pass_by_value",
+ "clippy::no_effect_underscore_binding",
+ "clippy::option_option",
+ "clippy::ptr_as_ptr",
+ "clippy::range_minus_one",
+ "clippy::range_plus_one",
+ "clippy::redundant_closure_for_method_calls",
+ "clippy::redundant_else",
+ "clippy::ref_binding_to_reference",
+ "clippy::ref_option_ref",
+ "clippy::same_functions_in_if_condition",
+ "clippy::semicolon_if_nothing_returned",
+ "clippy::similar_names",
+ "clippy::single_match_else",
+ "clippy::string_add_assign",
+ "clippy::struct_excessive_bools",
+ "clippy::too_many_lines",
+ "clippy::trait_duplication_in_bounds",
+ "clippy::transmute_ptr_to_ptr",
+ "clippy::trivially_copy_pass_by_ref",
+ "clippy::type_repetition_in_bounds",
+ "clippy::unicode_not_nfc",
+ "clippy::unnecessary_wraps",
+ "clippy::unnested_or_patterns",
+ "clippy::unreadable_literal",
+ "clippy::unsafe_derive_deserialize",
+ "clippy::unused_async",
+ "clippy::unused_self",
+ "clippy::used_underscore_binding",
+ "clippy::verbose_bit_mask",
+ "clippy::wildcard_imports",
+ "clippy::zero_sized_map_values",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::perf",
+ description: r##"lint group for: clippy::box_collection, clippy::boxed_local, clippy::cmp_owned, clippy::expect_fun_call, clippy::extend_with_drain, clippy::format_in_format_args, clippy::iter_nth, clippy::large_const_arrays, clippy::large_enum_variant, clippy::manual_memcpy, clippy::manual_str_repeat, clippy::map_entry, clippy::mutex_atomic, clippy::needless_collect, clippy::or_fun_call, clippy::redundant_allocation, clippy::redundant_clone, clippy::single_char_pattern, clippy::slow_vector_initialization, clippy::stable_sort_primitive, clippy::to_string_in_format_args, clippy::unnecessary_to_owned, clippy::useless_vec, clippy::vec_init_then_push"##,
+ },
+ children: &[
+ "clippy::box_collection",
+ "clippy::boxed_local",
+ "clippy::cmp_owned",
+ "clippy::expect_fun_call",
+ "clippy::extend_with_drain",
+ "clippy::format_in_format_args",
+ "clippy::iter_nth",
+ "clippy::large_const_arrays",
+ "clippy::large_enum_variant",
+ "clippy::manual_memcpy",
+ "clippy::manual_str_repeat",
+ "clippy::map_entry",
+ "clippy::mutex_atomic",
+ "clippy::needless_collect",
+ "clippy::or_fun_call",
+ "clippy::redundant_allocation",
+ "clippy::redundant_clone",
+ "clippy::single_char_pattern",
+ "clippy::slow_vector_initialization",
+ "clippy::stable_sort_primitive",
+ "clippy::to_string_in_format_args",
+ "clippy::unnecessary_to_owned",
+ "clippy::useless_vec",
+ "clippy::vec_init_then_push",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::restriction",
+ description: r##"lint group for: clippy::as_conversions, clippy::clone_on_ref_ptr, clippy::create_dir, clippy::dbg_macro, clippy::decimal_literal_representation, clippy::default_numeric_fallback, clippy::disallowed_script_idents, clippy::else_if_without_else, clippy::exhaustive_enums, clippy::exhaustive_structs, clippy::exit, clippy::expect_used, clippy::filetype_is_file, clippy::float_arithmetic, clippy::float_cmp_const, clippy::fn_to_numeric_cast_any, clippy::get_unwrap, clippy::if_then_some_else_none, clippy::implicit_return, clippy::indexing_slicing, clippy::inline_asm_x86_att_syntax, clippy::inline_asm_x86_intel_syntax, clippy::integer_arithmetic, clippy::integer_division, clippy::let_underscore_must_use, clippy::lossy_float_literal, clippy::map_err_ignore, clippy::mem_forget, clippy::missing_docs_in_private_items, clippy::missing_enforced_import_renames, clippy::missing_inline_in_public_items, clippy::mod_module_files, clippy::modulo_arithmetic, clippy::multiple_inherent_impl, clippy::non_ascii_literal, clippy::panic, clippy::panic_in_result_fn, clippy::pattern_type_mismatch, clippy::print_stderr, clippy::print_stdout, clippy::rc_buffer, clippy::rc_mutex, clippy::rest_pat_in_fully_bound_structs, clippy::same_name_method, clippy::self_named_module_files, clippy::separated_literal_suffix, clippy::shadow_reuse, clippy::shadow_same, clippy::shadow_unrelated, clippy::str_to_string, clippy::string_add, clippy::string_slice, clippy::string_to_string, clippy::todo, clippy::undocumented_unsafe_blocks, clippy::unimplemented, clippy::unnecessary_self_imports, clippy::unneeded_field_pattern, clippy::unreachable, clippy::unseparated_literal_suffix, clippy::unwrap_in_result, clippy::unwrap_used, clippy::use_debug, clippy::verbose_file_reads, clippy::wildcard_enum_match_arm"##,
+ },
+ children: &[
+ "clippy::as_conversions",
+ "clippy::clone_on_ref_ptr",
+ "clippy::create_dir",
+ "clippy::dbg_macro",
+ "clippy::decimal_literal_representation",
+ "clippy::default_numeric_fallback",
+ "clippy::disallowed_script_idents",
+ "clippy::else_if_without_else",
+ "clippy::exhaustive_enums",
+ "clippy::exhaustive_structs",
+ "clippy::exit",
+ "clippy::expect_used",
+ "clippy::filetype_is_file",
+ "clippy::float_arithmetic",
+ "clippy::float_cmp_const",
+ "clippy::fn_to_numeric_cast_any",
+ "clippy::get_unwrap",
+ "clippy::if_then_some_else_none",
+ "clippy::implicit_return",
+ "clippy::indexing_slicing",
+ "clippy::inline_asm_x86_att_syntax",
+ "clippy::inline_asm_x86_intel_syntax",
+ "clippy::integer_arithmetic",
+ "clippy::integer_division",
+ "clippy::let_underscore_must_use",
+ "clippy::lossy_float_literal",
+ "clippy::map_err_ignore",
+ "clippy::mem_forget",
+ "clippy::missing_docs_in_private_items",
+ "clippy::missing_enforced_import_renames",
+ "clippy::missing_inline_in_public_items",
+ "clippy::mod_module_files",
+ "clippy::modulo_arithmetic",
+ "clippy::multiple_inherent_impl",
+ "clippy::non_ascii_literal",
+ "clippy::panic",
+ "clippy::panic_in_result_fn",
+ "clippy::pattern_type_mismatch",
+ "clippy::print_stderr",
+ "clippy::print_stdout",
+ "clippy::rc_buffer",
+ "clippy::rc_mutex",
+ "clippy::rest_pat_in_fully_bound_structs",
+ "clippy::same_name_method",
+ "clippy::self_named_module_files",
+ "clippy::separated_literal_suffix",
+ "clippy::shadow_reuse",
+ "clippy::shadow_same",
+ "clippy::shadow_unrelated",
+ "clippy::str_to_string",
+ "clippy::string_add",
+ "clippy::string_slice",
+ "clippy::string_to_string",
+ "clippy::todo",
+ "clippy::undocumented_unsafe_blocks",
+ "clippy::unimplemented",
+ "clippy::unnecessary_self_imports",
+ "clippy::unneeded_field_pattern",
+ "clippy::unreachable",
+ "clippy::unseparated_literal_suffix",
+ "clippy::unwrap_in_result",
+ "clippy::unwrap_used",
+ "clippy::use_debug",
+ "clippy::verbose_file_reads",
+ "clippy::wildcard_enum_match_arm",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::style",
+ description: r##"lint group for: clippy::assertions_on_constants, clippy::assign_op_pattern, clippy::blacklisted_name, clippy::blocks_in_if_conditions, clippy::bool_assert_comparison, clippy::borrow_interior_mutable_const, clippy::builtin_type_shadow, clippy::bytes_nth, clippy::chars_last_cmp, clippy::chars_next_cmp, clippy::cmp_null, clippy::collapsible_else_if, clippy::collapsible_if, clippy::collapsible_match, clippy::comparison_chain, clippy::comparison_to_empty, clippy::declare_interior_mutable_const, clippy::double_must_use, clippy::double_neg, clippy::duplicate_underscore_argument, clippy::enum_variant_names, clippy::excessive_precision, clippy::field_reassign_with_default, clippy::fn_to_numeric_cast, clippy::fn_to_numeric_cast_with_truncation, clippy::for_kv_map, clippy::from_over_into, clippy::from_str_radix_10, clippy::inconsistent_digit_grouping, clippy::infallible_destructuring_match, clippy::inherent_to_string, clippy::into_iter_on_ref, clippy::iter_cloned_collect, clippy::iter_next_slice, clippy::iter_nth_zero, clippy::iter_skip_next, clippy::just_underscores_and_digits, clippy::len_without_is_empty, clippy::len_zero, clippy::let_and_return, clippy::main_recursion, clippy::manual_async_fn, clippy::manual_map, clippy::manual_non_exhaustive, clippy::manual_range_contains, clippy::manual_saturating_arithmetic, clippy::map_clone, clippy::map_collect_result_unit, clippy::match_like_matches_macro, clippy::match_overlapping_arm, clippy::match_ref_pats, clippy::match_result_ok, clippy::mem_replace_option_with_none, clippy::mem_replace_with_default, clippy::missing_safety_doc, clippy::mixed_case_hex_literals, clippy::module_inception, clippy::must_use_unit, clippy::mut_mutex_lock, clippy::needless_borrow, clippy::needless_doctest_main, clippy::needless_late_init, clippy::needless_range_loop, clippy::needless_return, clippy::neg_multiply, clippy::new_ret_no_self, clippy::new_without_default, clippy::ok_expect, clippy::op_ref, clippy::option_map_or_none, clippy::print_literal, clippy::print_with_newline, clippy::println_empty_string, clippy::ptr_arg, clippy::ptr_eq, clippy::question_mark, clippy::redundant_closure, clippy::redundant_field_names, clippy::redundant_pattern, clippy::redundant_pattern_matching, clippy::redundant_static_lifetimes, clippy::result_map_or_into_option, clippy::result_unit_err, clippy::same_item_push, clippy::self_named_constructors, clippy::should_implement_trait, clippy::single_char_add_str, clippy::single_component_path_imports, clippy::single_match, clippy::string_extend_chars, clippy::tabs_in_doc_comments, clippy::to_digit_is_some, clippy::toplevel_ref_arg, clippy::try_err, clippy::unnecessary_fold, clippy::unnecessary_lazy_evaluations, clippy::unnecessary_mut_passed, clippy::unsafe_removed_from_name, clippy::unused_unit, clippy::unusual_byte_groupings, clippy::unwrap_or_else_default, clippy::upper_case_acronyms, clippy::while_let_on_iterator, clippy::write_literal, clippy::write_with_newline, clippy::writeln_empty_string, clippy::wrong_self_convention, clippy::zero_ptr"##,
+ },
+ children: &[
+ "clippy::assertions_on_constants",
+ "clippy::assign_op_pattern",
+ "clippy::blacklisted_name",
+ "clippy::blocks_in_if_conditions",
+ "clippy::bool_assert_comparison",
+ "clippy::borrow_interior_mutable_const",
+ "clippy::builtin_type_shadow",
+ "clippy::bytes_nth",
+ "clippy::chars_last_cmp",
+ "clippy::chars_next_cmp",
+ "clippy::cmp_null",
+ "clippy::collapsible_else_if",
+ "clippy::collapsible_if",
+ "clippy::collapsible_match",
+ "clippy::comparison_chain",
+ "clippy::comparison_to_empty",
+ "clippy::declare_interior_mutable_const",
+ "clippy::double_must_use",
+ "clippy::double_neg",
+ "clippy::duplicate_underscore_argument",
+ "clippy::enum_variant_names",
+ "clippy::excessive_precision",
+ "clippy::field_reassign_with_default",
+ "clippy::fn_to_numeric_cast",
+ "clippy::fn_to_numeric_cast_with_truncation",
+ "clippy::for_kv_map",
+ "clippy::from_over_into",
+ "clippy::from_str_radix_10",
+ "clippy::inconsistent_digit_grouping",
+ "clippy::infallible_destructuring_match",
+ "clippy::inherent_to_string",
+ "clippy::into_iter_on_ref",
+ "clippy::iter_cloned_collect",
+ "clippy::iter_next_slice",
+ "clippy::iter_nth_zero",
+ "clippy::iter_skip_next",
+ "clippy::just_underscores_and_digits",
+ "clippy::len_without_is_empty",
+ "clippy::len_zero",
+ "clippy::let_and_return",
+ "clippy::main_recursion",
+ "clippy::manual_async_fn",
+ "clippy::manual_map",
+ "clippy::manual_non_exhaustive",
+ "clippy::manual_range_contains",
+ "clippy::manual_saturating_arithmetic",
+ "clippy::map_clone",
+ "clippy::map_collect_result_unit",
+ "clippy::match_like_matches_macro",
+ "clippy::match_overlapping_arm",
+ "clippy::match_ref_pats",
+ "clippy::match_result_ok",
+ "clippy::mem_replace_option_with_none",
+ "clippy::mem_replace_with_default",
+ "clippy::missing_safety_doc",
+ "clippy::mixed_case_hex_literals",
+ "clippy::module_inception",
+ "clippy::must_use_unit",
+ "clippy::mut_mutex_lock",
+ "clippy::needless_borrow",
+ "clippy::needless_doctest_main",
+ "clippy::needless_late_init",
+ "clippy::needless_range_loop",
+ "clippy::needless_return",
+ "clippy::neg_multiply",
+ "clippy::new_ret_no_self",
+ "clippy::new_without_default",
+ "clippy::ok_expect",
+ "clippy::op_ref",
+ "clippy::option_map_or_none",
+ "clippy::print_literal",
+ "clippy::print_with_newline",
+ "clippy::println_empty_string",
+ "clippy::ptr_arg",
+ "clippy::ptr_eq",
+ "clippy::question_mark",
+ "clippy::redundant_closure",
+ "clippy::redundant_field_names",
+ "clippy::redundant_pattern",
+ "clippy::redundant_pattern_matching",
+ "clippy::redundant_static_lifetimes",
+ "clippy::result_map_or_into_option",
+ "clippy::result_unit_err",
+ "clippy::same_item_push",
+ "clippy::self_named_constructors",
+ "clippy::should_implement_trait",
+ "clippy::single_char_add_str",
+ "clippy::single_component_path_imports",
+ "clippy::single_match",
+ "clippy::string_extend_chars",
+ "clippy::tabs_in_doc_comments",
+ "clippy::to_digit_is_some",
+ "clippy::toplevel_ref_arg",
+ "clippy::try_err",
+ "clippy::unnecessary_fold",
+ "clippy::unnecessary_lazy_evaluations",
+ "clippy::unnecessary_mut_passed",
+ "clippy::unsafe_removed_from_name",
+ "clippy::unused_unit",
+ "clippy::unusual_byte_groupings",
+ "clippy::unwrap_or_else_default",
+ "clippy::upper_case_acronyms",
+ "clippy::while_let_on_iterator",
+ "clippy::write_literal",
+ "clippy::write_with_newline",
+ "clippy::writeln_empty_string",
+ "clippy::wrong_self_convention",
+ "clippy::zero_ptr",
+ ],
+ },
+ LintGroup {
+ lint: Lint {
+ label: "clippy::suspicious",
+ description: r##"lint group for: clippy::blanket_clippy_restriction_lints, clippy::empty_loop, clippy::eval_order_dependence, clippy::float_equality_without_abs, clippy::for_loops_over_fallibles, clippy::misrefactored_assign_op, clippy::mut_range_bound, clippy::mutable_key_type, clippy::octal_escapes, clippy::return_self_not_must_use, clippy::suspicious_arithmetic_impl, clippy::suspicious_assignment_formatting, clippy::suspicious_else_formatting, clippy::suspicious_map, clippy::suspicious_op_assign_impl, clippy::suspicious_unary_op_formatting"##,
+ },
+ children: &[
+ "clippy::blanket_clippy_restriction_lints",
+ "clippy::empty_loop",
+ "clippy::eval_order_dependence",
+ "clippy::float_equality_without_abs",
+ "clippy::for_loops_over_fallibles",
+ "clippy::misrefactored_assign_op",
+ "clippy::mut_range_bound",
+ "clippy::mutable_key_type",
+ "clippy::octal_escapes",
+ "clippy::return_self_not_must_use",
+ "clippy::suspicious_arithmetic_impl",
+ "clippy::suspicious_assignment_formatting",
+ "clippy::suspicious_else_formatting",
+ "clippy::suspicious_map",
+ "clippy::suspicious_op_assign_impl",
+ "clippy::suspicious_unary_op_formatting",
+ ],
+ },
+];
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/helpers.rs b/src/tools/rust-analyzer/crates/ide-db/src/helpers.rs
new file mode 100644
index 000000000..6e56efe34
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/helpers.rs
@@ -0,0 +1,105 @@
+//! Random assortment of ide helpers for high-level ide features that don't fit in any other module.
+
+use std::collections::VecDeque;
+
+use base_db::FileId;
+use hir::{ItemInNs, ModuleDef, Name, Semantics};
+use syntax::{
+ ast::{self, make},
+ AstToken, SyntaxKind, SyntaxToken, TokenAtOffset,
+};
+
+use crate::{defs::Definition, generated, RootDatabase};
+
+pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
+ match item {
+ ItemInNs::Types(module_def_id) => module_def_id.name(db),
+ ItemInNs::Values(module_def_id) => module_def_id.name(db),
+ ItemInNs::Macros(macro_def_id) => Some(macro_def_id.name(db)),
+ }
+}
+
+/// Picks the token with the highest rank returned by the passed-in function.
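+///
+/// An illustrative sketch (not a doctest; `file` and `offset` are assumed context, not part of this module):
+///
+/// ```ignore
+/// // Prefer identifiers over trivia/punctuation when two tokens touch the cursor offset.
+/// let best = pick_best_token(file.syntax().token_at_offset(offset), |kind| match kind {
+///     SyntaxKind::IDENT => 2,
+///     kind if kind.is_trivia() => 0,
+///     _ => 1,
+/// });
+/// ```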
+pub fn pick_best_token(
+ tokens: TokenAtOffset<SyntaxToken>,
+ f: impl Fn(SyntaxKind) -> usize,
+) -> Option<SyntaxToken> {
+ tokens.max_by_key(move |t| f(t.kind()))
+}
+pub fn pick_token<T: AstToken>(mut tokens: TokenAtOffset<SyntaxToken>) -> Option<T> {
+ tokens.find_map(T::cast)
+}
+
+/// Converts the mod path struct into its ast representation.
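+///
+/// An illustrative sketch of the mapping (how the `ModPath` itself is constructed is assumed context):
+///
+/// ```ignore
+/// // A `ModPath` of kind `Crate` with segments `foo` and `Bar`
+/// // is rendered as the ast path `crate::foo::Bar`.
+/// ```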
+pub fn mod_path_to_ast(path: &hir::ModPath) -> ast::Path {
+ let _p = profile::span("mod_path_to_ast");
+
+ let mut segments = Vec::new();
+ let mut is_abs = false;
+ match path.kind {
+ hir::PathKind::Plain => {}
+ hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
+ hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
+ hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
+ segments.push(make::path_segment_crate())
+ }
+ hir::PathKind::Abs => is_abs = true,
+ }
+
+ segments.extend(
+ path.segments()
+ .iter()
+ .map(|segment| make::path_segment(make::name_ref(&segment.to_smol_str()))),
+ );
+ make::path_from_segments(segments, is_abs)
+}
+
+/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
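+///
+/// An illustrative sketch of a typical call site; `sema` and `file_id` are assumed context:
+///
+/// ```ignore
+/// let mut defs = Vec::new();
+/// visit_file_defs(&sema, file_id, &mut |def| defs.push(def));
+/// ```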
+pub fn visit_file_defs(
+ sema: &Semantics<'_, RootDatabase>,
+ file_id: FileId,
+ cb: &mut dyn FnMut(Definition),
+) {
+ let db = sema.db;
+ let module = match sema.to_module_def(file_id) {
+ Some(it) => it,
+ None => return,
+ };
+ let mut defs: VecDeque<_> = module.declarations(db).into();
+ while let Some(def) = defs.pop_front() {
+ if let ModuleDef::Module(submodule) = def {
+ if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
+ defs.extend(submodule.declarations(db));
+ submodule.impl_defs(db).into_iter().for_each(|impl_| cb(impl_.into()));
+ }
+ }
+ cb(def.into());
+ }
+ module.impl_defs(db).into_iter().for_each(|impl_| cb(impl_.into()));
+
+ let is_root = module.is_crate_root(db);
+ module
+ .legacy_macros(db)
+ .into_iter()
+ // don't show legacy macros declared in the crate-root that were already covered in declarations earlier
+ .filter(|it| !(is_root && it.is_macro_export(db)))
+ .for_each(|mac| cb(mac.into()));
+}
+
+/// Checks whether the given lint is equal to, or contained in, the other lint, which may or may not be a group.
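+///
+/// An illustrative sketch (doctests are disabled for this crate, hence `ignore`);
+/// the lint names below come from the generated lint tables in `generated::lints`:
+///
+/// ```ignore
+/// assert!(lint_eq_or_in_group("clippy::or_fun_call", "clippy::or_fun_call"));
+/// // `clippy::or_fun_call` is listed as a child of the `clippy::perf` group.
+/// assert!(lint_eq_or_in_group("clippy::or_fun_call", "clippy::perf"));
+/// assert!(!lint_eq_or_in_group("clippy::or_fun_call", "clippy::restriction"));
+/// ```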
+pub fn lint_eq_or_in_group(lint: &str, lint_is: &str) -> bool {
+ if lint == lint_is {
+ return true;
+ }
+
+ if let Some(group) = generated::lints::DEFAULT_LINT_GROUPS
+ .iter()
+ .chain(generated::lints::CLIPPY_LINT_GROUPS.iter())
+ .chain(generated::lints::RUSTDOC_LINT_GROUPS.iter())
+ .find(|&check| check.lint.label == lint_is)
+ {
+ group.children.contains(&lint)
+ } else {
+ false
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/imports/import_assets.rs b/src/tools/rust-analyzer/crates/ide-db/src/imports/import_assets.rs
new file mode 100644
index 000000000..26ef86155
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/imports/import_assets.rs
@@ -0,0 +1,674 @@
+//! Look up accessible paths for items.
+use hir::{
+ AsAssocItem, AssocItem, AssocItemContainer, Crate, ItemInNs, ModPath, Module, ModuleDef,
+ PathResolution, PrefixKind, ScopeDef, Semantics, SemanticsScope, Type,
+};
+use itertools::Itertools;
+use rustc_hash::FxHashSet;
+use syntax::{
+ ast::{self, HasName},
+ utils::path_to_string_stripping_turbo_fish,
+ AstNode, SyntaxNode,
+};
+
+use crate::{
+ helpers::item_name,
+ items_locator::{self, AssocItemSearch, DEFAULT_QUERY_SEARCH_LIMIT},
+ RootDatabase,
+};
+
+/// A candidate for import, derived during various IDE activities:
+/// * completion with on-the-fly import proposals
+/// * completion edit resolve requests
+/// * assists
+/// * etc.
+#[derive(Debug)]
+pub enum ImportCandidate {
+ /// A path, qualified (`std::collections::HashMap`) or not (`HashMap`).
+ Path(PathImportCandidate),
+ /// A trait associated function (with no self parameter) or an associated constant.
+ /// For `test_mod::TestEnum::test_function`, `receiver_ty` is the `test_mod::TestEnum` expression type
+ /// and `assoc_item_name` is `test_function`.
+ TraitAssocItem(TraitImportCandidate),
+ /// A trait method with self parameter.
+ /// For `test_enum.test_method()`, `receiver_ty` is the `test_enum` expression type
+ /// and `assoc_item_name` is `test_method`.
+ TraitMethod(TraitImportCandidate),
+}
+
+/// A trait import needed for a given associated item access.
+/// For `some::path::SomeStruct::ASSOC_`, contains the
+/// type of `some::path::SomeStruct` and `ASSOC_` as the item name.
+#[derive(Debug)]
+pub struct TraitImportCandidate {
+ /// The type of the item on which the associated item is accessed.
+ pub receiver_ty: Type,
+ /// The associated item name that the trait to import should contain.
+ pub assoc_item_name: NameToImport,
+}
+
+/// Path import for a given name, qualified or not.
+#[derive(Debug)]
+pub struct PathImportCandidate {
+ /// Optional qualifier before name.
+ pub qualifier: Option<FirstSegmentUnresolved>,
+ /// The name the item (struct, trait, enum, etc.) should have.
+ pub name: NameToImport,
+}
+
+/// A qualifier whose first segment is unresolved.
+#[derive(Debug)]
+pub struct FirstSegmentUnresolved {
+ first_segment: ast::NameRef,
+ full_qualifier: ast::Path,
+}
+
+/// A name that will be used during item lookups.
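+///
+/// An illustrative sketch of the intended matching semantics:
+///
+/// ```ignore
+/// // Matches only items named exactly `Bar`:
+/// let _exact = NameToImport::exact_case_sensitive("Bar".to_string());
+/// // Also matches e.g. `format_args`, since `f`, `m`, `t` occur in that order:
+/// let _fuzzy = NameToImport::Fuzzy("fmt".to_string());
+/// ```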
+#[derive(Debug, Clone)]
+pub enum NameToImport {
+ /// Requires items with names that exactly match the given string; the `bool` indicates case sensitivity.
+ Exact(String, bool),
+ /// Requires items with names that case-insensitively contain all letters from the string,
+ /// in the same order, but not necessarily adjacent.
+ Fuzzy(String),
+}
+
+impl NameToImport {
+ pub fn exact_case_sensitive(s: String) -> NameToImport {
+ NameToImport::Exact(s, true)
+ }
+}
+
+impl NameToImport {
+ pub fn text(&self) -> &str {
+ match self {
+ NameToImport::Exact(text, _) => text.as_str(),
+ NameToImport::Fuzzy(text) => text.as_str(),
+ }
+ }
+}
+
+/// A struct to find imports in the project, given a certain name (or its part) and the context.
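+///
+/// An illustrative sketch; `sema` and the unresolved `path` are assumed context
+/// (e.g. from an assist or a completion request):
+///
+/// ```ignore
+/// if let Some(assets) = ImportAssets::for_exact_path(&path, &sema) {
+///     for located in assets.search_for_imports(&sema, PrefixKind::Plain) {
+///         // `located.import_path` is the `ModPath` to insert as a `use` item.
+///     }
+/// }
+/// ```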
+#[derive(Debug)]
+pub struct ImportAssets {
+ import_candidate: ImportCandidate,
+ candidate_node: SyntaxNode,
+ module_with_candidate: Module,
+}
+
+impl ImportAssets {
+ pub fn for_method_call(
+ method_call: &ast::MethodCallExpr,
+ sema: &Semantics<'_, RootDatabase>,
+ ) -> Option<Self> {
+ let candidate_node = method_call.syntax().clone();
+ Some(Self {
+ import_candidate: ImportCandidate::for_method_call(sema, method_call)?,
+ module_with_candidate: sema.scope(&candidate_node)?.module(),
+ candidate_node,
+ })
+ }
+
+ pub fn for_exact_path(
+ fully_qualified_path: &ast::Path,
+ sema: &Semantics<'_, RootDatabase>,
+ ) -> Option<Self> {
+ let candidate_node = fully_qualified_path.syntax().clone();
+ if let Some(use_tree) = candidate_node.ancestors().find_map(ast::UseTree::cast) {
+ // The path is inside a use tree; only continue if it is the first segment of a use statement.
+ if use_tree.syntax().parent().and_then(ast::Use::cast).is_none()
+ || fully_qualified_path.qualifier().is_some()
+ {
+ return None;
+ }
+ }
+ Some(Self {
+ import_candidate: ImportCandidate::for_regular_path(sema, fully_qualified_path)?,
+ module_with_candidate: sema.scope(&candidate_node)?.module(),
+ candidate_node,
+ })
+ }
+
+ pub fn for_ident_pat(sema: &Semantics<'_, RootDatabase>, pat: &ast::IdentPat) -> Option<Self> {
+ if !pat.is_simple_ident() {
+ return None;
+ }
+ let name = pat.name()?;
+ let candidate_node = pat.syntax().clone();
+ Some(Self {
+ import_candidate: ImportCandidate::for_name(sema, &name)?,
+ module_with_candidate: sema.scope(&candidate_node)?.module(),
+ candidate_node,
+ })
+ }
+
+ pub fn for_fuzzy_path(
+ module_with_candidate: Module,
+ qualifier: Option<ast::Path>,
+ fuzzy_name: String,
+ sema: &Semantics<'_, RootDatabase>,
+ candidate_node: SyntaxNode,
+ ) -> Option<Self> {
+ Some(Self {
+ import_candidate: ImportCandidate::for_fuzzy_path(qualifier, fuzzy_name, sema)?,
+ module_with_candidate,
+ candidate_node,
+ })
+ }
+
+ pub fn for_fuzzy_method_call(
+ module_with_method_call: Module,
+ receiver_ty: Type,
+ fuzzy_method_name: String,
+ candidate_node: SyntaxNode,
+ ) -> Option<Self> {
+ Some(Self {
+ import_candidate: ImportCandidate::TraitMethod(TraitImportCandidate {
+ receiver_ty,
+ assoc_item_name: NameToImport::Fuzzy(fuzzy_method_name),
+ }),
+ module_with_candidate: module_with_method_call,
+ candidate_node,
+ })
+ }
+}
+
+/// An import (not necessarily the only one) that corresponds to a certain given [`PathImportCandidate`].
+/// (The structure is not entirely correct, since there can be situations requiring two imports; see the FIXME below for details.)
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct LocatedImport {
+ /// The path to use in the `use` statement for a given candidate to be imported.
+ pub import_path: ModPath,
+ /// An item that will be imported with the import path given.
+ pub item_to_import: ItemInNs,
+ /// The path import candidate, resolved.
+ ///
+ /// Does not necessarily match the import:
+ /// for an associated constant from a trait, accessed as `some::path::SomeStruct::ASSOC_`,
+ /// the original item is the associated constant, but the import has to be the trait that
+ /// defines this constant.
+ pub original_item: ItemInNs,
+ /// A path of the original item.
+ pub original_path: Option<ModPath>,
+}
+
+impl LocatedImport {
+ pub fn new(
+ import_path: ModPath,
+ item_to_import: ItemInNs,
+ original_item: ItemInNs,
+ original_path: Option<ModPath>,
+ ) -> Self {
+ Self { import_path, item_to_import, original_item, original_path }
+ }
+}
+
+impl ImportAssets {
+ pub fn import_candidate(&self) -> &ImportCandidate {
+ &self.import_candidate
+ }
+
+ pub fn search_for_imports(
+ &self,
+ sema: &Semantics<'_, RootDatabase>,
+ prefix_kind: PrefixKind,
+ ) -> Vec<LocatedImport> {
+ let _p = profile::span("import_assets::search_for_imports");
+ self.search_for(sema, Some(prefix_kind))
+ }
+
+ /// This may return non-absolute paths if a part of the returned path is already imported into scope.
+ pub fn search_for_relative_paths(
+ &self,
+ sema: &Semantics<'_, RootDatabase>,
+ ) -> Vec<LocatedImport> {
+ let _p = profile::span("import_assets::search_for_relative_paths");
+ self.search_for(sema, None)
+ }
+
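+ /// Narrows a fuzzy path candidate's name down to an exact one with the given case
+ /// sensitivity; candidates that are not fuzzy paths are left untouched.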
+ pub fn path_fuzzy_name_to_exact(&mut self, case_sensitive: bool) {
+ if let ImportCandidate::Path(PathImportCandidate { name: to_import, .. }) =
+ &mut self.import_candidate
+ {
+ let name = match to_import {
+ NameToImport::Fuzzy(name) => std::mem::take(name),
+ _ => return,
+ };
+ *to_import = NameToImport::Exact(name, case_sensitive);
+ }
+ }
+
+ fn search_for(
+ &self,
+ sema: &Semantics<'_, RootDatabase>,
+ prefixed: Option<PrefixKind>,
+ ) -> Vec<LocatedImport> {
+ let _p = profile::span("import_assets::search_for");
+
+ let scope_definitions = self.scope_definitions(sema);
+ let mod_path = |item| {
+ get_mod_path(
+ sema.db,
+ item_for_path_search(sema.db, item)?,
+ &self.module_with_candidate,
+ prefixed,
+ )
+ };
+
+ let krate = self.module_with_candidate.krate();
+ let scope = match sema.scope(&self.candidate_node) {
+ Some(it) => it,
+ None => return Vec::new(),
+ };
+
+ match &self.import_candidate {
+ ImportCandidate::Path(path_candidate) => {
+ path_applicable_imports(sema, krate, path_candidate, mod_path)
+ }
+ ImportCandidate::TraitAssocItem(trait_candidate) => {
+ trait_applicable_items(sema, krate, &scope, trait_candidate, true, mod_path)
+ }
+ ImportCandidate::TraitMethod(trait_candidate) => {
+ trait_applicable_items(sema, krate, &scope, trait_candidate, false, mod_path)
+ }
+ }
+ .into_iter()
+ .filter(|import| import.import_path.len() > 1)
+ .filter(|import| !scope_definitions.contains(&ScopeDef::from(import.item_to_import)))
+ .sorted_by(|a, b| a.import_path.cmp(&b.import_path))
+ .collect()
+ }
+
+ fn scope_definitions(&self, sema: &Semantics<'_, RootDatabase>) -> FxHashSet<ScopeDef> {
+ let _p = profile::span("import_assets::scope_definitions");
+ let mut scope_definitions = FxHashSet::default();
+ if let Some(scope) = sema.scope(&self.candidate_node) {
+ scope.process_all_names(&mut |_, scope_def| {
+ scope_definitions.insert(scope_def);
+ });
+ }
+ scope_definitions
+ }
+}
+
+fn path_applicable_imports(
+ sema: &Semantics<'_, RootDatabase>,
+ current_crate: Crate,
+ path_candidate: &PathImportCandidate,
+ mod_path: impl Fn(ItemInNs) -> Option<ModPath> + Copy,
+) -> FxHashSet<LocatedImport> {
+ let _p = profile::span("import_assets::path_applicable_imports");
+
+ match &path_candidate.qualifier {
+ None => {
+ items_locator::items_with_name(
+ sema,
+ current_crate,
+ path_candidate.name.clone(),
+ // FIXME: we could look up assoc items by the input and propose those in completion,
+ // but that requires more preparation first:
+ // * store non-trait assoc items in import_map to fully enable this lookup
+ // * ensure that this does not degrade performance (benchmark it)
+ // * write more logic to check for the corresponding trait presence requirement (we're unable to flyimport multiple items right now)
+ // * improve the associated completion item matching and/or scoring to ensure no noisy completions appear
+ //
+ // see also an ignored test under FIXME comment in the qualify_path.rs module
+ AssocItemSearch::Exclude,
+ Some(DEFAULT_QUERY_SEARCH_LIMIT.inner()),
+ )
+ .filter_map(|item| {
+ let mod_path = mod_path(item)?;
+ Some(LocatedImport::new(mod_path.clone(), item, item, Some(mod_path)))
+ })
+ .collect()
+ }
+ Some(first_segment_unresolved) => {
+ let unresolved_qualifier =
+ path_to_string_stripping_turbo_fish(&first_segment_unresolved.full_qualifier);
+ let unresolved_first_segment = first_segment_unresolved.first_segment.text();
+ items_locator::items_with_name(
+ sema,
+ current_crate,
+ path_candidate.name.clone(),
+ AssocItemSearch::Include,
+ Some(DEFAULT_QUERY_SEARCH_LIMIT.inner()),
+ )
+ .filter_map(|item| {
+ import_for_item(
+ sema.db,
+ mod_path,
+ &unresolved_first_segment,
+ &unresolved_qualifier,
+ item,
+ )
+ })
+ .collect()
+ }
+ }
+}
+
+fn import_for_item(
+ db: &RootDatabase,
+ mod_path: impl Fn(ItemInNs) -> Option<ModPath>,
+ unresolved_first_segment: &str,
+ unresolved_qualifier: &str,
+ original_item: ItemInNs,
+) -> Option<LocatedImport> {
+ let _p = profile::span("import_assets::import_for_item");
+
+ let original_item_candidate = item_for_path_search(db, original_item)?;
+ let import_path_candidate = mod_path(original_item_candidate)?;
+ let import_path_string = import_path_candidate.to_string();
+
+ let expected_import_end = if item_as_assoc(db, original_item).is_some() {
+ unresolved_qualifier.to_string()
+ } else {
+ format!("{}::{}", unresolved_qualifier, item_name(db, original_item)?)
+ };
+ if !import_path_string.contains(unresolved_first_segment)
+ || !import_path_string.ends_with(&expected_import_end)
+ {
+ return None;
+ }
+
+ let segment_import =
+ find_import_for_segment(db, original_item_candidate, unresolved_first_segment)?;
+ let trait_item_to_import = item_as_assoc(db, original_item)
+ .and_then(|assoc| assoc.containing_trait(db))
+ .map(|trait_| ItemInNs::from(ModuleDef::from(trait_)));
+ Some(match (segment_import == original_item_candidate, trait_item_to_import) {
+ (true, Some(_)) => {
+ // FIXME we should be able to import both the trait and the segment,
+ // but it's unclear what to do with overlapping edits (merge imports?)
+ // especially in case of lazy completion edit resolutions.
+ return None;
+ }
+ (false, Some(trait_to_import)) => LocatedImport::new(
+ mod_path(trait_to_import)?,
+ trait_to_import,
+ original_item,
+ mod_path(original_item),
+ ),
+ (true, None) => LocatedImport::new(
+ import_path_candidate,
+ original_item_candidate,
+ original_item,
+ mod_path(original_item),
+ ),
+ (false, None) => LocatedImport::new(
+ mod_path(segment_import)?,
+ segment_import,
+ original_item,
+ mod_path(original_item),
+ ),
+ })
+}
+
+pub fn item_for_path_search(db: &RootDatabase, item: ItemInNs) -> Option<ItemInNs> {
+ Some(match item {
+ ItemInNs::Types(_) | ItemInNs::Values(_) => match item_as_assoc(db, item) {
+ Some(assoc_item) => match assoc_item.container(db) {
+ AssocItemContainer::Trait(trait_) => ItemInNs::from(ModuleDef::from(trait_)),
+ AssocItemContainer::Impl(impl_) => {
+ ItemInNs::from(ModuleDef::from(impl_.self_ty(db).as_adt()?))
+ }
+ },
+ None => item,
+ },
+ ItemInNs::Macros(_) => item,
+ })
+}
+
+fn find_import_for_segment(
+ db: &RootDatabase,
+ original_item: ItemInNs,
+ unresolved_first_segment: &str,
+) -> Option<ItemInNs> {
+ let segment_is_name = item_name(db, original_item)
+ .map(|name| name.to_smol_str() == unresolved_first_segment)
+ .unwrap_or(false);
+
+ Some(if segment_is_name {
+ original_item
+ } else {
+ let matching_module =
+ module_with_segment_name(db, unresolved_first_segment, original_item)?;
+ ItemInNs::from(ModuleDef::from(matching_module))
+ })
+}
+
+fn module_with_segment_name(
+ db: &RootDatabase,
+ segment_name: &str,
+ candidate: ItemInNs,
+) -> Option<Module> {
+ let mut current_module = match candidate {
+ ItemInNs::Types(module_def_id) => module_def_id.module(db),
+ ItemInNs::Values(module_def_id) => module_def_id.module(db),
+ ItemInNs::Macros(macro_def_id) => ModuleDef::from(macro_def_id).module(db),
+ };
+ while let Some(module) = current_module {
+ if let Some(module_name) = module.name(db) {
+ if module_name.to_smol_str() == segment_name {
+ return Some(module);
+ }
+ }
+ current_module = module.parent(db);
+ }
+ None
+}
+
+fn trait_applicable_items(
+ sema: &Semantics<'_, RootDatabase>,
+ current_crate: Crate,
+ scope: &SemanticsScope<'_>,
+ trait_candidate: &TraitImportCandidate,
+ trait_assoc_item: bool,
+ mod_path: impl Fn(ItemInNs) -> Option<ModPath>,
+) -> FxHashSet<LocatedImport> {
+ let _p = profile::span("import_assets::trait_applicable_items");
+
+ let db = sema.db;
+
+ let inherent_traits = trait_candidate.receiver_ty.applicable_inherent_traits(db);
+ let env_traits = trait_candidate.receiver_ty.env_traits(db);
+ let related_traits = inherent_traits.chain(env_traits).collect::<FxHashSet<_>>();
+
+ let mut required_assoc_items = FxHashSet::default();
+ let trait_candidates = items_locator::items_with_name(
+ sema,
+ current_crate,
+ trait_candidate.assoc_item_name.clone(),
+ AssocItemSearch::AssocItemsOnly,
+ Some(DEFAULT_QUERY_SEARCH_LIMIT.inner()),
+ )
+ .filter_map(|input| item_as_assoc(db, input))
+ .filter_map(|assoc| {
+ let assoc_item_trait = assoc.containing_trait(db)?;
+ if related_traits.contains(&assoc_item_trait) {
+ None
+ } else {
+ required_assoc_items.insert(assoc);
+ Some(assoc_item_trait.into())
+ }
+ })
+ .collect();
+
+ let mut located_imports = FxHashSet::default();
+
+ if trait_assoc_item {
+ trait_candidate.receiver_ty.iterate_path_candidates(
+ db,
+ scope,
+ &trait_candidates,
+ None,
+ None,
+ |assoc| {
+ if required_assoc_items.contains(&assoc) {
+ if let AssocItem::Function(f) = assoc {
+ if f.self_param(db).is_some() {
+ return None;
+ }
+ }
+ let located_trait = assoc.containing_trait(db)?;
+ let trait_item = ItemInNs::from(ModuleDef::from(located_trait));
+ let original_item = assoc_to_item(assoc);
+ located_imports.insert(LocatedImport::new(
+ mod_path(trait_item)?,
+ trait_item,
+ original_item,
+ mod_path(original_item),
+ ));
+ }
+ None::<()>
+ },
+ )
+ } else {
+ trait_candidate.receiver_ty.iterate_method_candidates(
+ db,
+ scope,
+ &trait_candidates,
+ None,
+ None,
+ |function| {
+ let assoc = function.as_assoc_item(db)?;
+ if required_assoc_items.contains(&assoc) {
+ let located_trait = assoc.containing_trait(db)?;
+ let trait_item = ItemInNs::from(ModuleDef::from(located_trait));
+ let original_item = assoc_to_item(assoc);
+ located_imports.insert(LocatedImport::new(
+ mod_path(trait_item)?,
+ trait_item,
+ original_item,
+ mod_path(original_item),
+ ));
+ }
+ None::<()>
+ },
+ )
+ };
+
+ located_imports
+}
+
+fn assoc_to_item(assoc: AssocItem) -> ItemInNs {
+ match assoc {
+ AssocItem::Function(f) => ItemInNs::from(ModuleDef::from(f)),
+ AssocItem::Const(c) => ItemInNs::from(ModuleDef::from(c)),
+ AssocItem::TypeAlias(t) => ItemInNs::from(ModuleDef::from(t)),
+ }
+}
+
+fn get_mod_path(
+ db: &RootDatabase,
+ item_to_search: ItemInNs,
+ module_with_candidate: &Module,
+ prefixed: Option<PrefixKind>,
+) -> Option<ModPath> {
+ if let Some(prefix_kind) = prefixed {
+ module_with_candidate.find_use_path_prefixed(db, item_to_search, prefix_kind)
+ } else {
+ module_with_candidate.find_use_path(db, item_to_search)
+ }
+}
+
+impl ImportCandidate {
+ fn for_method_call(
+ sema: &Semantics<'_, RootDatabase>,
+ method_call: &ast::MethodCallExpr,
+ ) -> Option<Self> {
+ match sema.resolve_method_call(method_call) {
+ Some(_) => None,
+ None => Some(Self::TraitMethod(TraitImportCandidate {
+ receiver_ty: sema.type_of_expr(&method_call.receiver()?)?.adjusted(),
+ assoc_item_name: NameToImport::exact_case_sensitive(
+ method_call.name_ref()?.to_string(),
+ ),
+ })),
+ }
+ }
+
+ fn for_regular_path(sema: &Semantics<'_, RootDatabase>, path: &ast::Path) -> Option<Self> {
+ if sema.resolve_path(path).is_some() {
+ return None;
+ }
+ path_import_candidate(
+ sema,
+ path.qualifier(),
+ NameToImport::exact_case_sensitive(path.segment()?.name_ref()?.to_string()),
+ )
+ }
+
+ fn for_name(sema: &Semantics<'_, RootDatabase>, name: &ast::Name) -> Option<Self> {
+ if sema
+ .scope(name.syntax())?
+ .speculative_resolve(&ast::make::ext::ident_path(&name.text()))
+ .is_some()
+ {
+ return None;
+ }
+ Some(ImportCandidate::Path(PathImportCandidate {
+ qualifier: None,
+ name: NameToImport::exact_case_sensitive(name.to_string()),
+ }))
+ }
+
+ fn for_fuzzy_path(
+ qualifier: Option<ast::Path>,
+ fuzzy_name: String,
+ sema: &Semantics<'_, RootDatabase>,
+ ) -> Option<Self> {
+ path_import_candidate(sema, qualifier, NameToImport::Fuzzy(fuzzy_name))
+ }
+}
+
+fn path_import_candidate(
+ sema: &Semantics<'_, RootDatabase>,
+ qualifier: Option<ast::Path>,
+ name: NameToImport,
+) -> Option<ImportCandidate> {
+ Some(match qualifier {
+ Some(qualifier) => match sema.resolve_path(&qualifier) {
+ None => {
+ let qualifier_start =
+ qualifier.syntax().descendants().find_map(ast::NameRef::cast)?;
+ let qualifier_start_path =
+ qualifier_start.syntax().ancestors().find_map(ast::Path::cast)?;
+ if sema.resolve_path(&qualifier_start_path).is_none() {
+ ImportCandidate::Path(PathImportCandidate {
+ qualifier: Some(FirstSegmentUnresolved {
+ first_segment: qualifier_start,
+ full_qualifier: qualifier,
+ }),
+ name,
+ })
+ } else {
+ return None;
+ }
+ }
+ Some(PathResolution::Def(ModuleDef::Adt(assoc_item_path))) => {
+ ImportCandidate::TraitAssocItem(TraitImportCandidate {
+ receiver_ty: assoc_item_path.ty(sema.db),
+ assoc_item_name: name,
+ })
+ }
+ Some(PathResolution::Def(ModuleDef::TypeAlias(alias))) => {
+ let ty = alias.ty(sema.db);
+ if ty.as_adt().is_some() {
+ ImportCandidate::TraitAssocItem(TraitImportCandidate {
+ receiver_ty: ty,
+ assoc_item_name: name,
+ })
+ } else {
+ return None;
+ }
+ }
+ Some(_) => return None,
+ },
+ None => ImportCandidate::Path(PathImportCandidate { qualifier: None, name }),
+ })
+}
+
+fn item_as_assoc(db: &RootDatabase, item: ItemInNs) -> Option<AssocItem> {
+ item.as_module_def().and_then(|module_def| module_def.as_assoc_item(db))
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use.rs b/src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use.rs
new file mode 100644
index 000000000..c14182279
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use.rs
@@ -0,0 +1,446 @@
+//! Handle syntactic aspects of inserting a new `use` item.
+#[cfg(test)]
+mod tests;
+
+use std::cmp::Ordering;
+
+use hir::Semantics;
+use syntax::{
+ algo,
+ ast::{self, make, AstNode, HasAttrs, HasModuleItem, HasVisibility, PathSegmentKind},
+ ted, Direction, NodeOrToken, SyntaxKind, SyntaxNode,
+};
+
+use crate::{
+ imports::merge_imports::{
+ common_prefix, eq_attrs, eq_visibility, try_merge_imports, use_tree_path_cmp, MergeBehavior,
+ },
+ RootDatabase,
+};
+
+pub use hir::PrefixKind;
+
+/// How imports should be grouped into use statements.
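+///
+/// An illustrative sketch of the effect when importing `foo::bar::Baz` and `foo::qux::Quux`:
+///
+/// ```ignore
+/// // Crate:    use foo::{bar::Baz, qux::Quux};
+/// // Module:   use foo::bar::Baz;
+/// //           use foo::qux::Quux;  // items from different modules stay separate
+/// // Item:     one plain `use` per imported item, never merged
+/// // Preserve: whatever structure already exists in the file is kept
+/// ```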
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum ImportGranularity {
+ /// Do not change the granularity of any imports and preserve the original structure written by the developer.
+ Preserve,
+ /// Merge imports from the same crate into a single use statement.
+ Crate,
+ /// Merge imports from the same module into a single use statement.
+ Module,
+ /// Flatten imports so that each has its own use statement.
+ Item,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct InsertUseConfig {
+ pub granularity: ImportGranularity,
+ pub enforce_granularity: bool,
+ pub prefix_kind: PrefixKind,
+ pub group: bool,
+ pub skip_glob_imports: bool,
+}
+
+#[derive(Debug, Clone)]
+pub enum ImportScope {
+ File(ast::SourceFile),
+ Module(ast::ItemList),
+ Block(ast::StmtList),
+}
+
+impl ImportScope {
+ // FIXME: Remove this?
+ #[cfg(test)]
+ fn from(syntax: SyntaxNode) -> Option<Self> {
+ use syntax::match_ast;
+ fn contains_cfg_attr(attrs: &dyn HasAttrs) -> bool {
+ attrs
+ .attrs()
+ .any(|attr| attr.as_simple_call().map_or(false, |(ident, _)| ident == "cfg"))
+ }
+ match_ast! {
+ match syntax {
+ ast::Module(module) => module.item_list().map(ImportScope::Module),
+ ast::SourceFile(file) => Some(ImportScope::File(file)),
+ ast::Fn(func) => contains_cfg_attr(&func).then(|| func.body().and_then(|it| it.stmt_list().map(ImportScope::Block))).flatten(),
+ ast::Const(konst) => contains_cfg_attr(&konst).then(|| match konst.body()? {
+ ast::Expr::BlockExpr(block) => Some(block),
+ _ => None,
+ }).flatten().and_then(|it| it.stmt_list().map(ImportScope::Block)),
+ ast::Static(statik) => contains_cfg_attr(&statik).then(|| match statik.body()? {
+ ast::Expr::BlockExpr(block) => Some(block),
+ _ => None,
+ }).flatten().and_then(|it| it.stmt_list().map(ImportScope::Block)),
+ _ => None,
+
+ }
+ }
+ }
+
+ /// Determines the containing syntax node in which to insert a `use` statement affecting `position`.
+ /// Returns the original source node inside attributes.
+ pub fn find_insert_use_container(
+ position: &SyntaxNode,
+ sema: &Semantics<'_, RootDatabase>,
+ ) -> Option<Self> {
+ fn contains_cfg_attr(attrs: &dyn HasAttrs) -> bool {
+ attrs
+ .attrs()
+ .any(|attr| attr.as_simple_call().map_or(false, |(ident, _)| ident == "cfg"))
+ }
+
+ // Walk up the ancestor tree, searching for a suitable node to do insertions on,
+ // with special handling for cfg-gated items, in which case we want to insert imports locally
+ // (or, FIXME: annotate inserted imports with the same cfg).
+ for syntax in sema.ancestors_with_macros(position.clone()) {
+ if let Some(file) = ast::SourceFile::cast(syntax.clone()) {
+ return Some(ImportScope::File(file));
+ } else if let Some(item) = ast::Item::cast(syntax) {
+ return match item {
+ ast::Item::Const(konst) if contains_cfg_attr(&konst) => {
+ // FIXME: Instead of bailing out with None, we should note down that
+ // this import needs an attribute added
+ match sema.original_ast_node(konst)?.body()? {
+ ast::Expr::BlockExpr(block) => block,
+ _ => return None,
+ }
+ .stmt_list()
+ .map(ImportScope::Block)
+ }
+ ast::Item::Fn(func) if contains_cfg_attr(&func) => {
+ // FIXME: Instead of bailing out with None, we should note down that
+ // this import needs an attribute added
+ sema.original_ast_node(func)?.body()?.stmt_list().map(ImportScope::Block)
+ }
+ ast::Item::Static(statik) if contains_cfg_attr(&statik) => {
+ // FIXME: Instead of bailing out with None, we should note down that
+ // this import needs an attribute added
+ match sema.original_ast_node(statik)?.body()? {
+ ast::Expr::BlockExpr(block) => block,
+ _ => return None,
+ }
+ .stmt_list()
+ .map(ImportScope::Block)
+ }
+ ast::Item::Module(module) => {
+ // early return is important here, if we can't find the original module
+ // in the input there is no way for us to insert an import anywhere.
+ sema.original_ast_node(module)?.item_list().map(ImportScope::Module)
+ }
+ _ => continue,
+ };
+ }
+ }
+ None
+ }
+
+ pub fn as_syntax_node(&self) -> &SyntaxNode {
+ match self {
+ ImportScope::File(file) => file.syntax(),
+ ImportScope::Module(item_list) => item_list.syntax(),
+ ImportScope::Block(block) => block.syntax(),
+ }
+ }
+
+ pub fn clone_for_update(&self) -> Self {
+ match self {
+ ImportScope::File(file) => ImportScope::File(file.clone_for_update()),
+ ImportScope::Module(item_list) => ImportScope::Module(item_list.clone_for_update()),
+ ImportScope::Block(block) => ImportScope::Block(block.clone_for_update()),
+ }
+ }
+}
+
+/// Insert an import path into the given file/node. If the effective merge behavior is `None`, no import merging is allowed to occur.
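+///
+/// An illustrative sketch of a call site; `position`, `sema` and `cfg` are assumed context, and the
+/// path is built with `make::path_from_text`:
+///
+/// ```ignore
+/// let scope = ImportScope::find_insert_use_container(&position, &sema)?.clone_for_update();
+/// insert_use(&scope, make::path_from_text("foo::bar::Baz"), &cfg);
+/// ```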
+pub fn insert_use(scope: &ImportScope, path: ast::Path, cfg: &InsertUseConfig) {
+ let _p = profile::span("insert_use");
+ let mut mb = match cfg.granularity {
+ ImportGranularity::Crate => Some(MergeBehavior::Crate),
+ ImportGranularity::Module => Some(MergeBehavior::Module),
+ ImportGranularity::Item | ImportGranularity::Preserve => None,
+ };
+ if !cfg.enforce_granularity {
+ let file_granularity = guess_granularity_from_scope(scope);
+ mb = match file_granularity {
+ ImportGranularityGuess::Unknown => mb,
+ ImportGranularityGuess::Item => None,
+ ImportGranularityGuess::Module => Some(MergeBehavior::Module),
+ ImportGranularityGuess::ModuleOrItem => mb.and(Some(MergeBehavior::Module)),
+ ImportGranularityGuess::Crate => Some(MergeBehavior::Crate),
+ ImportGranularityGuess::CrateOrModule => mb.or(Some(MergeBehavior::Crate)),
+ };
+ }
+
+ let use_item =
+ make::use_(None, make::use_tree(path.clone(), None, None, false)).clone_for_update();
+ // merge into existing imports if possible
+ if let Some(mb) = mb {
+ let filter = |it: &_| !(cfg.skip_glob_imports && ast::Use::is_simple_glob(it));
+ for existing_use in
+ scope.as_syntax_node().children().filter_map(ast::Use::cast).filter(filter)
+ {
+ if let Some(merged) = try_merge_imports(&existing_use, &use_item, mb) {
+ ted::replace(existing_use.syntax(), merged.syntax());
+ return;
+ }
+ }
+ }
+
+ // either we weren't allowed to merge, or there is no import that fits the merge conditions,
+ // so look for the place where we have to insert
+ insert_use_(scope, &path, cfg.group, use_item);
+}
+
+pub fn remove_path_if_in_use_stmt(path: &ast::Path) {
+ // FIXME: improve this
+ if path.parent_path().is_some() {
+ return;
+ }
+ if let Some(use_tree) = path.syntax().parent().and_then(ast::UseTree::cast) {
+ if use_tree.use_tree_list().is_some() || use_tree.star_token().is_some() {
+ return;
+ }
+ if let Some(use_) = use_tree.syntax().parent().and_then(ast::Use::cast) {
+ use_.remove();
+ return;
+ }
+ use_tree.remove();
+ }
+}
+
+#[derive(Eq, PartialEq, PartialOrd, Ord)]
+enum ImportGroup {
+ // the order here defines the order of new group inserts
+ Std,
+ ExternCrate,
+ ThisCrate,
+ ThisModule,
+ SuperModule,
+}
+
+impl ImportGroup {
+ fn new(path: &ast::Path) -> ImportGroup {
+ let default = ImportGroup::ExternCrate;
+
+ let first_segment = match path.first_segment() {
+ Some(it) => it,
+ None => return default,
+ };
+
+ let kind = first_segment.kind().unwrap_or(PathSegmentKind::SelfKw);
+ match kind {
+ PathSegmentKind::SelfKw => ImportGroup::ThisModule,
+ PathSegmentKind::SuperKw => ImportGroup::SuperModule,
+ PathSegmentKind::CrateKw => ImportGroup::ThisCrate,
+ PathSegmentKind::Name(name) => match name.text().as_str() {
+ "std" => ImportGroup::Std,
+ "core" => ImportGroup::Std,
+ _ => ImportGroup::ExternCrate,
+ },
+ // these aren't valid use paths, so fall back to something random
+ PathSegmentKind::SelfTypeKw => ImportGroup::ExternCrate,
+ PathSegmentKind::Type { .. } => ImportGroup::ExternCrate,
+ }
+ }
+}
+
+#[derive(PartialEq, PartialOrd, Debug, Clone, Copy)]
+enum ImportGranularityGuess {
+ Unknown,
+ Item,
+ Module,
+ ModuleOrItem,
+ Crate,
+ CrateOrModule,
+}
+
+fn guess_granularity_from_scope(scope: &ImportScope) -> ImportGranularityGuess {
+ // The idea is simple: check each import on its own, and each import together with its predecessor,
+ // for whether they fulfill a granularity criterion.
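+ // For example, a nested tree list such as `use foo::{bar::{A, B}};` implies crate
+ // granularity, while two adjacent imports differing only in their last segment,
+ // such as `use foo::bar::A;` and `use foo::bar::B;`, imply item granularity.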
+ let use_stmt = |item| match item {
+ ast::Item::Use(use_) => {
+ let use_tree = use_.use_tree()?;
+ Some((use_tree, use_.visibility(), use_.attrs()))
+ }
+ _ => None,
+ };
+ let mut use_stmts = match scope {
+ ImportScope::File(f) => f.items(),
+ ImportScope::Module(m) => m.items(),
+ ImportScope::Block(b) => b.items(),
+ }
+ .filter_map(use_stmt);
+ let mut res = ImportGranularityGuess::Unknown;
+ let (mut prev, mut prev_vis, mut prev_attrs) = match use_stmts.next() {
+ Some(it) => it,
+ None => return res,
+ };
+ loop {
+ if let Some(use_tree_list) = prev.use_tree_list() {
+ if use_tree_list.use_trees().any(|tree| tree.use_tree_list().is_some()) {
+ // Nested tree lists can only occur in crate style, or with no proper style being enforced in the file.
+ break ImportGranularityGuess::Crate;
+ } else {
+ // Could still be crate-style so continue looking.
+ res = ImportGranularityGuess::CrateOrModule;
+ }
+ }
+
+ let (curr, curr_vis, curr_attrs) = match use_stmts.next() {
+ Some(it) => it,
+ None => break res,
+ };
+ if eq_visibility(prev_vis, curr_vis.clone()) && eq_attrs(prev_attrs, curr_attrs.clone()) {
+ if let Some((prev_path, curr_path)) = prev.path().zip(curr.path()) {
+ if let Some((prev_prefix, _)) = common_prefix(&prev_path, &curr_path) {
+ if prev.use_tree_list().is_none() && curr.use_tree_list().is_none() {
+ let prefix_c = prev_prefix.qualifiers().count();
+ let curr_c = curr_path.qualifiers().count() - prefix_c;
+ let prev_c = prev_path.qualifiers().count() - prefix_c;
+ if curr_c == 1 && prev_c == 1 {
+ // Same prefix, only differing in the last segment, and no use tree lists, so this has to be item style.
+ break ImportGranularityGuess::Item;
+ } else {
+ // Same prefix and no use tree list but differs in more than one segment at the end. This might be module style still.
+ res = ImportGranularityGuess::ModuleOrItem;
+ }
+ } else {
+ // Same prefix with use tree lists; this has to be module style, as it
+ // can't be crate style since the trees wouldn't share a prefix then.
+ break ImportGranularityGuess::Module;
+ }
+ }
+ }
+ }
+ prev = curr;
+ prev_vis = curr_vis;
+ prev_attrs = curr_attrs;
+ }
+}
+
+fn insert_use_(
+ scope: &ImportScope,
+ insert_path: &ast::Path,
+ group_imports: bool,
+ use_item: ast::Use,
+) {
+ let scope_syntax = scope.as_syntax_node();
+ let group = ImportGroup::new(insert_path);
+ let path_node_iter = scope_syntax
+ .children()
+ .filter_map(|node| ast::Use::cast(node.clone()).zip(Some(node)))
+ .flat_map(|(use_, node)| {
+ let tree = use_.use_tree()?;
+ let path = tree.path()?;
+ let has_tl = tree.use_tree_list().is_some();
+ Some((path, has_tl, node))
+ });
+
+ if group_imports {
+ // Iterator that discards anything that's not in the required grouping.
+ // This implementation allows the user to rearrange their import groups, as this only takes the first group that fits.
+ let group_iter = path_node_iter
+ .clone()
+ .skip_while(|(path, ..)| ImportGroup::new(path) != group)
+ .take_while(|(path, ..)| ImportGroup::new(path) == group);
+
+ // Track the last element we iterated over; if this is still `None` after the iteration, we never iterated in the first place.
+ let mut last = None;
+ // find the element that would come directly after our new import
+ let post_insert: Option<(_, _, SyntaxNode)> = group_iter
+ .inspect(|(.., node)| last = Some(node.clone()))
+ .find(|&(ref path, has_tl, _)| {
+ use_tree_path_cmp(insert_path, false, path, has_tl) != Ordering::Greater
+ });
+
+ if let Some((.., node)) = post_insert {
+ cov_mark::hit!(insert_group);
+ // insert our import before that element
+ return ted::insert(ted::Position::before(node), use_item.syntax());
+ }
+ if let Some(node) = last {
+ cov_mark::hit!(insert_group_last);
+ // there is no element after our new import, so append it to the end of the group
+ return ted::insert(ted::Position::after(node), use_item.syntax());
+ }
+
+ // the group we were looking for actually doesn't exist, so insert a new group in the right position
+
+ let mut last = None;
+ // find the group that comes after where we want to insert
+ let post_group = path_node_iter
+ .inspect(|(.., node)| last = Some(node.clone()))
+ .find(|(p, ..)| ImportGroup::new(p) > group);
+ if let Some((.., node)) = post_group {
+ cov_mark::hit!(insert_group_new_group);
+ ted::insert(ted::Position::before(&node), use_item.syntax());
+ if let Some(node) = algo::non_trivia_sibling(node.into(), Direction::Prev) {
+ ted::insert(ted::Position::after(node), make::tokens::single_newline());
+ }
+ return;
+ }
+ // there is no such group, so append after the last one
+ if let Some(node) = last {
+ cov_mark::hit!(insert_group_no_group);
+ ted::insert(ted::Position::after(&node), use_item.syntax());
+ ted::insert(ted::Position::after(node), make::tokens::single_newline());
+ return;
+ }
+ } else {
+ // Grouping is disabled; if there are existing imports, append after the last one
+ if let Some((_, _, node)) = path_node_iter.last() {
+ cov_mark::hit!(insert_no_grouping_last);
+ ted::insert(ted::Position::after(node), use_item.syntax());
+ return;
+ }
+ }
+
+ let l_curly = match scope {
+ ImportScope::File(_) => None,
+ // don't insert the imports before the item list's opening curly brace
+ ImportScope::Module(item_list) => item_list.l_curly_token(),
+ // don't insert the imports before the block expr's opening curly brace
+ ImportScope::Block(block) => block.l_curly_token(),
+ };
+ // there are no imports in this file at all
+ // so put the import after all inner module attributes and possible license header comments
+ if let Some(last_inner_element) = scope_syntax
+ .children_with_tokens()
+ // skip the curly brace
+ .skip(l_curly.is_some() as usize)
+ .take_while(|child| match child {
+ NodeOrToken::Node(node) => is_inner_attribute(node.clone()),
+ NodeOrToken::Token(token) => {
+ [SyntaxKind::WHITESPACE, SyntaxKind::COMMENT, SyntaxKind::SHEBANG]
+ .contains(&token.kind())
+ }
+ })
+ .filter(|child| child.as_token().map_or(true, |t| t.kind() != SyntaxKind::WHITESPACE))
+ .last()
+ {
+ cov_mark::hit!(insert_empty_inner_attr);
+ ted::insert(ted::Position::after(&last_inner_element), use_item.syntax());
+ ted::insert(ted::Position::after(last_inner_element), make::tokens::single_newline());
+ } else {
+ match l_curly {
+ Some(b) => {
+ cov_mark::hit!(insert_empty_module);
+ ted::insert(ted::Position::after(&b), make::tokens::single_newline());
+ ted::insert(ted::Position::after(&b), use_item.syntax());
+ }
+ None => {
+ cov_mark::hit!(insert_empty_file);
+ ted::insert(
+ ted::Position::first_child_of(scope_syntax),
+ make::tokens::blank_line(),
+ );
+ ted::insert(ted::Position::first_child_of(scope_syntax), use_item.syntax());
+ }
+ }
+ }
+}
+
+fn is_inner_attribute(node: SyntaxNode) -> bool {
+ ast::Attr::cast(node).map(|attr| attr.kind()) == Some(ast::AttrKind::Inner)
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use/tests.rs b/src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use/tests.rs
new file mode 100644
index 000000000..59673af32
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/imports/insert_use/tests.rs
@@ -0,0 +1,1084 @@
+use base_db::fixture::WithFixture;
+use hir::PrefixKind;
+use stdx::trim_indent;
+use test_utils::{assert_eq_text, CURSOR_MARKER};
+
+use super::*;
+
+#[test]
+fn trailing_comment_in_empty_file() {
+ check(
+ "foo::bar",
+ r#"
+struct Struct;
+// 0 = 1
+"#,
+ r#"
+use foo::bar;
+
+struct Struct;
+// 0 = 1
+"#,
+ ImportGranularity::Crate,
+ );
+}
+
+#[test]
+fn respects_cfg_attr_fn() {
+ check(
+ r"bar::Bar",
+ r#"
+#[cfg(test)]
+fn foo() {$0}
+"#,
+ r#"
+#[cfg(test)]
+fn foo() {
+ use bar::Bar;
+}
+"#,
+ ImportGranularity::Crate,
+ );
+}
+
+#[test]
+fn respects_cfg_attr_const() {
+ check(
+ r"bar::Bar",
+ r#"
+#[cfg(test)]
+const FOO: Bar = {$0};
+"#,
+ r#"
+#[cfg(test)]
+const FOO: Bar = {
+ use bar::Bar;
+};
+"#,
+ ImportGranularity::Crate,
+ );
+}
+
+#[test]
+fn insert_skips_lone_glob_imports() {
+ check(
+ "use foo::baz::A",
+ r"
+use foo::bar::*;
+",
+ r"
+use foo::bar::*;
+use foo::baz::A;
+",
+ ImportGranularity::Crate,
+ );
+}
+
+#[test]
+fn insert_not_group() {
+ cov_mark::check!(insert_no_grouping_last);
+ check_with_config(
+ "use external_crate2::bar::A",
+ r"
+use std::bar::B;
+use external_crate::bar::A;
+use crate::bar::A;
+use self::bar::A;
+use super::bar::A;",
+ r"
+use std::bar::B;
+use external_crate::bar::A;
+use crate::bar::A;
+use self::bar::A;
+use super::bar::A;
+use external_crate2::bar::A;",
+ &InsertUseConfig {
+ granularity: ImportGranularity::Item,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: false,
+ skip_glob_imports: true,
+ },
+ );
+}
+
+#[test]
+fn insert_existing() {
+ check_crate("std::fs", "use std::fs;", "use std::fs;")
+}
+
+#[test]
+fn insert_start() {
+ check_none(
+ "std::bar::AA",
+ r"
+use std::bar::B;
+use std::bar::D;
+use std::bar::F;
+use std::bar::G;",
+ r"
+use std::bar::AA;
+use std::bar::B;
+use std::bar::D;
+use std::bar::F;
+use std::bar::G;",
+ )
+}
+
+#[test]
+fn insert_start_indent() {
+ check_none(
+ "std::bar::AA",
+ r"
+ use std::bar::B;
+ use std::bar::C;",
+ r"
+ use std::bar::AA;
+ use std::bar::B;
+ use std::bar::C;",
+ );
+}
+
+#[test]
+fn insert_middle() {
+ cov_mark::check!(insert_group);
+ check_none(
+ "std::bar::EE",
+ r"
+use std::bar::A;
+use std::bar::D;
+use std::bar::F;
+use std::bar::G;",
+ r"
+use std::bar::A;
+use std::bar::D;
+use std::bar::EE;
+use std::bar::F;
+use std::bar::G;",
+ )
+}
+
+#[test]
+fn insert_middle_indent() {
+ check_none(
+ "std::bar::EE",
+ r"
+ use std::bar::A;
+ use std::bar::D;
+ use std::bar::F;
+ use std::bar::G;",
+ r"
+ use std::bar::A;
+ use std::bar::D;
+ use std::bar::EE;
+ use std::bar::F;
+ use std::bar::G;",
+ )
+}
+
+#[test]
+fn insert_end() {
+ cov_mark::check!(insert_group_last);
+ check_none(
+ "std::bar::ZZ",
+ r"
+use std::bar::A;
+use std::bar::D;
+use std::bar::F;
+use std::bar::G;",
+ r"
+use std::bar::A;
+use std::bar::D;
+use std::bar::F;
+use std::bar::G;
+use std::bar::ZZ;",
+ )
+}
+
+#[test]
+fn insert_end_indent() {
+ check_none(
+ "std::bar::ZZ",
+ r"
+ use std::bar::A;
+ use std::bar::D;
+ use std::bar::F;
+ use std::bar::G;",
+ r"
+ use std::bar::A;
+ use std::bar::D;
+ use std::bar::F;
+ use std::bar::G;
+ use std::bar::ZZ;",
+ )
+}
+
+#[test]
+fn insert_middle_nested() {
+ check_none(
+ "std::bar::EE",
+ r"
+use std::bar::A;
+use std::bar::{D, Z}; // example of weird imports due to user
+use std::bar::F;
+use std::bar::G;",
+ r"
+use std::bar::A;
+use std::bar::EE;
+use std::bar::{D, Z}; // example of weird imports due to user
+use std::bar::F;
+use std::bar::G;",
+ )
+}
+
+#[test]
+fn insert_middle_groups() {
+ check_none(
+ "foo::bar::GG",
+ r"
+ use std::bar::A;
+ use std::bar::D;
+
+ use foo::bar::F;
+ use foo::bar::H;",
+ r"
+ use std::bar::A;
+ use std::bar::D;
+
+ use foo::bar::F;
+ use foo::bar::GG;
+ use foo::bar::H;",
+ )
+}
+
+#[test]
+fn insert_first_matching_group() {
+ check_none(
+ "foo::bar::GG",
+ r"
+ use foo::bar::A;
+ use foo::bar::D;
+
+ use std;
+
+ use foo::bar::F;
+ use foo::bar::H;",
+ r"
+ use foo::bar::A;
+ use foo::bar::D;
+ use foo::bar::GG;
+
+ use std;
+
+ use foo::bar::F;
+ use foo::bar::H;",
+ )
+}
+
+#[test]
+fn insert_missing_group_std() {
+ cov_mark::check!(insert_group_new_group);
+ check_none(
+ "std::fmt",
+ r"
+ use foo::bar::A;
+ use foo::bar::D;",
+ r"
+ use std::fmt;
+
+ use foo::bar::A;
+ use foo::bar::D;",
+ )
+}
+
+#[test]
+fn insert_missing_group_self() {
+ cov_mark::check!(insert_group_no_group);
+ check_none(
+ "self::fmt",
+ r"
+use foo::bar::A;
+use foo::bar::D;",
+ r"
+use foo::bar::A;
+use foo::bar::D;
+
+use self::fmt;",
+ )
+}
+
+#[test]
+fn insert_no_imports() {
+ check_crate(
+ "foo::bar",
+ "fn main() {}",
+ r"use foo::bar;
+
+fn main() {}",
+ )
+}
+
+#[test]
+fn insert_empty_file() {
+ cov_mark::check_count!(insert_empty_file, 2);
+
+ // Default configuration
+ // empty files will get two trailing newlines
+ // this is due to the test case insert_no_imports above
+ check_crate(
+ "foo::bar",
+ "",
+ r"use foo::bar;
+
+",
+ );
+
+ // "not group" configuration
+ check_with_config(
+ "use external_crate2::bar::A",
+ r"",
+ r"use external_crate2::bar::A;
+
+",
+ &InsertUseConfig {
+ granularity: ImportGranularity::Item,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: false,
+ skip_glob_imports: true,
+ },
+ );
+}
+
+#[test]
+fn insert_empty_module() {
+ cov_mark::check_count!(insert_empty_module, 2);
+
+ // Default configuration
+ check(
+ "foo::bar",
+ r"
+mod x {$0}
+",
+ r"
+mod x {
+ use foo::bar;
+}
+",
+ ImportGranularity::Item,
+ );
+
+ // "not group" configuration
+ check_with_config(
+ "foo::bar",
+ r"mod x {$0}",
+ r"mod x {
+ use foo::bar;
+}",
+ &InsertUseConfig {
+ granularity: ImportGranularity::Item,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: false,
+ skip_glob_imports: true,
+ },
+ );
+}
+
+#[test]
+fn insert_after_inner_attr() {
+ cov_mark::check_count!(insert_empty_inner_attr, 2);
+
+ // Default configuration
+ check_crate(
+ "foo::bar",
+ r"#![allow(unused_imports)]",
+ r"#![allow(unused_imports)]
+
+use foo::bar;",
+ );
+
+ // "not group" configuration
+ check_with_config(
+ "foo::bar",
+ r"#![allow(unused_imports)]",
+ r"#![allow(unused_imports)]
+
+use foo::bar;",
+ &InsertUseConfig {
+ granularity: ImportGranularity::Item,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: false,
+ skip_glob_imports: true,
+ },
+ );
+}
+
+#[test]
+fn insert_after_inner_attr2() {
+ check_crate(
+ "foo::bar",
+ r"#![allow(unused_imports)]
+
+#![no_std]
+fn main() {}",
+ r"#![allow(unused_imports)]
+
+#![no_std]
+
+use foo::bar;
+fn main() {}",
+ );
+}
+
+#[test]
+fn inserts_after_single_line_inner_comments() {
+ check_none(
+ "foo::bar::Baz",
+ "//! Single line inner comments do not allow any code before them.",
+ r#"//! Single line inner comments do not allow any code before them.
+
+use foo::bar::Baz;"#,
+ );
+ check_none(
+ "foo::bar::Baz",
+ r"mod foo {
+ //! Single line inner comments do not allow any code before them.
+$0
+}",
+ r"mod foo {
+ //! Single line inner comments do not allow any code before them.
+
+ use foo::bar::Baz;
+
+}",
+ );
+}
+
+#[test]
+fn inserts_after_single_line_comments() {
+ check_none(
+ "foo::bar::Baz",
+ "// Represents a possible license header and/or general module comments",
+ r#"// Represents a possible license header and/or general module comments
+
+use foo::bar::Baz;"#,
+ );
+}
+
+#[test]
+fn inserts_after_shebang() {
+ check_none(
+ "foo::bar::Baz",
+ "#!/usr/bin/env rust",
+ r#"#!/usr/bin/env rust
+
+use foo::bar::Baz;"#,
+ );
+}
+
+#[test]
+fn inserts_after_multiple_single_line_comments() {
+ check_none(
+ "foo::bar::Baz",
+ "// Represents a possible license header and/or general module comments
+// Second single-line comment
+// Third single-line comment",
+ r#"// Represents a possible license header and/or general module comments
+// Second single-line comment
+// Third single-line comment
+
+use foo::bar::Baz;"#,
+ );
+}
+
+#[test]
+fn inserts_before_single_line_item_comments() {
+ check_none(
+ "foo::bar::Baz",
+ r#"// Represents a comment about a function
+fn foo() {}"#,
+ r#"use foo::bar::Baz;
+
+// Represents a comment about a function
+fn foo() {}"#,
+ );
+}
+
+#[test]
+fn inserts_after_single_line_header_comments_and_before_item() {
+ check_none(
+ "foo::bar::Baz",
+ r#"// Represents a possible license header
+// Line two of possible license header
+
+fn foo() {}"#,
+ r#"// Represents a possible license header
+// Line two of possible license header
+
+use foo::bar::Baz;
+
+fn foo() {}"#,
+ );
+}
+
+#[test]
+fn inserts_after_multiline_inner_comments() {
+ check_none(
+ "foo::bar::Baz",
+ r#"/*! Multiline inner comments do not allow any code before them. */
+
+/*! Still an inner comment, cannot place any code before. */
+fn main() {}"#,
+ r#"/*! Multiline inner comments do not allow any code before them. */
+
+/*! Still an inner comment, cannot place any code before. */
+
+use foo::bar::Baz;
+fn main() {}"#,
+ )
+}
+
+#[test]
+fn inserts_after_all_inner_items() {
+ check_none(
+ "foo::bar::Baz",
+ r#"#![allow(unused_imports)]
+/*! Multiline line comment 2 */
+
+
+//! Single line comment 1
+#![no_std]
+//! Single line comment 2
+fn main() {}"#,
+ r#"#![allow(unused_imports)]
+/*! Multiline line comment 2 */
+
+
+//! Single line comment 1
+#![no_std]
+//! Single line comment 2
+
+use foo::bar::Baz;
+fn main() {}"#,
+ )
+}
+
+#[test]
+fn merge_groups() {
+ check_module("std::io", r"use std::fmt;", r"use std::{fmt, io};")
+}
+
+#[test]
+fn merge_groups_last() {
+ check_module(
+ "std::io",
+ r"use std::fmt::{Result, Display};",
+ r"use std::fmt::{Result, Display};
+use std::io;",
+ )
+}
+
+#[test]
+fn merge_last_into_self() {
+ check_module("foo::bar::baz", r"use foo::bar;", r"use foo::bar::{self, baz};");
+}
+
+#[test]
+fn merge_groups_full() {
+ check_crate(
+ "std::io",
+ r"use std::fmt::{Result, Display};",
+ r"use std::{fmt::{Result, Display}, io};",
+ )
+}
+
+#[test]
+fn merge_groups_long_full() {
+ check_crate("std::foo::bar::Baz", r"use std::foo::bar::Qux;", r"use std::foo::bar::{Qux, Baz};")
+}
+
+#[test]
+fn merge_groups_long_last() {
+ check_module(
+ "std::foo::bar::Baz",
+ r"use std::foo::bar::Qux;",
+ r"use std::foo::bar::{Qux, Baz};",
+ )
+}
+
+#[test]
+fn merge_groups_long_full_list() {
+ check_crate(
+ "std::foo::bar::Baz",
+ r"use std::foo::bar::{Qux, Quux};",
+ r"use std::foo::bar::{Qux, Quux, Baz};",
+ )
+}
+
+#[test]
+fn merge_groups_long_last_list() {
+ check_module(
+ "std::foo::bar::Baz",
+ r"use std::foo::bar::{Qux, Quux};",
+ r"use std::foo::bar::{Qux, Quux, Baz};",
+ )
+}
+
+#[test]
+fn merge_groups_long_full_nested() {
+ check_crate(
+ "std::foo::bar::Baz",
+ r"use std::foo::bar::{Qux, quux::{Fez, Fizz}};",
+ r"use std::foo::bar::{Qux, quux::{Fez, Fizz}, Baz};",
+ )
+}
+
+#[test]
+fn merge_groups_long_last_nested() {
+ check_module(
+ "std::foo::bar::Baz",
+ r"use std::foo::bar::{Qux, quux::{Fez, Fizz}};",
+ r"use std::foo::bar::Baz;
+use std::foo::bar::{Qux, quux::{Fez, Fizz}};",
+ )
+}
+
+#[test]
+fn merge_groups_full_nested_deep() {
+ check_crate(
+ "std::foo::bar::quux::Baz",
+ r"use std::foo::bar::{Qux, quux::{Fez, Fizz}};",
+ r"use std::foo::bar::{Qux, quux::{Fez, Fizz, Baz}};",
+ )
+}
+
+#[test]
+fn merge_groups_full_nested_long() {
+ check_crate(
+ "std::foo::bar::Baz",
+ r"use std::{foo::bar::Qux};",
+ r"use std::{foo::bar::{Qux, Baz}};",
+ );
+}
+
+#[test]
+fn merge_groups_last_nested_long() {
+ check_crate(
+ "std::foo::bar::Baz",
+ r"use std::{foo::bar::Qux};",
+ r"use std::{foo::bar::{Qux, Baz}};",
+ );
+}
+
+#[test]
+fn merge_groups_skip_pub() {
+ check_crate(
+ "std::io",
+ r"pub use std::fmt::{Result, Display};",
+ r"pub use std::fmt::{Result, Display};
+use std::io;",
+ )
+}
+
+#[test]
+fn merge_groups_skip_pub_crate() {
+ check_crate(
+ "std::io",
+ r"pub(crate) use std::fmt::{Result, Display};",
+ r"pub(crate) use std::fmt::{Result, Display};
+use std::io;",
+ )
+}
+
+#[test]
+fn merge_groups_skip_attributed() {
+ check_crate(
+ "std::io",
+ r#"
+#[cfg(feature = "gated")] use std::fmt::{Result, Display};
+"#,
+ r#"
+#[cfg(feature = "gated")] use std::fmt::{Result, Display};
+use std::io;
+"#,
+ )
+}
+
+#[test]
+fn split_out_merge() {
+ // FIXME: This is suboptimal, we want to get `use std::fmt::{self, Result}`
+ // instead.
+ check_module(
+ "std::fmt::Result",
+ r"use std::{fmt, io};",
+ r"use std::fmt::Result;
+use std::{fmt, io};",
+ )
+}
+
+#[test]
+fn merge_into_module_import() {
+ check_crate("std::fmt::Result", r"use std::{fmt, io};", r"use std::{fmt::{self, Result}, io};")
+}
+
+#[test]
+fn merge_groups_self() {
+ check_crate("std::fmt::Debug", r"use std::fmt;", r"use std::fmt::{self, Debug};")
+}
+
+#[test]
+fn merge_mod_into_glob() {
+ check_with_config(
+ "token::TokenKind",
+ r"use token::TokenKind::*;",
+ r"use token::TokenKind::{*, self};",
+ &InsertUseConfig {
+ granularity: ImportGranularity::Crate,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: false,
+ skip_glob_imports: false,
+ },
+ )
+ // FIXME: have it emit `use token::TokenKind::{self, *}`?
+}
+
+#[test]
+fn merge_self_glob() {
+ check_with_config(
+ "self",
+ r"use self::*;",
+ r"use self::{*, self};",
+ &InsertUseConfig {
+ granularity: ImportGranularity::Crate,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: false,
+ skip_glob_imports: false,
+ },
+ )
+ // FIXME: have it emit `use {self, *}`?
+}
+
+#[test]
+fn merge_glob() {
+ check_crate(
+ "syntax::SyntaxKind",
+ r"
+use syntax::{SyntaxKind::*};",
+ r"
+use syntax::{SyntaxKind::{*, self}};",
+ )
+}
+
+#[test]
+fn merge_glob_nested() {
+ check_crate(
+ "foo::bar::quux::Fez",
+ r"use foo::bar::{Baz, quux::*};",
+ r"use foo::bar::{Baz, quux::{*, Fez}};",
+ )
+}
+
+#[test]
+fn merge_nested_considers_first_segments() {
+ check_crate(
+ "hir_ty::display::write_bounds_like_dyn_trait",
+ r"use hir_ty::{autoderef, display::{HirDisplayError, HirFormatter}, method_resolution};",
+ r"use hir_ty::{autoderef, display::{HirDisplayError, HirFormatter, write_bounds_like_dyn_trait}, method_resolution};",
+ );
+}
+
+#[test]
+fn skip_merge_last_too_long() {
+ check_module(
+ "foo::bar",
+ r"use foo::bar::baz::Qux;",
+ r"use foo::bar;
+use foo::bar::baz::Qux;",
+ );
+}
+
+#[test]
+fn skip_merge_last_too_long2() {
+ check_module(
+ "foo::bar::baz::Qux",
+ r"use foo::bar;",
+ r"use foo::bar;
+use foo::bar::baz::Qux;",
+ );
+}
+
+#[test]
+fn insert_short_before_long() {
+ check_none(
+ "foo::bar",
+ r"use foo::bar::baz::Qux;",
+ r"use foo::bar;
+use foo::bar::baz::Qux;",
+ );
+}
+
+#[test]
+fn merge_last_fail() {
+ check_merge_only_fail(
+ r"use foo::bar::{baz::{Qux, Fez}};",
+ r"use foo::bar::{baaz::{Quux, Feez}};",
+ MergeBehavior::Module,
+ );
+}
+
+#[test]
+fn merge_last_fail1() {
+ check_merge_only_fail(
+ r"use foo::bar::{baz::{Qux, Fez}};",
+ r"use foo::bar::baaz::{Quux, Feez};",
+ MergeBehavior::Module,
+ );
+}
+
+#[test]
+fn merge_last_fail2() {
+ check_merge_only_fail(
+ r"use foo::bar::baz::{Qux, Fez};",
+ r"use foo::bar::{baaz::{Quux, Feez}};",
+ MergeBehavior::Module,
+ );
+}
+
+#[test]
+fn merge_last_fail3() {
+ check_merge_only_fail(
+ r"use foo::bar::baz::{Qux, Fez};",
+ r"use foo::bar::baaz::{Quux, Feez};",
+ MergeBehavior::Module,
+ );
+}
+
+#[test]
+fn guess_empty() {
+ check_guess("", ImportGranularityGuess::Unknown);
+}
+
+#[test]
+fn guess_single() {
+ check_guess(r"use foo::{baz::{qux, quux}, bar};", ImportGranularityGuess::Crate);
+ check_guess(r"use foo::bar;", ImportGranularityGuess::Unknown);
+ check_guess(r"use foo::bar::{baz, qux};", ImportGranularityGuess::CrateOrModule);
+}
+
+#[test]
+fn guess_unknown() {
+ check_guess(
+ r"
+use foo::bar::baz;
+use oof::rab::xuq;
+",
+ ImportGranularityGuess::Unknown,
+ );
+}
+
+#[test]
+fn guess_item() {
+ check_guess(
+ r"
+use foo::bar::baz;
+use foo::bar::qux;
+",
+ ImportGranularityGuess::Item,
+ );
+}
+
+#[test]
+fn guess_module_or_item() {
+ check_guess(
+ r"
+use foo::bar::Bar;
+use foo::qux;
+",
+ ImportGranularityGuess::ModuleOrItem,
+ );
+ check_guess(
+ r"
+use foo::bar::Bar;
+use foo::bar;
+",
+ ImportGranularityGuess::ModuleOrItem,
+ );
+}
+
+#[test]
+fn guess_module() {
+ check_guess(
+ r"
+use foo::bar::baz;
+use foo::bar::{qux, quux};
+",
+ ImportGranularityGuess::Module,
+ );
+ // this is a rather odd case, technically this file isn't following any style properly.
+ check_guess(
+ r"
+use foo::bar::baz;
+use foo::{baz::{qux, quux}, bar};
+",
+ ImportGranularityGuess::Module,
+ );
+ check_guess(
+ r"
+use foo::bar::Bar;
+use foo::baz::Baz;
+use foo::{Foo, Qux};
+",
+ ImportGranularityGuess::Module,
+ );
+}
+
+#[test]
+fn guess_crate_or_module() {
+ check_guess(
+ r"
+use foo::bar::baz;
+use oof::bar::{qux, quux};
+",
+ ImportGranularityGuess::CrateOrModule,
+ );
+}
+
+#[test]
+fn guess_crate() {
+ check_guess(
+ r"
+use frob::bar::baz;
+use foo::{baz::{qux, quux}, bar};
+",
+ ImportGranularityGuess::Crate,
+ );
+}
+
+#[test]
+fn guess_skips_differing_vis() {
+ check_guess(
+ r"
+use foo::bar::baz;
+pub use foo::bar::qux;
+",
+ ImportGranularityGuess::Unknown,
+ );
+}
+
+#[test]
+fn guess_skips_differing_attrs() {
+ check_guess(
+ r"
+pub use foo::bar::baz;
+#[doc(hidden)]
+pub use foo::bar::qux;
+",
+ ImportGranularityGuess::Unknown,
+ );
+}
+
+#[test]
+fn guess_grouping_matters() {
+ check_guess(
+ r"
+use foo::bar::baz;
+use oof::bar::baz;
+use foo::bar::qux;
+",
+ ImportGranularityGuess::Unknown,
+ );
+}
+
+fn check_with_config(
+ path: &str,
+ ra_fixture_before: &str,
+ ra_fixture_after: &str,
+ config: &InsertUseConfig,
+) {
+ let (db, file_id, pos) = if ra_fixture_before.contains(CURSOR_MARKER) {
+ let (db, file_id, range_or_offset) = RootDatabase::with_range_or_offset(ra_fixture_before);
+ (db, file_id, Some(range_or_offset))
+ } else {
+ let (db, file_id) = RootDatabase::with_single_file(ra_fixture_before);
+ (db, file_id, None)
+ };
+ let sema = &Semantics::new(&db);
+ let source_file = sema.parse(file_id);
+ let syntax = source_file.syntax().clone_for_update();
+ let file = pos
+ .and_then(|pos| syntax.token_at_offset(pos.expect_offset()).next()?.parent())
+ .and_then(|it| ImportScope::find_insert_use_container(&it, sema))
+ .or_else(|| ImportScope::from(syntax))
+ .unwrap();
+ let path = ast::SourceFile::parse(&format!("use {};", path))
+ .tree()
+ .syntax()
+ .descendants()
+ .find_map(ast::Path::cast)
+ .unwrap();
+
+ insert_use(&file, path, config);
+ let result = file.as_syntax_node().ancestors().last().unwrap().to_string();
+ assert_eq_text!(&trim_indent(ra_fixture_after), &result);
+}
+
+fn check(
+ path: &str,
+ ra_fixture_before: &str,
+ ra_fixture_after: &str,
+ granularity: ImportGranularity,
+) {
+ check_with_config(
+ path,
+ ra_fixture_before,
+ ra_fixture_after,
+ &InsertUseConfig {
+ granularity,
+ enforce_granularity: true,
+ prefix_kind: PrefixKind::Plain,
+ group: true,
+ skip_glob_imports: true,
+ },
+ )
+}
+
+fn check_crate(path: &str, ra_fixture_before: &str, ra_fixture_after: &str) {
+ check(path, ra_fixture_before, ra_fixture_after, ImportGranularity::Crate)
+}
+
+fn check_module(path: &str, ra_fixture_before: &str, ra_fixture_after: &str) {
+ check(path, ra_fixture_before, ra_fixture_after, ImportGranularity::Module)
+}
+
+fn check_none(path: &str, ra_fixture_before: &str, ra_fixture_after: &str) {
+ check(path, ra_fixture_before, ra_fixture_after, ImportGranularity::Item)
+}
+
+fn check_merge_only_fail(ra_fixture0: &str, ra_fixture1: &str, mb: MergeBehavior) {
+ let use0 = ast::SourceFile::parse(ra_fixture0)
+ .tree()
+ .syntax()
+ .descendants()
+ .find_map(ast::Use::cast)
+ .unwrap();
+
+ let use1 = ast::SourceFile::parse(ra_fixture1)
+ .tree()
+ .syntax()
+ .descendants()
+ .find_map(ast::Use::cast)
+ .unwrap();
+
+ let result = try_merge_imports(&use0, &use1, mb);
+ assert_eq!(result.map(|u| u.to_string()), None);
+}
+
+fn check_guess(ra_fixture: &str, expected: ImportGranularityGuess) {
+ let syntax = ast::SourceFile::parse(ra_fixture).tree().syntax().clone();
+ let file = ImportScope::from(syntax).unwrap();
+ assert_eq!(super::guess_granularity_from_scope(&file), expected);
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/imports/merge_imports.rs b/src/tools/rust-analyzer/crates/ide-db/src/imports/merge_imports.rs
new file mode 100644
index 000000000..7fb4b90e6
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/imports/merge_imports.rs
@@ -0,0 +1,295 @@
+//! Handle syntactic aspects of merging UseTrees.
+use std::cmp::Ordering;
+
+use itertools::{EitherOrBoth, Itertools};
+use syntax::{
+ ast::{self, AstNode, HasAttrs, HasVisibility, PathSegmentKind},
+ ted,
+};
+
+use crate::syntax_helpers::node_ext::vis_eq;
+
+/// What type of merges are allowed.
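+///
+/// For illustration (mirroring the behaviour exercised by the `insert_use` tests):
+/// inserting `std::io` next to `use std::fmt::{Result, Display};` merges into
+/// `use std::{fmt::{Result, Display}, io};` under `Crate`, whereas `Module` keeps
+/// the two imports as separate `use` items.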
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum MergeBehavior {
+ /// Merge imports from the same crate into a single use statement.
+ Crate,
+ /// Merge imports from the same module into a single use statement.
+ Module,
+}
+
+impl MergeBehavior {
+ fn is_tree_allowed(&self, tree: &ast::UseTree) -> bool {
+ match self {
+ MergeBehavior::Crate => true,
+ // only simple single segment paths are allowed
+ MergeBehavior::Module => {
+ tree.use_tree_list().is_none() && tree.path().map(path_len) <= Some(1)
+ }
+ }
+ }
+}
+
+/// Merge `rhs` into `lhs` keeping both intact.
+/// Returned AST is mutable.
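+///
+/// A minimal usage sketch (illustrative only, not a doctest; it reuses the
+/// parse-a-`use`-item pattern from this crate's tests):
+///
+/// ```ignore
+/// let parse_use = |s: &str| {
+///     ast::SourceFile::parse(s).tree().syntax().descendants().find_map(ast::Use::cast).unwrap()
+/// };
+/// let lhs = parse_use("use std::fmt::Debug;");
+/// let rhs = parse_use("use std::fmt::Display;");
+/// let merged = try_merge_imports(&lhs, &rhs, MergeBehavior::Crate).unwrap();
+/// // merged now renders as `use std::fmt::{Debug, Display};`
+/// ```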
+pub fn try_merge_imports(
+ lhs: &ast::Use,
+ rhs: &ast::Use,
+ merge_behavior: MergeBehavior,
+) -> Option<ast::Use> {
+ // don't merge imports with different visibilities
+ if !eq_visibility(lhs.visibility(), rhs.visibility()) {
+ return None;
+ }
+ if !eq_attrs(lhs.attrs(), rhs.attrs()) {
+ return None;
+ }
+
+ let lhs = lhs.clone_subtree().clone_for_update();
+ let rhs = rhs.clone_subtree().clone_for_update();
+ let lhs_tree = lhs.use_tree()?;
+ let rhs_tree = rhs.use_tree()?;
+ try_merge_trees_mut(&lhs_tree, &rhs_tree, merge_behavior)?;
+ Some(lhs)
+}
+
+/// Merge `rhs` into `lhs` keeping both intact.
+/// Returned AST is mutable.
+pub fn try_merge_trees(
+ lhs: &ast::UseTree,
+ rhs: &ast::UseTree,
+ merge: MergeBehavior,
+) -> Option<ast::UseTree> {
+ let lhs = lhs.clone_subtree().clone_for_update();
+ let rhs = rhs.clone_subtree().clone_for_update();
+ try_merge_trees_mut(&lhs, &rhs, merge)?;
+ Some(lhs)
+}
+
+fn try_merge_trees_mut(lhs: &ast::UseTree, rhs: &ast::UseTree, merge: MergeBehavior) -> Option<()> {
+ let lhs_path = lhs.path()?;
+ let rhs_path = rhs.path()?;
+
+ let (lhs_prefix, rhs_prefix) = common_prefix(&lhs_path, &rhs_path)?;
+ if !(lhs.is_simple_path()
+ && rhs.is_simple_path()
+ && lhs_path == lhs_prefix
+ && rhs_path == rhs_prefix)
+ {
+ lhs.split_prefix(&lhs_prefix);
+ rhs.split_prefix(&rhs_prefix);
+ }
+ recursive_merge(lhs, rhs, merge)
+}
+
+/// Recursively merges `rhs` into `lhs`.
+#[must_use]
+fn recursive_merge(lhs: &ast::UseTree, rhs: &ast::UseTree, merge: MergeBehavior) -> Option<()> {
+ let mut use_trees: Vec<ast::UseTree> = lhs
+ .use_tree_list()
+ .into_iter()
+ .flat_map(|list| list.use_trees())
+        // We use `Option` here to early-return from this function (this is not
+        // the same as a `filter` op).
+ .map(|tree| merge.is_tree_allowed(&tree).then(|| tree))
+ .collect::<Option<_>>()?;
+ use_trees.sort_unstable_by(|a, b| path_cmp_for_sort(a.path(), b.path()));
+ for rhs_t in rhs.use_tree_list().into_iter().flat_map(|list| list.use_trees()) {
+ if !merge.is_tree_allowed(&rhs_t) {
+ return None;
+ }
+ let rhs_path = rhs_t.path();
+
+ match use_trees
+ .binary_search_by(|lhs_t| path_cmp_bin_search(lhs_t.path(), rhs_path.as_ref()))
+ {
+ Ok(idx) => {
+ let lhs_t = &mut use_trees[idx];
+ let lhs_path = lhs_t.path()?;
+ let rhs_path = rhs_path?;
+ let (lhs_prefix, rhs_prefix) = common_prefix(&lhs_path, &rhs_path)?;
+ if lhs_prefix == lhs_path && rhs_prefix == rhs_path {
+ let tree_is_self = |tree: &ast::UseTree| {
+ tree.path().as_ref().map(path_is_self).unwrap_or(false)
+ };
+ // Check if only one of the two trees has a tree list, and
+ // whether that then contains `self` or not. If this is the
+ // case we can skip this iteration since the path without
+ // the list is already included in the other one via `self`.
+ let tree_contains_self = |tree: &ast::UseTree| {
+ tree.use_tree_list()
+ .map(|tree_list| tree_list.use_trees().any(|it| tree_is_self(&it)))
+ // Glob imports aren't part of the use-tree lists,
+ // so they need to be handled explicitly
+ .or_else(|| tree.star_token().map(|_| false))
+ };
+ match (tree_contains_self(lhs_t), tree_contains_self(&rhs_t)) {
+ (Some(true), None) => continue,
+ (None, Some(true)) => {
+ ted::replace(lhs_t.syntax(), rhs_t.syntax());
+ *lhs_t = rhs_t;
+ continue;
+ }
+ _ => (),
+ }
+
+ if lhs_t.is_simple_path() && rhs_t.is_simple_path() {
+ continue;
+ }
+ }
+ lhs_t.split_prefix(&lhs_prefix);
+ rhs_t.split_prefix(&rhs_prefix);
+ recursive_merge(lhs_t, &rhs_t, merge)?;
+ }
+ Err(_)
+ if merge == MergeBehavior::Module
+ && !use_trees.is_empty()
+ && rhs_t.use_tree_list().is_some() =>
+ {
+ return None
+ }
+ Err(idx) => {
+ use_trees.insert(idx, rhs_t.clone());
+ lhs.get_or_create_use_tree_list().add_use_tree(rhs_t);
+ }
+ }
+ }
+ Some(())
+}
+
+/// Traverses both paths until they differ, returning the common prefix of both.
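+///
+/// For example (a sketch; `ast::make::path_from_text` is assumed to be available
+/// from the `syntax` crate):
+///
+/// ```ignore
+/// let lhs = ast::make::path_from_text("foo::bar::baz");
+/// let rhs = ast::make::path_from_text("foo::bar::qux");
+/// let (lhs_prefix, rhs_prefix) = common_prefix(&lhs, &rhs).unwrap();
+/// assert_eq!(lhs_prefix.to_string(), "foo::bar");
+/// assert_eq!(rhs_prefix.to_string(), "foo::bar");
+/// ```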
+pub fn common_prefix(lhs: &ast::Path, rhs: &ast::Path) -> Option<(ast::Path, ast::Path)> {
+ let mut res = None;
+ let mut lhs_curr = lhs.first_qualifier_or_self();
+ let mut rhs_curr = rhs.first_qualifier_or_self();
+ loop {
+ match (lhs_curr.segment(), rhs_curr.segment()) {
+ (Some(lhs), Some(rhs)) if lhs.syntax().text() == rhs.syntax().text() => (),
+ _ => break res,
+ }
+ res = Some((lhs_curr.clone(), rhs_curr.clone()));
+
+ match lhs_curr.parent_path().zip(rhs_curr.parent_path()) {
+ Some((lhs, rhs)) => {
+ lhs_curr = lhs;
+ rhs_curr = rhs;
+ }
+ _ => break res,
+ }
+ }
+}
+
+/// Orders paths in the following way:
+/// the sole `self` token comes first; after that come uppercase identifiers, then lowercase identifiers.
+// FIXME: rustfmt sorts lowercase idents before uppercase, in general we want to have the same ordering rustfmt has
+// which is `self` and `super` first, then identifier imports with lowercase ones first, then glob imports and at last list imports.
+// Example foo::{self, foo, baz, Baz, Qux, *, {Bar}}
+fn path_cmp_for_sort(a: Option<ast::Path>, b: Option<ast::Path>) -> Ordering {
+ match (a, b) {
+ (None, None) => Ordering::Equal,
+ (None, Some(_)) => Ordering::Less,
+ (Some(_), None) => Ordering::Greater,
+ (Some(ref a), Some(ref b)) => match (path_is_self(a), path_is_self(b)) {
+ (true, true) => Ordering::Equal,
+ (true, false) => Ordering::Less,
+ (false, true) => Ordering::Greater,
+ (false, false) => path_cmp_short(a, b),
+ },
+ }
+}
+
+/// Path comparison func for binary searching for merging.
+fn path_cmp_bin_search(lhs: Option<ast::Path>, rhs: Option<&ast::Path>) -> Ordering {
+ match (lhs.as_ref().and_then(ast::Path::first_segment), rhs.and_then(ast::Path::first_segment))
+ {
+ (None, None) => Ordering::Equal,
+ (None, Some(_)) => Ordering::Less,
+ (Some(_), None) => Ordering::Greater,
+ (Some(ref a), Some(ref b)) => path_segment_cmp(a, b),
+ }
+}
+
+/// Short-circuiting comparison: if both paths are equal up to the point where one of them ends,
+/// they are considered equal.
+fn path_cmp_short(a: &ast::Path, b: &ast::Path) -> Ordering {
+ let a = a.segments();
+ let b = b.segments();
+ // cmp_by would be useful for us here but that is currently unstable
+    // cmp doesn't work due to the lifetimes on text's return type
+ a.zip(b)
+ .find_map(|(a, b)| match path_segment_cmp(&a, &b) {
+ Ordering::Equal => None,
+ ord => Some(ord),
+ })
+ .unwrap_or(Ordering::Equal)
+}
+
+/// Compares two paths. If one ends earlier than the other, the `has_tl` parameters decide which
+/// one is greater: a path that has a tree list should be considered greater, while one that just
+/// ends without a tree list should be considered less.
+pub(super) fn use_tree_path_cmp(
+ a: &ast::Path,
+ a_has_tl: bool,
+ b: &ast::Path,
+ b_has_tl: bool,
+) -> Ordering {
+ let a_segments = a.segments();
+ let b_segments = b.segments();
+ // cmp_by would be useful for us here but that is currently unstable
+    // cmp doesn't work due to the lifetimes on text's return type
+ a_segments
+ .zip_longest(b_segments)
+ .find_map(|zipped| match zipped {
+ EitherOrBoth::Both(ref a, ref b) => match path_segment_cmp(a, b) {
+ Ordering::Equal => None,
+ ord => Some(ord),
+ },
+ EitherOrBoth::Left(_) if !b_has_tl => Some(Ordering::Greater),
+ EitherOrBoth::Left(_) => Some(Ordering::Less),
+ EitherOrBoth::Right(_) if !a_has_tl => Some(Ordering::Less),
+ EitherOrBoth::Right(_) => Some(Ordering::Greater),
+ })
+ .unwrap_or(Ordering::Equal)
+}
+
+fn path_segment_cmp(a: &ast::PathSegment, b: &ast::PathSegment) -> Ordering {
+ let a = a.kind().and_then(|kind| match kind {
+ PathSegmentKind::Name(name_ref) => Some(name_ref),
+ _ => None,
+ });
+ let b = b.kind().and_then(|kind| match kind {
+ PathSegmentKind::Name(name_ref) => Some(name_ref),
+ _ => None,
+ });
+ a.as_ref().map(ast::NameRef::text).cmp(&b.as_ref().map(ast::NameRef::text))
+}
+
+pub fn eq_visibility(vis0: Option<ast::Visibility>, vis1: Option<ast::Visibility>) -> bool {
+ match (vis0, vis1) {
+ (None, None) => true,
+ (Some(vis0), Some(vis1)) => vis_eq(&vis0, &vis1),
+ _ => false,
+ }
+}
+
+pub fn eq_attrs(
+ attrs0: impl Iterator<Item = ast::Attr>,
+ attrs1: impl Iterator<Item = ast::Attr>,
+) -> bool {
+ // FIXME order of attributes should not matter
+ let attrs0 = attrs0
+ .flat_map(|attr| attr.syntax().descendants_with_tokens())
+ .flat_map(|it| it.into_token());
+ let attrs1 = attrs1
+ .flat_map(|attr| attr.syntax().descendants_with_tokens())
+ .flat_map(|it| it.into_token());
+ stdx::iter_eq_by(attrs0, attrs1, |tok, tok2| tok.text() == tok2.text())
+}
+
+fn path_is_self(path: &ast::Path) -> bool {
+ path.segment().and_then(|seg| seg.self_token()).is_some() && path.qualifier().is_none()
+}
+
+fn path_len(path: ast::Path) -> usize {
+ path.segments().count()
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/items_locator.rs b/src/tools/rust-analyzer/crates/ide-db/src/items_locator.rs
new file mode 100644
index 000000000..07a57c883
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/items_locator.rs
@@ -0,0 +1,151 @@
+//! This module has the functionality to search the project and its dependencies for a certain item,
+//! by its name and a few criteria.
+//! The main reason for this module to exist is that the project's items and its dependencies' items
+//! are located in different caches, with different APIs.
+use either::Either;
+use hir::{
+ import_map::{self, ImportKind},
+ symbols::FileSymbol,
+ AsAssocItem, Crate, ItemInNs, Semantics,
+};
+use limit::Limit;
+use syntax::{ast, AstNode, SyntaxKind::NAME};
+
+use crate::{
+ defs::{Definition, NameClass},
+ imports::import_assets::NameToImport,
+ symbol_index, RootDatabase,
+};
+
+/// A value to use when uncertain which limit to pick.
+pub static DEFAULT_QUERY_SEARCH_LIMIT: Limit = Limit::new(40);
+
+/// Three possible ways to search for the name in associated and/or other items.
+#[derive(Debug, Clone, Copy)]
+pub enum AssocItemSearch {
+ /// Search for the name in both associated and other items.
+ Include,
+ /// Search for the name in other items only.
+ Exclude,
+ /// Search for the name in the associated items only.
+ AssocItemsOnly,
+}
+
+/// Searches for importable items with the given name in the crate and its dependencies.
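+///
+/// A hypothetical call site (illustrative; `sema` and `krate` come from the caller):
+///
+/// ```ignore
+/// let candidates: Vec<ItemInNs> = items_with_name(
+///     &sema,
+///     krate,
+///     NameToImport::Exact("HashMap".to_string(), true),
+///     AssocItemSearch::Exclude,
+///     Some(DEFAULT_QUERY_SEARCH_LIMIT.inner()),
+/// )
+/// .collect();
+/// ```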
+pub fn items_with_name<'a>(
+ sema: &'a Semantics<'_, RootDatabase>,
+ krate: Crate,
+ name: NameToImport,
+ assoc_item_search: AssocItemSearch,
+ limit: Option<usize>,
+) -> impl Iterator<Item = ItemInNs> + 'a {
+ let _p = profile::span("items_with_name").detail(|| {
+ format!(
+ "Name: {}, crate: {:?}, assoc items: {:?}, limit: {:?}",
+ name.text(),
+ assoc_item_search,
+ krate.display_name(sema.db).map(|name| name.to_string()),
+ limit,
+ )
+ });
+
+ let (mut local_query, mut external_query) = match name {
+ NameToImport::Exact(exact_name, case_sensitive) => {
+ let mut local_query = symbol_index::Query::new(exact_name.clone());
+ local_query.exact();
+
+ let external_query = import_map::Query::new(exact_name)
+ .name_only()
+ .search_mode(import_map::SearchMode::Equals);
+
+ (
+ local_query,
+ if case_sensitive { external_query.case_sensitive() } else { external_query },
+ )
+ }
+ NameToImport::Fuzzy(fuzzy_search_string) => {
+ let mut local_query = symbol_index::Query::new(fuzzy_search_string.clone());
+
+ let mut external_query = import_map::Query::new(fuzzy_search_string.clone())
+ .search_mode(import_map::SearchMode::Fuzzy)
+ .name_only();
+ match assoc_item_search {
+ AssocItemSearch::Include => {}
+ AssocItemSearch::Exclude => {
+ external_query = external_query.exclude_import_kind(ImportKind::AssociatedItem);
+ }
+ AssocItemSearch::AssocItemsOnly => {
+ external_query = external_query.assoc_items_only();
+ }
+ }
+
+ if fuzzy_search_string.to_lowercase() != fuzzy_search_string {
+ local_query.case_sensitive();
+ external_query = external_query.case_sensitive();
+ }
+
+ (local_query, external_query)
+ }
+ };
+
+ if let Some(limit) = limit {
+ external_query = external_query.limit(limit);
+ local_query.limit(limit);
+ }
+
+ find_items(sema, krate, assoc_item_search, local_query, external_query)
+}
+
+fn find_items<'a>(
+ sema: &'a Semantics<'_, RootDatabase>,
+ krate: Crate,
+ assoc_item_search: AssocItemSearch,
+ local_query: symbol_index::Query,
+ external_query: import_map::Query,
+) -> impl Iterator<Item = ItemInNs> + 'a {
+ let _p = profile::span("find_items");
+ let db = sema.db;
+
+ let external_importables =
+ krate.query_external_importables(db, external_query).map(|external_importable| {
+ match external_importable {
+ Either::Left(module_def) => ItemInNs::from(module_def),
+ Either::Right(macro_def) => ItemInNs::from(macro_def),
+ }
+ });
+
+ // Query the local crate using the symbol index.
+ let local_results = symbol_index::crate_symbols(db, krate, local_query)
+ .into_iter()
+ .filter_map(move |local_candidate| get_name_definition(sema, &local_candidate))
+ .filter_map(|name_definition_to_import| match name_definition_to_import {
+ Definition::Macro(macro_def) => Some(ItemInNs::from(macro_def)),
+ def => <Option<_>>::from(def),
+ });
+
+ external_importables.chain(local_results).filter(move |&item| match assoc_item_search {
+ AssocItemSearch::Include => true,
+ AssocItemSearch::Exclude => !is_assoc_item(item, sema.db),
+ AssocItemSearch::AssocItemsOnly => is_assoc_item(item, sema.db),
+ })
+}
+
+fn get_name_definition(
+ sema: &Semantics<'_, RootDatabase>,
+ import_candidate: &FileSymbol,
+) -> Option<Definition> {
+ let _p = profile::span("get_name_definition");
+
+ let candidate_node = import_candidate.loc.syntax(sema)?;
+ let candidate_name_node = if candidate_node.kind() != NAME {
+ candidate_node.children().find(|it| it.kind() == NAME)?
+ } else {
+ candidate_node
+ };
+ let name = ast::Name::cast(candidate_name_node)?;
+ NameClass::classify(sema, &name)?.defined()
+}
+
+fn is_assoc_item(item: ItemInNs, db: &RootDatabase) -> bool {
+ item.as_module_def().and_then(|module_def| module_def.as_assoc_item(db)).is_some()
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/label.rs b/src/tools/rust-analyzer/crates/ide-db/src/label.rs
new file mode 100644
index 000000000..4b6d54b5e
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/label.rs
@@ -0,0 +1,48 @@
+//! See [`Label`]
+use std::fmt;
+
+/// A type to specify a UI label, like an entry in the list of assists. Enforces
+/// proper casing:
+///
+/// Frobnicate bar
+///
+/// Note the upper-case first letter and the absence of `.` at the end.
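+///
+/// For example (illustrative):
+///
+/// ```ignore
+/// let label = Label::new("Frobnicate bar".to_string());
+/// assert_eq!(label.to_string(), "Frobnicate bar");
+/// // `Label::new("frobnicate bar.".to_string())` would panic: lowercase start and trailing `.`.
+/// ```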
+#[derive(Clone)]
+pub struct Label(String);
+
+impl PartialEq<str> for Label {
+ fn eq(&self, other: &str) -> bool {
+ self.0 == other
+ }
+}
+
+impl PartialEq<&'_ str> for Label {
+ fn eq(&self, other: &&str) -> bool {
+ self == *other
+ }
+}
+
+impl From<Label> for String {
+ fn from(label: Label) -> String {
+ label.0
+ }
+}
+
+impl Label {
+ pub fn new(label: String) -> Label {
+ assert!(label.starts_with(char::is_uppercase) && !label.ends_with('.'));
+ Label(label)
+ }
+}
+
+impl fmt::Display for Label {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+impl fmt::Debug for Label {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/lib.rs b/src/tools/rust-analyzer/crates/ide-db/src/lib.rs
new file mode 100644
index 000000000..966bba616
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/lib.rs
@@ -0,0 +1,246 @@
+//! This crate defines the core data structure representing IDE state -- `RootDatabase`.
+//!
+//! It is mainly a `HirDatabase` for semantic analysis, plus a `SymbolsDatabase` for fuzzy search.
+
+#![warn(rust_2018_idioms, unused_lifetimes, semicolon_in_expressions_from_macros)]
+
+mod apply_change;
+
+pub mod active_parameter;
+pub mod assists;
+pub mod defs;
+pub mod famous_defs;
+pub mod helpers;
+pub mod items_locator;
+pub mod label;
+pub mod line_index;
+pub mod path_transform;
+pub mod rename;
+pub mod rust_doc;
+pub mod search;
+pub mod source_change;
+pub mod symbol_index;
+pub mod traits;
+pub mod ty_filter;
+pub mod use_trivial_contructor;
+
+pub mod imports {
+ pub mod import_assets;
+ pub mod insert_use;
+ pub mod merge_imports;
+}
+
+pub mod generated {
+ pub mod lints;
+}
+
+pub mod syntax_helpers {
+ pub mod node_ext;
+ pub mod insert_whitespace_into_node;
+ pub mod format_string;
+
+ pub use parser::LexedStr;
+}
+
+use std::{fmt, mem::ManuallyDrop, sync::Arc};
+
+use base_db::{
+ salsa::{self, Durability},
+ AnchoredPath, CrateId, FileId, FileLoader, FileLoaderDelegate, SourceDatabase, Upcast,
+};
+use hir::{
+ db::{AstDatabase, DefDatabase, HirDatabase},
+ symbols::FileSymbolKind,
+};
+
+use crate::{line_index::LineIndex, symbol_index::SymbolsDatabase};
+pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
+
+/// `base_db` is normally also needed in places where `ide_db` is used, so this re-export is for convenience.
+pub use base_db;
+
+pub type FxIndexSet<T> = indexmap::IndexSet<T, std::hash::BuildHasherDefault<rustc_hash::FxHasher>>;
+pub type FxIndexMap<K, V> =
+ indexmap::IndexMap<K, V, std::hash::BuildHasherDefault<rustc_hash::FxHasher>>;
+
+#[salsa::database(
+ base_db::SourceDatabaseExtStorage,
+ base_db::SourceDatabaseStorage,
+ hir::db::AstDatabaseStorage,
+ hir::db::DefDatabaseStorage,
+ hir::db::HirDatabaseStorage,
+ hir::db::InternDatabaseStorage,
+ LineIndexDatabaseStorage,
+ symbol_index::SymbolsDatabaseStorage
+)]
+pub struct RootDatabase {
+ // We use `ManuallyDrop` here because every codegen unit that contains a
+ // `&RootDatabase -> &dyn OtherDatabase` cast will instantiate its drop glue in the vtable,
+ // which duplicates `Weak::drop` and `Arc::drop` tens of thousands of times, which makes
+ // compile times of all `ide_*` and downstream crates suffer greatly.
+ storage: ManuallyDrop<salsa::Storage<RootDatabase>>,
+}
+
+impl Drop for RootDatabase {
+ fn drop(&mut self) {
+ unsafe { ManuallyDrop::drop(&mut self.storage) };
+ }
+}
+
+impl fmt::Debug for RootDatabase {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RootDatabase").finish()
+ }
+}
+
+impl Upcast<dyn AstDatabase> for RootDatabase {
+ fn upcast(&self) -> &(dyn AstDatabase + 'static) {
+ &*self
+ }
+}
+
+impl Upcast<dyn DefDatabase> for RootDatabase {
+ fn upcast(&self) -> &(dyn DefDatabase + 'static) {
+ &*self
+ }
+}
+
+impl Upcast<dyn HirDatabase> for RootDatabase {
+ fn upcast(&self) -> &(dyn HirDatabase + 'static) {
+ &*self
+ }
+}
+
+impl FileLoader for RootDatabase {
+ fn file_text(&self, file_id: FileId) -> Arc<String> {
+ FileLoaderDelegate(self).file_text(file_id)
+ }
+ fn resolve_path(&self, path: AnchoredPath<'_>) -> Option<FileId> {
+ FileLoaderDelegate(self).resolve_path(path)
+ }
+ fn relevant_crates(&self, file_id: FileId) -> Arc<FxHashSet<CrateId>> {
+ FileLoaderDelegate(self).relevant_crates(file_id)
+ }
+}
+
+impl salsa::Database for RootDatabase {}
+
+impl Default for RootDatabase {
+ fn default() -> RootDatabase {
+ RootDatabase::new(None)
+ }
+}
+
+impl RootDatabase {
+ pub fn new(lru_capacity: Option<usize>) -> RootDatabase {
+ let mut db = RootDatabase { storage: ManuallyDrop::new(salsa::Storage::default()) };
+ db.set_crate_graph_with_durability(Default::default(), Durability::HIGH);
+ db.set_local_roots_with_durability(Default::default(), Durability::HIGH);
+ db.set_library_roots_with_durability(Default::default(), Durability::HIGH);
+ db.set_enable_proc_attr_macros(false);
+ db.update_lru_capacity(lru_capacity);
+ db
+ }
+
+ pub fn update_lru_capacity(&mut self, lru_capacity: Option<usize>) {
+ let lru_capacity = lru_capacity.unwrap_or(base_db::DEFAULT_LRU_CAP);
+ base_db::ParseQuery.in_db_mut(self).set_lru_capacity(lru_capacity);
+ hir::db::ParseMacroExpansionQuery.in_db_mut(self).set_lru_capacity(lru_capacity);
+ hir::db::MacroExpandQuery.in_db_mut(self).set_lru_capacity(lru_capacity);
+ }
+}
+
+impl salsa::ParallelDatabase for RootDatabase {
+ fn snapshot(&self) -> salsa::Snapshot<RootDatabase> {
+ salsa::Snapshot::new(RootDatabase { storage: ManuallyDrop::new(self.storage.snapshot()) })
+ }
+}
+
+#[salsa::query_group(LineIndexDatabaseStorage)]
+pub trait LineIndexDatabase: base_db::SourceDatabase {
+ fn line_index(&self, file_id: FileId) -> Arc<LineIndex>;
+}
+
+fn line_index(db: &dyn LineIndexDatabase, file_id: FileId) -> Arc<LineIndex> {
+ let text = db.file_text(file_id);
+ Arc::new(LineIndex::new(&*text))
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub enum SymbolKind {
+ Attribute,
+ BuiltinAttr,
+ Const,
+ ConstParam,
+ Derive,
+ DeriveHelper,
+ Enum,
+ Field,
+ Function,
+ Impl,
+ Label,
+ LifetimeParam,
+ Local,
+ Macro,
+ Module,
+ SelfParam,
+ SelfType,
+ Static,
+ Struct,
+ ToolModule,
+ Trait,
+ TypeAlias,
+ TypeParam,
+ Union,
+ ValueParam,
+ Variant,
+}
+
+impl From<hir::MacroKind> for SymbolKind {
+ fn from(it: hir::MacroKind) -> Self {
+ match it {
+ hir::MacroKind::Declarative | hir::MacroKind::BuiltIn | hir::MacroKind::ProcMacro => {
+ SymbolKind::Macro
+ }
+ hir::MacroKind::Derive => SymbolKind::Derive,
+ hir::MacroKind::Attr => SymbolKind::Attribute,
+ }
+ }
+}
+
+impl From<FileSymbolKind> for SymbolKind {
+ fn from(it: FileSymbolKind) -> Self {
+ match it {
+ FileSymbolKind::Const => SymbolKind::Const,
+ FileSymbolKind::Enum => SymbolKind::Enum,
+ FileSymbolKind::Function => SymbolKind::Function,
+ FileSymbolKind::Macro => SymbolKind::Macro,
+ FileSymbolKind::Module => SymbolKind::Module,
+ FileSymbolKind::Static => SymbolKind::Static,
+ FileSymbolKind::Struct => SymbolKind::Struct,
+ FileSymbolKind::Trait => SymbolKind::Trait,
+ FileSymbolKind::TypeAlias => SymbolKind::TypeAlias,
+ FileSymbolKind::Union => SymbolKind::Union,
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct SnippetCap {
+ _private: (),
+}
+
+impl SnippetCap {
+ pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
+ if allow_snippets {
+ Some(SnippetCap { _private: () })
+ } else {
+ None
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ mod sourcegen_lints;
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs b/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs
new file mode 100644
index 000000000..68ad07ee8
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs
@@ -0,0 +1,300 @@
+//! `LineIndex` maps flat `TextSize` offsets into `(Line, Column)`
+//! representation.
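+//!
+//! A small example (mirroring the tests at the bottom of this file):
+//!
+//! ```ignore
+//! let index = LineIndex::new("hello\nworld");
+//! assert_eq!(index.line_col(7.into()), LineCol { line: 1, col: 1 });
+//! ```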
+use std::{iter, mem};
+
+use rustc_hash::FxHashMap;
+use syntax::{TextRange, TextSize};
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct LineIndex {
+    /// Offset of the beginning of each line, zero-based
+ pub(crate) newlines: Vec<TextSize>,
+ /// List of non-ASCII characters on each line
+ pub(crate) utf16_lines: FxHashMap<u32, Vec<Utf16Char>>,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct LineColUtf16 {
+ /// Zero-based
+ pub line: u32,
+ /// Zero-based
+ pub col: u32,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct LineCol {
+ /// Zero-based
+ pub line: u32,
+ /// Zero-based utf8 offset
+ pub col: u32,
+}
+
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub(crate) struct Utf16Char {
+ /// Start offset of a character inside a line, zero-based
+ pub(crate) start: TextSize,
+ /// End offset of a character inside a line, zero-based
+ pub(crate) end: TextSize,
+}
+
+impl Utf16Char {
+ /// Returns the length in 8-bit UTF-8 code units.
+ fn len(&self) -> TextSize {
+ self.end - self.start
+ }
+
+ /// Returns the length in 16-bit UTF-16 code units.
+ fn len_utf16(&self) -> usize {
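+        // A `char` that takes 4 bytes in UTF-8 lies outside the Basic Multilingual
+        // Plane and is encoded as a surrogate pair (2 code units) in UTF-16;
+        // everything else fits in a single UTF-16 code unit.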
+ if self.len() == TextSize::from(4) {
+ 2
+ } else {
+ 1
+ }
+ }
+}
+
+impl LineIndex {
+ pub fn new(text: &str) -> LineIndex {
+ let mut utf16_lines = FxHashMap::default();
+ let mut utf16_chars = Vec::new();
+
+ let mut newlines = vec![0.into()];
+ let mut curr_row @ mut curr_col = 0.into();
+ let mut line = 0;
+ for c in text.chars() {
+ let c_len = TextSize::of(c);
+ curr_row += c_len;
+ if c == '\n' {
+ newlines.push(curr_row);
+
+ // Save any utf-16 characters seen in the previous line
+ if !utf16_chars.is_empty() {
+ utf16_lines.insert(line, mem::take(&mut utf16_chars));
+ }
+
+ // Prepare for processing the next line
+ curr_col = 0.into();
+ line += 1;
+ continue;
+ }
+
+ if !c.is_ascii() {
+ utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + c_len });
+ }
+
+ curr_col += c_len;
+ }
+
+ // Save any utf-16 characters seen in the last line
+ if !utf16_chars.is_empty() {
+ utf16_lines.insert(line, utf16_chars);
+ }
+
+ LineIndex { newlines, utf16_lines }
+ }
+
+ pub fn line_col(&self, offset: TextSize) -> LineCol {
+ let line = self.newlines.partition_point(|&it| it <= offset) - 1;
+ let line_start_offset = self.newlines[line];
+ let col = offset - line_start_offset;
+ LineCol { line: line as u32, col: col.into() }
+ }
+
+ pub fn offset(&self, line_col: LineCol) -> Option<TextSize> {
+ self.newlines
+ .get(line_col.line as usize)
+ .map(|offset| offset + TextSize::from(line_col.col))
+ }
+
+ pub fn to_utf16(&self, line_col: LineCol) -> LineColUtf16 {
+ let col = self.utf8_to_utf16_col(line_col.line, line_col.col.into());
+ LineColUtf16 { line: line_col.line, col: col as u32 }
+ }
+
+ pub fn to_utf8(&self, line_col: LineColUtf16) -> LineCol {
+ let col = self.utf16_to_utf8_col(line_col.line, line_col.col);
+ LineCol { line: line_col.line, col: col.into() }
+ }
+
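+    /// Splits `range` into the per-line ranges it covers; for the text `"a\nbb\nccc\n"`,
+    /// the range `0..9` yields `0..2`, `2..5` and `5..9` (see `test_splitlines` below).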
+ pub fn lines(&self, range: TextRange) -> impl Iterator<Item = TextRange> + '_ {
+ let lo = self.newlines.partition_point(|&it| it < range.start());
+ let hi = self.newlines.partition_point(|&it| it <= range.end());
+ let all = iter::once(range.start())
+ .chain(self.newlines[lo..hi].iter().copied())
+ .chain(iter::once(range.end()));
+
+ all.clone()
+ .zip(all.skip(1))
+ .map(|(lo, hi)| TextRange::new(lo, hi))
+ .filter(|it| !it.is_empty())
+ }
+
+ fn utf8_to_utf16_col(&self, line: u32, col: TextSize) -> usize {
+ let mut res: usize = col.into();
+ if let Some(utf16_chars) = self.utf16_lines.get(&line) {
+ for c in utf16_chars {
+ if c.end <= col {
+ res -= usize::from(c.len()) - c.len_utf16();
+ } else {
+ // From here on, all utf16 characters come *after* the character we are mapping,
+ // so we don't need to take them into account
+ break;
+ }
+ }
+ }
+ res
+ }
+
+ fn utf16_to_utf8_col(&self, line: u32, mut col: u32) -> TextSize {
+ if let Some(utf16_chars) = self.utf16_lines.get(&line) {
+ for c in utf16_chars {
+ if col > u32::from(c.start) {
+ col += u32::from(c.len()) - c.len_utf16() as u32;
+ } else {
+ // From here on, all utf16 characters come *after* the character we are mapping,
+ // so we don't need to take them into account
+ break;
+ }
+ }
+ }
+
+ col.into()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_line_index() {
+ let text = "hello\nworld";
+ let table = [
+ (00, 0, 0),
+ (01, 0, 1),
+ (05, 0, 5),
+ (06, 1, 0),
+ (07, 1, 1),
+ (08, 1, 2),
+ (10, 1, 4),
+ (11, 1, 5),
+ (12, 1, 6),
+ ];
+
+ let index = LineIndex::new(text);
+ for &(offset, line, col) in &table {
+ assert_eq!(index.line_col(offset.into()), LineCol { line, col });
+ }
+
+ let text = "\nhello\nworld";
+ let table = [(0, 0, 0), (1, 1, 0), (2, 1, 1), (6, 1, 5), (7, 2, 0)];
+ let index = LineIndex::new(text);
+ for &(offset, line, col) in &table {
+ assert_eq!(index.line_col(offset.into()), LineCol { line, col });
+ }
+ }
+
+ #[test]
+ fn test_char_len() {
+ assert_eq!('メ'.len_utf8(), 3);
+ assert_eq!('メ'.len_utf16(), 1);
+ }
+
+ #[test]
+ fn test_empty_index() {
+ let col_index = LineIndex::new(
+ "
+const C: char = 'x';
+",
+ );
+ assert_eq!(col_index.utf16_lines.len(), 0);
+ }
+
+ #[test]
+ fn test_single_char() {
+ let col_index = LineIndex::new(
+ "
+const C: char = 'メ';
+",
+ );
+
+ assert_eq!(col_index.utf16_lines.len(), 1);
+ assert_eq!(col_index.utf16_lines[&1].len(), 1);
+ assert_eq!(col_index.utf16_lines[&1][0], Utf16Char { start: 17.into(), end: 20.into() });
+
+ // UTF-8 to UTF-16, no changes
+ assert_eq!(col_index.utf8_to_utf16_col(1, 15.into()), 15);
+
+ // UTF-8 to UTF-16
+ assert_eq!(col_index.utf8_to_utf16_col(1, 22.into()), 20);
+
+ // UTF-16 to UTF-8, no changes
+ assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15));
+
+ // UTF-16 to UTF-8
+ assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(21));
+
+ let col_index = LineIndex::new("a𐐏b");
+ assert_eq!(col_index.utf16_to_utf8_col(0, 3), TextSize::from(5));
+ }
+
+ #[test]
+ fn test_string() {
+ let col_index = LineIndex::new(
+ "
+const C: char = \"メ メ\";
+",
+ );
+
+ assert_eq!(col_index.utf16_lines.len(), 1);
+ assert_eq!(col_index.utf16_lines[&1].len(), 2);
+ assert_eq!(col_index.utf16_lines[&1][0], Utf16Char { start: 17.into(), end: 20.into() });
+ assert_eq!(col_index.utf16_lines[&1][1], Utf16Char { start: 21.into(), end: 24.into() });
+
+ // UTF-8 to UTF-16
+ assert_eq!(col_index.utf8_to_utf16_col(1, 15.into()), 15);
+
+ assert_eq!(col_index.utf8_to_utf16_col(1, 21.into()), 19);
+ assert_eq!(col_index.utf8_to_utf16_col(1, 25.into()), 21);
+
+ assert!(col_index.utf8_to_utf16_col(2, 15.into()) == 15);
+
+ // UTF-16 to UTF-8
+ assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15));
+
+ // メ UTF-8: 0xE3 0x83 0xA1, UTF-16: 0x30E1
+ assert_eq!(col_index.utf16_to_utf8_col(1, 17), TextSize::from(17)); // first メ at 17..20
+ assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from(20)); // space
+ assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(21)); // second メ at 21..24
+
+ assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from(15));
+ }
+
+ #[test]
+ fn test_splitlines() {
+ fn r(lo: u32, hi: u32) -> TextRange {
+ TextRange::new(lo.into(), hi.into())
+ }
+
+ let text = "a\nbb\nccc\n";
+ let line_index = LineIndex::new(text);
+
+ let actual = line_index.lines(r(0, 9)).collect::<Vec<_>>();
+ let expected = vec![r(0, 2), r(2, 5), r(5, 9)];
+ assert_eq!(actual, expected);
+
+ let text = "";
+ let line_index = LineIndex::new(text);
+
+ let actual = line_index.lines(r(0, 0)).collect::<Vec<_>>();
+ let expected = vec![];
+ assert_eq!(actual, expected);
+
+ let text = "\n";
+ let line_index = LineIndex::new(text);
+
+ let actual = line_index.lines(r(0, 1)).collect::<Vec<_>>();
+ let expected = vec![r(0, 1)];
+ assert_eq!(actual, expected)
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs b/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs
new file mode 100644
index 000000000..40af9e6fe
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs
@@ -0,0 +1,287 @@
+//! See [`PathTransform`].
+
+use crate::helpers::mod_path_to_ast;
+use either::Either;
+use hir::{AsAssocItem, HirDisplay, SemanticsScope};
+use rustc_hash::FxHashMap;
+use syntax::{
+ ast::{self, AstNode},
+ ted, SyntaxNode,
+};
+
+/// `PathTransform` substitutes paths in SyntaxNodes in bulk.
+///
+/// This is mostly useful for IDE code generation. If you paste some existing
+/// code into a new context (for example, to add method overrides to an `impl`
+/// block), you generally want to appropriately qualify the names, and sometimes
+/// you might want to substitute generic parameters as well:
+///
+/// ```
+/// mod x {
+/// pub struct A<V>;
+/// pub trait T<U> { fn foo(&self, _: U) -> A<U>; }
+/// }
+///
+/// mod y {
+/// use x::T;
+///
+/// impl T<()> for () {
+/// // If we invoke **Add Missing Members** here, we want to copy-paste `foo`.
+/// // But we want a slightly-modified version of it:
+/// fn foo(&self, _: ()) -> x::A<()> {}
+/// }
+/// }
+/// ```
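+///
+/// A hypothetical call site (illustrative; the scopes, trait and impl come from the caller):
+///
+/// ```ignore
+/// let transform = PathTransform::trait_impl(&target_scope, &source_scope, trait_, impl_def);
+/// transform.apply(copied_fn.syntax());
+/// ```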
+pub struct PathTransform<'a> {
+ generic_def: hir::GenericDef,
+ substs: Vec<ast::Type>,
+ target_scope: &'a SemanticsScope<'a>,
+ source_scope: &'a SemanticsScope<'a>,
+}
+
+impl<'a> PathTransform<'a> {
+ pub fn trait_impl(
+ target_scope: &'a SemanticsScope<'a>,
+ source_scope: &'a SemanticsScope<'a>,
+ trait_: hir::Trait,
+ impl_: ast::Impl,
+ ) -> PathTransform<'a> {
+ PathTransform {
+ source_scope,
+ target_scope,
+ generic_def: trait_.into(),
+ substs: get_syntactic_substs(impl_).unwrap_or_default(),
+ }
+ }
+
+ pub fn function_call(
+ target_scope: &'a SemanticsScope<'a>,
+ source_scope: &'a SemanticsScope<'a>,
+ function: hir::Function,
+ generic_arg_list: ast::GenericArgList,
+ ) -> PathTransform<'a> {
+ PathTransform {
+ source_scope,
+ target_scope,
+ generic_def: function.into(),
+ substs: get_type_args_from_arg_list(generic_arg_list).unwrap_or_default(),
+ }
+ }
+
+ pub fn apply(&self, syntax: &SyntaxNode) {
+ self.build_ctx().apply(syntax)
+ }
+
+ fn build_ctx(&self) -> Ctx<'a> {
+ let db = self.source_scope.db;
+ let target_module = self.target_scope.module();
+ let source_module = self.source_scope.module();
+ let skip = match self.generic_def {
+            // this is a trait impl, so we need to skip the first type parameter (the implicit `Self`) -- this is a bit hacky
+ hir::GenericDef::Trait(_) => 1,
+ _ => 0,
+ };
+ let substs_by_param: FxHashMap<_, _> = self
+ .generic_def
+ .type_params(db)
+ .into_iter()
+ .skip(skip)
+ // The actual list of trait type parameters may be longer than the one
+ // used in the `impl` block due to trailing default type parameters.
+            // For that case we extend the `substs` with `None`s so we
+ // can still hit those trailing values and check if they actually have
+ // a default type. If they do, go for that type from `hir` to `ast` so
+ // the resulting change can be applied correctly.
+ .zip(self.substs.iter().map(Some).chain(std::iter::repeat(None)))
+ .filter_map(|(k, v)| match k.split(db) {
+ Either::Left(_) => None,
+ Either::Right(t) => match v {
+ Some(v) => Some((k, v.clone())),
+ None => {
+ let default = t.default(db)?;
+ Some((
+ k,
+ ast::make::ty(
+ &default.display_source_code(db, source_module.into()).ok()?,
+ ),
+ ))
+ }
+ },
+ })
+ .collect();
+ Ctx { substs: substs_by_param, target_module, source_scope: self.source_scope }
+ }
+}
+
+struct Ctx<'a> {
+ substs: FxHashMap<hir::TypeOrConstParam, ast::Type>,
+ target_module: hir::Module,
+ source_scope: &'a SemanticsScope<'a>,
+}
+
+impl<'a> Ctx<'a> {
+ fn apply(&self, item: &SyntaxNode) {
+ // `transform_path` may update a node's parent and that would break the
+        // tree traversal. Thus all paths in the tree are collected into a vec
+        // first, so that the mutation cannot interfere with the traversal.
+ let paths = item
+ .preorder()
+ .filter_map(|event| match event {
+ syntax::WalkEvent::Enter(_) => None,
+ syntax::WalkEvent::Leave(node) => Some(node),
+ })
+ .filter_map(ast::Path::cast)
+ .collect::<Vec<_>>();
+
+ for path in paths {
+ self.transform_path(path);
+ }
+ }
+ fn transform_path(&self, path: ast::Path) -> Option<()> {
+ if path.qualifier().is_some() {
+ return None;
+ }
+ if path.segment().map_or(false, |s| {
+ s.param_list().is_some() || (s.self_token().is_some() && path.parent_path().is_none())
+ }) {
+ // don't try to qualify `Fn(Foo) -> Bar` paths, they are in prelude anyway
+            // don't try to qualify a sole `self` either; it is usually a local, but gets resolved as a module due to namespace clashing
+ return None;
+ }
+
+ let resolution = self.source_scope.speculative_resolve(&path)?;
+
+ match resolution {
+ hir::PathResolution::TypeParam(tp) => {
+ if let Some(subst) = self.substs.get(&tp.merge()) {
+ let parent = path.syntax().parent()?;
+ if let Some(parent) = ast::Path::cast(parent.clone()) {
+ // Path inside path means that there is an associated
+ // type/constant on the type parameter. It is necessary
+ // to fully qualify the type with `as Trait`. Even
+                        // though it might be unnecessary if `subst` is a generic
+                        // type, always fully qualifying the path is safer
+                        // because of potential clashes of associated types from
+                        // multiple traits.
+
+ let trait_ref = find_trait_for_assoc_item(
+ self.source_scope,
+ tp,
+ parent.segment()?.name_ref()?,
+ )
+ .and_then(|trait_ref| {
+ let found_path = self.target_module.find_use_path(
+ self.source_scope.db.upcast(),
+ hir::ModuleDef::Trait(trait_ref),
+ )?;
+ match ast::make::ty_path(mod_path_to_ast(&found_path)) {
+ ast::Type::PathType(path_ty) => Some(path_ty),
+ _ => None,
+ }
+ });
+
+ let segment = ast::make::path_segment_ty(subst.clone(), trait_ref);
+ let qualified =
+ ast::make::path_from_segments(std::iter::once(segment), false);
+ ted::replace(path.syntax(), qualified.clone_for_update().syntax());
+ } else if let Some(path_ty) = ast::PathType::cast(parent) {
+ ted::replace(
+ path_ty.syntax(),
+ subst.clone_subtree().clone_for_update().syntax(),
+ );
+ } else {
+ ted::replace(
+ path.syntax(),
+ subst.clone_subtree().clone_for_update().syntax(),
+ );
+ }
+ }
+ }
+ hir::PathResolution::Def(def) if def.as_assoc_item(self.source_scope.db).is_none() => {
+ if let hir::ModuleDef::Trait(_) = def {
+ if matches!(path.segment()?.kind()?, ast::PathSegmentKind::Type { .. }) {
+ // `speculative_resolve` resolves segments like `<T as
+ // Trait>` into `Trait`, but just the trait name should
+ // not be used as the replacement of the original
+ // segment.
+ return None;
+ }
+ }
+
+ let found_path =
+ self.target_module.find_use_path(self.source_scope.db.upcast(), def)?;
+ let res = mod_path_to_ast(&found_path).clone_for_update();
+ if let Some(args) = path.segment().and_then(|it| it.generic_arg_list()) {
+ if let Some(segment) = res.segment() {
+ let old = segment.get_or_create_generic_arg_list();
+ ted::replace(old.syntax(), args.clone_subtree().syntax().clone_for_update())
+ }
+ }
+ ted::replace(path.syntax(), res.syntax())
+ }
+ hir::PathResolution::Local(_)
+ | hir::PathResolution::ConstParam(_)
+ | hir::PathResolution::SelfType(_)
+ | hir::PathResolution::Def(_)
+ | hir::PathResolution::BuiltinAttr(_)
+ | hir::PathResolution::ToolModule(_)
+ | hir::PathResolution::DeriveHelper(_) => (),
+ }
+ Some(())
+ }
+}
+
+// FIXME: It would probably be nicer if we could get this via HIR (i.e. get the
+// trait ref, and then go from the types in the substs back to the syntax).
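+// Illustrative example: for `impl Trait<A, B> for S { .. }` this returns
+// `Some(vec![A, B])`; lifetime and const generic arguments are skipped by
+// `get_type_args_from_arg_list` below.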
+fn get_syntactic_substs(impl_def: ast::Impl) -> Option<Vec<ast::Type>> {
+ let target_trait = impl_def.trait_()?;
+ let path_type = match target_trait {
+ ast::Type::PathType(path) => path,
+ _ => return None,
+ };
+ let generic_arg_list = path_type.path()?.segment()?.generic_arg_list()?;
+
+ get_type_args_from_arg_list(generic_arg_list)
+}
+
+fn get_type_args_from_arg_list(generic_arg_list: ast::GenericArgList) -> Option<Vec<ast::Type>> {
+ let mut result = Vec::new();
+ for generic_arg in generic_arg_list.generic_args() {
+ if let ast::GenericArg::TypeArg(type_arg) = generic_arg {
+ result.push(type_arg.ty()?)
+ }
+ }
+
+ Some(result)
+}
+
+fn find_trait_for_assoc_item(
+ scope: &SemanticsScope<'_>,
+ type_param: hir::TypeParam,
+ assoc_item: ast::NameRef,
+) -> Option<hir::Trait> {
+ let db = scope.db;
+ let trait_bounds = type_param.trait_bounds(db);
+
+ let assoc_item_name = assoc_item.text();
+
+ for trait_ in trait_bounds {
+ let names = trait_.items(db).into_iter().filter_map(|item| match item {
+ hir::AssocItem::TypeAlias(ta) => Some(ta.name(db)),
+ hir::AssocItem::Const(cst) => cst.name(db),
+ _ => None,
+ });
+
+ for name in names {
+ if assoc_item_name.as_str() == name.as_text()?.as_str() {
+ // It is fine to return the first match because in case of
+ // multiple possibilities, the exact trait must be disambiguated
+                // in the definition of the trait being implemented, so this search
+ // should not be needed.
+ return Some(trait_);
+ }
+ }
+ }
+
+ None
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/rename.rs b/src/tools/rust-analyzer/crates/ide-db/src/rename.rs
new file mode 100644
index 000000000..517fe3f24
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/rename.rs
@@ -0,0 +1,540 @@
+//! Rename infrastructure for rust-analyzer. It is used primarily for the
+//! literal "rename" in the ide (look for tests there), but it is also available
+//! as a general-purpose service. For example, it is used by the fix for the
+//! "incorrect case" diagnostic.
+//!
+//! It leverages the [`crate::search`] functionality to find what needs to be
+//! renamed. The actual renames are tricky -- field shorthands need special
+//! attention, and, when renaming modules, you also want to rename files on the
+//! file system.
+//!
+//! Another can of worms are macros:
+//!
+//! ```ignore
+//! macro_rules! m { () => { fn f() {} } }
+//! m!();
+//! fn main() {
+//! f() // <- rename me
+//! }
+//! ```
+//!
+//! The correct behavior in such cases is probably to show a dialog to the user.
+//! Our current behavior is ¯\_(ツ)_/¯.
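+//!
+//! As a rough, illustrative sketch of the service-style usage (the `sema` and
+//! `def` values are assumed to be provided by the caller):
+//!
+//! ```ignore
+//! // `def` is an ide_db::defs::Definition, `sema` a hir::Semantics over RootDatabase.
+//! let source_change = def.rename(&sema, "new_name")?;
+//! // `source_change` now holds the text edits (and possibly file moves) to apply.
+//! ```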
+use std::fmt;
+
+use base_db::{AnchoredPathBuf, FileId, FileRange};
+use either::Either;
+use hir::{FieldSource, HasSource, InFile, ModuleSource, Semantics};
+use stdx::never;
+use syntax::{
+ ast::{self, HasName},
+ AstNode, SyntaxKind, TextRange, T,
+};
+use text_edit::{TextEdit, TextEditBuilder};
+
+use crate::{
+ defs::Definition,
+ search::FileReference,
+ source_change::{FileSystemEdit, SourceChange},
+ syntax_helpers::node_ext::expr_as_name_ref,
+ traits::convert_to_def_in_trait,
+ RootDatabase,
+};
+
+pub type Result<T, E = RenameError> = std::result::Result<T, E>;
+
+#[derive(Debug)]
+pub struct RenameError(pub String);
+
+impl fmt::Display for RenameError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+#[macro_export]
+macro_rules! _format_err {
+ ($fmt:expr) => { RenameError(format!($fmt)) };
+ ($fmt:expr, $($arg:tt)+) => { RenameError(format!($fmt, $($arg)+)) }
+}
+pub use _format_err as format_err;
+
+#[macro_export]
+macro_rules! _bail {
+ ($($tokens:tt)*) => { return Err(format_err!($($tokens)*)) }
+}
+pub use _bail as bail;
+
+impl Definition {
+ pub fn rename(
+ &self,
+ sema: &Semantics<'_, RootDatabase>,
+ new_name: &str,
+ ) -> Result<SourceChange> {
+ match *self {
+ Definition::Module(module) => rename_mod(sema, module, new_name),
+ Definition::BuiltinType(_) => {
+ bail!("Cannot rename builtin type")
+ }
+ Definition::SelfType(_) => bail!("Cannot rename `Self`"),
+ def => rename_reference(sema, def, new_name),
+ }
+ }
+
+ /// Textual range of the identifier which will change when renaming this
+    /// `Definition`. Note that some definitions, like builtin types, can't be
+ /// renamed.
+ pub fn range_for_rename(self, sema: &Semantics<'_, RootDatabase>) -> Option<FileRange> {
+ let res = match self {
+ Definition::Macro(mac) => {
+ let src = mac.source(sema.db)?;
+ let name = match &src.value {
+ Either::Left(it) => it.name()?,
+ Either::Right(it) => it.name()?,
+ };
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ }
+ Definition::Field(field) => {
+ let src = field.source(sema.db)?;
+ match &src.value {
+ FieldSource::Named(record_field) => {
+ let name = record_field.name()?;
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ }
+ FieldSource::Pos(_) => None,
+ }
+ }
+ Definition::Module(module) => {
+ let src = module.declaration_source(sema.db)?;
+ let name = src.value.name()?;
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ }
+ Definition::Function(it) => name_range(it, sema),
+ Definition::Adt(adt) => match adt {
+ hir::Adt::Struct(it) => name_range(it, sema),
+ hir::Adt::Union(it) => name_range(it, sema),
+ hir::Adt::Enum(it) => name_range(it, sema),
+ },
+ Definition::Variant(it) => name_range(it, sema),
+ Definition::Const(it) => name_range(it, sema),
+ Definition::Static(it) => name_range(it, sema),
+ Definition::Trait(it) => name_range(it, sema),
+ Definition::TypeAlias(it) => name_range(it, sema),
+ Definition::Local(local) => {
+ let src = local.source(sema.db);
+ let name = match &src.value {
+ Either::Left(bind_pat) => bind_pat.name()?,
+ Either::Right(_) => return None,
+ };
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ }
+ Definition::GenericParam(generic_param) => match generic_param {
+ hir::GenericParam::LifetimeParam(lifetime_param) => {
+ let src = lifetime_param.source(sema.db)?;
+ src.with_value(src.value.lifetime()?.syntax()).original_file_range_opt(sema.db)
+ }
+ _ => {
+ let x = match generic_param {
+ hir::GenericParam::TypeParam(it) => it.merge(),
+ hir::GenericParam::ConstParam(it) => it.merge(),
+ hir::GenericParam::LifetimeParam(_) => return None,
+ };
+ let src = x.source(sema.db)?;
+ let name = match &src.value {
+ Either::Left(x) => x.name()?,
+ Either::Right(_) => return None,
+ };
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ }
+ },
+ Definition::Label(label) => {
+ let src = label.source(sema.db);
+ let lifetime = src.value.lifetime()?;
+ src.with_value(lifetime.syntax()).original_file_range_opt(sema.db)
+ }
+ Definition::BuiltinType(_) => return None,
+ Definition::SelfType(_) => return None,
+ Definition::BuiltinAttr(_) => return None,
+ Definition::ToolModule(_) => return None,
+ // FIXME: This should be doable in theory
+ Definition::DeriveHelper(_) => return None,
+ };
+ return res;
+
+ fn name_range<D>(def: D, sema: &Semantics<'_, RootDatabase>) -> Option<FileRange>
+ where
+ D: HasSource,
+ D::Ast: ast::HasName,
+ {
+ let src = def.source(sema.db)?;
+ let name = src.value.name()?;
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ }
+ }
+}
+
+fn rename_mod(
+ sema: &Semantics<'_, RootDatabase>,
+ module: hir::Module,
+ new_name: &str,
+) -> Result<SourceChange> {
+ if IdentifierKind::classify(new_name)? != IdentifierKind::Ident {
+ bail!("Invalid name `{0}`: cannot rename module to {0}", new_name);
+ }
+
+ let mut source_change = SourceChange::default();
+
+ if module.is_crate_root(sema.db) {
+ return Ok(source_change);
+ }
+
+ let InFile { file_id, value: def_source } = module.definition_source(sema.db);
+ if let ModuleSource::SourceFile(..) = def_source {
+ let anchor = file_id.original_file(sema.db);
+
+ let is_mod_rs = module.is_mod_rs(sema.db);
+ let has_detached_child = module.children(sema.db).any(|child| !child.is_inline(sema.db));
+
+ // Module exists in a named file
+ if !is_mod_rs {
+ let path = format!("{}.rs", new_name);
+ let dst = AnchoredPathBuf { anchor, path };
+ source_change.push_file_system_edit(FileSystemEdit::MoveFile { src: anchor, dst })
+ }
+
+ // Rename the dir if:
+ // - Module source is in mod.rs
+ // - Module has submodules defined in separate files
+ let dir_paths = match (is_mod_rs, has_detached_child, module.name(sema.db)) {
+ // Go up one level since the anchor is inside the dir we're trying to rename
+ (true, _, Some(mod_name)) => {
+ Some((format!("../{}", mod_name), format!("../{}", new_name)))
+ }
+ // The anchor is on the same level as target dir
+ (false, true, Some(mod_name)) => Some((mod_name.to_string(), new_name.to_string())),
+ _ => None,
+ };
+
+ if let Some((src, dst)) = dir_paths {
+ let src = AnchoredPathBuf { anchor, path: src };
+ let dst = AnchoredPathBuf { anchor, path: dst };
+ source_change.push_file_system_edit(FileSystemEdit::MoveDir {
+ src,
+ src_id: anchor,
+ dst,
+ })
+ }
+ }
+
+ if let Some(src) = module.declaration_source(sema.db) {
+ let file_id = src.file_id.original_file(sema.db);
+ match src.value.name() {
+ Some(name) => {
+ if let Some(file_range) =
+ src.with_value(name.syntax()).original_file_range_opt(sema.db)
+ {
+ source_change.insert_source_edit(
+ file_id,
+ TextEdit::replace(file_range.range, new_name.to_string()),
+ )
+ };
+ }
+ _ => never!("Module source node is missing a name"),
+ }
+ }
+
+ let def = Definition::Module(module);
+ let usages = def.usages(sema).all();
+ let ref_edits = usages.iter().map(|(&file_id, references)| {
+ (file_id, source_edit_from_references(references, def, new_name))
+ });
+ source_change.extend(ref_edits);
+
+ Ok(source_change)
+}
+
+fn rename_reference(
+ sema: &Semantics<'_, RootDatabase>,
+ def: Definition,
+ new_name: &str,
+) -> Result<SourceChange> {
+ let ident_kind = IdentifierKind::classify(new_name)?;
+
+ if matches!(
+ def,
+ Definition::GenericParam(hir::GenericParam::LifetimeParam(_)) | Definition::Label(_)
+ ) {
+ match ident_kind {
+ IdentifierKind::Ident | IdentifierKind::Underscore => {
+ cov_mark::hit!(rename_not_a_lifetime_ident_ref);
+ bail!("Invalid name `{}`: not a lifetime identifier", new_name);
+ }
+ IdentifierKind::Lifetime => cov_mark::hit!(rename_lifetime),
+ }
+ } else {
+ match ident_kind {
+ IdentifierKind::Lifetime => {
+ cov_mark::hit!(rename_not_an_ident_ref);
+ bail!("Invalid name `{}`: not an identifier", new_name);
+ }
+ IdentifierKind::Ident => cov_mark::hit!(rename_non_local),
+ IdentifierKind::Underscore => (),
+ }
+ }
+
+ let def = convert_to_def_in_trait(sema.db, def);
+ let usages = def.usages(sema).all();
+
+ if !usages.is_empty() && ident_kind == IdentifierKind::Underscore {
+ cov_mark::hit!(rename_underscore_multiple);
+ bail!("Cannot rename reference to `_` as it is being referenced multiple times");
+ }
+ let mut source_change = SourceChange::default();
+ source_change.extend(usages.iter().map(|(&file_id, references)| {
+ (file_id, source_edit_from_references(references, def, new_name))
+ }));
+
+ let mut insert_def_edit = |def| {
+ let (file_id, edit) = source_edit_from_def(sema, def, new_name)?;
+ source_change.insert_source_edit(file_id, edit);
+ Ok(())
+ };
+ match def {
+ Definition::Local(l) => l
+ .associated_locals(sema.db)
+ .iter()
+ .try_for_each(|&local| insert_def_edit(Definition::Local(local))),
+ def => insert_def_edit(def),
+ }?;
+ Ok(source_change)
+}
+
+pub fn source_edit_from_references(
+ references: &[FileReference],
+ def: Definition,
+ new_name: &str,
+) -> TextEdit {
+ let mut edit = TextEdit::builder();
+ // macros can cause multiple refs to occur for the same text range, so keep track of what we have edited so far
+ let mut edited_ranges = Vec::new();
+ for &FileReference { range, ref name, .. } in references {
+ let name_range = name.syntax().text_range();
+ if name_range.len() != range.len() {
+ // This usage comes from a different token kind that was downmapped to a NameLike in a macro
+ // Renaming this will most likely break things syntax-wise
+ continue;
+ }
+ let has_emitted_edit = match name {
+ // if the ranges differ then the node is inside a macro call, we can't really attempt
+ // to make special rewrites like shorthand syntax and such, so just rename the node in
+ // the macro input
+ ast::NameLike::NameRef(name_ref) if name_range == range => {
+ source_edit_from_name_ref(&mut edit, name_ref, new_name, def)
+ }
+ ast::NameLike::Name(name) if name_range == range => {
+ source_edit_from_name(&mut edit, name, new_name)
+ }
+ _ => false,
+ };
+ if !has_emitted_edit {
+ if !edited_ranges.contains(&range.start()) {
+ edit.replace(range, new_name.to_string());
+ edited_ranges.push(range.start());
+ }
+ }
+ }
+
+ edit.finish()
+}
+
+fn source_edit_from_name(edit: &mut TextEditBuilder, name: &ast::Name, new_name: &str) -> bool {
+ if ast::RecordPatField::for_field_name(name).is_some() {
+ if let Some(ident_pat) = name.syntax().parent().and_then(ast::IdentPat::cast) {
+ cov_mark::hit!(rename_record_pat_field_name_split);
+ // Foo { ref mut field } -> Foo { new_name: ref mut field }
+ // ^ insert `new_name: `
+
+ // FIXME: instead of splitting the shorthand, recursively trigger a rename of the
+ // other name https://github.com/rust-lang/rust-analyzer/issues/6547
+ edit.insert(ident_pat.syntax().text_range().start(), format!("{}: ", new_name));
+ return true;
+ }
+ }
+
+ false
+}
+
+fn source_edit_from_name_ref(
+ edit: &mut TextEditBuilder,
+ name_ref: &ast::NameRef,
+ new_name: &str,
+ def: Definition,
+) -> bool {
+ if name_ref.super_token().is_some() {
+ return true;
+ }
+
+ if let Some(record_field) = ast::RecordExprField::for_name_ref(name_ref) {
+ let rcf_name_ref = record_field.name_ref();
+ let rcf_expr = record_field.expr();
+ match &(rcf_name_ref, rcf_expr.and_then(|it| expr_as_name_ref(&it))) {
+ // field: init-expr, check if we can use a field init shorthand
+ (Some(field_name), Some(init)) => {
+ if field_name == name_ref {
+ if init.text() == new_name {
+ cov_mark::hit!(test_rename_field_put_init_shorthand);
+ // Foo { field: local } -> Foo { local }
+ // ^^^^^^^ delete this
+
+ // same names, we can use a shorthand here instead.
+ // we do not want to erase attributes hence this range start
+ let s = field_name.syntax().text_range().start();
+ let e = init.syntax().text_range().start();
+ edit.delete(TextRange::new(s, e));
+ return true;
+ }
+ } else if init == name_ref {
+ if field_name.text() == new_name {
+ cov_mark::hit!(test_rename_local_put_init_shorthand);
+ // Foo { field: local } -> Foo { field }
+ // ^^^^^^^ delete this
+
+ // same names, we can use a shorthand here instead.
+ // we do not want to erase attributes hence this range start
+ let s = field_name.syntax().text_range().end();
+ let e = init.syntax().text_range().end();
+ edit.delete(TextRange::new(s, e));
+ return true;
+ }
+ }
+ }
+ // init shorthand
+ (None, Some(_)) if matches!(def, Definition::Field(_)) => {
+ cov_mark::hit!(test_rename_field_in_field_shorthand);
+ // Foo { field } -> Foo { new_name: field }
+ // ^ insert `new_name: `
+ let offset = name_ref.syntax().text_range().start();
+ edit.insert(offset, format!("{}: ", new_name));
+ return true;
+ }
+ (None, Some(_)) if matches!(def, Definition::Local(_)) => {
+ cov_mark::hit!(test_rename_local_in_field_shorthand);
+ // Foo { field } -> Foo { field: new_name }
+ // ^ insert `: new_name`
+ let offset = name_ref.syntax().text_range().end();
+ edit.insert(offset, format!(": {}", new_name));
+ return true;
+ }
+ _ => (),
+ }
+ } else if let Some(record_field) = ast::RecordPatField::for_field_name_ref(name_ref) {
+ let rcf_name_ref = record_field.name_ref();
+ let rcf_pat = record_field.pat();
+ match (rcf_name_ref, rcf_pat) {
+ // field: rename
+ (Some(field_name), Some(ast::Pat::IdentPat(pat)))
+ if field_name == *name_ref && pat.at_token().is_none() =>
+ {
+ // field name is being renamed
+ if let Some(name) = pat.name() {
+ if name.text() == new_name {
+ cov_mark::hit!(test_rename_field_put_init_shorthand_pat);
+ // Foo { field: ref mut local } -> Foo { ref mut field }
+ // ^^^^^^^ delete this
+ // ^^^^^ replace this with `field`
+
+                        // same names, we can use a shorthand here instead.
+ // we do not want to erase attributes hence this range start
+ let s = field_name.syntax().text_range().start();
+ let e = pat.syntax().text_range().start();
+ edit.delete(TextRange::new(s, e));
+ edit.replace(name.syntax().text_range(), new_name.to_string());
+ return true;
+ }
+ }
+ }
+ _ => (),
+ }
+ }
+ false
+}
+
+fn source_edit_from_def(
+ sema: &Semantics<'_, RootDatabase>,
+ def: Definition,
+ new_name: &str,
+) -> Result<(FileId, TextEdit)> {
+ let FileRange { file_id, range } = def
+ .range_for_rename(sema)
+ .ok_or_else(|| format_err!("No identifier available to rename"))?;
+
+ let mut edit = TextEdit::builder();
+ if let Definition::Local(local) = def {
+ if let Either::Left(pat) = local.source(sema.db).value {
+ // special cases required for renaming fields/locals in Record patterns
+ if let Some(pat_field) = pat.syntax().parent().and_then(ast::RecordPatField::cast) {
+ let name_range = pat.name().unwrap().syntax().text_range();
+ if let Some(name_ref) = pat_field.name_ref() {
+ if new_name == name_ref.text() && pat.at_token().is_none() {
+ // Foo { field: ref mut local } -> Foo { ref mut field }
+ // ^^^^^^ delete this
+ // ^^^^^ replace this with `field`
+ cov_mark::hit!(test_rename_local_put_init_shorthand_pat);
+ edit.delete(
+ name_ref
+ .syntax()
+ .text_range()
+ .cover_offset(pat.syntax().text_range().start()),
+ );
+ edit.replace(name_range, name_ref.text().to_string());
+ } else {
+                        // Foo { field: ref mut local @ local2 } -> Foo { field: ref mut new_name @ local2 }
+ // Foo { field: ref mut local } -> Foo { field: ref mut new_name }
+ // ^^^^^ replace this with `new_name`
+ edit.replace(name_range, new_name.to_string());
+ }
+ } else {
+ // Foo { ref mut field } -> Foo { field: ref mut new_name }
+ // ^ insert `field: `
+ // ^^^^^ replace this with `new_name`
+ edit.insert(
+ pat.syntax().text_range().start(),
+ format!("{}: ", pat_field.field_name().unwrap()),
+ );
+ edit.replace(name_range, new_name.to_string());
+ }
+ }
+ }
+ }
+ if edit.is_empty() {
+ edit.replace(range, new_name.to_string());
+ }
+ Ok((file_id, edit.finish()))
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum IdentifierKind {
+ Ident,
+ Lifetime,
+ Underscore,
+}
+
+impl IdentifierKind {
+ pub fn classify(new_name: &str) -> Result<IdentifierKind> {
+ match parser::LexedStr::single_token(new_name) {
+ Some(res) => match res {
+ (SyntaxKind::IDENT, _) => Ok(IdentifierKind::Ident),
+ (T![_], _) => Ok(IdentifierKind::Underscore),
+ (SyntaxKind::LIFETIME_IDENT, _) if new_name != "'static" && new_name != "'_" => {
+ Ok(IdentifierKind::Lifetime)
+ }
+ (SyntaxKind::LIFETIME_IDENT, _) => {
+ bail!("Invalid name `{}`: not a lifetime identifier", new_name)
+ }
+ (_, Some(syntax_error)) => bail!("Invalid name `{}`: {}", new_name, syntax_error),
+ (_, None) => bail!("Invalid name `{}`: not an identifier", new_name),
+ },
+ None => bail!("Invalid name `{}`: not an identifier", new_name),
+ }
+ }
+}
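+
+// For reference (derived from the match above): `classify("foo")` yields
+// `Ident`, `classify("'a")` yields `Lifetime`, `classify("_")` yields
+// `Underscore`, while `"'static"` and `"'_"` are rejected with an error.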
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/rust_doc.rs b/src/tools/rust-analyzer/crates/ide-db/src/rust_doc.rs
new file mode 100644
index 000000000..e27e23867
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/rust_doc.rs
@@ -0,0 +1,34 @@
+//! Rustdoc specific doc comment handling
+
+// stripped down version of https://github.com/rust-lang/rust/blob/392ba2ba1a7d6c542d2459fb8133bebf62a4a423/src/librustdoc/html/markdown.rs#L810-L933
+pub fn is_rust_fence(s: &str) -> bool {
+ let mut seen_rust_tags = false;
+ let mut seen_other_tags = false;
+
+ let tokens = s
+ .trim()
+ .split(|c| c == ',' || c == ' ' || c == '\t')
+ .map(str::trim)
+ .filter(|t| !t.is_empty());
+
+ for token in tokens {
+ match token {
+ "should_panic" | "no_run" | "ignore" | "allow_fail" => {
+ seen_rust_tags = !seen_other_tags
+ }
+ "rust" => seen_rust_tags = true,
+ "test_harness" | "compile_fail" => seen_rust_tags = !seen_other_tags || seen_rust_tags,
+ x if x.starts_with("edition") => {}
+ x if x.starts_with('E') && x.len() == 5 => {
+ if x[1..].parse::<u32>().is_ok() {
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
+ } else {
+ seen_other_tags = true;
+ }
+ }
+ _ => seen_other_tags = true,
+ }
+ }
+
+ !seen_other_tags || seen_rust_tags
+}
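+
+// A few illustrative inputs (derived from the rules above): `is_rust_fence("")`
+// and `is_rust_fence("rust,ignore")` are true, while `is_rust_fence("text")`
+// and `is_rust_fence("sh no_run")` are false.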
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/search.rs b/src/tools/rust-analyzer/crates/ide-db/src/search.rs
new file mode 100644
index 000000000..bd038cdaa
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/search.rs
@@ -0,0 +1,785 @@
+//! Implementation of find-usages functionality.
+//!
+//! It is based on the standard ide trick: first, we run a fast text search to
+//! get a super-set of matches. Then, we confirm each match using precise
+//! name resolution.
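+//!
+//! A hedged usage sketch (the `sema`, `def` and `file_id` values are assumed
+//! to come from the caller):
+//!
+//! ```ignore
+//! let usages = def
+//!     .usages(&sema)
+//!     .in_scope(SearchScope::single_file(file_id))
+//!     .all();
+//! for file_range in usages.file_ranges() {
+//!     // each `file_range` points at one textual reference to `def`
+//! }
+//! ```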
+
+use std::{convert::TryInto, mem, sync::Arc};
+
+use base_db::{FileId, FileRange, SourceDatabase, SourceDatabaseExt};
+use hir::{DefWithBody, HasAttrs, HasSource, InFile, ModuleSource, Semantics, Visibility};
+use once_cell::unsync::Lazy;
+use rustc_hash::FxHashMap;
+use syntax::{ast, match_ast, AstNode, TextRange, TextSize};
+
+use crate::{
+ defs::{Definition, NameClass, NameRefClass},
+ traits::{as_trait_assoc_def, convert_to_def_in_trait},
+ RootDatabase,
+};
+
+#[derive(Debug, Default, Clone)]
+pub struct UsageSearchResult {
+ pub references: FxHashMap<FileId, Vec<FileReference>>,
+}
+
+impl UsageSearchResult {
+ pub fn is_empty(&self) -> bool {
+ self.references.is_empty()
+ }
+
+ pub fn len(&self) -> usize {
+ self.references.len()
+ }
+
+ pub fn iter(&self) -> impl Iterator<Item = (&FileId, &[FileReference])> + '_ {
+ self.references.iter().map(|(file_id, refs)| (file_id, &**refs))
+ }
+
+ pub fn file_ranges(&self) -> impl Iterator<Item = FileRange> + '_ {
+ self.references.iter().flat_map(|(&file_id, refs)| {
+ refs.iter().map(move |&FileReference { range, .. }| FileRange { file_id, range })
+ })
+ }
+}
+
+impl IntoIterator for UsageSearchResult {
+ type Item = (FileId, Vec<FileReference>);
+ type IntoIter = <FxHashMap<FileId, Vec<FileReference>> as IntoIterator>::IntoIter;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.references.into_iter()
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct FileReference {
+ /// The range of the reference in the original file
+ pub range: TextRange,
+ /// The node of the reference in the (macro-)file
+ pub name: ast::NameLike,
+ pub category: Option<ReferenceCategory>,
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum ReferenceCategory {
+ // FIXME: Add this variant and delete the `retain_adt_literal_usages` function.
+ // Create
+ Write,
+ Read,
+ // FIXME: Some day should be able to search in doc comments. Would probably
+ // need to switch from enum to bitflags then?
+ // DocComment
+}
+
+/// Generally, `search_scope` returns files that might contain references for the element.
+/// For `pub(crate)` things it's a crate, for `pub` things it's a crate and its dependent crates.
+/// In some cases, the location of the references is known to within a `TextRange`,
+/// e.g. for things like local variables.
+#[derive(Clone, Debug)]
+pub struct SearchScope {
+ entries: FxHashMap<FileId, Option<TextRange>>,
+}
+
+impl SearchScope {
+ fn new(entries: FxHashMap<FileId, Option<TextRange>>) -> SearchScope {
+ SearchScope { entries }
+ }
+
+ /// Build a search scope spanning the entire crate graph of files.
+ fn crate_graph(db: &RootDatabase) -> SearchScope {
+ let mut entries = FxHashMap::default();
+
+ let graph = db.crate_graph();
+ for krate in graph.iter() {
+ let root_file = graph[krate].root_file_id;
+ let source_root_id = db.file_source_root(root_file);
+ let source_root = db.source_root(source_root_id);
+ entries.extend(source_root.iter().map(|id| (id, None)));
+ }
+ SearchScope { entries }
+ }
+
+ /// Build a search scope spanning all the reverse dependencies of the given crate.
+ fn reverse_dependencies(db: &RootDatabase, of: hir::Crate) -> SearchScope {
+ let mut entries = FxHashMap::default();
+ for rev_dep in of.transitive_reverse_dependencies(db) {
+ let root_file = rev_dep.root_file(db);
+ let source_root_id = db.file_source_root(root_file);
+ let source_root = db.source_root(source_root_id);
+ entries.extend(source_root.iter().map(|id| (id, None)));
+ }
+ SearchScope { entries }
+ }
+
+ /// Build a search scope spanning the given crate.
+ fn krate(db: &RootDatabase, of: hir::Crate) -> SearchScope {
+ let root_file = of.root_file(db);
+ let source_root_id = db.file_source_root(root_file);
+ let source_root = db.source_root(source_root_id);
+ SearchScope {
+ entries: source_root.iter().map(|id| (id, None)).collect::<FxHashMap<_, _>>(),
+ }
+ }
+
+ /// Build a search scope spanning the given module and all its submodules.
+ fn module_and_children(db: &RootDatabase, module: hir::Module) -> SearchScope {
+ let mut entries = FxHashMap::default();
+
+ let (file_id, range) = {
+ let InFile { file_id, value } = module.definition_source(db);
+ if let Some((file_id, call_source)) = file_id.original_call_node(db) {
+ (file_id, Some(call_source.text_range()))
+ } else {
+ (
+ file_id.original_file(db),
+ match value {
+ ModuleSource::SourceFile(_) => None,
+ ModuleSource::Module(it) => Some(it.syntax().text_range()),
+ ModuleSource::BlockExpr(it) => Some(it.syntax().text_range()),
+ },
+ )
+ }
+ };
+ entries.insert(file_id, range);
+
+ let mut to_visit: Vec<_> = module.children(db).collect();
+ while let Some(module) = to_visit.pop() {
+ if let InFile { file_id, value: ModuleSource::SourceFile(_) } =
+ module.definition_source(db)
+ {
+ entries.insert(file_id.original_file(db), None);
+ }
+ to_visit.extend(module.children(db));
+ }
+ SearchScope { entries }
+ }
+
+ /// Build an empty search scope.
+ pub fn empty() -> SearchScope {
+ SearchScope::new(FxHashMap::default())
+ }
+
+    /// Build a search scope spanning the given file.
+ pub fn single_file(file: FileId) -> SearchScope {
+ SearchScope::new(std::iter::once((file, None)).collect())
+ }
+
+    /// Build a search scope spanning the text range of the given file.
+ pub fn file_range(range: FileRange) -> SearchScope {
+ SearchScope::new(std::iter::once((range.file_id, Some(range.range))).collect())
+ }
+
+    /// Build a search scope spanning the given files.
+ pub fn files(files: &[FileId]) -> SearchScope {
+ SearchScope::new(files.iter().map(|f| (*f, None)).collect())
+ }
+
+ pub fn intersection(&self, other: &SearchScope) -> SearchScope {
+ let (mut small, mut large) = (&self.entries, &other.entries);
+ if small.len() > large.len() {
+ mem::swap(&mut small, &mut large)
+ }
+
+ let intersect_ranges =
+ |r1: Option<TextRange>, r2: Option<TextRange>| -> Option<Option<TextRange>> {
+ match (r1, r2) {
+ (None, r) | (r, None) => Some(r),
+ (Some(r1), Some(r2)) => r1.intersect(r2).map(Some),
+ }
+ };
+ let res = small
+ .iter()
+ .filter_map(|(&file_id, &r1)| {
+ let &r2 = large.get(&file_id)?;
+ let r = intersect_ranges(r1, r2)?;
+ Some((file_id, r))
+ })
+ .collect();
+
+ SearchScope::new(res)
+ }
+}
+
+impl IntoIterator for SearchScope {
+ type Item = (FileId, Option<TextRange>);
+ type IntoIter = std::collections::hash_map::IntoIter<FileId, Option<TextRange>>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.entries.into_iter()
+ }
+}
+
+impl Definition {
+ fn search_scope(&self, db: &RootDatabase) -> SearchScope {
+ let _p = profile::span("search_scope");
+
+ if let Definition::BuiltinType(_) = self {
+ return SearchScope::crate_graph(db);
+ }
+
+ // def is crate root
+ // FIXME: We don't do searches for crates currently, as a crate does not actually have a single name
+ if let &Definition::Module(module) = self {
+ if module.is_crate_root(db) {
+ return SearchScope::reverse_dependencies(db, module.krate());
+ }
+ }
+
+ let module = match self.module(db) {
+ Some(it) => it,
+ None => return SearchScope::empty(),
+ };
+ let InFile { file_id, value: module_source } = module.definition_source(db);
+ let file_id = file_id.original_file(db);
+
+ if let Definition::Local(var) = self {
+ let def = match var.parent(db) {
+ DefWithBody::Function(f) => f.source(db).map(|src| src.syntax().cloned()),
+ DefWithBody::Const(c) => c.source(db).map(|src| src.syntax().cloned()),
+ DefWithBody::Static(s) => s.source(db).map(|src| src.syntax().cloned()),
+ };
+ return match def {
+ Some(def) => SearchScope::file_range(def.as_ref().original_file_range(db)),
+ None => SearchScope::single_file(file_id),
+ };
+ }
+
+ if let Definition::SelfType(impl_) = self {
+ return match impl_.source(db).map(|src| src.syntax().cloned()) {
+ Some(def) => SearchScope::file_range(def.as_ref().original_file_range(db)),
+ None => SearchScope::single_file(file_id),
+ };
+ }
+
+ if let Definition::GenericParam(hir::GenericParam::LifetimeParam(param)) = self {
+ let def = match param.parent(db) {
+ hir::GenericDef::Function(it) => it.source(db).map(|src| src.syntax().cloned()),
+ hir::GenericDef::Adt(it) => it.source(db).map(|src| src.syntax().cloned()),
+ hir::GenericDef::Trait(it) => it.source(db).map(|src| src.syntax().cloned()),
+ hir::GenericDef::TypeAlias(it) => it.source(db).map(|src| src.syntax().cloned()),
+ hir::GenericDef::Impl(it) => it.source(db).map(|src| src.syntax().cloned()),
+ hir::GenericDef::Variant(it) => it.source(db).map(|src| src.syntax().cloned()),
+ hir::GenericDef::Const(it) => it.source(db).map(|src| src.syntax().cloned()),
+ };
+ return match def {
+ Some(def) => SearchScope::file_range(def.as_ref().original_file_range(db)),
+ None => SearchScope::single_file(file_id),
+ };
+ }
+
+ if let Definition::Macro(macro_def) = self {
+ return match macro_def.kind(db) {
+ hir::MacroKind::Declarative => {
+ if macro_def.attrs(db).by_key("macro_export").exists() {
+ SearchScope::reverse_dependencies(db, module.krate())
+ } else {
+ SearchScope::krate(db, module.krate())
+ }
+ }
+ hir::MacroKind::BuiltIn => SearchScope::crate_graph(db),
+ hir::MacroKind::Derive | hir::MacroKind::Attr | hir::MacroKind::ProcMacro => {
+ SearchScope::reverse_dependencies(db, module.krate())
+ }
+ };
+ }
+
+ if let Definition::DeriveHelper(_) = self {
+ return SearchScope::reverse_dependencies(db, module.krate());
+ }
+
+ let vis = self.visibility(db);
+ if let Some(Visibility::Public) = vis {
+ return SearchScope::reverse_dependencies(db, module.krate());
+ }
+ if let Some(Visibility::Module(module)) = vis {
+ return SearchScope::module_and_children(db, module.into());
+ }
+
+ let range = match module_source {
+ ModuleSource::Module(m) => Some(m.syntax().text_range()),
+ ModuleSource::BlockExpr(b) => Some(b.syntax().text_range()),
+ ModuleSource::SourceFile(_) => None,
+ };
+ match range {
+ Some(range) => SearchScope::file_range(FileRange { file_id, range }),
+ None => SearchScope::single_file(file_id),
+ }
+ }
+
+ pub fn usages<'a>(self, sema: &'a Semantics<'_, RootDatabase>) -> FindUsages<'a> {
+ FindUsages {
+ local_repr: match self {
+ Definition::Local(local) => Some(local.representative(sema.db)),
+ _ => None,
+ },
+ def: self,
+ trait_assoc_def: as_trait_assoc_def(sema.db, self),
+ sema,
+ scope: None,
+ include_self_kw_refs: None,
+ search_self_mod: false,
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct FindUsages<'a> {
+ def: Definition,
+ /// If def is an assoc item from a trait or trait impl, this is the corresponding item of the trait definition
+ trait_assoc_def: Option<Definition>,
+ sema: &'a Semantics<'a, RootDatabase>,
+ scope: Option<SearchScope>,
+ include_self_kw_refs: Option<hir::Type>,
+ local_repr: Option<hir::Local>,
+ search_self_mod: bool,
+}
+
+impl<'a> FindUsages<'a> {
+    /// Enable searching for `Self` when the definition is a type, or for `self` when it is a module.
+ pub fn include_self_refs(mut self) -> FindUsages<'a> {
+ self.include_self_kw_refs = def_to_ty(self.sema, &self.def);
+ self.search_self_mod = true;
+ self
+ }
+
+ /// Limit the search to a given [`SearchScope`].
+ pub fn in_scope(self, scope: SearchScope) -> FindUsages<'a> {
+ self.set_scope(Some(scope))
+ }
+
+ /// Limit the search to a given [`SearchScope`].
+ pub fn set_scope(mut self, scope: Option<SearchScope>) -> FindUsages<'a> {
+ assert!(self.scope.is_none());
+ self.scope = scope;
+ self
+ }
+
+ pub fn at_least_one(&self) -> bool {
+ let mut found = false;
+ self.search(&mut |_, _| {
+ found = true;
+ true
+ });
+ found
+ }
+
+ pub fn all(self) -> UsageSearchResult {
+ let mut res = UsageSearchResult::default();
+ self.search(&mut |file_id, reference| {
+ res.references.entry(file_id).or_default().push(reference);
+ false
+ });
+ res
+ }
+
+ fn search(&self, sink: &mut dyn FnMut(FileId, FileReference) -> bool) {
+ let _p = profile::span("FindUsages:search");
+ let sema = self.sema;
+
+ let search_scope = {
+ let base = self.trait_assoc_def.unwrap_or(self.def).search_scope(sema.db);
+ match &self.scope {
+ None => base,
+ Some(scope) => base.intersection(scope),
+ }
+ };
+
+ let name = match self.def {
+ // special case crate modules as these do not have a proper name
+ Definition::Module(module) if module.is_crate_root(self.sema.db) => {
+ // FIXME: This assumes the crate name is always equal to its display name when it really isn't
+ module
+ .krate()
+ .display_name(self.sema.db)
+ .map(|crate_name| crate_name.crate_name().as_smol_str().clone())
+ }
+ _ => {
+ let self_kw_refs = || {
+ self.include_self_kw_refs.as_ref().and_then(|ty| {
+ ty.as_adt()
+ .map(|adt| adt.name(self.sema.db))
+ .or_else(|| ty.as_builtin().map(|builtin| builtin.name()))
+ })
+ };
+ self.def.name(sema.db).or_else(self_kw_refs).map(|it| it.to_smol_str())
+ }
+ };
+ let name = match &name {
+ Some(s) => s.as_str(),
+ None => return,
+ };
+
+ // these can't be closures because rust infers the lifetimes wrong ...
+ fn match_indices<'a>(
+ text: &'a str,
+ name: &'a str,
+ search_range: TextRange,
+ ) -> impl Iterator<Item = TextSize> + 'a {
+ text.match_indices(name).filter_map(move |(idx, _)| {
+ let offset: TextSize = idx.try_into().unwrap();
+ if !search_range.contains_inclusive(offset) {
+ return None;
+ }
+ Some(offset)
+ })
+ }
+
+ fn scope_files<'a>(
+ sema: &'a Semantics<'_, RootDatabase>,
+ scope: &'a SearchScope,
+ ) -> impl Iterator<Item = (Arc<String>, FileId, TextRange)> + 'a {
+ scope.entries.iter().map(|(&file_id, &search_range)| {
+ let text = sema.db.file_text(file_id);
+ let search_range =
+ search_range.unwrap_or_else(|| TextRange::up_to(TextSize::of(text.as_str())));
+
+ (text, file_id, search_range)
+ })
+ }
+
+ // FIXME: There should be optimization potential here
+ // Currently we try to descend everything we find which
+ // means we call `Semantics::descend_into_macros` on
+ // every textual hit. That function is notoriously
+        // expensive even for things that do not get downmapped
+ // into macros.
+ for (text, file_id, search_range) in scope_files(sema, &search_scope) {
+ let tree = Lazy::new(move || sema.parse(file_id).syntax().clone());
+
+ // Search for occurrences of the items name
+ for offset in match_indices(&text, name, search_range) {
+ for name in sema.find_nodes_at_offset_with_descend(&tree, offset) {
+ if match name {
+ ast::NameLike::NameRef(name_ref) => self.found_name_ref(&name_ref, sink),
+ ast::NameLike::Name(name) => self.found_name(&name, sink),
+ ast::NameLike::Lifetime(lifetime) => self.found_lifetime(&lifetime, sink),
+ } {
+ return;
+ }
+ }
+ }
+ // Search for occurrences of the `Self` referring to our type
+ if let Some(self_ty) = &self.include_self_kw_refs {
+ for offset in match_indices(&text, "Self", search_range) {
+ for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
+ if self.found_self_ty_name_ref(self_ty, &name_ref, sink) {
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ // Search for `super` and `crate` resolving to our module
+ match self.def {
+ Definition::Module(module) => {
+ let scope = search_scope
+ .intersection(&SearchScope::module_and_children(self.sema.db, module));
+
+ let is_crate_root = module.is_crate_root(self.sema.db);
+
+ for (text, file_id, search_range) in scope_files(sema, &scope) {
+ let tree = Lazy::new(move || sema.parse(file_id).syntax().clone());
+
+ for offset in match_indices(&text, "super", search_range) {
+ for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
+ if self.found_name_ref(&name_ref, sink) {
+ return;
+ }
+ }
+ }
+ if is_crate_root {
+ for offset in match_indices(&text, "crate", search_range) {
+ for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
+ if self.found_name_ref(&name_ref, sink) {
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ _ => (),
+ }
+
+ // search for module `self` references in our module's definition source
+ match self.def {
+ Definition::Module(module) if self.search_self_mod => {
+ let src = module.definition_source(sema.db);
+ let file_id = src.file_id.original_file(sema.db);
+ let (file_id, search_range) = match src.value {
+ ModuleSource::Module(m) => (file_id, Some(m.syntax().text_range())),
+ ModuleSource::BlockExpr(b) => (file_id, Some(b.syntax().text_range())),
+ ModuleSource::SourceFile(_) => (file_id, None),
+ };
+
+ let search_range = if let Some(&range) = search_scope.entries.get(&file_id) {
+ match (range, search_range) {
+ (None, range) | (range, None) => range,
+ (Some(range), Some(search_range)) => match range.intersect(search_range) {
+ Some(range) => Some(range),
+ None => return,
+ },
+ }
+ } else {
+ return;
+ };
+
+ let text = sema.db.file_text(file_id);
+ let search_range =
+ search_range.unwrap_or_else(|| TextRange::up_to(TextSize::of(text.as_str())));
+
+ let tree = Lazy::new(|| sema.parse(file_id).syntax().clone());
+
+ for offset in match_indices(&text, "self", search_range) {
+ for name_ref in sema.find_nodes_at_offset_with_descend(&tree, offset) {
+ if self.found_self_module_name_ref(&name_ref, sink) {
+ return;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn found_self_ty_name_ref(
+ &self,
+ self_ty: &hir::Type,
+ name_ref: &ast::NameRef,
+ sink: &mut dyn FnMut(FileId, FileReference) -> bool,
+ ) -> bool {
+ match NameRefClass::classify(self.sema, name_ref) {
+ Some(NameRefClass::Definition(Definition::SelfType(impl_)))
+ if impl_.self_ty(self.sema.db) == *self_ty =>
+ {
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: None,
+ };
+ sink(file_id, reference)
+ }
+ _ => false,
+ }
+ }
+
+ fn found_self_module_name_ref(
+ &self,
+ name_ref: &ast::NameRef,
+ sink: &mut dyn FnMut(FileId, FileReference) -> bool,
+ ) -> bool {
+ match NameRefClass::classify(self.sema, name_ref) {
+ Some(NameRefClass::Definition(def @ Definition::Module(_))) if def == self.def => {
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: None,
+ };
+ sink(file_id, reference)
+ }
+ _ => false,
+ }
+ }
+
+ fn found_lifetime(
+ &self,
+ lifetime: &ast::Lifetime,
+ sink: &mut dyn FnMut(FileId, FileReference) -> bool,
+ ) -> bool {
+ match NameRefClass::classify_lifetime(self.sema, lifetime) {
+ Some(NameRefClass::Definition(def)) if def == self.def => {
+ let FileRange { file_id, range } = self.sema.original_range(lifetime.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::Lifetime(lifetime.clone()),
+ category: None,
+ };
+ sink(file_id, reference)
+ }
+ _ => false,
+ }
+ }
+
+ fn found_name_ref(
+ &self,
+ name_ref: &ast::NameRef,
+ sink: &mut dyn FnMut(FileId, FileReference) -> bool,
+ ) -> bool {
+ match NameRefClass::classify(self.sema, name_ref) {
+ Some(NameRefClass::Definition(def @ Definition::Local(local)))
+ if matches!(
+ self.local_repr, Some(repr) if repr == local.representative(self.sema.db)
+ ) =>
+ {
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: ReferenceCategory::new(&def, name_ref),
+ };
+ sink(file_id, reference)
+ }
+ Some(NameRefClass::Definition(def))
+ if match self.trait_assoc_def {
+ Some(trait_assoc_def) => {
+ // we have a trait assoc item, so force resolve all assoc items to their trait version
+ convert_to_def_in_trait(self.sema.db, def) == trait_assoc_def
+ }
+ None => self.def == def,
+ } =>
+ {
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: ReferenceCategory::new(&def, name_ref),
+ };
+ sink(file_id, reference)
+ }
+ Some(NameRefClass::Definition(def)) if self.include_self_kw_refs.is_some() => {
+ if self.include_self_kw_refs == def_to_ty(self.sema, &def) {
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: ReferenceCategory::new(&def, name_ref),
+ };
+ sink(file_id, reference)
+ } else {
+ false
+ }
+ }
+ Some(NameRefClass::FieldShorthand { local_ref: local, field_ref: field }) => {
+ let field = Definition::Field(field);
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let access = match self.def {
+ Definition::Field(_) if field == self.def => {
+ ReferenceCategory::new(&field, name_ref)
+ }
+ Definition::Local(_) if matches!(self.local_repr, Some(repr) if repr == local.representative(self.sema.db)) => {
+ ReferenceCategory::new(&Definition::Local(local), name_ref)
+ }
+ _ => return false,
+ };
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: access,
+ };
+ sink(file_id, reference)
+ }
+ _ => false,
+ }
+ }
+
+ fn found_name(
+ &self,
+ name: &ast::Name,
+ sink: &mut dyn FnMut(FileId, FileReference) -> bool,
+ ) -> bool {
+ match NameClass::classify(self.sema, name) {
+ Some(NameClass::PatFieldShorthand { local_def: _, field_ref })
+ if matches!(
+ self.def, Definition::Field(_) if Definition::Field(field_ref) == self.def
+ ) =>
+ {
+ let FileRange { file_id, range } = self.sema.original_range(name.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::Name(name.clone()),
+ // FIXME: mutable patterns should have `Write` access
+ category: Some(ReferenceCategory::Read),
+ };
+ sink(file_id, reference)
+ }
+ Some(NameClass::ConstReference(def)) if self.def == def => {
+ let FileRange { file_id, range } = self.sema.original_range(name.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::Name(name.clone()),
+ category: None,
+ };
+ sink(file_id, reference)
+ }
+ Some(NameClass::Definition(def @ Definition::Local(local))) if def != self.def => {
+ if matches!(
+ self.local_repr,
+ Some(repr) if local.representative(self.sema.db) == repr
+ ) {
+ let FileRange { file_id, range } = self.sema.original_range(name.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::Name(name.clone()),
+ category: None,
+ };
+ return sink(file_id, reference);
+ }
+ false
+ }
+ Some(NameClass::Definition(def)) if def != self.def => {
+ // if the def we are looking for is a trait (impl) assoc item, we'll have to resolve the items to trait definition assoc item
+ if !matches!(
+ self.trait_assoc_def,
+ Some(trait_assoc_def)
+ if convert_to_def_in_trait(self.sema.db, def) == trait_assoc_def
+ ) {
+ return false;
+ }
+ let FileRange { file_id, range } = self.sema.original_range(name.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::Name(name.clone()),
+ category: None,
+ };
+ sink(file_id, reference)
+ }
+ _ => false,
+ }
+ }
+}
+
+fn def_to_ty(sema: &Semantics<'_, RootDatabase>, def: &Definition) -> Option<hir::Type> {
+ match def {
+ Definition::Adt(adt) => Some(adt.ty(sema.db)),
+ Definition::TypeAlias(it) => Some(it.ty(sema.db)),
+ Definition::BuiltinType(it) => Some(it.ty(sema.db)),
+ Definition::SelfType(it) => Some(it.self_ty(sema.db)),
+ _ => None,
+ }
+}
+
+impl ReferenceCategory {
+ fn new(def: &Definition, r: &ast::NameRef) -> Option<ReferenceCategory> {
+ // Only Locals and Fields have accesses for now.
+ if !matches!(def, Definition::Local(_) | Definition::Field(_)) {
+ return None;
+ }
+
+ let mode = r.syntax().ancestors().find_map(|node| {
+ match_ast! {
+ match node {
+ ast::BinExpr(expr) => {
+ if matches!(expr.op_kind()?, ast::BinaryOp::Assignment { .. }) {
+                        // If the variable or field ends at the end of the LHS, it's a Write (covers fields and locals).
+ // FIXME: This is not terribly accurate.
+ if let Some(lhs) = expr.lhs() {
+ if lhs.syntax().text_range().end() == r.syntax().text_range().end() {
+ return Some(ReferenceCategory::Write);
+ }
+ }
+ }
+ Some(ReferenceCategory::Read)
+ },
+ _ => None
+ }
+ }
+ });
+
+ // Default Locals and Fields to read
+ mode.or(Some(ReferenceCategory::Read))
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/source_change.rs b/src/tools/rust-analyzer/crates/ide-db/src/source_change.rs
new file mode 100644
index 000000000..8132c73ef
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/source_change.rs
@@ -0,0 +1,99 @@
+//! This module defines types that represent changes to the source code, which flow
+//! from the server to the client.
+//!
+//! It can be viewed as a dual for `Change`.
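+//!
+//! A small, illustrative sketch of building one (the `file_id`, `edit` and
+//! `dst` values are assumed to come from the caller):
+//!
+//! ```ignore
+//! let mut change = SourceChange::from_text_edit(file_id, edit);
+//! change.push_file_system_edit(FileSystemEdit::CreateFile {
+//!     dst,
+//!     initial_contents: String::new(),
+//! });
+//! ```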
+
+use std::{collections::hash_map::Entry, iter};
+
+use base_db::{AnchoredPathBuf, FileId};
+use rustc_hash::FxHashMap;
+use stdx::never;
+use text_edit::TextEdit;
+
+#[derive(Default, Debug, Clone)]
+pub struct SourceChange {
+ pub source_file_edits: FxHashMap<FileId, TextEdit>,
+ pub file_system_edits: Vec<FileSystemEdit>,
+ pub is_snippet: bool,
+}
+
+impl SourceChange {
+    /// Creates a new `SourceChange` from the given source-file and
+    /// file-system edits.
+ pub fn from_edits(
+ source_file_edits: FxHashMap<FileId, TextEdit>,
+ file_system_edits: Vec<FileSystemEdit>,
+ ) -> Self {
+ SourceChange { source_file_edits, file_system_edits, is_snippet: false }
+ }
+
+ pub fn from_text_edit(file_id: FileId, edit: TextEdit) -> Self {
+ SourceChange {
+ source_file_edits: iter::once((file_id, edit)).collect(),
+ ..Default::default()
+ }
+ }
+
+ /// Inserts a [`TextEdit`] for the given [`FileId`]. This properly handles merging existing
+ /// edits for a file if some already exist.
+ pub fn insert_source_edit(&mut self, file_id: FileId, edit: TextEdit) {
+ match self.source_file_edits.entry(file_id) {
+ Entry::Occupied(mut entry) => {
+ never!(entry.get_mut().union(edit).is_err(), "overlapping edits for same file");
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(edit);
+ }
+ }
+ }
+
+ pub fn push_file_system_edit(&mut self, edit: FileSystemEdit) {
+ self.file_system_edits.push(edit);
+ }
+
+ pub fn get_source_edit(&self, file_id: FileId) -> Option<&TextEdit> {
+ self.source_file_edits.get(&file_id)
+ }
+
+ pub fn merge(mut self, other: SourceChange) -> SourceChange {
+ self.extend(other.source_file_edits);
+ self.extend(other.file_system_edits);
+ self.is_snippet |= other.is_snippet;
+ self
+ }
+}
+
+impl Extend<(FileId, TextEdit)> for SourceChange {
+ fn extend<T: IntoIterator<Item = (FileId, TextEdit)>>(&mut self, iter: T) {
+ iter.into_iter().for_each(|(file_id, edit)| self.insert_source_edit(file_id, edit));
+ }
+}
+
+impl Extend<FileSystemEdit> for SourceChange {
+ fn extend<T: IntoIterator<Item = FileSystemEdit>>(&mut self, iter: T) {
+ iter.into_iter().for_each(|edit| self.push_file_system_edit(edit));
+ }
+}
+
+impl From<FxHashMap<FileId, TextEdit>> for SourceChange {
+ fn from(source_file_edits: FxHashMap<FileId, TextEdit>) -> SourceChange {
+ SourceChange { source_file_edits, file_system_edits: Vec::new(), is_snippet: false }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub enum FileSystemEdit {
+ CreateFile { dst: AnchoredPathBuf, initial_contents: String },
+ MoveFile { src: FileId, dst: AnchoredPathBuf },
+ MoveDir { src: AnchoredPathBuf, src_id: FileId, dst: AnchoredPathBuf },
+}
+
+impl From<FileSystemEdit> for SourceChange {
+ fn from(edit: FileSystemEdit) -> SourceChange {
+ SourceChange {
+ source_file_edits: Default::default(),
+ file_system_edits: vec![edit],
+ is_snippet: false,
+ }
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs b/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs
new file mode 100644
index 000000000..bfb003127
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs
@@ -0,0 +1,429 @@
+//! This module handles fuzzy-searching of functions, structs and other symbols
+//! by name across the whole workspace and dependencies.
+//!
+//! It works by building an incrementally-updated text-search index of all
+//! symbols. The backbone of the index is the **awesome** `fst` crate by
+//! @BurntSushi.
+//!
+//! In a nutshell, you give a set of strings to `fst`, and it builds a
+//! finite state machine describing this set of strings. The strings which
+//! could fuzzy-match a pattern can also be described by a finite state machine.
+//! What is freaking cool is that you can now traverse both state machines in
+//! lock-step to enumerate the strings which are both in the input set and
+//! fuzzy-match the query. Or, more formally, given two languages described by
+//! FSTs, one can build a product FST which describes the intersection of the
+//! languages.
+//!
+//! `fst` does not support cheap updating of the index, but it supports unioning
+//! of state machines. So, to account for changing source code, we build an FST
+//! for each library (which is assumed to never change) and an FST for each Rust
+//! file in the current workspace, and run a query against the union of all
+//! those FSTs.
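+//!
+//! A hedged usage sketch (the `db` value is assumed to be a populated
+//! `RootDatabase`):
+//!
+//! ```ignore
+//! let mut query = Query::new("Foo".to_string());
+//! query.only_types(); // restrict the results to type-like symbols
+//! let symbols = world_symbols(&db, query);
+//! ```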
+
+use std::{
+ cmp::Ordering,
+ fmt,
+ hash::{Hash, Hasher},
+ mem,
+ sync::Arc,
+};
+
+use base_db::{
+ salsa::{self, ParallelDatabase},
+ SourceDatabaseExt, SourceRootId, Upcast,
+};
+use fst::{self, Streamer};
+use hir::{
+ db::HirDatabase,
+ symbols::{FileSymbol, SymbolCollector},
+ Crate, Module,
+};
+use rayon::prelude::*;
+use rustc_hash::FxHashSet;
+
+use crate::RootDatabase;
+
+#[derive(Debug)]
+pub struct Query {
+ query: String,
+ lowercased: String,
+ only_types: bool,
+ libs: bool,
+ exact: bool,
+ case_sensitive: bool,
+ limit: usize,
+}
+
+impl Query {
+ pub fn new(query: String) -> Query {
+ let lowercased = query.to_lowercase();
+ Query {
+ query,
+ lowercased,
+ only_types: false,
+ libs: false,
+ exact: false,
+ case_sensitive: false,
+ limit: usize::max_value(),
+ }
+ }
+
+ pub fn only_types(&mut self) {
+ self.only_types = true;
+ }
+
+ pub fn libs(&mut self) {
+ self.libs = true;
+ }
+
+ pub fn exact(&mut self) {
+ self.exact = true;
+ }
+
+ pub fn case_sensitive(&mut self) {
+ self.case_sensitive = true;
+ }
+
+ pub fn limit(&mut self, limit: usize) {
+ self.limit = limit
+ }
+}
+
+#[salsa::query_group(SymbolsDatabaseStorage)]
+pub trait SymbolsDatabase: HirDatabase + SourceDatabaseExt + Upcast<dyn HirDatabase> {
+ /// The symbol index for a given module. These modules should only be in source roots that
+ /// are inside local_roots.
+ fn module_symbols(&self, module: Module) -> Arc<SymbolIndex>;
+
+ /// The symbol index for a given source root within library_roots.
+ fn library_symbols(&self, source_root_id: SourceRootId) -> Arc<SymbolIndex>;
+
+ /// The set of "local" (that is, from the current workspace) roots.
+ /// Files in local roots are assumed to change frequently.
+ #[salsa::input]
+ fn local_roots(&self) -> Arc<FxHashSet<SourceRootId>>;
+
+ /// The set of roots for crates.io libraries.
+ /// Files in libraries are assumed to never change.
+ #[salsa::input]
+ fn library_roots(&self) -> Arc<FxHashSet<SourceRootId>>;
+}
+
+fn library_symbols(db: &dyn SymbolsDatabase, source_root_id: SourceRootId) -> Arc<SymbolIndex> {
+ let _p = profile::span("library_symbols");
+
+ // todo: this could be parallelized, once I figure out how to do that...
+ let symbols = db
+ .source_root_crates(source_root_id)
+ .iter()
+ .flat_map(|&krate| Crate::from(krate).modules(db.upcast()))
+        // we specifically avoid calling SymbolsDatabase::module_symbols here, even though they do the same thing,
+        // as the index for a library is not really ever going to change, and we do not want to store each
+        // module's index in salsa.
+ .flat_map(|module| SymbolCollector::collect(db.upcast(), module))
+ .collect();
+
+ Arc::new(SymbolIndex::new(symbols))
+}
+
+fn module_symbols(db: &dyn SymbolsDatabase, module: Module) -> Arc<SymbolIndex> {
+ let _p = profile::span("module_symbols");
+ let symbols = SymbolCollector::collect(db.upcast(), module);
+ Arc::new(SymbolIndex::new(symbols))
+}
+
+/// Need to wrap Snapshot to provide `Clone` impl for `map_with`
+struct Snap<DB>(DB);
+impl<DB: ParallelDatabase> Snap<salsa::Snapshot<DB>> {
+ fn new(db: &DB) -> Self {
+ Self(db.snapshot())
+ }
+}
+impl<DB: ParallelDatabase> Clone for Snap<salsa::Snapshot<DB>> {
+ fn clone(&self) -> Snap<salsa::Snapshot<DB>> {
+ Snap(self.0.snapshot())
+ }
+}
+impl<DB> std::ops::Deref for Snap<DB> {
+ type Target = DB;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+// Feature: Workspace Symbol
+//
+// Uses fuzzy-search to find types, modules and functions by name across your
+// project and dependencies. This is **the** most useful feature, which improves code
+// navigation tremendously. It mostly works on top of the built-in LSP
+// functionality, however `#` and `*` symbols can be used to narrow down the
+// search. Specifically,
+//
+// - `Foo` searches for `Foo` type in the current workspace
+// - `foo#` searches for `foo` function in the current workspace
+// - `Foo*` searches for `Foo` type among dependencies, including `stdlib`
+// - `foo#*` searches for `foo` function among dependencies
+//
+// That is, `#` switches from "types" to all symbols, `*` switches from the current
+// workspace to dependencies.
+//
+// Note that filtering does not currently work in VSCode due to the editor never
+// sending the special symbols to the language server. Instead, you can configure
+// the filtering via the `rust-analyzer.workspace.symbol.search.scope` and
+// `rust-analyzer.workspace.symbol.search.kind` settings.
+//
+// |===
+// | Editor | Shortcut
+//
+// | VS Code | kbd:[Ctrl+T]
+// |===
+pub fn world_symbols(db: &RootDatabase, query: Query) -> Vec<FileSymbol> {
+ let _p = profile::span("world_symbols").detail(|| query.query.clone());
+
+ let indices: Vec<_> = if query.libs {
+ db.library_roots()
+ .par_iter()
+ .map_with(Snap::new(db), |snap, &root| snap.library_symbols(root))
+ .collect()
+ } else {
+ let mut modules = Vec::new();
+
+ for &root in db.local_roots().iter() {
+ let crates = db.source_root_crates(root);
+ for &krate in crates.iter() {
+ modules.extend(Crate::from(krate).modules(db));
+ }
+ }
+
+ modules
+ .par_iter()
+ .map_with(Snap::new(db), |snap, &module| snap.module_symbols(module))
+ .collect()
+ };
+
+ query.search(&indices)
+}
+
+pub fn crate_symbols(db: &RootDatabase, krate: Crate, query: Query) -> Vec<FileSymbol> {
+ let _p = profile::span("crate_symbols").detail(|| format!("{:?}", query));
+
+ let modules = krate.modules(db);
+ let indices: Vec<_> = modules
+ .par_iter()
+ .map_with(Snap::new(db), |snap, &module| snap.module_symbols(module))
+ .collect();
+
+ query.search(&indices)
+}
+
+#[derive(Default)]
+pub struct SymbolIndex {
+ symbols: Vec<FileSymbol>,
+ map: fst::Map<Vec<u8>>,
+}
+
+impl fmt::Debug for SymbolIndex {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SymbolIndex").field("n_symbols", &self.symbols.len()).finish()
+ }
+}
+
+impl PartialEq for SymbolIndex {
+ fn eq(&self, other: &SymbolIndex) -> bool {
+ self.symbols == other.symbols
+ }
+}
+
+impl Eq for SymbolIndex {}
+
+impl Hash for SymbolIndex {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ self.symbols.hash(hasher)
+ }
+}
+
+impl SymbolIndex {
+ fn new(mut symbols: Vec<FileSymbol>) -> SymbolIndex {
+ fn cmp(lhs: &FileSymbol, rhs: &FileSymbol) -> Ordering {
+ let lhs_chars = lhs.name.chars().map(|c| c.to_ascii_lowercase());
+ let rhs_chars = rhs.name.chars().map(|c| c.to_ascii_lowercase());
+ lhs_chars.cmp(rhs_chars)
+ }
+
+ symbols.par_sort_by(cmp);
+
+ let mut builder = fst::MapBuilder::memory();
+
+ let mut last_batch_start = 0;
+
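+ // Group adjacent symbols with case-insensitively equal names into one batch and
+ // insert a single fst entry per batch, mapping the name to the packed index range.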
+ for idx in 0..symbols.len() {
+ if let Some(next_symbol) = symbols.get(idx + 1) {
+ if cmp(&symbols[last_batch_start], next_symbol) == Ordering::Equal {
+ continue;
+ }
+ }
+
+ let start = last_batch_start;
+ let end = idx + 1;
+ last_batch_start = end;
+
+ let key = symbols[start].name.as_str().to_ascii_lowercase();
+ let value = SymbolIndex::range_to_map_value(start, end);
+
+ builder.insert(key, value).unwrap();
+ }
+
+ let map = fst::Map::new(builder.into_inner().unwrap()).unwrap();
+ SymbolIndex { symbols, map }
+ }
+
+ pub fn len(&self) -> usize {
+ self.symbols.len()
+ }
+
+ pub fn memory_size(&self) -> usize {
+ self.map.as_fst().size() + self.symbols.len() * mem::size_of::<FileSymbol>()
+ }
+
+ fn range_to_map_value(start: usize, end: usize) -> u64 {
+ debug_assert![start <= (std::u32::MAX as usize)];
+ debug_assert![end <= (std::u32::MAX as usize)];
+
+ ((start as u64) << 32) | end as u64
+ }
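+
+ // Worked example (illustrative): `range_to_map_value(3, 7)` packs the range as
+ // `(3u64 << 32) | 7`; `map_value_to_range` below recovers `(3, 7)` from the high
+ // and low 32 bits.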
+
+ fn map_value_to_range(value: u64) -> (usize, usize) {
+ let end = value as u32 as usize;
+ let start = (value >> 32) as usize;
+ (start, end)
+ }
+}
+
+impl Query {
+ pub(crate) fn search(self, indices: &[Arc<SymbolIndex>]) -> Vec<FileSymbol> {
+ let _p = profile::span("symbol_index::Query::search");
+ let mut op = fst::map::OpBuilder::new();
+ for file_symbols in indices.iter() {
+ let automaton = fst::automaton::Subsequence::new(&self.lowercased);
+ op = op.add(file_symbols.map.search(automaton))
+ }
+ let mut stream = op.union();
+ let mut res = Vec::new();
+ while let Some((_, indexed_values)) = stream.next() {
+ for indexed_value in indexed_values {
+ let symbol_index = &indices[indexed_value.index];
+ let (start, end) = SymbolIndex::map_value_to_range(indexed_value.value);
+
+ for symbol in &symbol_index.symbols[start..end] {
+ if self.only_types && !symbol.kind.is_type() {
+ continue;
+ }
+ if self.exact {
+ if symbol.name != self.query {
+ continue;
+ }
+ } else if self.case_sensitive {
+ if self.query.chars().any(|c| !symbol.name.contains(c)) {
+ continue;
+ }
+ }
+
+ res.push(symbol.clone());
+ if res.len() >= self.limit {
+ return res;
+ }
+ }
+ }
+ }
+ res
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use base_db::fixture::WithFixture;
+ use expect_test::expect_file;
+ use hir::symbols::SymbolCollector;
+
+ use super::*;
+
+ #[test]
+ fn test_symbol_index_collection() {
+ let (db, _) = RootDatabase::with_many_files(
+ r#"
+//- /main.rs
+
+macro_rules! macro_rules_macro {
+ () => {}
+};
+
+macro_rules! define_struct {
+ () => {
+ struct StructFromMacro;
+ }
+};
+
+define_struct!();
+
+macro Macro { }
+
+struct Struct;
+enum Enum {
+ A, B
+}
+union Union {}
+
+impl Struct {
+ fn impl_fn() {}
+}
+
+trait Trait {
+ fn trait_fn(&self);
+}
+
+fn main() {
+ struct StructInFn;
+}
+
+const CONST: u32 = 1;
+static STATIC: &'static str = "2";
+type Alias = Struct;
+
+mod a_mod {
+ struct StructInModA;
+}
+
+const _: () = {
+ struct StructInUnnamedConst;
+
+ ()
+};
+
+const CONST_WITH_INNER: () = {
+ struct StructInNamedConst;
+
+ ()
+};
+
+mod b_mod;
+
+//- /b_mod.rs
+struct StructInModB;
+ "#,
+ );
+
+ let symbols: Vec<_> = Crate::from(db.test_crate())
+ .modules(&db)
+ .into_iter()
+ .map(|module_id| {
+ let mut symbols = SymbolCollector::collect(&db, module_id);
+ symbols.sort_by_key(|it| it.name.clone());
+ (module_id, symbols)
+ })
+ .collect();
+
+ expect_file!["./test_data/test_symbol_index_collection.txt"].assert_debug_eq(&symbols);
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/format_string.rs b/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/format_string.rs
new file mode 100644
index 000000000..f48a57008
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/format_string.rs
@@ -0,0 +1,308 @@
+//! Tools to work with format string literals for the `format_args!` family of macros.
+use syntax::{
+ ast::{self, IsString},
+ AstNode, AstToken, TextRange, TextSize,
+};
+
+pub fn is_format_string(string: &ast::String) -> bool {
+ // Check if `string` is a format string argument of a macro invocation.
+ // `string` is a string literal, mapped down into the innermost macro expansion.
+ // Since `format_args!` etc. remove the format string when expanding, but place all arguments
+ // in the expanded output, we know that the string token is (part of) the format string if it
+ // appears in `format_args!` (otherwise it would have been mapped down further).
+ //
+ // This setup lets us correctly highlight the components of `concat!("{}", "bla")` format
+ // strings. It still fails for `concat!("{", "}")`, but that is rare.
+ (|| {
+ let macro_call = string.syntax().parent_ancestors().find_map(ast::MacroCall::cast)?;
+ let name = macro_call.path()?.segment()?.name_ref()?;
+
+ if !matches!(
+ name.text().as_str(),
+ "format_args" | "format_args_nl" | "const_format_args" | "panic_2015" | "panic_2021"
+ ) {
+ return None;
+ }
+
+ // NB: we match against `panic_2015`/`panic_2021` here because they have a special-cased arm for
+ // `"{}"`, which otherwise wouldn't get highlighted.
+
+ Some(())
+ })()
+ .is_some()
+}
+
+#[derive(Debug)]
+pub enum FormatSpecifier {
+ Open,
+ Close,
+ Integer,
+ Identifier,
+ Colon,
+ Fill,
+ Align,
+ Sign,
+ NumberSign,
+ Zero,
+ DollarSign,
+ Dot,
+ Asterisk,
+ QuestionMark,
+ Escape,
+}
+
+pub fn lex_format_specifiers(
+ string: &ast::String,
+ mut callback: &mut dyn FnMut(TextRange, FormatSpecifier),
+) {
+ let mut char_ranges = Vec::new();
+ string.escaped_char_ranges(&mut |range, res| char_ranges.push((range, res)));
+ let mut chars = char_ranges
+ .iter()
+ .filter_map(|(range, res)| Some((*range, *res.as_ref().ok()?)))
+ .peekable();
+
+ while let Some((range, first_char)) = chars.next() {
+ if let '{' = first_char {
+ // Format specifier, see syntax at https://doc.rust-lang.org/std/fmt/index.html#syntax
+ if let Some((_, '{')) = chars.peek() {
+ // Escaped format specifier, `{{`
+ read_escaped_format_specifier(&mut chars, &mut callback);
+ continue;
+ }
+
+ callback(range, FormatSpecifier::Open);
+
+ // check for integer/identifier
+ let (_, int_char) = chars.peek().copied().unwrap_or_default();
+ match int_char {
+ // integer
+ '0'..='9' => read_integer(&mut chars, &mut callback),
+ // identifier
+ c if c == '_' || c.is_alphabetic() => read_identifier(&mut chars, &mut callback),
+ _ => {}
+ }
+
+ if let Some((_, ':')) = chars.peek() {
+ skip_char_and_emit(&mut chars, FormatSpecifier::Colon, &mut callback);
+
+ // check for fill/align
+ let mut cloned = chars.clone().take(2);
+ let (_, first) = cloned.next().unwrap_or_default();
+ let (_, second) = cloned.next().unwrap_or_default();
+ match second {
+ '<' | '^' | '>' => {
+ // alignment specifier, the first char specifies the fill character
+ skip_char_and_emit(&mut chars, FormatSpecifier::Fill, &mut callback);
+ skip_char_and_emit(&mut chars, FormatSpecifier::Align, &mut callback);
+ }
+ _ => {
+ if let '<' | '^' | '>' = first {
+ skip_char_and_emit(&mut chars, FormatSpecifier::Align, &mut callback);
+ }
+ }
+ }
+
+ // check for sign
+ match chars.peek().copied().unwrap_or_default().1 {
+ '+' | '-' => {
+ skip_char_and_emit(&mut chars, FormatSpecifier::Sign, &mut callback);
+ }
+ _ => {}
+ }
+
+ // check for `#`
+ if let Some((_, '#')) = chars.peek() {
+ skip_char_and_emit(&mut chars, FormatSpecifier::NumberSign, &mut callback);
+ }
+
+ // check for `0`
+ let mut cloned = chars.clone().take(2);
+ let first = cloned.next().map(|next| next.1);
+ let second = cloned.next().map(|next| next.1);
+
+ if first == Some('0') && second != Some('$') {
+ skip_char_and_emit(&mut chars, FormatSpecifier::Zero, &mut callback);
+ }
+
+ // width
+ match chars.peek().copied().unwrap_or_default().1 {
+ '0'..='9' => {
+ read_integer(&mut chars, &mut callback);
+ if let Some((_, '$')) = chars.peek() {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::DollarSign,
+ &mut callback,
+ );
+ }
+ }
+ c if c == '_' || c.is_alphabetic() => {
+ read_identifier(&mut chars, &mut callback);
+
+ if chars.peek().map(|&(_, c)| c) == Some('?') {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::QuestionMark,
+ &mut callback,
+ );
+ }
+
+ // can be either a width (indicated by a dollar sign) or a type (in which case
+ // the next char has to be `}`)
+ let next = chars.peek().map(|&(_, c)| c);
+
+ match next {
+ Some('$') => skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::DollarSign,
+ &mut callback,
+ ),
+ Some('}') => {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::Close,
+ &mut callback,
+ );
+ continue;
+ }
+ _ => continue,
+ };
+ }
+ _ => {}
+ }
+
+ // precision
+ if let Some((_, '.')) = chars.peek() {
+ skip_char_and_emit(&mut chars, FormatSpecifier::Dot, &mut callback);
+
+ match chars.peek().copied().unwrap_or_default().1 {
+ '*' => {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::Asterisk,
+ &mut callback,
+ );
+ }
+ '0'..='9' => {
+ read_integer(&mut chars, &mut callback);
+ if let Some((_, '$')) = chars.peek() {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::DollarSign,
+ &mut callback,
+ );
+ }
+ }
+ c if c == '_' || c.is_alphabetic() => {
+ read_identifier(&mut chars, &mut callback);
+ if chars.peek().map(|&(_, c)| c) != Some('$') {
+ continue;
+ }
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::DollarSign,
+ &mut callback,
+ );
+ }
+ _ => {
+ continue;
+ }
+ }
+ }
+
+ // type
+ match chars.peek().copied().unwrap_or_default().1 {
+ '?' => {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::QuestionMark,
+ &mut callback,
+ );
+ }
+ c if c == '_' || c.is_alphabetic() => {
+ read_identifier(&mut chars, &mut callback);
+
+ if chars.peek().map(|&(_, c)| c) == Some('?') {
+ skip_char_and_emit(
+ &mut chars,
+ FormatSpecifier::QuestionMark,
+ &mut callback,
+ );
+ }
+ }
+ _ => {}
+ }
+ }
+
+ if let Some((_, '}')) = chars.peek() {
+ skip_char_and_emit(&mut chars, FormatSpecifier::Close, &mut callback);
+ }
+ continue;
+ } else if let '}' = first_char {
+ if let Some((_, '}')) = chars.peek() {
+ // Escaped format specifier, `}}`
+ read_escaped_format_specifier(&mut chars, &mut callback);
+ }
+ }
+ }
+
+ fn skip_char_and_emit<I, F>(
+ chars: &mut std::iter::Peekable<I>,
+ emit: FormatSpecifier,
+ callback: &mut F,
+ ) where
+ I: Iterator<Item = (TextRange, char)>,
+ F: FnMut(TextRange, FormatSpecifier),
+ {
+ let (range, _) = chars.next().unwrap();
+ callback(range, emit);
+ }
+
+ fn read_integer<I, F>(chars: &mut std::iter::Peekable<I>, callback: &mut F)
+ where
+ I: Iterator<Item = (TextRange, char)>,
+ F: FnMut(TextRange, FormatSpecifier),
+ {
+ let (mut range, c) = chars.next().unwrap();
+ assert!(c.is_ascii_digit());
+ while let Some(&(r, next_char)) = chars.peek() {
+ if next_char.is_ascii_digit() {
+ chars.next();
+ range = range.cover(r);
+ } else {
+ break;
+ }
+ }
+ callback(range, FormatSpecifier::Integer);
+ }
+
+ fn read_identifier<I, F>(chars: &mut std::iter::Peekable<I>, callback: &mut F)
+ where
+ I: Iterator<Item = (TextRange, char)>,
+ F: FnMut(TextRange, FormatSpecifier),
+ {
+ let (mut range, c) = chars.next().unwrap();
+ assert!(c.is_alphabetic() || c == '_');
+ while let Some(&(r, next_char)) = chars.peek() {
+ if next_char == '_' || next_char.is_ascii_digit() || next_char.is_alphabetic() {
+ chars.next();
+ range = range.cover(r);
+ } else {
+ break;
+ }
+ }
+ callback(range, FormatSpecifier::Identifier);
+ }
+
+ fn read_escaped_format_specifier<I, F>(chars: &mut std::iter::Peekable<I>, callback: &mut F)
+ where
+ I: Iterator<Item = (TextRange, char)>,
+ F: FnMut(TextRange, FormatSpecifier),
+ {
+ let (range, _) = chars.peek().unwrap();
+ let offset = TextSize::from(1);
+ callback(TextRange::new(range.start() - offset, range.end()), FormatSpecifier::Escape);
+ chars.next();
+ }
+}
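+
+// Illustrative trace (not exhaustive): for the format string `"{:?}"` the callback
+// is invoked with `Open` for `{`, `Colon` for `:`, `QuestionMark` for `?`, and
+// `Close` for `}`; an escaped brace pair such as `{{` is reported once as `Escape`.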
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/insert_whitespace_into_node.rs b/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/insert_whitespace_into_node.rs
new file mode 100644
index 000000000..f54ae6c92
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/insert_whitespace_into_node.rs
@@ -0,0 +1,136 @@
+//! Utilities for formatting macro expanded nodes until we get a proper formatter.
+use syntax::{
+ ast::make,
+ ted::{self, Position},
+ NodeOrToken,
+ SyntaxKind::{self, *},
+ SyntaxNode, SyntaxToken, WalkEvent, T,
+};
+
+// FIXME: It would also be cool to share logic here and in the mbe tests,
+// which are pretty unreadable at the moment.
+/// Renders a [`SyntaxNode`] with whitespace inserted between tokens that require them.
+pub fn insert_ws_into(syn: SyntaxNode) -> SyntaxNode {
+ let mut indent = 0;
+ let mut last: Option<SyntaxKind> = None;
+ let mut mods = Vec::new();
+ let syn = syn.clone_subtree().clone_for_update();
+
+ let before = Position::before;
+ let after = Position::after;
+
+ let do_indent = |pos: fn(_) -> Position, token: &SyntaxToken, indent| {
+ (pos(token.clone()), make::tokens::whitespace(&" ".repeat(2 * indent)))
+ };
+ let do_ws = |pos: fn(_) -> Position, token: &SyntaxToken| {
+ (pos(token.clone()), make::tokens::single_space())
+ };
+ let do_nl = |pos: fn(_) -> Position, token: &SyntaxToken| {
+ (pos(token.clone()), make::tokens::single_newline())
+ };
+
+ for event in syn.preorder_with_tokens() {
+ let token = match event {
+ WalkEvent::Enter(NodeOrToken::Token(token)) => token,
+ WalkEvent::Leave(NodeOrToken::Node(node))
+ if matches!(
+ node.kind(),
+ ATTR | MATCH_ARM | STRUCT | ENUM | UNION | FN | IMPL | MACRO_RULES
+ ) =>
+ {
+ if indent > 0 {
+ mods.push((
+ Position::after(node.clone()),
+ make::tokens::whitespace(&" ".repeat(2 * indent)),
+ ));
+ }
+ if node.parent().is_some() {
+ mods.push((Position::after(node), make::tokens::single_newline()));
+ }
+ continue;
+ }
+ _ => continue,
+ };
+ let tok = &token;
+
+ let is_next = |f: fn(SyntaxKind) -> bool, default| -> bool {
+ tok.next_token().map(|it| f(it.kind())).unwrap_or(default)
+ };
+ let is_last =
+ |f: fn(SyntaxKind) -> bool, default| -> bool { last.map(f).unwrap_or(default) };
+
+ match tok.kind() {
+ k if is_text(k) && is_next(|it| !it.is_punct() || it == UNDERSCORE, false) => {
+ mods.push(do_ws(after, tok));
+ }
+ L_CURLY if is_next(|it| it != R_CURLY, true) => {
+ indent += 1;
+ if is_last(is_text, false) {
+ mods.push(do_ws(before, tok));
+ }
+
+ mods.push(do_indent(after, tok, indent));
+ mods.push(do_nl(after, tok));
+ }
+ R_CURLY if is_last(|it| it != L_CURLY, true) => {
+ indent = indent.saturating_sub(1);
+
+ if indent > 0 {
+ mods.push(do_indent(before, tok, indent));
+ }
+ mods.push(do_nl(before, tok));
+ }
+ R_CURLY => {
+ if indent > 0 {
+ mods.push(do_indent(after, tok, indent));
+ }
+ mods.push(do_nl(after, tok));
+ }
+ LIFETIME_IDENT if is_next(is_text, true) => {
+ mods.push(do_ws(after, tok));
+ }
+ MUT_KW if is_next(|it| it == SELF_KW, false) => {
+ mods.push(do_ws(after, tok));
+ }
+ AS_KW | DYN_KW | IMPL_KW | CONST_KW => {
+ mods.push(do_ws(after, tok));
+ }
+ T![;] => {
+ if indent > 0 {
+ mods.push(do_indent(after, tok, indent));
+ }
+ mods.push(do_nl(after, tok));
+ }
+ T![=] if is_next(|it| it == T![>], false) => {
+ // FIXME: this branch is for `=>` in macro_rules!, which is currently parsed as
+ // two separate symbols.
+ mods.push(do_ws(before, tok));
+ mods.push(do_ws(after, &tok.next_token().unwrap()));
+ }
+ T![->] | T![=] | T![=>] => {
+ mods.push(do_ws(before, tok));
+ mods.push(do_ws(after, tok));
+ }
+ T![!] if is_last(|it| it == MACRO_RULES_KW, false) && is_next(is_text, false) => {
+ mods.push(do_ws(after, tok));
+ }
+ _ => (),
+ }
+
+ last = Some(tok.kind());
+ }
+
+ for (pos, insert) in mods {
+ ted::insert(pos, insert);
+ }
+
+ if let Some(it) = syn.last_token().filter(|it| it.kind() == SyntaxKind::WHITESPACE) {
+ ted::remove(it);
+ }
+
+ syn
+}
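+
+// A rough before/after sketch (illustrative, exact output depends on the node): a
+// macro-expanded item whose text is `fn main(){let x=0;}` comes back roughly as
+//
+//     fn main(){
+//       let x = 0;
+//     }
+//
+// i.e. spaces around `=`, newlines after `{` and `;`, and two-space indentation.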
+
+fn is_text(k: SyntaxKind) -> bool {
+ k.is_keyword() || k.is_literal() || k == IDENT || k == UNDERSCORE
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/node_ext.rs b/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/node_ext.rs
new file mode 100644
index 000000000..84bde4d44
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/syntax_helpers/node_ext.rs
@@ -0,0 +1,460 @@
+//! Various helper functions to work with SyntaxNodes.
+use itertools::Itertools;
+use parser::T;
+use syntax::{
+ ast::{self, HasLoopBody, PathSegmentKind, VisibilityKind},
+ AstNode, Preorder, RustLanguage, WalkEvent,
+};
+
+pub fn expr_as_name_ref(expr: &ast::Expr) -> Option<ast::NameRef> {
+ if let ast::Expr::PathExpr(expr) = expr {
+ let path = expr.path()?;
+ path.as_single_name_ref()
+ } else {
+ None
+ }
+}
+
+pub fn full_path_of_name_ref(name_ref: &ast::NameRef) -> Option<ast::Path> {
+ let mut ancestors = name_ref.syntax().ancestors();
+ let _ = ancestors.next()?; // skip self
+ let _ = ancestors.next().filter(|it| ast::PathSegment::can_cast(it.kind()))?; // skip self
+ ancestors.take_while(|it| ast::Path::can_cast(it.kind())).last().and_then(ast::Path::cast)
+}
+
+pub fn block_as_lone_tail(block: &ast::BlockExpr) -> Option<ast::Expr> {
+ block.statements().next().is_none().then(|| block.tail_expr()).flatten()
+}
+
+/// Preorder walk all the expression's child expressions.
+pub fn walk_expr(expr: &ast::Expr, cb: &mut dyn FnMut(ast::Expr)) {
+ preorder_expr(expr, &mut |ev| {
+ if let WalkEvent::Enter(expr) = ev {
+ cb(expr);
+ }
+ false
+ })
+}
+
+/// Preorder walk all the expression's child expressions preserving events.
+/// If the callback returns true on a [`WalkEvent::Enter`], the subtree of the expression will be skipped.
+/// Note that the subtree may already be skipped due to the context analysis this function does.
+pub fn preorder_expr(start: &ast::Expr, cb: &mut dyn FnMut(WalkEvent<ast::Expr>) -> bool) {
+ let mut preorder = start.syntax().preorder();
+ while let Some(event) = preorder.next() {
+ let node = match event {
+ WalkEvent::Enter(node) => node,
+ WalkEvent::Leave(node) => {
+ if let Some(expr) = ast::Expr::cast(node) {
+ cb(WalkEvent::Leave(expr));
+ }
+ continue;
+ }
+ };
+ if let Some(let_stmt) = node.parent().and_then(ast::LetStmt::cast) {
+ if Some(node.clone()) != let_stmt.initializer().map(|it| it.syntax().clone()) {
+ // skipping potential const pat expressions in let statements
+ preorder.skip_subtree();
+ continue;
+ }
+ }
+
+ match ast::Stmt::cast(node.clone()) {
+ // Don't skip subtree since we want to process the expression child next
+ Some(ast::Stmt::ExprStmt(_)) | Some(ast::Stmt::LetStmt(_)) => (),
+ // skip inner items which might have their own expressions
+ Some(ast::Stmt::Item(_)) => preorder.skip_subtree(),
+ None => {
+ // skip const args, those expressions are a different context
+ if ast::GenericArg::can_cast(node.kind()) {
+ preorder.skip_subtree();
+ } else if let Some(expr) = ast::Expr::cast(node) {
+ let is_different_context = match &expr {
+ ast::Expr::BlockExpr(block_expr) => {
+ matches!(
+ block_expr.modifier(),
+ Some(
+ ast::BlockModifier::Async(_)
+ | ast::BlockModifier::Try(_)
+ | ast::BlockModifier::Const(_)
+ )
+ )
+ }
+ ast::Expr::ClosureExpr(_) => true,
+ _ => false,
+ } && expr.syntax() != start.syntax();
+ let skip = cb(WalkEvent::Enter(expr));
+ if skip || is_different_context {
+ preorder.skip_subtree();
+ }
+ }
+ }
+ }
+ }
+}
+
+/// Preorder walk all the expression's child patterns.
+pub fn walk_patterns_in_expr(start: &ast::Expr, cb: &mut dyn FnMut(ast::Pat)) {
+ let mut preorder = start.syntax().preorder();
+ while let Some(event) = preorder.next() {
+ let node = match event {
+ WalkEvent::Enter(node) => node,
+ WalkEvent::Leave(_) => continue,
+ };
+ match ast::Stmt::cast(node.clone()) {
+ Some(ast::Stmt::LetStmt(l)) => {
+ if let Some(pat) = l.pat() {
+ walk_pat(&pat, cb);
+ }
+ if let Some(expr) = l.initializer() {
+ walk_patterns_in_expr(&expr, cb);
+ }
+ preorder.skip_subtree();
+ }
+ // Don't skip subtree since we want to process the expression child next
+ Some(ast::Stmt::ExprStmt(_)) => (),
+ // skip inner items which might have their own patterns
+ Some(ast::Stmt::Item(_)) => preorder.skip_subtree(),
+ None => {
+ // skip const args, those are a different context
+ if ast::GenericArg::can_cast(node.kind()) {
+ preorder.skip_subtree();
+ } else if let Some(expr) = ast::Expr::cast(node.clone()) {
+ let is_different_context = match &expr {
+ ast::Expr::BlockExpr(block_expr) => {
+ matches!(
+ block_expr.modifier(),
+ Some(
+ ast::BlockModifier::Async(_)
+ | ast::BlockModifier::Try(_)
+ | ast::BlockModifier::Const(_)
+ )
+ )
+ }
+ ast::Expr::ClosureExpr(_) => true,
+ _ => false,
+ } && expr.syntax() != start.syntax();
+ if is_different_context {
+ preorder.skip_subtree();
+ }
+ } else if let Some(pat) = ast::Pat::cast(node) {
+ preorder.skip_subtree();
+ walk_pat(&pat, cb);
+ }
+ }
+ }
+ }
+}
+
+/// Preorder walk all the pattern's sub patterns.
+pub fn walk_pat(pat: &ast::Pat, cb: &mut dyn FnMut(ast::Pat)) {
+ let mut preorder = pat.syntax().preorder();
+ while let Some(event) = preorder.next() {
+ let node = match event {
+ WalkEvent::Enter(node) => node,
+ WalkEvent::Leave(_) => continue,
+ };
+ let kind = node.kind();
+ match ast::Pat::cast(node) {
+ Some(pat @ ast::Pat::ConstBlockPat(_)) => {
+ preorder.skip_subtree();
+ cb(pat);
+ }
+ Some(pat) => {
+ cb(pat);
+ }
+ // skip const args
+ None if ast::GenericArg::can_cast(kind) => {
+ preorder.skip_subtree();
+ }
+ None => (),
+ }
+ }
+}
+
+/// Preorder walk all the type's sub types.
+pub fn walk_ty(ty: &ast::Type, cb: &mut dyn FnMut(ast::Type)) {
+ let mut preorder = ty.syntax().preorder();
+ while let Some(event) = preorder.next() {
+ let node = match event {
+ WalkEvent::Enter(node) => node,
+ WalkEvent::Leave(_) => continue,
+ };
+ let kind = node.kind();
+ match ast::Type::cast(node) {
+ Some(ty @ ast::Type::MacroType(_)) => {
+ preorder.skip_subtree();
+ cb(ty)
+ }
+ Some(ty) => {
+ cb(ty);
+ }
+ // skip const args
+ None if ast::ConstArg::can_cast(kind) => {
+ preorder.skip_subtree();
+ }
+ None => (),
+ }
+ }
+}
+
+pub fn vis_eq(this: &ast::Visibility, other: &ast::Visibility) -> bool {
+ match (this.kind(), other.kind()) {
+ (VisibilityKind::In(this), VisibilityKind::In(other)) => {
+ stdx::iter_eq_by(this.segments(), other.segments(), |lhs, rhs| {
+ lhs.kind().zip(rhs.kind()).map_or(false, |it| match it {
+ (PathSegmentKind::CrateKw, PathSegmentKind::CrateKw)
+ | (PathSegmentKind::SelfKw, PathSegmentKind::SelfKw)
+ | (PathSegmentKind::SuperKw, PathSegmentKind::SuperKw) => true,
+ (PathSegmentKind::Name(lhs), PathSegmentKind::Name(rhs)) => {
+ lhs.text() == rhs.text()
+ }
+ _ => false,
+ })
+ })
+ }
+ (VisibilityKind::PubSelf, VisibilityKind::PubSelf)
+ | (VisibilityKind::PubSuper, VisibilityKind::PubSuper)
+ | (VisibilityKind::PubCrate, VisibilityKind::PubCrate)
+ | (VisibilityKind::Pub, VisibilityKind::Pub) => true,
+ _ => false,
+ }
+}
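+
+// For illustration: `pub(crate)` compares equal to `pub(crate)`, and `pub(in a::b)`
+// to `pub(in a::b)` segment by segment, while `pub` and `pub(crate)` compare unequal.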
+
+/// Returns the `let` only if there is exactly one (that is, `let pat = expr`
+/// or `((let pat = expr))`, but not `let pat = expr && expr` or `non_let_expr`).
+pub fn single_let(expr: ast::Expr) -> Option<ast::LetExpr> {
+ match expr {
+ ast::Expr::ParenExpr(expr) => expr.expr().and_then(single_let),
+ ast::Expr::LetExpr(expr) => Some(expr),
+ _ => None,
+ }
+}
+
+pub fn is_pattern_cond(expr: ast::Expr) -> bool {
+ match expr {
+ ast::Expr::BinExpr(expr)
+ if expr.op_kind() == Some(ast::BinaryOp::LogicOp(ast::LogicOp::And)) =>
+ {
+ expr.lhs()
+ .map(is_pattern_cond)
+ .or_else(|| expr.rhs().map(is_pattern_cond))
+ .unwrap_or(false)
+ }
+ ast::Expr::ParenExpr(expr) => expr.expr().map_or(false, is_pattern_cond),
+ ast::Expr::LetExpr(_) => true,
+ _ => false,
+ }
+}
+
+/// Calls `cb` on each expression inside `expr` that is at "tail position".
+/// Does not walk into `break` or `return` expressions.
+/// Note that modifying the tree while iterating over it causes undefined iteration behaviour and might
+/// result in an out-of-bounds panic.
+pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
+ match expr {
+ ast::Expr::BlockExpr(b) => {
+ match b.modifier() {
+ Some(
+ ast::BlockModifier::Async(_)
+ | ast::BlockModifier::Try(_)
+ | ast::BlockModifier::Const(_),
+ ) => return cb(expr),
+
+ Some(ast::BlockModifier::Label(label)) => {
+ for_each_break_expr(Some(label), b.stmt_list(), &mut |b| {
+ cb(&ast::Expr::BreakExpr(b))
+ });
+ }
+ Some(ast::BlockModifier::Unsafe(_)) => (),
+ None => (),
+ }
+ if let Some(stmt_list) = b.stmt_list() {
+ if let Some(e) = stmt_list.tail_expr() {
+ for_each_tail_expr(&e, cb);
+ }
+ }
+ }
+ ast::Expr::IfExpr(if_) => {
+ let mut if_ = if_.clone();
+ loop {
+ if let Some(block) = if_.then_branch() {
+ for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
+ }
+ match if_.else_branch() {
+ Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
+ Some(ast::ElseBranch::Block(block)) => {
+ for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
+ break;
+ }
+ None => break,
+ }
+ }
+ }
+ ast::Expr::LoopExpr(l) => {
+ for_each_break_expr(l.label(), l.loop_body().and_then(|it| it.stmt_list()), &mut |b| {
+ cb(&ast::Expr::BreakExpr(b))
+ })
+ }
+ ast::Expr::MatchExpr(m) => {
+ if let Some(arms) = m.match_arm_list() {
+ arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
+ }
+ }
+ ast::Expr::ArrayExpr(_)
+ | ast::Expr::AwaitExpr(_)
+ | ast::Expr::BinExpr(_)
+ | ast::Expr::BoxExpr(_)
+ | ast::Expr::BreakExpr(_)
+ | ast::Expr::CallExpr(_)
+ | ast::Expr::CastExpr(_)
+ | ast::Expr::ClosureExpr(_)
+ | ast::Expr::ContinueExpr(_)
+ | ast::Expr::FieldExpr(_)
+ | ast::Expr::ForExpr(_)
+ | ast::Expr::IndexExpr(_)
+ | ast::Expr::Literal(_)
+ | ast::Expr::MacroExpr(_)
+ | ast::Expr::MacroStmts(_)
+ | ast::Expr::MethodCallExpr(_)
+ | ast::Expr::ParenExpr(_)
+ | ast::Expr::PathExpr(_)
+ | ast::Expr::PrefixExpr(_)
+ | ast::Expr::RangeExpr(_)
+ | ast::Expr::RecordExpr(_)
+ | ast::Expr::RefExpr(_)
+ | ast::Expr::ReturnExpr(_)
+ | ast::Expr::TryExpr(_)
+ | ast::Expr::TupleExpr(_)
+ | ast::Expr::WhileExpr(_)
+ | ast::Expr::LetExpr(_)
+ | ast::Expr::UnderscoreExpr(_)
+ | ast::Expr::YieldExpr(_) => cb(expr),
+ }
+}
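+
+// A hedged sketch of the traversal: for `if c { 1 } else { 2 }` the callback sees the
+// literals `1` and `2`; for `loop { break 3; }` it sees the `break 3` expression; a
+// `return` expression is reported as the tail itself rather than descended into.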
+
+pub fn for_each_break_and_continue_expr(
+ label: Option<ast::Label>,
+ body: Option<ast::StmtList>,
+ cb: &mut dyn FnMut(ast::Expr),
+) {
+ let label = label.and_then(|lbl| lbl.lifetime());
+ if let Some(b) = body {
+ let tree_depth_iterator = TreeWithDepthIterator::new(b);
+ for (expr, depth) in tree_depth_iterator {
+ match expr {
+ ast::Expr::BreakExpr(b)
+ if (depth == 0 && b.lifetime().is_none())
+ || eq_label_lt(&label, &b.lifetime()) =>
+ {
+ cb(ast::Expr::BreakExpr(b));
+ }
+ ast::Expr::ContinueExpr(c)
+ if (depth == 0 && c.lifetime().is_none())
+ || eq_label_lt(&label, &c.lifetime()) =>
+ {
+ cb(ast::Expr::ContinueExpr(c));
+ }
+ _ => (),
+ }
+ }
+ }
+}
+
+fn for_each_break_expr(
+ label: Option<ast::Label>,
+ body: Option<ast::StmtList>,
+ cb: &mut dyn FnMut(ast::BreakExpr),
+) {
+ let label = label.and_then(|lbl| lbl.lifetime());
+ if let Some(b) = body {
+ let tree_depth_iterator = TreeWithDepthIterator::new(b);
+ for (expr, depth) in tree_depth_iterator {
+ match expr {
+ ast::Expr::BreakExpr(b)
+ if (depth == 0 && b.lifetime().is_none())
+ || eq_label_lt(&label, &b.lifetime()) =>
+ {
+ cb(b);
+ }
+ _ => (),
+ }
+ }
+ }
+}
+
+fn eq_label_lt(lt1: &Option<ast::Lifetime>, lt2: &Option<ast::Lifetime>) -> bool {
+ lt1.as_ref().zip(lt2.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
+}
+
+struct TreeWithDepthIterator {
+ preorder: Preorder<RustLanguage>,
+ depth: u32,
+}
+
+impl TreeWithDepthIterator {
+ fn new(body: ast::StmtList) -> Self {
+ let preorder = body.syntax().preorder();
+ Self { preorder, depth: 0 }
+ }
+}
+
+impl Iterator for TreeWithDepthIterator {
+ type Item = (ast::Expr, u32);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ while let Some(event) = self.preorder.find_map(|ev| match ev {
+ WalkEvent::Enter(it) => ast::Expr::cast(it).map(WalkEvent::Enter),
+ WalkEvent::Leave(it) => ast::Expr::cast(it).map(WalkEvent::Leave),
+ }) {
+ match event {
+ WalkEvent::Enter(
+ ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_),
+ ) => {
+ self.depth += 1;
+ }
+ WalkEvent::Leave(
+ ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_),
+ ) => {
+ self.depth -= 1;
+ }
+ WalkEvent::Enter(ast::Expr::BlockExpr(e)) if e.label().is_some() => {
+ self.depth += 1;
+ }
+ WalkEvent::Leave(ast::Expr::BlockExpr(e)) if e.label().is_some() => {
+ self.depth -= 1;
+ }
+ WalkEvent::Enter(expr) => return Some((expr, self.depth)),
+ _ => (),
+ }
+ }
+ None
+ }
+}
+
+/// Parses the input token tree as comma separated plain paths.
+pub fn parse_tt_as_comma_sep_paths(input: ast::TokenTree) -> Option<Vec<ast::Path>> {
+ let r_paren = input.r_paren_token();
+ let tokens =
+ input.syntax().children_with_tokens().skip(1).map_while(|it| match it.into_token() {
+ // seeing a keyword means the attribute is unclosed so stop parsing here
+ Some(tok) if tok.kind().is_keyword() => None,
+ // don't include the right token tree parenthesis if it exists
+ tok @ Some(_) if tok == r_paren => None,
+ // the only nodes we can encounter here are other TokenTrees, which are unexpected in this parse
+ None => None,
+ Some(tok) => Some(tok),
+ });
+ let input_expressions = tokens.group_by(|tok| tok.kind() == T![,]);
+ let paths = input_expressions
+ .into_iter()
+ .filter_map(|(is_sep, group)| (!is_sep).then(|| group))
+ .filter_map(|mut tokens| {
+ syntax::hacks::parse_expr_from_str(&tokens.join("")).and_then(|expr| match expr {
+ ast::Expr::PathExpr(it) => it.path(),
+ _ => None,
+ })
+ })
+ .collect();
+ Some(paths)
+}
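+
+// Hypothetical usage (assuming `tt` is the `ast::TokenTree` for `(Clone, Copy)`):
+//
+//     let paths = parse_tt_as_comma_sep_paths(tt)?;
+//     assert_eq!(paths.len(), 2); // `Clone` and `Copy`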
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/test_data/test_symbol_index_collection.txt b/src/tools/rust-analyzer/crates/ide-db/src/test_data/test_symbol_index_collection.txt
new file mode 100644
index 000000000..2f531ca0c
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/test_data/test_symbol_index_collection.txt
@@ -0,0 +1,533 @@
+[
+ (
+ Module {
+ id: ModuleId {
+ krate: CrateId(
+ 0,
+ ),
+ block: None,
+ local_id: Idx::<ModuleData>(0),
+ },
+ },
+ [
+ FileSymbol {
+ name: "Alias",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: TYPE_ALIAS,
+ range: 397..417,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 402..407,
+ },
+ },
+ kind: TypeAlias,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "CONST",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: CONST,
+ range: 340..361,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 346..351,
+ },
+ },
+ kind: Const,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "CONST_WITH_INNER",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: CONST,
+ range: 520..592,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 526..542,
+ },
+ },
+ kind: Const,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "Enum",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: ENUM,
+ range: 185..207,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 190..194,
+ },
+ },
+ kind: Enum,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "Macro",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: MACRO_DEF,
+ range: 153..168,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 159..164,
+ },
+ },
+ kind: Macro,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "STATIC",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STATIC,
+ range: 362..396,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 369..375,
+ },
+ },
+ kind: Static,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "Struct",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 170..184,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 177..183,
+ },
+ },
+ kind: Struct,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "StructFromMacro",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ MacroFile(
+ MacroFile {
+ macro_call_id: MacroCallId(
+ 0,
+ ),
+ },
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 0..22,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 6..21,
+ },
+ },
+ kind: Struct,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "StructInFn",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 318..336,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 325..335,
+ },
+ },
+ kind: Struct,
+ container_name: Some(
+ "main",
+ ),
+ },
+ FileSymbol {
+ name: "StructInNamedConst",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 555..581,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 562..580,
+ },
+ },
+ kind: Struct,
+ container_name: Some(
+ "CONST_WITH_INNER",
+ ),
+ },
+ FileSymbol {
+ name: "StructInUnnamedConst",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 479..507,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 486..506,
+ },
+ },
+ kind: Struct,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "Trait",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: TRAIT,
+ range: 261..300,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 267..272,
+ },
+ },
+ kind: Trait,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "Union",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: UNION,
+ range: 208..222,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 214..219,
+ },
+ },
+ kind: Union,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "a_mod",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: MODULE,
+ range: 419..457,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 423..428,
+ },
+ },
+ kind: Module,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "b_mod",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: MODULE,
+ range: 594..604,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 598..603,
+ },
+ },
+ kind: Module,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "define_struct",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: MACRO_RULES,
+ range: 51..131,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 64..77,
+ },
+ },
+ kind: Macro,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "impl_fn",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: FN,
+ range: 242..257,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 245..252,
+ },
+ },
+ kind: Function,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "macro_rules_macro",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: MACRO_RULES,
+ range: 1..48,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 14..31,
+ },
+ },
+ kind: Macro,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "main",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: FN,
+ range: 302..338,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 305..309,
+ },
+ },
+ kind: Function,
+ container_name: None,
+ },
+ FileSymbol {
+ name: "trait_fn",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: FN,
+ range: 279..298,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 282..290,
+ },
+ },
+ kind: Function,
+ container_name: Some(
+ "Trait",
+ ),
+ },
+ ],
+ ),
+ (
+ Module {
+ id: ModuleId {
+ krate: CrateId(
+ 0,
+ ),
+ block: None,
+ local_id: Idx::<ModuleData>(1),
+ },
+ },
+ [
+ FileSymbol {
+ name: "StructInModA",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 0,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 435..455,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 442..454,
+ },
+ },
+ kind: Struct,
+ container_name: None,
+ },
+ ],
+ ),
+ (
+ Module {
+ id: ModuleId {
+ krate: CrateId(
+ 0,
+ ),
+ block: None,
+ local_id: Idx::<ModuleData>(2),
+ },
+ },
+ [
+ FileSymbol {
+ name: "StructInModB",
+ loc: DeclarationLocation {
+ hir_file_id: HirFileId(
+ FileId(
+ FileId(
+ 1,
+ ),
+ ),
+ ),
+ ptr: SyntaxNodePtr {
+ kind: STRUCT,
+ range: 0..20,
+ },
+ name_ptr: SyntaxNodePtr {
+ kind: NAME,
+ range: 7..19,
+ },
+ },
+ kind: Struct,
+ container_name: None,
+ },
+ ],
+ ),
+]
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/tests/sourcegen_lints.rs b/src/tools/rust-analyzer/crates/ide-db/src/tests/sourcegen_lints.rs
new file mode 100644
index 000000000..5042f6d81
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/tests/sourcegen_lints.rs
@@ -0,0 +1,284 @@
+//! Generates the descriptor structures for unstable features from the Unstable Book.
+use std::{borrow::Cow, fs, path::Path};
+
+use itertools::Itertools;
+use stdx::format_to;
+use test_utils::project_root;
+use xshell::{cmd, Shell};
+
+/// This clones the rustc repo, and so is not worth keeping up-to-date. We update
+/// it manually by un-ignoring the test from time to time.
+#[test]
+#[ignore]
+fn sourcegen_lint_completions() {
+ let sh = &Shell::new().unwrap();
+
+ let rust_repo = project_root().join("./target/rust");
+ if !rust_repo.exists() {
+ cmd!(sh, "git clone --depth=1 https://github.com/rust-lang/rust {rust_repo}")
+ .run()
+ .unwrap();
+ }
+
+ let mut contents = String::from(
+ r"
+#[derive(Clone)]
+pub struct Lint {
+ pub label: &'static str,
+ pub description: &'static str,
+}
+pub struct LintGroup {
+ pub lint: Lint,
+ pub children: &'static [&'static str],
+}
+",
+ );
+
+ generate_lint_descriptor(sh, &mut contents);
+ contents.push('\n');
+
+ generate_feature_descriptor(&mut contents, &rust_repo.join("src/doc/unstable-book/src"));
+ contents.push('\n');
+
+ let lints_json = project_root().join("./target/clippy_lints.json");
+ cmd!(
+ sh,
+ "curl https://rust-lang.github.io/rust-clippy/master/lints.json --output {lints_json}"
+ )
+ .run()
+ .unwrap();
+ generate_descriptor_clippy(&mut contents, &lints_json);
+
+ let contents = sourcegen::add_preamble("sourcegen_lints", sourcegen::reformat(contents));
+
+ let destination = project_root().join("crates/ide_db/src/generated/lints.rs");
+ sourcegen::ensure_file_contents(destination.as_path(), &contents);
+}
+
+fn generate_lint_descriptor(sh: &Shell, buf: &mut String) {
+ // FIXME: rustdoc currently requires an input file for -Whelp cc https://github.com/rust-lang/rust/pull/88831
+ let file = project_root().join(file!());
+ let stdout = cmd!(sh, "rustdoc -W help {file}").read().unwrap();
+ let start_lints = stdout.find("---- ------- -------").unwrap();
+ let start_lint_groups = stdout.find("---- ---------").unwrap();
+ let start_lints_rustdoc =
+ stdout.find("Lint checks provided by plugins loaded by this crate:").unwrap();
+ let start_lint_groups_rustdoc =
+ stdout.find("Lint groups provided by plugins loaded by this crate:").unwrap();
+
+ buf.push_str(r#"pub const DEFAULT_LINTS: &[Lint] = &["#);
+ buf.push('\n');
+
+ let lints = stdout[start_lints..].lines().skip(1).take_while(|l| !l.is_empty()).map(|line| {
+ let (name, rest) = line.trim().split_once(char::is_whitespace).unwrap();
+ let (_default_level, description) = rest.trim().split_once(char::is_whitespace).unwrap();
+ (name.trim(), Cow::Borrowed(description.trim()), vec![])
+ });
+ let lint_groups =
+ stdout[start_lint_groups..].lines().skip(1).take_while(|l| !l.is_empty()).map(|line| {
+ let (name, lints) = line.trim().split_once(char::is_whitespace).unwrap();
+ (
+ name.trim(),
+ format!("lint group for: {}", lints.trim()).into(),
+ lints
+ .split_ascii_whitespace()
+ .map(|s| s.trim().trim_matches(',').replace('-', "_"))
+ .collect(),
+ )
+ });
+
+ let lints = lints
+ .chain(lint_groups)
+ .sorted_by(|(ident, ..), (ident2, ..)| ident.cmp(ident2))
+ .collect::<Vec<_>>();
+ for (name, description, ..) in &lints {
+ push_lint_completion(buf, &name.replace('-', "_"), description);
+ }
+ buf.push_str("];\n");
+ buf.push_str(r#"pub const DEFAULT_LINT_GROUPS: &[LintGroup] = &["#);
+ for (name, description, children) in &lints {
+ if !children.is_empty() {
+ // HACK: the `warnings` group is emitted with a general description, not with its members
+ if name == &"warnings" {
+ push_lint_group(buf, name, description, &Vec::new());
+ continue;
+ }
+ push_lint_group(buf, &name.replace('-', "_"), description, children);
+ }
+ }
+ buf.push('\n');
+ buf.push_str("];\n");
+
+ // rustdoc
+
+ buf.push('\n');
+ buf.push_str(r#"pub const RUSTDOC_LINTS: &[Lint] = &["#);
+ buf.push('\n');
+
+ let lints_rustdoc =
+ stdout[start_lints_rustdoc..].lines().skip(2).take_while(|l| !l.is_empty()).map(|line| {
+ let (name, rest) = line.trim().split_once(char::is_whitespace).unwrap();
+ let (_default_level, description) =
+ rest.trim().split_once(char::is_whitespace).unwrap();
+ (name.trim(), Cow::Borrowed(description.trim()), vec![])
+ });
+ let lint_groups_rustdoc =
+ stdout[start_lint_groups_rustdoc..].lines().skip(2).take_while(|l| !l.is_empty()).map(
+ |line| {
+ let (name, lints) = line.trim().split_once(char::is_whitespace).unwrap();
+ (
+ name.trim(),
+ format!("lint group for: {}", lints.trim()).into(),
+ lints
+ .split_ascii_whitespace()
+ .map(|s| s.trim().trim_matches(',').replace('-', "_"))
+ .collect(),
+ )
+ },
+ );
+
+ let lints_rustdoc = lints_rustdoc
+ .chain(lint_groups_rustdoc)
+ .sorted_by(|(ident, ..), (ident2, ..)| ident.cmp(ident2))
+ .collect::<Vec<_>>();
+
+ for (name, description, ..) in &lints_rustdoc {
+ push_lint_completion(buf, &name.replace('-', "_"), description)
+ }
+ buf.push_str("];\n");
+
+ buf.push_str(r#"pub const RUSTDOC_LINT_GROUPS: &[LintGroup] = &["#);
+ for (name, description, children) in &lints_rustdoc {
+ if !children.is_empty() {
+ push_lint_group(buf, &name.replace('-', "_"), description, children);
+ }
+ }
+ buf.push('\n');
+ buf.push_str("];\n");
+}
+
+fn generate_feature_descriptor(buf: &mut String, src_dir: &Path) {
+ let mut features = ["language-features", "library-features"]
+ .into_iter()
+ .flat_map(|it| sourcegen::list_files(&src_dir.join(it)))
+ .filter(|path| {
+ // Get all `.md` files
+ path.extension().unwrap_or_default().to_str().unwrap_or_default() == "md"
+ })
+ .map(|path| {
+ let feature_ident = path.file_stem().unwrap().to_str().unwrap().replace('-', "_");
+ let doc = fs::read_to_string(path).unwrap();
+ (feature_ident, doc)
+ })
+ .collect::<Vec<_>>();
+ features.sort_by(|(feature_ident, _), (feature_ident2, _)| feature_ident.cmp(feature_ident2));
+
+ buf.push_str(r#"pub const FEATURES: &[Lint] = &["#);
+ for (feature_ident, doc) in features.into_iter() {
+ push_lint_completion(buf, &feature_ident, &doc)
+ }
+ buf.push('\n');
+ buf.push_str("];\n");
+}
+
+#[derive(Default)]
+struct ClippyLint {
+ help: String,
+ id: String,
+}
+
+fn unescape(s: &str) -> String {
+ s.replace(r#"\""#, "").replace(r#"\n"#, "\n").replace(r#"\r"#, "")
+}
+
+fn generate_descriptor_clippy(buf: &mut String, path: &Path) {
+ let file_content = std::fs::read_to_string(path).unwrap();
+ let mut clippy_lints: Vec<ClippyLint> = Vec::new();
+ let mut clippy_groups: std::collections::BTreeMap<String, Vec<String>> = Default::default();
+
+ for line in file_content.lines().map(|line| line.trim()) {
+ if let Some(line) = line.strip_prefix(r#""id": ""#) {
+ let clippy_lint = ClippyLint {
+ id: line.strip_suffix(r#"","#).expect("should be suffixed by comma").into(),
+ help: String::new(),
+ };
+ clippy_lints.push(clippy_lint)
+ } else if let Some(line) = line.strip_prefix(r#""group": ""#) {
+ if let Some(group) = line.strip_suffix("\",") {
+ clippy_groups
+ .entry(group.to_owned())
+ .or_default()
+ .push(clippy_lints.last().unwrap().id.clone());
+ }
+ } else if let Some(line) = line.strip_prefix(r#""docs": ""#) {
+ let prefix_to_strip = r#" ### What it does"#;
+ let line = match line.strip_prefix(prefix_to_strip) {
+ Some(line) => line,
+ None => {
+ eprintln!("unexpected clippy prefix for {}", clippy_lints.last().unwrap().id);
+ continue;
+ }
+ };
+ // Only take the description; anything more would embed a lot of additional data into the exe,
+ // which seems unnecessary.
+ let up_to = line.find(r#"###"#).expect("no second section found?");
+ let line = &line[..up_to];
+
+ let clippy_lint = clippy_lints.last_mut().expect("clippy lint must already exist");
+ clippy_lint.help = unescape(line).trim().to_string();
+ }
+ }
+ clippy_lints.sort_by(|lint, lint2| lint.id.cmp(&lint2.id));
+
+ buf.push_str(r#"pub const CLIPPY_LINTS: &[Lint] = &["#);
+ buf.push('\n');
+ for clippy_lint in clippy_lints.into_iter() {
+ let lint_ident = format!("clippy::{}", clippy_lint.id);
+ let doc = clippy_lint.help;
+ push_lint_completion(buf, &lint_ident, &doc);
+ }
+ buf.push_str("];\n");
+
+ buf.push_str(r#"pub const CLIPPY_LINT_GROUPS: &[LintGroup] = &["#);
+ for (id, children) in clippy_groups {
+ let children = children.iter().map(|id| format!("clippy::{}", id)).collect::<Vec<_>>();
+ if !children.is_empty() {
+ let lint_ident = format!("clippy::{}", id);
+ let description = format!("lint group for: {}", children.iter().join(", "));
+ push_lint_group(buf, &lint_ident, &description, &children);
+ }
+ }
+ buf.push('\n');
+ buf.push_str("];\n");
+}
+
+fn push_lint_completion(buf: &mut String, label: &str, description: &str) {
+ format_to!(
+ buf,
+ r###" Lint {{
+ label: "{}",
+ description: r##"{}"##,
+ }},"###,
+ label,
+ description,
+ );
+}
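+
+// Illustrative output (for a hypothetical label "dead_code"): the generated source
+// contains an entry roughly of the shape
+//
+//     Lint {
+//         label: "dead_code",
+//         description: r##"..."##,
+//     },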
+
+fn push_lint_group(buf: &mut String, label: &str, description: &str, children: &[String]) {
+ buf.push_str(
+ r###" LintGroup {
+ lint:
+ "###,
+ );
+
+ push_lint_completion(buf, label, description);
+
+ let children = format!("&[{}]", children.iter().map(|it| format!("\"{}\"", it)).join(", "));
+ format_to!(
+ buf,
+ r###"
+ children: {},
+ }},"###,
+ children,
+ );
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/traits.rs b/src/tools/rust-analyzer/crates/ide-db/src/traits.rs
new file mode 100644
index 000000000..6a7ea7c19
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/traits.rs
@@ -0,0 +1,273 @@
+//! Functionality for obtaining data related to traits from the DB.
+
+use crate::{defs::Definition, RootDatabase};
+use hir::{db::HirDatabase, AsAssocItem, Semantics};
+use rustc_hash::FxHashSet;
+use syntax::{ast, AstNode};
+
+/// Given the `impl` block, attempts to find the trait this `impl` corresponds to.
+pub fn resolve_target_trait(
+ sema: &Semantics<'_, RootDatabase>,
+ impl_def: &ast::Impl,
+) -> Option<hir::Trait> {
+ let ast_path =
+ impl_def.trait_().map(|it| it.syntax().clone()).and_then(ast::PathType::cast)?.path()?;
+
+ match sema.resolve_path(&ast_path) {
+ Some(hir::PathResolution::Def(hir::ModuleDef::Trait(def))) => Some(def),
+ _ => None,
+ }
+}
+
+/// Given the `impl` block, returns the list of associated items (e.g. functions or types) that are
+/// missing in this `impl` block.
+pub fn get_missing_assoc_items(
+ sema: &Semantics<'_, RootDatabase>,
+ impl_def: &ast::Impl,
+) -> Vec<hir::AssocItem> {
+ let imp = match sema.to_def(impl_def) {
+ Some(it) => it,
+ None => return vec![],
+ };
+
+ // Names must be unique between constants and functions. However, type aliases
+ // may share the same name as a function or constant.
+ let mut impl_fns_consts = FxHashSet::default();
+ let mut impl_type = FxHashSet::default();
+
+ for item in imp.items(sema.db) {
+ match item {
+ hir::AssocItem::Function(it) => {
+ impl_fns_consts.insert(it.name(sema.db).to_string());
+ }
+ hir::AssocItem::Const(it) => {
+ if let Some(name) = it.name(sema.db) {
+ impl_fns_consts.insert(name.to_string());
+ }
+ }
+ hir::AssocItem::TypeAlias(it) => {
+ impl_type.insert(it.name(sema.db).to_string());
+ }
+ }
+ }
+
+ resolve_target_trait(sema, impl_def).map_or(vec![], |target_trait| {
+ target_trait
+ .items(sema.db)
+ .into_iter()
+ .filter(|i| match i {
+ hir::AssocItem::Function(f) => {
+ !impl_fns_consts.contains(&f.name(sema.db).to_string())
+ }
+ hir::AssocItem::TypeAlias(t) => !impl_type.contains(&t.name(sema.db).to_string()),
+ hir::AssocItem::Const(c) => c
+ .name(sema.db)
+ .map(|n| !impl_fns_consts.contains(&n.to_string()))
+ .unwrap_or_default(),
+ })
+ .collect()
+ })
+}
+
+/// Converts associated trait impl items to their trait definition counterpart
+pub(crate) fn convert_to_def_in_trait(db: &dyn HirDatabase, def: Definition) -> Definition {
+ (|| {
+ let assoc = def.as_assoc_item(db)?;
+ let trait_ = assoc.containing_trait_impl(db)?;
+ assoc_item_of_trait(db, assoc, trait_)
+ })()
+ .unwrap_or(def)
+}
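+
+// For illustration: given `trait Foo { fn baz(); }` and `impl Foo for Bar { fn baz() {} }`,
+// passing the `Definition` of the impl's `baz` returns the `Definition` of the trait's
+// `baz`; definitions that are not trait-impl assoc items are returned unchanged.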
+
+/// If this is a trait (impl) assoc item, returns the assoc item of the corresponding trait definition.
+pub(crate) fn as_trait_assoc_def(db: &dyn HirDatabase, def: Definition) -> Option<Definition> {
+ let assoc = def.as_assoc_item(db)?;
+ let trait_ = match assoc.container(db) {
+ hir::AssocItemContainer::Trait(_) => return Some(def),
+ hir::AssocItemContainer::Impl(i) => i.trait_(db),
+ }?;
+ assoc_item_of_trait(db, assoc, trait_)
+}
+
+fn assoc_item_of_trait(
+ db: &dyn HirDatabase,
+ assoc: hir::AssocItem,
+ trait_: hir::Trait,
+) -> Option<Definition> {
+ use hir::AssocItem::*;
+ let name = match assoc {
+ Function(it) => it.name(db),
+ Const(it) => it.name(db)?,
+ TypeAlias(it) => it.name(db),
+ };
+ let item = trait_.items(db).into_iter().find(|it| match (it, assoc) {
+ (Function(trait_func), Function(_)) => trait_func.name(db) == name,
+ (Const(trait_konst), Const(_)) => trait_konst.name(db).map_or(false, |it| it == name),
+ (TypeAlias(trait_type_alias), TypeAlias(_)) => trait_type_alias.name(db) == name,
+ _ => false,
+ })?;
+ Some(Definition::from(item))
+}
+
+#[cfg(test)]
+mod tests {
+ use base_db::{fixture::ChangeFixture, FilePosition};
+ use expect_test::{expect, Expect};
+ use hir::Semantics;
+ use syntax::ast::{self, AstNode};
+
+ use crate::RootDatabase;
+
+ /// Creates analysis from a multi-file fixture, returns positions marked with $0.
+ pub(crate) fn position(ra_fixture: &str) -> (RootDatabase, FilePosition) {
+ let change_fixture = ChangeFixture::parse(ra_fixture);
+ let mut database = RootDatabase::default();
+ database.apply_change(change_fixture.change);
+ let (file_id, range_or_offset) =
+ change_fixture.file_position.expect("expected a marker ($0)");
+ let offset = range_or_offset.expect_offset();
+ (database, FilePosition { file_id, offset })
+ }
+
+ fn check_trait(ra_fixture: &str, expect: Expect) {
+ let (db, position) = position(ra_fixture);
+ let sema = Semantics::new(&db);
+ let file = sema.parse(position.file_id);
+ let impl_block: ast::Impl =
+ sema.find_node_at_offset_with_descend(file.syntax(), position.offset).unwrap();
+ let trait_ = crate::traits::resolve_target_trait(&sema, &impl_block);
+ let actual = match trait_ {
+ Some(trait_) => trait_.name(&db).to_string(),
+ None => String::new(),
+ };
+ expect.assert_eq(&actual);
+ }
+
+ fn check_missing_assoc(ra_fixture: &str, expect: Expect) {
+ let (db, position) = position(ra_fixture);
+ let sema = Semantics::new(&db);
+ let file = sema.parse(position.file_id);
+ let impl_block: ast::Impl =
+ sema.find_node_at_offset_with_descend(file.syntax(), position.offset).unwrap();
+ let items = crate::traits::get_missing_assoc_items(&sema, &impl_block);
+ let actual = items
+ .into_iter()
+ .map(|item| item.name(&db).unwrap().to_string())
+ .collect::<Vec<_>>()
+ .join("\n");
+ expect.assert_eq(&actual);
+ }
+
+ #[test]
+ fn resolve_trait() {
+ check_trait(
+ r#"
+pub trait Foo {
+ fn bar();
+}
+impl Foo for u8 {
+ $0
+}
+ "#,
+ expect![["Foo"]],
+ );
+ check_trait(
+ r#"
+pub trait Foo {
+ fn bar();
+}
+impl Foo for u8 {
+ fn bar() {
+ fn baz() {
+ $0
+ }
+ baz();
+ }
+}
+ "#,
+ expect![["Foo"]],
+ );
+ check_trait(
+ r#"
+pub trait Foo {
+ fn bar();
+}
+pub struct Bar;
+impl Bar {
+ $0
+}
+ "#,
+ expect![[""]],
+ );
+ }
+
+ #[test]
+ fn missing_assoc_items() {
+ check_missing_assoc(
+ r#"
+pub trait Foo {
+ const FOO: u8;
+ fn bar();
+}
+impl Foo for u8 {
+ $0
+}"#,
+ expect![[r#"
+ FOO
+ bar"#]],
+ );
+
+ check_missing_assoc(
+ r#"
+pub trait Foo {
+ const FOO: u8;
+ fn bar();
+}
+impl Foo for u8 {
+ const FOO: u8 = 10;
+ $0
+}"#,
+ expect![[r#"
+ bar"#]],
+ );
+
+ check_missing_assoc(
+ r#"
+pub trait Foo {
+ const FOO: u8;
+ fn bar();
+}
+impl Foo for u8 {
+ const FOO: u8 = 10;
+ fn bar() {$0}
+}"#,
+ expect![[r#""#]],
+ );
+
+ check_missing_assoc(
+ r#"
+pub struct Foo;
+impl Foo {
+ fn bar() {$0}
+}"#,
+ expect![[r#""#]],
+ );
+
+ check_missing_assoc(
+ r#"
+trait Tr {
+ fn required();
+}
+macro_rules! m {
+ () => { fn required() {} };
+}
+impl Tr for () {
+ m!();
+ $0
+}
+
+ "#,
+ expect![[r#""#]],
+ );
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/ty_filter.rs b/src/tools/rust-analyzer/crates/ide-db/src/ty_filter.rs
new file mode 100644
index 000000000..46f47f258
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/ty_filter.rs
@@ -0,0 +1,86 @@
+//! This module contains structures for filtering the expected types.
+//! A typical use case for the structures in this module is a situation where you need to process
+//! only certain `Enum`s.
+
+use std::iter;
+
+use hir::Semantics;
+use syntax::ast::{self, make, Pat};
+
+use crate::RootDatabase;
+
+/// Enum types that implement the `std::ops::Try` trait.
+#[derive(Clone, Copy)]
+pub enum TryEnum {
+ Result,
+ Option,
+}
+
+impl TryEnum {
+ const ALL: [TryEnum; 2] = [TryEnum::Option, TryEnum::Result];
+
+ /// Returns `Some(..)` if the provided type is an enum that implements `std::ops::Try`.
+ pub fn from_ty(sema: &Semantics<'_, RootDatabase>, ty: &hir::Type) -> Option<TryEnum> {
+ let enum_ = match ty.as_adt() {
+ Some(hir::Adt::Enum(it)) => it,
+ _ => return None,
+ };
+ TryEnum::ALL.iter().find_map(|&var| {
+ if enum_.name(sema.db).to_smol_str() == var.type_name() {
+ return Some(var);
+ }
+ None
+ })
+ }
+
+ pub fn happy_case(self) -> &'static str {
+ match self {
+ TryEnum::Result => "Ok",
+ TryEnum::Option => "Some",
+ }
+ }
+
+ pub fn sad_pattern(self) -> ast::Pat {
+ match self {
+ TryEnum::Result => make::tuple_struct_pat(
+ make::ext::ident_path("Err"),
+ iter::once(make::wildcard_pat().into()),
+ )
+ .into(),
+ TryEnum::Option => make::ext::simple_ident_pat(make::name("None")).into(),
+ }
+ }
+
+ pub fn happy_pattern(self, pat: Pat) -> ast::Pat {
+ match self {
+ TryEnum::Result => {
+ make::tuple_struct_pat(make::ext::ident_path("Ok"), iter::once(pat)).into()
+ }
+ TryEnum::Option => {
+ make::tuple_struct_pat(make::ext::ident_path("Some"), iter::once(pat)).into()
+ }
+ }
+ }
+
+ pub fn happy_pattern_wildcard(self) -> ast::Pat {
+ match self {
+ TryEnum::Result => make::tuple_struct_pat(
+ make::ext::ident_path("Ok"),
+ iter::once(make::wildcard_pat().into()),
+ )
+ .into(),
+ TryEnum::Option => make::tuple_struct_pat(
+ make::ext::ident_path("Some"),
+ iter::once(make::wildcard_pat().into()),
+ )
+ .into(),
+ }
+ }
+
+ fn type_name(self) -> &'static str {
+ match self {
+ TryEnum::Result => "Result",
+ TryEnum::Option => "Option",
+ }
+ }
+}
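+
+// For illustration: `TryEnum::Result.sad_pattern()` renders as `Err(_)` and
+// `TryEnum::Option.sad_pattern()` as `None`; `happy_pattern(pat)` yields `Ok(pat)` /
+// `Some(pat)`, and `happy_pattern_wildcard()` yields `Ok(_)` / `Some(_)`.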
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/use_trivial_contructor.rs b/src/tools/rust-analyzer/crates/ide-db/src/use_trivial_contructor.rs
new file mode 100644
index 000000000..39431bed3
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/ide-db/src/use_trivial_contructor.rs
@@ -0,0 +1,34 @@
+//! Functionality for generating trivial constructors
+
+use hir::StructKind;
+use syntax::ast;
+
+/// Given a type, returns the trivial constructor (if one exists).
+pub fn use_trivial_constructor(
+ db: &crate::RootDatabase,
+ path: ast::Path,
+ ty: &hir::Type,
+) -> Option<ast::Expr> {
+ match ty.as_adt() {
+ Some(hir::Adt::Enum(x)) => {
+ if let &[variant] = &*x.variants(db) {
+ if variant.kind(db) == hir::StructKind::Unit {
+ let path = ast::make::path_qualified(
+ path,
+ syntax::ast::make::path_segment(ast::make::name_ref(
+ &variant.name(db).to_smol_str(),
+ )),
+ );
+
+ return Some(syntax::ast::make::expr_path(path));
+ }
+ }
+ }
+ Some(hir::Adt::Struct(x)) if x.kind(db) == StructKind::Unit => {
+ return Some(syntax::ast::make::expr_path(path));
+ }
+ _ => {}
+ }
+
+ None
+}
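+
+// A hedged sketch of the intended result (names are illustrative): for
+// `enum Foo { Bar }` this returns the expression `Foo::Bar`; for a unit struct
+// `struct Unit;` it returns the path expression `Unit` itself; any other shape
+// yields `None`.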