Diffstat (limited to 'src/tools/rust-analyzer/crates/ide-db/src')
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs |  80
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/defs.rs             |   2
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/line_index.rs       | 199
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs   |  24
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/rename.rs           |  76
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/search.rs           | 183
-rw-r--r--  src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs     |   8
7 files changed, 353 insertions, 219 deletions
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs b/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs
index 7109c6fd1..2b6b60547 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/active_parameter.rs
@@ -2,9 +2,10 @@
use either::Either;
use hir::{Semantics, Type};
+use parser::T;
use syntax::{
ast::{self, HasArgList, HasName},
- AstNode, SyntaxToken,
+ match_ast, AstNode, NodeOrToken, SyntaxToken,
};
use crate::RootDatabase;
@@ -58,7 +59,7 @@ pub fn callable_for_node(
calling_node: &ast::CallableExpr,
token: &SyntaxToken,
) -> Option<(hir::Callable, Option<usize>)> {
- let callable = match &calling_node {
+ let callable = match calling_node {
ast::CallableExpr::Call(call) => {
let expr = call.expr()?;
sema.type_of_expr(&expr)?.adjusted().as_callable(sema.db)
@@ -66,13 +67,78 @@ pub fn callable_for_node(
ast::CallableExpr::MethodCall(call) => sema.resolve_method_call_as_callable(call),
}?;
let active_param = if let Some(arg_list) = calling_node.arg_list() {
- let param = arg_list
- .args()
- .take_while(|arg| arg.syntax().text_range().end() <= token.text_range().start())
- .count();
- Some(param)
+ Some(
+ arg_list
+ .syntax()
+ .children_with_tokens()
+ .filter_map(NodeOrToken::into_token)
+ .filter(|t| t.kind() == T![,])
+ .take_while(|t| t.text_range().start() <= token.text_range().start())
+ .count(),
+ )
} else {
None
};
Some((callable, active_param))
}
+
+pub fn generic_def_for_node(
+ sema: &Semantics<'_, RootDatabase>,
+ generic_arg_list: &ast::GenericArgList,
+ token: &SyntaxToken,
+) -> Option<(hir::GenericDef, usize, bool)> {
+ let parent = generic_arg_list.syntax().parent()?;
+ let def = match_ast! {
+ match parent {
+ ast::PathSegment(ps) => {
+ let res = sema.resolve_path(&ps.parent_path())?;
+ let generic_def: hir::GenericDef = match res {
+ hir::PathResolution::Def(hir::ModuleDef::Adt(it)) => it.into(),
+ hir::PathResolution::Def(hir::ModuleDef::Function(it)) => it.into(),
+ hir::PathResolution::Def(hir::ModuleDef::Trait(it)) => it.into(),
+ hir::PathResolution::Def(hir::ModuleDef::TypeAlias(it)) => it.into(),
+ hir::PathResolution::Def(hir::ModuleDef::Variant(it)) => it.into(),
+ hir::PathResolution::Def(hir::ModuleDef::BuiltinType(_))
+ | hir::PathResolution::Def(hir::ModuleDef::Const(_))
+ | hir::PathResolution::Def(hir::ModuleDef::Macro(_))
+ | hir::PathResolution::Def(hir::ModuleDef::Module(_))
+ | hir::PathResolution::Def(hir::ModuleDef::Static(_)) => return None,
+ hir::PathResolution::BuiltinAttr(_)
+ | hir::PathResolution::ToolModule(_)
+ | hir::PathResolution::Local(_)
+ | hir::PathResolution::TypeParam(_)
+ | hir::PathResolution::ConstParam(_)
+ | hir::PathResolution::SelfType(_)
+ | hir::PathResolution::DeriveHelper(_) => return None,
+ };
+
+ generic_def
+ },
+ ast::AssocTypeArg(_) => {
+ // FIXME: We don't record the resolutions for this anywhere atm
+ return None;
+ },
+ ast::MethodCallExpr(mcall) => {
+ // recv.method::<$0>()
+ let method = sema.resolve_method_call(&mcall)?;
+ method.into()
+ },
+ _ => return None,
+ }
+ };
+
+ let active_param = generic_arg_list
+ .syntax()
+ .children_with_tokens()
+ .filter_map(NodeOrToken::into_token)
+ .filter(|t| t.kind() == T![,])
+ .take_while(|t| t.text_range().start() <= token.text_range().start())
+ .count();
+
+ let first_arg_is_non_lifetime = generic_arg_list
+ .generic_args()
+ .next()
+ .map_or(false, |arg| !matches!(arg, ast::GenericArg::LifetimeArg(_)));
+
+ Some((def, active_param, first_arg_is_non_lifetime))
+}
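
A note on the argument-index change above: both `callable_for_node` and the new
`generic_def_for_node` derive the active index by counting `,` tokens that start
at or before the cursor token, rather than counting argument nodes, so an empty
or half-typed argument slot still maps to the right parameter. A minimal,
standalone sketch of that counting rule over a plain string (illustrative only,
not the rust-analyzer API):

    // Active argument index = number of separators at or before the cursor.
    fn active_param_index(arg_list: &str, cursor: usize) -> usize {
        arg_list
            .char_indices()
            .filter(|&(_, c)| c == ',')
            .take_while(|&(i, _)| i <= cursor)
            .count()
    }

    fn main() {
        let args = "(a, b, )"; // e.g. `foo(a, b, $0)`
        assert_eq!(active_param_index(args, 1), 0); // cursor on `a`
        assert_eq!(active_param_index(args, 4), 1); // cursor on `b`
        assert_eq!(active_param_index(args, 7), 2); // cursor after the trailing comma
    }
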
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/defs.rs b/src/tools/rust-analyzer/crates/ide-db/src/defs.rs
index 6c13c0397..ed7f04fd8 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/defs.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/defs.rs
@@ -34,8 +34,8 @@ pub enum Definition {
TypeAlias(TypeAlias),
BuiltinType(BuiltinType),
SelfType(Impl),
- Local(Local),
GenericParam(GenericParam),
+ Local(Local),
Label(Label),
DeriveHelper(DeriveHelper),
BuiltinAttr(BuiltinAttr),
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs b/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs
index 1b8f56187..16814a1e6 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/line_index.rs
@@ -7,56 +7,72 @@ use syntax::{TextRange, TextSize};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct LineIndex {
- /// Offset the the beginning of each line, zero-based
+ /// Offset the beginning of each line, zero-based.
pub(crate) newlines: Vec<TextSize>,
- /// List of non-ASCII characters on each line
- pub(crate) utf16_lines: NoHashHashMap<u32, Vec<Utf16Char>>,
+ /// List of non-ASCII characters on each line.
+ pub(crate) line_wide_chars: NoHashHashMap<u32, Vec<WideChar>>,
}
+/// Line/Column information in native, utf8 format.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub struct LineColUtf16 {
+pub struct LineCol {
/// Zero-based
pub line: u32,
- /// Zero-based
+ /// Zero-based utf8 offset
pub col: u32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub struct LineCol {
+pub enum WideEncoding {
+ Utf16,
+ Utf32,
+}
+
+/// Line/Column information in legacy encodings.
+///
+/// Deliberately not a generic type and different from `LineCol`.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct WideLineCol {
/// Zero-based
pub line: u32,
- /// Zero-based utf8 offset
+ /// Zero-based
pub col: u32,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
-pub(crate) struct Utf16Char {
+pub(crate) struct WideChar {
/// Start offset of a character inside a line, zero-based
pub(crate) start: TextSize,
/// End offset of a character inside a line, zero-based
pub(crate) end: TextSize,
}
-impl Utf16Char {
+impl WideChar {
/// Returns the length in 8-bit UTF-8 code units.
fn len(&self) -> TextSize {
self.end - self.start
}
- /// Returns the length in 16-bit UTF-16 code units.
- fn len_utf16(&self) -> usize {
- if self.len() == TextSize::from(4) {
- 2
- } else {
- 1
+ /// Returns the length in UTF-16 or UTF-32 code units.
+ fn wide_len(&self, enc: WideEncoding) -> usize {
+ match enc {
+ WideEncoding::Utf16 => {
+ if self.len() == TextSize::from(4) {
+ 2
+ } else {
+ 1
+ }
+ }
+
+ WideEncoding::Utf32 => 1,
}
}
}
impl LineIndex {
pub fn new(text: &str) -> LineIndex {
- let mut utf16_lines = NoHashHashMap::default();
- let mut utf16_chars = Vec::new();
+ let mut line_wide_chars = NoHashHashMap::default();
+ let mut wide_chars = Vec::new();
let mut newlines = Vec::with_capacity(16);
newlines.push(TextSize::from(0));
@@ -71,8 +87,8 @@ impl LineIndex {
newlines.push(curr_row);
// Save any utf-16 characters seen in the previous line
- if !utf16_chars.is_empty() {
- utf16_lines.insert(line, mem::take(&mut utf16_chars));
+ if !wide_chars.is_empty() {
+ line_wide_chars.insert(line, mem::take(&mut wide_chars));
}
// Prepare for processing the next line
@@ -82,18 +98,18 @@ impl LineIndex {
}
if !c.is_ascii() {
- utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + c_len });
+ wide_chars.push(WideChar { start: curr_col, end: curr_col + c_len });
}
curr_col += c_len;
}
// Save any utf-16 characters seen in the last line
- if !utf16_chars.is_empty() {
- utf16_lines.insert(line, utf16_chars);
+ if !wide_chars.is_empty() {
+ line_wide_chars.insert(line, wide_chars);
}
- LineIndex { newlines, utf16_lines }
+ LineIndex { newlines, line_wide_chars }
}
pub fn line_col(&self, offset: TextSize) -> LineCol {
@@ -109,13 +125,13 @@ impl LineIndex {
.map(|offset| offset + TextSize::from(line_col.col))
}
- pub fn to_utf16(&self, line_col: LineCol) -> LineColUtf16 {
- let col = self.utf8_to_utf16_col(line_col.line, line_col.col.into());
- LineColUtf16 { line: line_col.line, col: col as u32 }
+ pub fn to_wide(&self, enc: WideEncoding, line_col: LineCol) -> WideLineCol {
+ let col = self.utf8_to_wide_col(enc, line_col.line, line_col.col.into());
+ WideLineCol { line: line_col.line, col: col as u32 }
}
- pub fn to_utf8(&self, line_col: LineColUtf16) -> LineCol {
- let col = self.utf16_to_utf8_col(line_col.line, line_col.col);
+ pub fn to_utf8(&self, enc: WideEncoding, line_col: WideLineCol) -> LineCol {
+ let col = self.wide_to_utf8_col(enc, line_col.line, line_col.col);
LineCol { line: line_col.line, col: col.into() }
}
@@ -132,12 +148,12 @@ impl LineIndex {
.filter(|it| !it.is_empty())
}
- fn utf8_to_utf16_col(&self, line: u32, col: TextSize) -> usize {
+ fn utf8_to_wide_col(&self, enc: WideEncoding, line: u32, col: TextSize) -> usize {
let mut res: usize = col.into();
- if let Some(utf16_chars) = self.utf16_lines.get(&line) {
- for c in utf16_chars {
+ if let Some(wide_chars) = self.line_wide_chars.get(&line) {
+ for c in wide_chars {
if c.end <= col {
- res -= usize::from(c.len()) - c.len_utf16();
+ res -= usize::from(c.len()) - c.wide_len(enc);
} else {
// From here on, all utf16 characters come *after* the character we are mapping,
// so we don't need to take them into account
@@ -148,11 +164,11 @@ impl LineIndex {
res
}
- fn utf16_to_utf8_col(&self, line: u32, mut col: u32) -> TextSize {
- if let Some(utf16_chars) = self.utf16_lines.get(&line) {
- for c in utf16_chars {
+ fn wide_to_utf8_col(&self, enc: WideEncoding, line: u32, mut col: u32) -> TextSize {
+ if let Some(wide_chars) = self.line_wide_chars.get(&line) {
+ for c in wide_chars {
if col > u32::from(c.start) {
- col += u32::from(c.len()) - c.len_utf16() as u32;
+ col += u32::from(c.len()) - c.wide_len(enc) as u32;
} else {
// From here on, all utf16 characters come *after* the character we are mapping,
// so we don't need to take them into account
@@ -167,6 +183,9 @@ impl LineIndex {
#[cfg(test)]
mod tests {
+ use test_utils::skip_slow_tests;
+
+ use super::WideEncoding::{Utf16, Utf32};
use super::*;
#[test]
@@ -185,14 +204,14 @@ mod tests {
];
let index = LineIndex::new(text);
- for &(offset, line, col) in &table {
+ for (offset, line, col) in table {
assert_eq!(index.line_col(offset.into()), LineCol { line, col });
}
let text = "\nhello\nworld";
let table = [(0, 0, 0), (1, 1, 0), (2, 1, 1), (6, 1, 5), (7, 2, 0)];
let index = LineIndex::new(text);
- for &(offset, line, col) in &table {
+ for (offset, line, col) in table {
assert_eq!(index.line_col(offset.into()), LineCol { line, col });
}
}
@@ -210,67 +229,59 @@ mod tests {
const C: char = 'x';
",
);
- assert_eq!(col_index.utf16_lines.len(), 0);
+ assert_eq!(col_index.line_wide_chars.len(), 0);
}
#[test]
- fn test_single_char() {
- let col_index = LineIndex::new(
- "
-const C: char = 'メ';
-",
- );
-
- assert_eq!(col_index.utf16_lines.len(), 1);
- assert_eq!(col_index.utf16_lines[&1].len(), 1);
- assert_eq!(col_index.utf16_lines[&1][0], Utf16Char { start: 17.into(), end: 20.into() });
-
- // UTF-8 to UTF-16, no changes
- assert_eq!(col_index.utf8_to_utf16_col(1, 15.into()), 15);
-
- // UTF-8 to UTF-16
- assert_eq!(col_index.utf8_to_utf16_col(1, 22.into()), 20);
-
- // UTF-16 to UTF-8, no changes
- assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15));
-
- // UTF-16 to UTF-8
- assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(21));
-
- let col_index = LineIndex::new("a𐐏b");
- assert_eq!(col_index.utf16_to_utf8_col(0, 3), TextSize::from(5));
- }
-
- #[test]
- fn test_string() {
- let col_index = LineIndex::new(
- "
-const C: char = \"メ メ\";
-",
- );
-
- assert_eq!(col_index.utf16_lines.len(), 1);
- assert_eq!(col_index.utf16_lines[&1].len(), 2);
- assert_eq!(col_index.utf16_lines[&1][0], Utf16Char { start: 17.into(), end: 20.into() });
- assert_eq!(col_index.utf16_lines[&1][1], Utf16Char { start: 21.into(), end: 24.into() });
-
- // UTF-8 to UTF-16
- assert_eq!(col_index.utf8_to_utf16_col(1, 15.into()), 15);
-
- assert_eq!(col_index.utf8_to_utf16_col(1, 21.into()), 19);
- assert_eq!(col_index.utf8_to_utf16_col(1, 25.into()), 21);
-
- assert!(col_index.utf8_to_utf16_col(2, 15.into()) == 15);
-
- // UTF-16 to UTF-8
- assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15));
+ fn test_every_chars() {
+ if skip_slow_tests() {
+ return;
+ }
- // メ UTF-8: 0xE3 0x83 0xA1, UTF-16: 0x30E1
- assert_eq!(col_index.utf16_to_utf8_col(1, 17), TextSize::from(17)); // first メ at 17..20
- assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from(20)); // space
- assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(21)); // second メ at 21..24
+ let text: String = {
+ let mut chars: Vec<char> = ((0 as char)..char::MAX).collect(); // Neat!
+ chars.extend("\n".repeat(chars.len() / 16).chars());
+ let mut rng = oorandom::Rand32::new(stdx::rand::seed());
+ stdx::rand::shuffle(&mut chars, |i| rng.rand_range(0..i as u32) as usize);
+ chars.into_iter().collect()
+ };
+ assert!(text.contains('💩')); // Sanity check.
+
+ let line_index = LineIndex::new(&text);
+
+ let mut lin_col = LineCol { line: 0, col: 0 };
+ let mut col_utf16 = 0;
+ let mut col_utf32 = 0;
+ for (offset, c) in text.char_indices() {
+ let got_offset = line_index.offset(lin_col).unwrap();
+ assert_eq!(usize::from(got_offset), offset);
+
+ let got_lin_col = line_index.line_col(got_offset);
+ assert_eq!(got_lin_col, lin_col);
+
+ for enc in [Utf16, Utf32] {
+ let wide_lin_col = line_index.to_wide(enc, lin_col);
+ let got_lin_col = line_index.to_utf8(enc, wide_lin_col);
+ assert_eq!(got_lin_col, lin_col);
+
+ let want_col = match enc {
+ Utf16 => col_utf16,
+ Utf32 => col_utf32,
+ };
+ assert_eq!(wide_lin_col.col, want_col)
+ }
- assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from(15));
+ if c == '\n' {
+ lin_col.line += 1;
+ lin_col.col = 0;
+ col_utf16 = 0;
+ col_utf32 = 0;
+ } else {
+ lin_col.col += c.len_utf8() as u32;
+ col_utf16 += c.len_utf16() as u32;
+ col_utf32 += 1;
+ }
+ }
}
#[test]
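
The `LineIndex` rework above folds the old UTF-16-only conversion into a
`WideEncoding` enum so the same column arithmetic serves UTF-16 and UTF-32
clients: each multi-byte character before the target column contributes
`len_utf8 - wide_len` to the correction. A standalone sketch of that per-line
conversion under the same definitions (illustrative; the real `LineIndex` only
stores the non-ASCII characters of each line):

    #[derive(Clone, Copy)]
    enum WideEncoding { Utf16, Utf32 }

    // Convert a UTF-8 byte column within `line` into the wide encoding,
    // mirroring the adjustment the patch performs with `WideChar::wide_len`.
    fn utf8_to_wide_col(line: &str, enc: WideEncoding, col: usize) -> usize {
        let mut res = col;
        for (i, c) in line.char_indices() {
            if i + c.len_utf8() > col {
                break; // this char ends after the target column
            }
            let wide_len = match enc {
                WideEncoding::Utf16 => c.len_utf16(),
                WideEncoding::Utf32 => 1,
            };
            res -= c.len_utf8() - wide_len;
        }
        res
    }

    fn main() {
        let line = "const C: char = 'メ';"; // メ: 3 UTF-8 bytes, 1 UTF-16/UTF-32 unit
        let col = line.find(';').unwrap(); // byte column of the `;`
        assert_eq!(utf8_to_wide_col(line, WideEncoding::Utf16, col), col - 2);
        assert_eq!(utf8_to_wide_col(line, WideEncoding::Utf32, col), col - 2);
    }
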
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs b/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs
index 12d873b4a..6402a84a6 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/path_transform.rs
@@ -33,7 +33,7 @@ use syntax::{
/// }
/// ```
pub struct PathTransform<'a> {
- generic_def: hir::GenericDef,
+ generic_def: Option<hir::GenericDef>,
substs: Vec<ast::Type>,
target_scope: &'a SemanticsScope<'a>,
source_scope: &'a SemanticsScope<'a>,
@@ -49,7 +49,7 @@ impl<'a> PathTransform<'a> {
PathTransform {
source_scope,
target_scope,
- generic_def: trait_.into(),
+ generic_def: Some(trait_.into()),
substs: get_syntactic_substs(impl_).unwrap_or_default(),
}
}
@@ -63,28 +63,42 @@ impl<'a> PathTransform<'a> {
PathTransform {
source_scope,
target_scope,
- generic_def: function.into(),
+ generic_def: Some(function.into()),
substs: get_type_args_from_arg_list(generic_arg_list).unwrap_or_default(),
}
}
+ pub fn generic_transformation(
+ target_scope: &'a SemanticsScope<'a>,
+ source_scope: &'a SemanticsScope<'a>,
+ ) -> PathTransform<'a> {
+ PathTransform { source_scope, target_scope, generic_def: None, substs: Vec::new() }
+ }
+
pub fn apply(&self, syntax: &SyntaxNode) {
self.build_ctx().apply(syntax)
}
+ pub fn apply_all<'b>(&self, nodes: impl IntoIterator<Item = &'b SyntaxNode>) {
+ let ctx = self.build_ctx();
+ for node in nodes {
+ ctx.apply(node);
+ }
+ }
+
fn build_ctx(&self) -> Ctx<'a> {
let db = self.source_scope.db;
let target_module = self.target_scope.module();
let source_module = self.source_scope.module();
let skip = match self.generic_def {
// this is a trait impl, so we need to skip the first type parameter -- this is a bit hacky
- hir::GenericDef::Trait(_) => 1,
+ Some(hir::GenericDef::Trait(_)) => 1,
_ => 0,
};
let substs_by_param: FxHashMap<_, _> = self
.generic_def
- .type_params(db)
.into_iter()
+ .flat_map(|it| it.type_params(db))
.skip(skip)
// The actual list of trait type parameters may be longer than the one
// used in the `impl` block due to trailing default type parameters.
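
Making `generic_def` an `Option` lets the new `generic_transformation`
constructor skip substitutions entirely while the downstream iterator chain is
untouched: `Option::into_iter().flat_map(..)` simply yields nothing for `None`.
A standalone sketch of that pattern with stand-in types (not the real `hir`
API):

    struct GenericDef { params: Vec<&'static str> }

    fn collect_params(generic_def: Option<&GenericDef>, skip: usize) -> Vec<&'static str> {
        generic_def
            .into_iter()
            // `None` contributes no parameters at all.
            .flat_map(|def| def.params.iter().copied())
            .skip(skip)
            .collect()
    }

    fn main() {
        let trait_def = GenericDef { params: vec!["Self", "T", "U"] };
        // Trait impls skip the implicit first parameter, as in `build_ctx`.
        assert_eq!(collect_params(Some(&trait_def), 1), vec!["T", "U"]);
        // `generic_transformation` carries no generic def, so no substitutions.
        assert!(collect_params(None, 0).is_empty());
    }
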
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/rename.rs b/src/tools/rust-analyzer/crates/ide-db/src/rename.rs
index cd4a7e155..84d70b258 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/rename.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/rename.rs
@@ -190,6 +190,7 @@ fn rename_mod(
let InFile { file_id, value: def_source } = module.definition_source(sema.db);
if let ModuleSource::SourceFile(..) = def_source {
+ let new_name = new_name.trim_start_matches("r#");
let anchor = file_id.original_file(sema.db);
let is_mod_rs = module.is_mod_rs(sema.db);
@@ -207,9 +208,13 @@ fn rename_mod(
// - Module has submodules defined in separate files
let dir_paths = match (is_mod_rs, has_detached_child, module.name(sema.db)) {
// Go up one level since the anchor is inside the dir we're trying to rename
- (true, _, Some(mod_name)) => Some((format!("../{mod_name}"), format!("../{new_name}"))),
+ (true, _, Some(mod_name)) => {
+ Some((format!("../{}", mod_name.unescaped()), format!("../{new_name}")))
+ }
// The anchor is on the same level as target dir
- (false, true, Some(mod_name)) => Some((mod_name.to_string(), new_name.to_string())),
+ (false, true, Some(mod_name)) => {
+ Some((mod_name.unescaped().to_string(), new_name.to_string()))
+ }
_ => None,
};
@@ -263,11 +268,10 @@ fn rename_reference(
Definition::GenericParam(hir::GenericParam::LifetimeParam(_)) | Definition::Label(_)
) {
match ident_kind {
- IdentifierKind::Ident | IdentifierKind::Underscore => {
- cov_mark::hit!(rename_not_a_lifetime_ident_ref);
+ IdentifierKind::Underscore => {
bail!("Invalid name `{}`: not a lifetime identifier", new_name);
}
- IdentifierKind::Lifetime => cov_mark::hit!(rename_lifetime),
+ _ => cov_mark::hit!(rename_lifetime),
}
} else {
match ident_kind {
@@ -334,11 +338,17 @@ pub fn source_edit_from_references(
}
_ => false,
};
- if !has_emitted_edit {
- if !edited_ranges.contains(&range.start()) {
- edit.replace(range, new_name.to_string());
- edited_ranges.push(range.start());
- }
+ if !has_emitted_edit && !edited_ranges.contains(&range.start()) {
+ let (range, new_name) = match name {
+ ast::NameLike::Lifetime(_) => (
+ TextRange::new(range.start() + syntax::TextSize::from(1), range.end()),
+ new_name.strip_prefix('\'').unwrap_or(new_name).to_owned(),
+ ),
+ _ => (range, new_name.to_owned()),
+ };
+
+ edit.replace(range, new_name);
+ edited_ranges.push(range.start());
}
}
@@ -391,19 +401,17 @@ fn source_edit_from_name_ref(
edit.delete(TextRange::new(s, e));
return true;
}
- } else if init == name_ref {
- if field_name.text() == new_name {
- cov_mark::hit!(test_rename_local_put_init_shorthand);
- // Foo { field: local } -> Foo { field }
- // ^^^^^^^ delete this
-
- // same names, we can use a shorthand here instead.
- // we do not want to erase attributes hence this range start
- let s = field_name.syntax().text_range().end();
- let e = init.syntax().text_range().end();
- edit.delete(TextRange::new(s, e));
- return true;
- }
+ } else if init == name_ref && field_name.text() == new_name {
+ cov_mark::hit!(test_rename_local_put_init_shorthand);
+ // Foo { field: local } -> Foo { field }
+ // ^^^^^^^ delete this
+
+ // same names, we can use a shorthand here instead.
+ // we do not want to erase attributes hence this range start
+ let s = field_name.syntax().text_range().end();
+ let e = init.syntax().text_range().end();
+ edit.delete(TextRange::new(s, e));
+ return true;
}
}
// init shorthand
@@ -505,7 +513,15 @@ fn source_edit_from_def(
}
}
if edit.is_empty() {
- edit.replace(range, new_name.to_string());
+ let (range, new_name) = match def {
+ Definition::GenericParam(hir::GenericParam::LifetimeParam(_))
+ | Definition::Label(_) => (
+ TextRange::new(range.start() + syntax::TextSize::from(1), range.end()),
+ new_name.strip_prefix('\'').unwrap_or(new_name).to_owned(),
+ ),
+ _ => (range, new_name.to_owned()),
+ };
+ edit.replace(range, new_name);
}
Ok((file_id, edit.finish()))
}
@@ -521,14 +537,18 @@ impl IdentifierKind {
pub fn classify(new_name: &str) -> Result<IdentifierKind> {
match parser::LexedStr::single_token(new_name) {
Some(res) => match res {
- (SyntaxKind::IDENT, _) => Ok(IdentifierKind::Ident),
+ (SyntaxKind::IDENT, _) => {
+ if let Some(inner) = new_name.strip_prefix("r#") {
+ if matches!(inner, "self" | "crate" | "super" | "Self") {
+ bail!("Invalid name: `{}` cannot be a raw identifier", inner);
+ }
+ }
+ Ok(IdentifierKind::Ident)
+ }
(T![_], _) => Ok(IdentifierKind::Underscore),
(SyntaxKind::LIFETIME_IDENT, _) if new_name != "'static" && new_name != "'_" => {
Ok(IdentifierKind::Lifetime)
}
- (SyntaxKind::LIFETIME_IDENT, _) => {
- bail!("Invalid name `{}`: not a lifetime identifier", new_name)
- }
(_, Some(syntax_error)) => bail!("Invalid name `{}`: {}", new_name, syntax_error),
(_, None) => bail!("Invalid name `{}`: not an identifier", new_name),
},
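
Two behavioural notes on the rename changes: lifetime and label references are
now edited without their leading `'` (both the text range and the replacement
are trimmed), and `IdentifierKind::classify` accepts raw identifiers except the
path keywords that can never be raw. A standalone sketch of that last rule
(illustrative; not the real `classify`):

    fn check_raw_ident(new_name: &str) -> Result<(), String> {
        if let Some(inner) = new_name.strip_prefix("r#") {
            if matches!(inner, "self" | "crate" | "super" | "Self") {
                return Err(format!("Invalid name: `{inner}` cannot be a raw identifier"));
            }
        }
        Ok(())
    }

    fn main() {
        assert!(check_raw_ident("r#type").is_ok());  // raw form of a keyword: fine
        assert!(check_raw_ident("r#self").is_err()); // `self` can never be raw
    }
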
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/search.rs b/src/tools/rust-analyzer/crates/ide-db/src/search.rs
index b2b0e4908..c18a27f17 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/search.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/search.rs
@@ -7,7 +7,9 @@
use std::{mem, sync::Arc};
use base_db::{FileId, FileRange, SourceDatabase, SourceDatabaseExt};
-use hir::{DefWithBody, HasAttrs, HasSource, InFile, ModuleSource, Semantics, Visibility};
+use hir::{
+ AsAssocItem, DefWithBody, HasAttrs, HasSource, InFile, ModuleSource, Semantics, Visibility,
+};
use memchr::memmem::Finder;
use once_cell::unsync::Lazy;
use parser::SyntaxKind;
@@ -311,15 +313,15 @@ impl Definition {
pub fn usages<'a>(self, sema: &'a Semantics<'_, RootDatabase>) -> FindUsages<'a> {
FindUsages {
- local_repr: match self {
- Definition::Local(local) => Some(local.representative(sema.db)),
- _ => None,
- },
def: self,
- trait_assoc_def: as_trait_assoc_def(sema.db, self),
+ assoc_item_container: self.as_assoc_item(sema.db).map(|a| a.container(sema.db)),
sema,
scope: None,
include_self_kw_refs: None,
+ local_repr: match self {
+ Definition::Local(local) => Some(local.representative(sema.db)),
+ _ => None,
+ },
search_self_mod: false,
}
}
@@ -328,12 +330,16 @@ impl Definition {
#[derive(Clone)]
pub struct FindUsages<'a> {
def: Definition,
- /// If def is an assoc item from a trait or trait impl, this is the corresponding item of the trait definition
- trait_assoc_def: Option<Definition>,
sema: &'a Semantics<'a, RootDatabase>,
scope: Option<SearchScope>,
+ /// The container of our definition should it be an assoc item
+ assoc_item_container: Option<hir::AssocItemContainer>,
+ /// whether to search for the `Self` type of the definition
include_self_kw_refs: Option<hir::Type>,
+ /// the local representative for the local definition we are searching for
+ /// (this is required for finding all local declarations in a or-pattern)
local_repr: Option<hir::Local>,
+ /// whether to search for the `self` module
search_self_mod: bool,
}
@@ -380,7 +386,9 @@ impl<'a> FindUsages<'a> {
let sema = self.sema;
let search_scope = {
- let base = self.trait_assoc_def.unwrap_or(self.def).search_scope(sema.db);
+ // FIXME: Is the trait scope needed for trait impl assoc items?
+ let base =
+ as_trait_assoc_def(sema.db, self.def).unwrap_or(self.def).search_scope(sema.db);
match &self.scope {
None => base,
Some(scope) => base.intersection(scope),
@@ -447,15 +455,21 @@ impl<'a> FindUsages<'a> {
}
let find_nodes = move |name: &str, node: &syntax::SyntaxNode, offset: TextSize| {
- node.token_at_offset(offset).find(|it| it.text() == name).map(|token| {
- // FIXME: There should be optimization potential here
- // Currently we try to descend everything we find which
- // means we call `Semantics::descend_into_macros` on
- // every textual hit. That function is notoriously
- // expensive even for things that do not get down mapped
- // into macros.
- sema.descend_into_macros(token).into_iter().filter_map(|it| it.parent())
- })
+ node.token_at_offset(offset)
+ .find(|it| {
+ // `name` is stripped of raw ident prefix. See the comment on name retrieval above.
+ it.text().trim_start_matches("r#") == name
+ })
+ .into_iter()
+ .flat_map(|token| {
+ // FIXME: There should be optimization potential here
+ // Currently we try to descend everything we find which
+ // means we call `Semantics::descend_into_macros` on
+ // every textual hit. That function is notoriously
+ // expensive even for things that do not get down mapped
+ // into macros.
+ sema.descend_into_macros(token).into_iter().filter_map(|it| it.parent())
+ })
};
for (text, file_id, search_range) in scope_files(sema, &search_scope) {
@@ -463,30 +477,23 @@ impl<'a> FindUsages<'a> {
// Search for occurrences of the items name
for offset in match_indices(&text, finder, search_range) {
- if let Some(iter) = find_nodes(name, &tree, offset) {
- for name in iter.filter_map(ast::NameLike::cast) {
- if match name {
- ast::NameLike::NameRef(name_ref) => {
- self.found_name_ref(&name_ref, sink)
- }
- ast::NameLike::Name(name) => self.found_name(&name, sink),
- ast::NameLike::Lifetime(lifetime) => {
- self.found_lifetime(&lifetime, sink)
- }
- } {
- return;
- }
+ for name in find_nodes(name, &tree, offset).filter_map(ast::NameLike::cast) {
+ if match name {
+ ast::NameLike::NameRef(name_ref) => self.found_name_ref(&name_ref, sink),
+ ast::NameLike::Name(name) => self.found_name(&name, sink),
+ ast::NameLike::Lifetime(lifetime) => self.found_lifetime(&lifetime, sink),
+ } {
+ return;
}
}
}
// Search for occurrences of the `Self` referring to our type
if let Some((self_ty, finder)) = &include_self_kw_refs {
for offset in match_indices(&text, finder, search_range) {
- if let Some(iter) = find_nodes("Self", &tree, offset) {
- for name_ref in iter.filter_map(ast::NameRef::cast) {
- if self.found_self_ty_name_ref(self_ty, &name_ref, sink) {
- return;
- }
+ for name_ref in find_nodes("Self", &tree, offset).filter_map(ast::NameRef::cast)
+ {
+ if self.found_self_ty_name_ref(self_ty, &name_ref, sink) {
+ return;
}
}
}
@@ -494,41 +501,37 @@ impl<'a> FindUsages<'a> {
}
// Search for `super` and `crate` resolving to our module
- match self.def {
- Definition::Module(module) => {
- let scope = search_scope
- .intersection(&SearchScope::module_and_children(self.sema.db, module));
+ if let Definition::Module(module) = self.def {
+ let scope =
+ search_scope.intersection(&SearchScope::module_and_children(self.sema.db, module));
- let is_crate_root =
- module.is_crate_root(self.sema.db).then(|| Finder::new("crate"));
- let finder = &Finder::new("super");
+ let is_crate_root = module.is_crate_root(self.sema.db).then(|| Finder::new("crate"));
+ let finder = &Finder::new("super");
- for (text, file_id, search_range) in scope_files(sema, &scope) {
- let tree = Lazy::new(move || sema.parse(file_id).syntax().clone());
+ for (text, file_id, search_range) in scope_files(sema, &scope) {
+ let tree = Lazy::new(move || sema.parse(file_id).syntax().clone());
- for offset in match_indices(&text, finder, search_range) {
- if let Some(iter) = find_nodes("super", &tree, offset) {
- for name_ref in iter.filter_map(ast::NameRef::cast) {
- if self.found_name_ref(&name_ref, sink) {
- return;
- }
- }
+ for offset in match_indices(&text, finder, search_range) {
+ for name_ref in
+ find_nodes("super", &tree, offset).filter_map(ast::NameRef::cast)
+ {
+ if self.found_name_ref(&name_ref, sink) {
+ return;
}
}
- if let Some(finder) = &is_crate_root {
- for offset in match_indices(&text, finder, search_range) {
- if let Some(iter) = find_nodes("crate", &tree, offset) {
- for name_ref in iter.filter_map(ast::NameRef::cast) {
- if self.found_name_ref(&name_ref, sink) {
- return;
- }
- }
+ }
+ if let Some(finder) = &is_crate_root {
+ for offset in match_indices(&text, finder, search_range) {
+ for name_ref in
+ find_nodes("crate", &tree, offset).filter_map(ast::NameRef::cast)
+ {
+ if self.found_name_ref(&name_ref, sink) {
+ return;
}
}
}
}
}
- _ => (),
}
// search for module `self` references in our module's definition source
@@ -562,11 +565,10 @@ impl<'a> FindUsages<'a> {
let finder = &Finder::new("self");
for offset in match_indices(&text, finder, search_range) {
- if let Some(iter) = find_nodes("self", &tree, offset) {
- for name_ref in iter.filter_map(ast::NameRef::cast) {
- if self.found_self_module_name_ref(&name_ref, sink) {
- return;
- }
+ for name_ref in find_nodes("self", &tree, offset).filter_map(ast::NameRef::cast)
+ {
+ if self.found_self_module_name_ref(&name_ref, sink) {
+ return;
}
}
}
@@ -655,13 +657,26 @@ impl<'a> FindUsages<'a> {
sink(file_id, reference)
}
Some(NameRefClass::Definition(def))
- if match self.trait_assoc_def {
- Some(trait_assoc_def) => {
- // we have a trait assoc item, so force resolve all assoc items to their trait version
- convert_to_def_in_trait(self.sema.db, def) == trait_assoc_def
- }
- None => self.def == def,
- } =>
+ if self.def == def
+ // is our def a trait assoc item? then we want to find all assoc items from trait impls of our trait
+ || matches!(self.assoc_item_container, Some(hir::AssocItemContainer::Trait(_)))
+ && convert_to_def_in_trait(self.sema.db, def) == self.def =>
+ {
+ let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
+ let reference = FileReference {
+ range,
+ name: ast::NameLike::NameRef(name_ref.clone()),
+ category: ReferenceCategory::new(&def, name_ref),
+ };
+ sink(file_id, reference)
+ }
+ // FIXME: special case type aliases, we can't filter between impl and trait defs here as we lack the substitutions
+ // so we always resolve all assoc type aliases to both their trait def and impl defs
+ Some(NameRefClass::Definition(def))
+ if self.assoc_item_container.is_some()
+ && matches!(self.def, Definition::TypeAlias(_))
+ && convert_to_def_in_trait(self.sema.db, def)
+ == convert_to_def_in_trait(self.sema.db, self.def) =>
{
let FileRange { file_id, range } = self.sema.original_range(name_ref.syntax());
let reference = FileReference {
@@ -752,13 +767,21 @@ impl<'a> FindUsages<'a> {
false
}
Some(NameClass::Definition(def)) if def != self.def => {
- // if the def we are looking for is a trait (impl) assoc item, we'll have to resolve the items to trait definition assoc item
- if !matches!(
- self.trait_assoc_def,
- Some(trait_assoc_def)
- if convert_to_def_in_trait(self.sema.db, def) == trait_assoc_def
- ) {
- return false;
+ match (&self.assoc_item_container, self.def) {
+ // for type aliases we always want to reference the trait def and all the trait impl counterparts
+ // FIXME: only until we can resolve them correctly, see FIXME above
+ (Some(_), Definition::TypeAlias(_))
+ if convert_to_def_in_trait(self.sema.db, def)
+ != convert_to_def_in_trait(self.sema.db, self.def) =>
+ {
+ return false
+ }
+ (Some(_), Definition::TypeAlias(_)) => {}
+ // We're looking at an assoc item of a trait definition, so reference all the
+ // corresponding assoc items belonging to this trait's trait implementations
+ (Some(hir::AssocItemContainer::Trait(_)), _)
+ if convert_to_def_in_trait(self.sema.db, def) == self.def => {}
+ _ => return false,
}
let FileRange { file_id, range } = self.sema.original_range(name.syntax());
let reference = FileReference {
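
In `find_nodes` above, the searched name arrives with any `r#` prefix already
stripped, so candidate tokens are compared with their prefix trimmed as well. A
trivial standalone sketch of that comparison:

    fn token_matches(token_text: &str, name: &str) -> bool {
        token_text.trim_start_matches("r#") == name
    }

    fn main() {
        assert!(token_matches("r#struct", "struct")); // raw-ident hit
        assert!(token_matches("foo", "foo"));         // plain-ident hit
        assert!(!token_matches("foo", "bar"));
    }
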
diff --git a/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs b/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs
index c054cc159..a91ffd1ec 100644
--- a/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs
+++ b/src/tools/rust-analyzer/crates/ide-db/src/symbol_index.rs
@@ -323,10 +323,10 @@ impl Query {
if symbol.name != self.query {
continue;
}
- } else if self.case_sensitive {
- if self.query.chars().any(|c| !symbol.name.contains(c)) {
- continue;
- }
+ } else if self.case_sensitive
+ && self.query.chars().any(|c| !symbol.name.contains(c))
+ {
+ continue;
}
res.push(symbol.clone());
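
The collapsed condition in `symbol_index.rs` keeps the same case-sensitive
filter: a symbol is skipped unless every character of the query occurs somewhere
in its name. A standalone sketch of that predicate:

    fn passes_case_sensitive_filter(query: &str, symbol_name: &str) -> bool {
        !query.chars().any(|c| !symbol_name.contains(c))
    }

    fn main() {
        assert!(passes_case_sensitive_filter("LnIdx", "LineIndex"));
        assert!(!passes_case_sensitive_filter("lnidx", "LineIndex")); // case matters
    }
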