author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
commit     9918693037dce8aa4bb6f08741b6812923486c18 (patch)
tree       21d2b40bec7e6a7ea664acee056eb3d08e15a1cf  /vendor/line-index
parent     Releasing progress-linux version 1.75.0+dfsg1-5~progress7.99u1. (diff)
Merging upstream version 1.76.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/line-index')
-rw-r--r--  vendor/line-index/.cargo-checksum.json  |   2
-rw-r--r--  vendor/line-index/Cargo.toml             |   5
-rw-r--r--  vendor/line-index/src/lib.rs             | 221
-rw-r--r--  vendor/line-index/src/tests.rs           | 149
-rw-r--r--  vendor/line-index/tests/it.rs            |  62
5 files changed, 327 insertions, 112 deletions
diff --git a/vendor/line-index/.cargo-checksum.json b/vendor/line-index/.cargo-checksum.json
index d96ced783..9fc8f5c0c 100644
--- a/vendor/line-index/.cargo-checksum.json
+++ b/vendor/line-index/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"a10220f394cb62baef1dbf0d96c188b21eef910259d21c7f7d60dd622306c961","src/lib.rs":"12d63393f2a07750ad7ad1a344b543792800518d9cf74955301ea33782640bae","src/tests.rs":"4741ca88d75c136fedf6c698cd58aaae7a2c092f754f0fecee80774e53f4e8e4","tests/it.rs":"aa3cc4fb79acd647d7d9f74134fd05b728039bd6eefea19b28eaff450e830b24"},"package":"2cad96769710c1745e11d4f940a8ff36000ade4bbada4285b001cb8aa2f745ce"} \ No newline at end of file
+{"files":{"Cargo.toml":"94a15a9eba39b8aeff4011d9cd970e713bcc71a475567e34b7cf5174b485a2ed","src/lib.rs":"8867fdfa6b4286f70f6d3ac7c22711adbc81152766c927a03ec55d5a346852c8","src/tests.rs":"6aeb4dbcb1f2b2c17b31bcb097f048a05627de88d98da57a982298a8167130b2"},"package":"67d61795376ae2683928c218fda7d7d7db136fd38c06b7552904667f0d55580a"} \ No newline at end of file
diff --git a/vendor/line-index/Cargo.toml b/vendor/line-index/Cargo.toml
index 06efc2071..bdf35aba6 100644
--- a/vendor/line-index/Cargo.toml
+++ b/vendor/line-index/Cargo.toml
@@ -12,14 +12,13 @@
[package]
edition = "2021"
name = "line-index"
-version = "0.1.0-pre.1"
+version = "0.1.1"
description = "Maps flat `TextSize` offsets to/from `(line, column)` representation."
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/rust-analyzer/tree/master/lib/line-index"
-resolver = "1"
[dependencies.nohash-hasher]
version = "0.2.0"
[dependencies.text-size]
-version = "1.1.0"
+version = "1.1.1"
diff --git a/vendor/line-index/src/lib.rs b/vendor/line-index/src/lib.rs
index ad67d3f24..58f266d67 100644
--- a/vendor/line-index/src/lib.rs
+++ b/vendor/line-index/src/lib.rs
@@ -94,44 +94,7 @@ pub struct LineIndex {
impl LineIndex {
/// Returns a `LineIndex` for the `text`.
pub fn new(text: &str) -> LineIndex {
- let mut newlines = Vec::<TextSize>::with_capacity(16);
- let mut line_wide_chars = IntMap::<u32, Box<[WideChar]>>::default();
-
- let mut wide_chars = Vec::<WideChar>::new();
- let mut cur_row = TextSize::from(0);
- let mut cur_col = TextSize::from(0);
- let mut line = 0u32;
-
- for c in text.chars() {
- let c_len = TextSize::of(c);
- cur_row += c_len;
- if c == '\n' {
- newlines.push(cur_row);
-
- // Save any wide characters seen in the previous line
- if !wide_chars.is_empty() {
- let cs = std::mem::take(&mut wide_chars).into_boxed_slice();
- line_wide_chars.insert(line, cs);
- }
-
- // Prepare for processing the next line
- cur_col = TextSize::from(0);
- line += 1;
- continue;
- }
-
- if !c.is_ascii() {
- wide_chars.push(WideChar { start: cur_col, end: cur_col + c_len });
- }
-
- cur_col += c_len;
- }
-
- // Save any wide characters seen in the last line
- if !wide_chars.is_empty() {
- line_wide_chars.insert(line, wide_chars.into_boxed_slice());
- }
-
+ let (newlines, line_wide_chars) = analyze_source_file(text);
LineIndex {
newlines: newlines.into_boxed_slice(),
line_wide_chars,
@@ -235,3 +198,185 @@ impl LineIndex {
self.len
}
}
+
+/// This is adapted from the `rustc_span` crate: <https://github.com/rust-lang/rust/blob/de59844c98f7925242a798a72c59dc3610dd0e2c/compiler/rustc_span/src/analyze_source_file.rs>
+fn analyze_source_file(src: &str) -> (Vec<TextSize>, IntMap<u32, Box<[WideChar]>>) {
+ assert!(src.len() < !0u32 as usize);
+ let mut lines = vec![];
+ let mut line_wide_chars = IntMap::<u32, Vec<WideChar>>::default();
+
+ // Calls the right implementation, depending on hardware support available.
+ analyze_source_file_dispatch(src, &mut lines, &mut line_wide_chars);
+
+ (lines, line_wide_chars.into_iter().map(|(k, v)| (k, v.into_boxed_slice())).collect())
+}
+
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn analyze_source_file_dispatch(
+ src: &str,
+ lines: &mut Vec<TextSize>,
+ multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
+) {
+ if is_x86_feature_detected!("sse2") {
+ // SAFETY: SSE2 support was checked
+ unsafe {
+ analyze_source_file_sse2(src, lines, multi_byte_chars);
+ }
+ } else {
+ analyze_source_file_generic(src, src.len(), TextSize::from(0), lines, multi_byte_chars);
+ }
+}
+
+/// Checks 16-byte chunks of text at a time. If a chunk contains anything
+/// other than ASCII, the function falls back to the generic implementation
+/// for the remainder of that chunk. Otherwise it uses SSE2 intrinsics to
+/// quickly find all newlines.
+#[target_feature(enable = "sse2")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+unsafe fn analyze_source_file_sse2(
+ src: &str,
+ lines: &mut Vec<TextSize>,
+ multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
+) {
+ #[cfg(target_arch = "x86")]
+ use std::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use std::arch::x86_64::*;
+
+ const CHUNK_SIZE: usize = 16;
+
+ let src_bytes = src.as_bytes();
+
+ let chunk_count = src.len() / CHUNK_SIZE;
+
+ // This variable keeps track of where we should start decoding a
+ // chunk. If a multi-byte character spans across chunk boundaries,
+ // we need to skip that part in the next chunk because we already
+ // handled it.
+ let mut intra_chunk_offset = 0;
+
+ for chunk_index in 0..chunk_count {
+ let ptr = src_bytes.as_ptr() as *const __m128i;
+ // We don't know if the pointer is aligned to 16 bytes, so we
+ // use `loadu`, which supports unaligned loading.
+ let chunk = _mm_loadu_si128(ptr.add(chunk_index));
+
+ // For each character in the chunk, see if its byte value is < 0, which
+ // indicates that it's part of a multi-byte UTF-8 char.
+ let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0));
+ // Create a bit mask from the comparison results.
+ let multibyte_mask = _mm_movemask_epi8(multibyte_test);
+
+ // If the bit mask is all zero, we only have ASCII chars here:
+ if multibyte_mask == 0 {
+ assert!(intra_chunk_offset == 0);
+
+ // Check for newlines in the chunk
+ let newlines_test = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(b'\n' as i8));
+ let newlines_mask = _mm_movemask_epi8(newlines_test);
+
+ if newlines_mask != 0 {
+ // The chunk is ASCII-only and contains newlines; record them
+ let mut newlines_mask = 0xFFFF0000 | newlines_mask as u32;
+ let output_offset = TextSize::from((chunk_index * CHUNK_SIZE + 1) as u32);
+
+ loop {
+ let index = newlines_mask.trailing_zeros();
+
+ if index >= CHUNK_SIZE as u32 {
+ // We have arrived at the end of the chunk.
+ break;
+ }
+
+ lines.push(TextSize::from(index) + output_offset);
+
+ // Clear the bit, so we can find the next one.
+ newlines_mask &= (!1) << index;
+ }
+ }
+ continue;
+ }
+
+ // The slow path.
+ // There are control chars in here, fallback to generic decoding.
+ let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset;
+ intra_chunk_offset = analyze_source_file_generic(
+ &src[scan_start..],
+ CHUNK_SIZE - intra_chunk_offset,
+ TextSize::from(scan_start as u32),
+ lines,
+ multi_byte_chars,
+ );
+ }
+
+ // There might still be a tail left to analyze
+ let tail_start = chunk_count * CHUNK_SIZE + intra_chunk_offset;
+ if tail_start < src.len() {
+ analyze_source_file_generic(
+ &src[tail_start..],
+ src.len() - tail_start,
+ TextSize::from(tail_start as u32),
+ lines,
+ multi_byte_chars,
+ );
+ }
+}
+
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+// The target (or compiler version) does not support SSE2 ...
+fn analyze_source_file_dispatch(
+ src: &str,
+ lines: &mut Vec<TextSize>,
+ multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
+) {
+ analyze_source_file_generic(src, src.len(), TextSize::from(0), lines, multi_byte_chars);
+}
+
+// `scan_len` determines the number of bytes in `src` to scan. Note that the
+// function can read past `scan_len` if a multi-byte character starts within the
+// range but extends past it. The overflow is returned by the function.
+fn analyze_source_file_generic(
+ src: &str,
+ scan_len: usize,
+ output_offset: TextSize,
+ lines: &mut Vec<TextSize>,
+ multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
+) -> usize {
+ assert!(src.len() >= scan_len);
+ let mut i = 0;
+ let src_bytes = src.as_bytes();
+
+ while i < scan_len {
+ let byte = unsafe {
+ // We verified that i < scan_len <= src.len()
+ *src_bytes.get_unchecked(i)
+ };
+
+ // How much to advance in order to get to the next UTF-8 char in the
+ // string.
+ let mut char_len = 1;
+
+ if byte == b'\n' {
+ lines.push(TextSize::from(i as u32 + 1) + output_offset);
+ } else if byte >= 127 {
+ // The slow path: Just decode to `char`.
+ let c = src[i..].chars().next().unwrap();
+ char_len = c.len_utf8();
+
+ // The last element of `lines` represents the offset of the start of the
+ // current line. To get the offset inside the line, we subtract it.
+ let pos = TextSize::from(i as u32) + output_offset
+ - lines.last().unwrap_or(&TextSize::default());
+
+ if char_len > 1 {
+ assert!((2..=4).contains(&char_len));
+ let mbc = WideChar { start: pos, end: pos + TextSize::from(char_len as u32) };
+ multi_byte_chars.entry(lines.len() as u32).or_default().push(mbc);
+ }
+ }
+
+ i += char_len;
+ }
+
+ i - scan_len
+}
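
The SSE2 path above turns a 16-byte comparison into a bit mask and then walks the set bits with `trailing_zeros`, clearing each bit as it goes. The following scalar sketch (not part of the patch; the function name, the plain `u16` mask parameter, and the `Vec<u32>` return type are illustrative stand-ins for the `_mm_movemask_epi8` result and the `lines` vector) mirrors that mask-walking loop:

/// Scalar sketch of the mask-walking loop in `analyze_source_file_sse2`.
/// `newlines_mask` has one bit set per `\n` byte found in a 16-byte chunk;
/// `chunk_start` is the byte offset of the chunk within the source text.
fn newline_offsets_from_mask(newlines_mask: u16, chunk_start: u32) -> Vec<u32> {
    let mut offsets = Vec::new();
    // Pad the upper half so `trailing_zeros` returns 16 once the low bits are
    // exhausted, mirroring the `0xFFFF0000 | newlines_mask` trick above.
    let mut mask = 0xFFFF_0000u32 | newlines_mask as u32;
    loop {
        let index = mask.trailing_zeros();
        if index >= 16 {
            break; // no more newlines in this chunk
        }
        // Line starts are recorded one byte past the `\n`, matching
        // `output_offset = chunk_start + 1` in the SIMD loop.
        offsets.push(chunk_start + index + 1);
        // Clear this bit (and everything below it) so the next
        // `trailing_zeros` finds the following newline.
        mask &= (!1) << index;
    }
    offsets
}

For a chunk starting at offset 0 whose bytes are b"ab\ncd\n0123456789", the mask has bits 2 and 5 set and the sketch returns [3, 6], the same line-start offsets the SIMD loop records.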
diff --git a/vendor/line-index/src/tests.rs b/vendor/line-index/src/tests.rs
index 31c01c20e..981008e34 100644
--- a/vendor/line-index/src/tests.rs
+++ b/vendor/line-index/src/tests.rs
@@ -1,11 +1,144 @@
-use super::LineIndex;
+use crate::{LineCol, LineIndex, TextSize, WideChar, WideEncoding, WideLineCol};
+
+macro_rules! test {
+ (
+ case: $test_name:ident,
+ text: $text:expr,
+ lines: $lines:expr,
+ multi_byte_chars: $multi_byte_chars:expr,
+ ) => {
+ #[test]
+ fn $test_name() {
+ let line_index = LineIndex::new($text);
+
+ let expected_lines: Vec<TextSize> =
+ $lines.into_iter().map(<TextSize as From<u32>>::from).collect();
+
+ assert_eq!(&*line_index.newlines, &*expected_lines);
+
+ let expected_mbcs: Vec<_> = $multi_byte_chars
+ .into_iter()
+ .map(|(line, (pos, end)): (u32, (u32, u32))| {
+ (line, WideChar { start: TextSize::from(pos), end: TextSize::from(end) })
+ })
+ .collect();
+
+ assert_eq!(
+ line_index
+ .line_wide_chars
+ .iter()
+ .flat_map(|(line, val)| std::iter::repeat(*line).zip(val.iter().copied()))
+ .collect::<Vec<_>>(),
+ expected_mbcs
+ );
+ }
+ };
+}
+
+test!(
+ case: empty_text,
+ text: "",
+ lines: vec![],
+ multi_byte_chars: vec![],
+);
+
+test!(
+ case: newlines_short,
+ text: "a\nc",
+ lines: vec![2],
+ multi_byte_chars: vec![],
+);
+
+test!(
+ case: newlines_long,
+ text: "012345678\nabcdef012345678\na",
+ lines: vec![10, 26],
+ multi_byte_chars: vec![],
+);
+
+test!(
+ case: newline_and_multi_byte_char_in_same_chunk,
+ text: "01234β789\nbcdef0123456789abcdef",
+ lines: vec![11],
+ multi_byte_chars: vec![(0, (5, 7))],
+);
+
+test!(
+ case: newline_and_control_char_in_same_chunk,
+ text: "01234\u{07}6789\nbcdef0123456789abcdef",
+ lines: vec![11],
+ multi_byte_chars: vec![],
+);
+
+test!(
+ case: multi_byte_char_short,
+ text: "aβc",
+ lines: vec![],
+ multi_byte_chars: vec![(0, (1, 3))],
+);
+
+test!(
+ case: multi_byte_char_long,
+ text: "0123456789abcΔf012345β",
+ lines: vec![],
+ multi_byte_chars: vec![(0, (13, 15)), (0, (22, 24))],
+);
+
+test!(
+ case: multi_byte_char_across_chunk_boundary,
+ text: "0123456789abcdeΔ123456789abcdef01234",
+ lines: vec![],
+ multi_byte_chars: vec![(0, (15, 17))],
+);
+
+test!(
+ case: multi_byte_char_across_chunk_boundary_tail,
+ text: "0123456789abcdeΔ....",
+ lines: vec![],
+ multi_byte_chars: vec![(0, (15, 17))],
+);
+
+test!(
+ case: multi_byte_with_new_lines,
+ text: "01\t345\n789abcΔf01234567\u{07}9\nbcΔf",
+ lines: vec![7, 27],
+ multi_byte_chars: vec![(1, (6, 8)), (2, (2, 4))],
+);
+
+test!(
+ case: trailing_newline,
+ text: "0123456789\n",
+ lines: vec![11],
+ multi_byte_chars: vec![],
+);
+
+test!(
+ case: trailing_newline_chunk_boundary,
+ text: "0123456789abcde\n",
+ lines: vec![16],
+ multi_byte_chars: vec![],
+);
+
+#[test]
+fn test_try_line_col() {
+ let text = "\n\n\n\n\n宽3456";
+ assert_eq!(&text[5..8], "宽");
+ assert_eq!(&text[11..12], "6");
+ let line_index = LineIndex::new(text);
+ let before_6 = TextSize::from(11);
+ let line_col = line_index.try_line_col(before_6);
+ assert_eq!(line_col, Some(LineCol { line: 5, col: 6 }));
+}
#[test]
-fn test_empty_index() {
- let col_index = LineIndex::new(
- "
-const C: char = 'x';
-",
- );
- assert_eq!(col_index.line_wide_chars.len(), 0);
+fn test_to_wide() {
+ let text = "\n\n\n\n\n宽3456";
+ assert_eq!(&text[5..8], "宽");
+ assert_eq!(&text[11..12], "6");
+ let line_index = LineIndex::new(text);
+ let before_6 = TextSize::from(11);
+ let line_col = line_index.try_line_col(before_6);
+ assert_eq!(line_col, Some(LineCol { line: 5, col: 6 }));
+ let wide_line_col = line_index.to_wide(WideEncoding::Utf16, line_col.unwrap());
+ assert_eq!(wide_line_col, Some(WideLineCol { line: 5, col: 4 }));
}
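
The new unit tests above drive `try_line_col` and `to_wide` from inside the crate. For orientation, here is a rough external-usage sketch of the same conversions; it assumes these items are exported at the crate root (as `LineIndex`, `LineCol`, and `TextRange` are in the deleted `tests/it.rs` further down), so treat the import list as illustrative rather than definitive:

use line_index::{LineCol, LineIndex, TextSize, WideEncoding, WideLineCol};

fn main() {
    // Same text as `test_try_line_col`/`test_to_wide`: five newlines, then a
    // wide character followed by "3456".
    let text = "\n\n\n\n\n宽3456";
    let index = LineIndex::new(text);

    // Byte offset 11 sits just before the trailing '6' ('宽' is 3 bytes in UTF-8).
    let line_col = index.try_line_col(TextSize::from(11));
    assert_eq!(line_col, Some(LineCol { line: 5, col: 6 }));

    // In UTF-16 the wide character counts as a single unit, so column 6 becomes 4.
    let wide = index.to_wide(WideEncoding::Utf16, line_col.unwrap());
    assert_eq!(wide, Some(WideLineCol { line: 5, col: 4 }));
}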
diff --git a/vendor/line-index/tests/it.rs b/vendor/line-index/tests/it.rs
deleted file mode 100644
index ce1c0bc6f..000000000
--- a/vendor/line-index/tests/it.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-use line_index::{LineCol, LineIndex, TextRange};
-
-#[test]
-fn test_line_index() {
- let text = "hello\nworld";
- let table = [
- (00, 0, 0),
- (01, 0, 1),
- (05, 0, 5),
- (06, 1, 0),
- (07, 1, 1),
- (08, 1, 2),
- (10, 1, 4),
- (11, 1, 5),
- ];
-
- let index = LineIndex::new(text);
- for (offset, line, col) in table {
- assert_eq!(index.line_col(offset.into()), LineCol { line, col });
- }
-
- let text = "\nhello\nworld";
- let table = [(0, 0, 0), (1, 1, 0), (2, 1, 1), (6, 1, 5), (7, 2, 0)];
- let index = LineIndex::new(text);
- for (offset, line, col) in table {
- assert_eq!(index.line_col(offset.into()), LineCol { line, col });
- }
-}
-
-#[test]
-fn test_char_len() {
- assert_eq!('メ'.len_utf8(), 3);
- assert_eq!('メ'.len_utf16(), 1);
-}
-
-#[test]
-fn test_splitlines() {
- fn r(lo: u32, hi: u32) -> TextRange {
- TextRange::new(lo.into(), hi.into())
- }
-
- let text = "a\nbb\nccc\n";
- let line_index = LineIndex::new(text);
-
- let actual = line_index.lines(r(0, 9)).collect::<Vec<_>>();
- let expected = vec![r(0, 2), r(2, 5), r(5, 9)];
- assert_eq!(actual, expected);
-
- let text = "";
- let line_index = LineIndex::new(text);
-
- let actual = line_index.lines(r(0, 0)).collect::<Vec<_>>();
- let expected = vec![];
- assert_eq!(actual, expected);
-
- let text = "\n";
- let line_index = LineIndex::new(text);
-
- let actual = line_index.lines(r(0, 1)).collect::<Vec<_>>();
- let expected = vec![r(0, 1)];
- assert_eq!(actual, expected)
-}
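
The integration tests removed above covered the basic offset-to-`LineCol` mapping and the `lines` splitter, both of which remain in the API after this update. A minimal sketch of equivalent coverage kept outside the vendored tree (test name and placement are hypothetical; the expected values are taken from the deleted cases):

use line_index::{LineCol, LineIndex, TextRange};

#[test]
fn line_col_and_lines_roundtrip() {
    // Offsets past the newline land on line 1, as in the deleted table.
    let index = LineIndex::new("hello\nworld");
    assert_eq!(index.line_col(6.into()), LineCol { line: 1, col: 0 });
    assert_eq!(index.line_col(11.into()), LineCol { line: 1, col: 5 });

    // `lines` splits a range on newline boundaries, keeping each trailing `\n`
    // with the line it terminates.
    let index = LineIndex::new("a\nbb\nccc\n");
    let ranges: Vec<TextRange> =
        index.lines(TextRange::new(0.into(), 9.into())).collect();
    assert_eq!(
        ranges,
        vec![
            TextRange::new(0.into(), 2.into()),
            TextRange::new(2.into(), 5.into()),
            TextRange::new(5.into(), 9.into()),
        ]
    );
}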