author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /vendor/rowan
parent    Initial commit. (diff)
download  rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
          rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/rowan')
-rw-r--r--  vendor/rowan/.cargo-checksum.json         1
-rw-r--r--  vendor/rowan/Cargo.lock                 105
-rw-r--r--  vendor/rowan/Cargo.toml                  57
-rw-r--r--  vendor/rowan/LICENSE-APACHE             201
-rw-r--r--  vendor/rowan/LICENSE-MIT                 23
-rw-r--r--  vendor/rowan/README.md                   22
-rw-r--r--  vendor/rowan/examples/math.rs           152
-rw-r--r--  vendor/rowan/examples/s_expressions.rs  425
-rw-r--r--  vendor/rowan/src/api.rs                 487
-rw-r--r--  vendor/rowan/src/arc.rs                 463
-rw-r--r--  vendor/rowan/src/ast.rs                 206
-rw-r--r--  vendor/rowan/src/cow_mut.rs              30
-rw-r--r--  vendor/rowan/src/cursor.rs             1284
-rw-r--r--  vendor/rowan/src/green.rs                42
-rw-r--r--  vendor/rowan/src/green/builder.rs       119
-rw-r--r--  vendor/rowan/src/green/element.rs        89
-rw-r--r--  vendor/rowan/src/green/node.rs          361
-rw-r--r--  vendor/rowan/src/green/node_cache.rs    157
-rw-r--r--  vendor/rowan/src/green/token.rs         145
-rw-r--r--  vendor/rowan/src/lib.rs                  41
-rw-r--r--  vendor/rowan/src/serde_impls.rs          66
-rw-r--r--  vendor/rowan/src/sll.rs                 129
-rw-r--r--  vendor/rowan/src/syntax_text.rs         311
-rw-r--r--  vendor/rowan/src/utility_types.rs       180
-rw-r--r--  vendor/rowan/tests/tidy.rs               46
25 files changed, 5142 insertions, 0 deletions
diff --git a/vendor/rowan/.cargo-checksum.json b/vendor/rowan/.cargo-checksum.json
new file mode 100644
index 000000000..f2cbdfc1d
--- /dev/null
+++ b/vendor/rowan/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.lock":"d89194369a60b64062043f52664726c5f114542c20c2fbfaafac9fad682c139b","Cargo.toml":"fbd2ff15249776d6bbc4d3646ed0a7681172aad7c7f7a42b512308b7672b43b4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"69657dd291f7b245820ee01fa7bdbfdc5dc2b47284767641ffb8f94e9f9bc522","examples/math.rs":"e7557914686973cc16c6b2736a9934708465ef996b4f0a54fdb454b524641dc9","examples/s_expressions.rs":"6d963a8c030978e2648a059f2dcd5314d9fef47f0dda45e4134caedae1429eee","src/api.rs":"b5d48c0d401014c09ef9d8ee54c849735b1235e5faa798f7291be3d6a8a335c9","src/arc.rs":"fef1aa58a57d167bf4ba99b186b7e9a9c0df8c6096c2f604fbff86430dbaa6e3","src/ast.rs":"6eeedc3fcfb7b1d3d53aa54fc68a8f3bc6381ba2433e12d82b13f51fb5a82446","src/cow_mut.rs":"c90f47a013ae8cfb00bee089b75bbc2e493424e344fb4aaeff78cb2db8558ec2","src/cursor.rs":"b150f5d016aee8b196f5c02af6e7473b32e62f39fc62242dd7adbfddb21f8b69","src/green.rs":"34d58d497d2af2bbd1533aea6b4aa91496d65275b3b6def8dd6f5b1210ec0ce0","src/green/builder.rs":"517e4f6393f8fe6deaabd288f5e5b6a670dc7deec99e5bd92df34cdb4f02db49","src/green/element.rs":"df199a8b6adefef54398f209e7316525eaf84133e952325d46641b66cb89ada9","src/green/node.rs":"02701fc84ad6fdf44bd2c9b63923b0c919ca77acdb09c40d1d2b949830eab185","src/green/node_cache.rs":"f85ca104edb7d2261fd111a6fa4165f076e802aa9414457bdb028788db8dde9d","src/green/token.rs":"a1440de773044e82686dc9173ca7732a6a415a7ba6272c8bc68b3dd896f0cd7b","src/lib.rs":"e0eb1ba73978636b577094ebb726e2e222f347587928ba3930b196b809f17f86","src/serde_impls.rs":"603a459c1e3ad6edd2915e6ef3d103bae36364e01d781a8521486d618ba7b678","src/sll.rs":"f476ecf40629de8f45f5d952a88ebd91f91c71ac635ddc7cd2eebf5a90ce07bf","src/syntax_text.rs":"e387fa481a53aa9dd220a350acabd150daa8440c07ade49ea8a2f69275c6b366","src/utility_types.rs":"ce0874ae3e2b1eccb77025322c4453ceafef983f5d41b442448fb848259edf06","tests/tidy.rs":"15f655d7022a23cea887c6ac52727811f72c7d6a0fd54cdbb0476521ab4fea7f"},"package":"e88acf7b001007e9e8c989fe7449f6601d909e5dd2c56399fc158977ad6c56e8"} \ No newline at end of file
diff --git a/vendor/rowan/Cargo.lock b/vendor/rowan/Cargo.lock
new file mode 100644
index 000000000..a53655c96
--- /dev/null
+++ b/vendor/rowan/Cargo.lock
@@ -0,0 +1,105 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "countme"
+version = "3.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7704b5fdd17b18ae31c4c1da5a2e0305a2bf17b5249300a9ee9ed7b72114c636"
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "m_lexer"
+version = "0.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7e51ebf91162d585a5bae05e4779efc4a276171cb880d61dd6fab11c98467a7"
+dependencies = [
+ "regex",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "regex"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+
+[[package]]
+name = "rowan"
+version = "0.15.8"
+dependencies = [
+ "countme",
+ "hashbrown",
+ "m_lexer",
+ "memoffset",
+ "rustc-hash",
+ "serde",
+ "text-size",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "serde"
+version = "1.0.140"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03"
+
+[[package]]
+name = "text-size"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "288cb548dbe72b652243ea797201f3d481a0609a967980fcc5b2315ea811560a"
+dependencies = [
+ "serde",
+]
diff --git a/vendor/rowan/Cargo.toml b/vendor/rowan/Cargo.toml
new file mode 100644
index 000000000..c717f55d0
--- /dev/null
+++ b/vendor/rowan/Cargo.toml
@@ -0,0 +1,57 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "rowan"
+version = "0.15.8"
+authors = ["Aleksey Kladov <aleksey.kladov@gmail.com>"]
+exclude = [
+ ".github/",
+ "bors.toml",
+ "rustfmt.toml",
+]
+description = "Library for generic lossless syntax trees"
+readme = "README.md"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-analyzer/rowan"
+resolver = "2"
+
+[dependencies.countme]
+version = "3.0.0"
+
+[dependencies.hashbrown]
+version = "0.12"
+features = ["inline-more"]
+default-features = false
+
+[dependencies.memoffset]
+version = "0.6"
+
+[dependencies.rustc-hash]
+version = "1.0.1"
+
+[dependencies.serde]
+version = "1.0.89"
+optional = true
+default-features = false
+
+[dependencies.text-size]
+version = "1.1.0"
+
+[dev-dependencies.m_lexer]
+version = "0.0.4"
+
+[features]
+serde1 = [
+ "serde",
+ "text-size/serde",
+]
diff --git a/vendor/rowan/LICENSE-APACHE b/vendor/rowan/LICENSE-APACHE
new file mode 100644
index 000000000..16fe87b06
--- /dev/null
+++ b/vendor/rowan/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/rowan/LICENSE-MIT b/vendor/rowan/LICENSE-MIT
new file mode 100644
index 000000000..31aa79387
--- /dev/null
+++ b/vendor/rowan/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/rowan/README.md b/vendor/rowan/README.md
new file mode 100644
index 000000000..f0d5b133e
--- /dev/null
+++ b/vendor/rowan/README.md
@@ -0,0 +1,22 @@
+# Rowan
+
+[![Crates.io](https://img.shields.io/crates/v/rowan.svg)](https://crates.io/crates/rowan)
+[![Crates.io](https://img.shields.io/crates/d/rowan.svg)](https://crates.io/crates/rowan)
+
+Rowan is a library for lossless syntax trees, inspired in part by
+Swift's [libsyntax](https://github.com/apple/swift/tree/5e2c815edfd758f9b1309ce07bfc01c4bc20ec23/lib/Syntax).
+
+A conceptual overview is available in the [rust-analyzer repo](https://github.com/rust-analyzer/rust-analyzer/blob/master/docs/dev/syntax.md).
+
+See `examples/s_expressions` for a tutorial, and [rust-analyzer](https://github.com/rust-analyzer/rust-analyzer/) for real-world usage.
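+
+As a quick taste, here is a minimal sketch that builds a one-token tree by
+hand (the `SyntaxKind`/`Language` boilerplate mirrors `examples/math.rs`):
+
+```rust
+use rowan::{GreenNodeBuilder, Language};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(u16)]
+enum SyntaxKind { Word = 0, Root }
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum Lang {}
+impl Language for Lang {
+    type Kind = SyntaxKind;
+    fn kind_from_raw(raw: rowan::SyntaxKind) -> SyntaxKind {
+        assert!(raw.0 <= SyntaxKind::Root as u16);
+        unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
+    }
+    fn kind_to_raw(kind: SyntaxKind) -> rowan::SyntaxKind {
+        rowan::SyntaxKind(kind as u16)
+    }
+}
+
+fn main() {
+    // Build the immutable, offset-free "green" tree bottom-up...
+    let mut builder = GreenNodeBuilder::new();
+    builder.start_node(Lang::kind_to_raw(SyntaxKind::Root));
+    builder.token(Lang::kind_to_raw(SyntaxKind::Word), "hello");
+    builder.finish_node();
+    // ...then wrap it in a "red" SyntaxNode that knows offsets and parents.
+    let root = rowan::SyntaxNode::<Lang>::new_root(builder.finish());
+    assert_eq!(root.text(), "hello");
+}
+```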
+
+## Testing
+
+This crate is primarily tested by various integration tests in rust-analyzer.
+
+## License
+
+Rowan is primarily distributed under the terms of both the MIT
+license and the Apache License (Version 2.0).
+
+See LICENSE-APACHE and LICENSE-MIT for details.
diff --git a/vendor/rowan/examples/math.rs b/vendor/rowan/examples/math.rs
new file mode 100644
index 000000000..930c591b0
--- /dev/null
+++ b/vendor/rowan/examples/math.rs
@@ -0,0 +1,152 @@
+//! Example that takes the input
+//! 1 + 2 * 3 + 4
+//! and builds the tree
+//! - Marker(Root)
+//!   - Marker(Operation)
+//!     - Marker(Operation)
+//!       - "1" Token(Number)
+//!       - "+" Token(Add)
+//!       - Marker(Operation)
+//!         - "2" Token(Number)
+//!         - "*" Token(Mul)
+//!         - "3" Token(Number)
+//!     - "+" Token(Add)
+//!     - "4" Token(Number)
+
+use rowan::{GreenNodeBuilder, NodeOrToken};
+use std::iter::Peekable;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[allow(non_camel_case_types)]
+#[repr(u16)]
+enum SyntaxKind {
+ WHITESPACE = 0,
+
+ ADD,
+ SUB,
+ MUL,
+ DIV,
+
+ NUMBER,
+ ERROR,
+ OPERATION,
+ ROOT,
+}
+use SyntaxKind::*;
+
+impl From<SyntaxKind> for rowan::SyntaxKind {
+ fn from(kind: SyntaxKind) -> Self {
+ Self(kind as u16)
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum Lang {}
+impl rowan::Language for Lang {
+ type Kind = SyntaxKind;
+ fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
+ assert!(raw.0 <= ROOT as u16);
+ unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
+ }
+ fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
+ kind.into()
+ }
+}
+
+type SyntaxNode = rowan::SyntaxNode<Lang>;
+#[allow(unused)]
+type SyntaxToken = rowan::SyntaxToken<Lang>;
+#[allow(unused)]
+type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
+
+struct Parser<I: Iterator<Item = (SyntaxKind, String)>> {
+ builder: GreenNodeBuilder<'static>,
+ iter: Peekable<I>,
+}
+impl<I: Iterator<Item = (SyntaxKind, String)>> Parser<I> {
+ fn peek(&mut self) -> Option<SyntaxKind> {
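+        // Lossless trees keep trivia: whitespace is bumped into the current
+        // node (rather than dropped) before peeking at the next significant token.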
+ while self.iter.peek().map(|&(t, _)| t == WHITESPACE).unwrap_or(false) {
+ self.bump();
+ }
+ self.iter.peek().map(|&(t, _)| t)
+ }
+ fn bump(&mut self) {
+ if let Some((token, string)) = self.iter.next() {
+ self.builder.token(token.into(), string.as_str());
+ }
+ }
+ fn parse_val(&mut self) {
+ match self.peek() {
+ Some(NUMBER) => self.bump(),
+ _ => {
+ self.builder.start_node(ERROR.into());
+ self.bump();
+ self.builder.finish_node();
+ }
+ }
+ }
+ fn handle_operation(&mut self, tokens: &[SyntaxKind], next: fn(&mut Self)) {
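+        // Remember where the left operand starts. If an operator follows,
+        // `start_node_at` retroactively wraps everything parsed since this
+        // checkpoint into an OPERATION node, yielding left-associative trees.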
+ let checkpoint = self.builder.checkpoint();
+ next(self);
+ while self.peek().map(|t| tokens.contains(&t)).unwrap_or(false) {
+ self.builder.start_node_at(checkpoint, OPERATION.into());
+ self.bump();
+ next(self);
+ self.builder.finish_node();
+ }
+ }
+ fn parse_mul(&mut self) {
+ self.handle_operation(&[MUL, DIV], Self::parse_val)
+ }
+ fn parse_add(&mut self) {
+ self.handle_operation(&[ADD, SUB], Self::parse_mul)
+ }
+ fn parse(mut self) -> SyntaxNode {
+ self.builder.start_node(ROOT.into());
+ self.parse_add();
+ self.builder.finish_node();
+
+ SyntaxNode::new_root(self.builder.finish())
+ }
+}
+
+fn print(indent: usize, element: SyntaxElement) {
+ let kind: SyntaxKind = element.kind().into();
+ print!("{:indent$}", "", indent = indent);
+ match element {
+ NodeOrToken::Node(node) => {
+ println!("- {:?}", kind);
+ for child in node.children_with_tokens() {
+ print(indent + 2, child);
+ }
+ }
+
+ NodeOrToken::Token(token) => println!("- {:?} {:?}", token.text(), kind),
+ }
+}
+
+fn main() {
+ let ast = Parser {
+ builder: GreenNodeBuilder::new(),
+ iter: vec![
+ // 1 + 2 * 3 + 4
+ (NUMBER, "1".into()),
+ (WHITESPACE, " ".into()),
+ (ADD, "+".into()),
+ (WHITESPACE, " ".into()),
+ (NUMBER, "2".into()),
+ (WHITESPACE, " ".into()),
+ (MUL, "*".into()),
+ (WHITESPACE, " ".into()),
+ (NUMBER, "3".into()),
+ (WHITESPACE, " ".into()),
+ (ADD, "+".into()),
+ (WHITESPACE, " ".into()),
+ (NUMBER, "4".into()),
+ ]
+ .into_iter()
+ .peekable(),
+ }
+ .parse();
+ print(0, ast.into());
+}
diff --git a/vendor/rowan/examples/s_expressions.rs b/vendor/rowan/examples/s_expressions.rs
new file mode 100644
index 000000000..e7df4c644
--- /dev/null
+++ b/vendor/rowan/examples/s_expressions.rs
@@ -0,0 +1,425 @@
+//! In this tutorial, we will write a parser
+//! and evaluator of arithmetic S-expressions,
+//! which look like this:
+//! ```
+//! (+ (* 15 2) 62)
+//! ```
+//!
+//! It's suggested to read the conceptual overview of the design
+//! alongside this tutorial:
+//! https://github.com/rust-analyzer/rust-analyzer/blob/master/docs/dev/syntax.md
+
+/// Let's start with defining all kinds of tokens and
+/// composite nodes.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[allow(non_camel_case_types)]
+#[repr(u16)]
+enum SyntaxKind {
+ L_PAREN = 0, // '('
+ R_PAREN, // ')'
+ WORD, // '+', '15'
+    WHITESPACE, // whitespace is explicit
+ ERROR, // as well as errors
+
+ // composite nodes
+ LIST, // `(+ 2 3)`
+ ATOM, // `+`, `15`, wraps a WORD token
+ ROOT, // top-level node: a list of s-expressions
+}
+use SyntaxKind::*;
+
+/// Some boilerplate is needed, as rowan settled on using its own
+/// `struct SyntaxKind(u16)` internally, instead of accepting the
+/// user's `enum SyntaxKind` as a type parameter.
+///
+/// First, to easily pass the enum variants into rowan via `.into()`:
+impl From<SyntaxKind> for rowan::SyntaxKind {
+ fn from(kind: SyntaxKind) -> Self {
+ Self(kind as u16)
+ }
+}
+
+/// Second, implementing the `Language` trait teaches rowan to convert between
+/// these two SyntaxKind types, allowing for a nicer SyntaxNode API where
+/// "kinds" are values from our `enum SyntaxKind`, instead of plain u16 values.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum Lang {}
+impl rowan::Language for Lang {
+ type Kind = SyntaxKind;
+ fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
+ assert!(raw.0 <= ROOT as u16);
+ unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
+ }
+ fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
+ kind.into()
+ }
+}
+
+/// GreenNode is an immutable tree, which is cheap to change,
+/// but doesn't contain offsets and parent pointers.
+use rowan::GreenNode;
+
+/// You can construct GreenNodes by hand, but a builder
+/// is helpful for top-down parsers: it maintains a stack
+/// of currently in-progress nodes.
+use rowan::GreenNodeBuilder;
+
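+// As a sketch, a builder session looks like this (kinds from the enum above):
+//
+//     let mut builder = GreenNodeBuilder::new();
+//     builder.start_node(LIST.into());    // push an in-progress node
+//     builder.token(L_PAREN.into(), "("); // add a leaf to it
+//     builder.finish_node();              // pop it, attaching it to its parent
+//     let green: GreenNode = builder.finish();
+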
+/// The parse results are stored as a "green tree".
+/// We'll discuss working with the results later.
+struct Parse {
+ green_node: GreenNode,
+ #[allow(unused)]
+ errors: Vec<String>,
+}
+
+/// Now, let's write a parser.
+/// Note that `parse` does not return a `Result`:
+/// by design, a syntax tree can be built even for
+/// completely invalid source code.
+fn parse(text: &str) -> Parse {
+ struct Parser {
+ /// input tokens, including whitespace,
+ /// in *reverse* order.
+ tokens: Vec<(SyntaxKind, String)>,
+ /// the in-progress tree.
+ builder: GreenNodeBuilder<'static>,
+ /// the list of syntax errors we've accumulated
+ /// so far.
+ errors: Vec<String>,
+ }
+
+ /// The outcome of parsing a single S-expression
+ enum SexpRes {
+ /// An S-expression (i.e. an atom, or a list) was successfully parsed
+ Ok,
+ /// Nothing was parsed, as no significant tokens remained
+ Eof,
+ /// An unexpected ')' was found
+ RParen,
+ }
+
+ impl Parser {
+ fn parse(mut self) -> Parse {
+ // Make sure that the root node covers all source
+ self.builder.start_node(ROOT.into());
+ // Parse zero or more S-expressions
+ loop {
+ match self.sexp() {
+ SexpRes::Eof => break,
+ SexpRes::RParen => {
+ self.builder.start_node(ERROR.into());
+ self.errors.push("unmatched `)`".to_string());
+ self.bump(); // be sure to chug along in case of error
+ self.builder.finish_node();
+ }
+ SexpRes::Ok => (),
+ }
+ }
+ // Don't forget to eat *trailing* whitespace
+ self.skip_ws();
+ // Close the root node.
+ self.builder.finish_node();
+
+ // Turn the builder into a GreenNode
+ Parse { green_node: self.builder.finish(), errors: self.errors }
+ }
+ fn list(&mut self) {
+ assert_eq!(self.current(), Some(L_PAREN));
+ // Start the list node
+ self.builder.start_node(LIST.into());
+ self.bump(); // '('
+ loop {
+ match self.sexp() {
+ SexpRes::Eof => {
+ self.errors.push("expected `)`".to_string());
+ break;
+ }
+ SexpRes::RParen => {
+ self.bump();
+ break;
+ }
+ SexpRes::Ok => (),
+ }
+ }
+ // close the list node
+ self.builder.finish_node();
+ }
+ fn sexp(&mut self) -> SexpRes {
+ // Eat leading whitespace
+ self.skip_ws();
+ // Either a list, an atom, a closing paren,
+ // or an eof.
+ let t = match self.current() {
+ None => return SexpRes::Eof,
+ Some(R_PAREN) => return SexpRes::RParen,
+ Some(t) => t,
+ };
+ match t {
+ L_PAREN => self.list(),
+ WORD => {
+ self.builder.start_node(ATOM.into());
+ self.bump();
+ self.builder.finish_node();
+ }
+ ERROR => self.bump(),
+ _ => unreachable!(),
+ }
+ SexpRes::Ok
+ }
+ /// Advance one token, adding it to the current branch of the tree builder.
+ fn bump(&mut self) {
+ let (kind, text) = self.tokens.pop().unwrap();
+ self.builder.token(kind.into(), text.as_str());
+ }
+ /// Peek at the first unprocessed token
+ fn current(&self) -> Option<SyntaxKind> {
+ self.tokens.last().map(|(kind, _)| *kind)
+ }
+ fn skip_ws(&mut self) {
+ while self.current() == Some(WHITESPACE) {
+ self.bump()
+ }
+ }
+ }
+
+ let mut tokens = lex(text);
+ tokens.reverse();
+ Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
+}
+
+/// To work with the parse results we need a view into the
+/// green tree - the syntax tree.
+/// It is also immutable, like a GreenNode,
+/// but it contains parent pointers and offsets, and
+/// has identity semantics.
+
+type SyntaxNode = rowan::SyntaxNode<Lang>;
+#[allow(unused)]
+type SyntaxToken = rowan::SyntaxToken<Lang>;
+#[allow(unused)]
+type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
+
+impl Parse {
+ fn syntax(&self) -> SyntaxNode {
+ SyntaxNode::new_root(self.green_node.clone())
+ }
+}
+
+/// Let's check that the parser works as expected
+#[test]
+fn test_parser() {
+ let text = "(+ (* 15 2) 62)";
+ let node = parse(text).syntax();
+ assert_eq!(
+ format!("{:?}", node),
+ "ROOT@0..15", // root node, spanning 15 bytes
+ );
+ assert_eq!(node.children().count(), 1);
+ let list = node.children().next().unwrap();
+ let children = list
+ .children_with_tokens()
+ .map(|child| format!("{:?}@{:?}", child.kind(), child.text_range()))
+ .collect::<Vec<_>>();
+
+ assert_eq!(
+ children,
+ vec![
+ "L_PAREN@0..1".to_string(),
+ "ATOM@1..2".to_string(),
+ "WHITESPACE@2..3".to_string(), // note, explicit whitespace!
+ "LIST@3..11".to_string(),
+ "WHITESPACE@11..12".to_string(),
+ "ATOM@12..14".to_string(),
+ "R_PAREN@14..15".to_string(),
+ ]
+ );
+}
+
+/// So far, we've been working with a homogeneous untyped tree.
+/// It's nice to provide generic tree operations, like traversals,
+/// but it's a bad fit for semantic analysis.
+/// This crate itself does not provide AST facilities directly,
+/// but it is possible to layer AST on top of `SyntaxNode` API.
+/// Let's write a function to evaluate an S-expression.
+///
+/// For that, let's define AST nodes.
+/// It'll be quite a bunch of repetitive code, so we'll use a macro.
+///
+/// For a real language, you'd want to generate an AST. I find a
+/// combination of `serde`, `ron` and `tera` crates invaluable for that!
+macro_rules! ast_node {
+ ($ast:ident, $kind:ident) => {
+ #[derive(PartialEq, Eq, Hash)]
+ #[repr(transparent)]
+ struct $ast(SyntaxNode);
+ impl $ast {
+ #[allow(unused)]
+ fn cast(node: SyntaxNode) -> Option<Self> {
+ if node.kind() == $kind {
+ Some(Self(node))
+ } else {
+ None
+ }
+ }
+ }
+ };
+}
+
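+// For instance, `ast_node!(Atom, ATOM)` expands to a newtype
+// `struct Atom(SyntaxNode)` whose `cast` returns `Some` only for ATOM nodes.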
+ast_node!(Root, ROOT);
+ast_node!(Atom, ATOM);
+ast_node!(List, LIST);
+
+// Sexp is slightly different, so let's do it by hand.
+#[derive(PartialEq, Eq, Hash)]
+#[repr(transparent)]
+struct Sexp(SyntaxNode);
+
+enum SexpKind {
+ Atom(Atom),
+ List(List),
+}
+
+impl Sexp {
+ fn cast(node: SyntaxNode) -> Option<Self> {
+ if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
+ Some(Sexp(node))
+ } else {
+ None
+ }
+ }
+
+ fn kind(&self) -> SexpKind {
+ Atom::cast(self.0.clone())
+ .map(SexpKind::Atom)
+ .or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
+ .unwrap()
+ }
+}
+
+// Let's enhance AST nodes with ancillary functions and
+// eval.
+impl Root {
+ fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
+ self.0.children().filter_map(Sexp::cast)
+ }
+}
+
+enum Op {
+ Add,
+ Sub,
+ Div,
+ Mul,
+}
+
+impl Atom {
+ fn eval(&self) -> Option<i64> {
+ self.text().parse().ok()
+ }
+ fn as_op(&self) -> Option<Op> {
+ let op = match self.text().as_str() {
+ "+" => Op::Add,
+ "-" => Op::Sub,
+ "*" => Op::Mul,
+ "/" => Op::Div,
+ _ => return None,
+ };
+ Some(op)
+ }
+ fn text(&self) -> String {
+ match self.0.green().children().next() {
+ Some(rowan::NodeOrToken::Token(token)) => token.text().to_string(),
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl List {
+ fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
+ self.0.children().filter_map(Sexp::cast)
+ }
+ fn eval(&self) -> Option<i64> {
+ let op = match self.sexps().nth(0)?.kind() {
+ SexpKind::Atom(atom) => atom.as_op()?,
+ _ => return None,
+ };
+ let arg1 = self.sexps().nth(1)?.eval()?;
+ let arg2 = self.sexps().nth(2)?.eval()?;
+ let res = match op {
+ Op::Add => arg1 + arg2,
+ Op::Sub => arg1 - arg2,
+ Op::Mul => arg1 * arg2,
+ Op::Div if arg2 == 0 => return None,
+ Op::Div => arg1 / arg2,
+ };
+ Some(res)
+ }
+}
+
+impl Sexp {
+ fn eval(&self) -> Option<i64> {
+ match self.kind() {
+ SexpKind::Atom(atom) => atom.eval(),
+ SexpKind::List(list) => list.eval(),
+ }
+ }
+}
+
+impl Parse {
+ fn root(&self) -> Root {
+ Root::cast(self.syntax()).unwrap()
+ }
+}
+
+/// Let's test the eval!
+fn main() {
+ let sexps = "
+92
+(+ 62 30)
+(/ 92 0)
+nan
+(+ (* 15 2) 62)
+";
+ let root = parse(sexps).root();
+ let res = root.sexps().map(|it| it.eval()).collect::<Vec<_>>();
+ eprintln!("{:?}", res);
+ assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
+}
+
+/// Split the input string into a flat list of tokens
+/// (such as L_PAREN, WORD, and WHITESPACE)
+fn lex(text: &str) -> Vec<(SyntaxKind, String)> {
+ fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
+ m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
+ }
+ fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
+ match t.0 {
+ 0 => L_PAREN,
+ 1 => R_PAREN,
+ 2 => WORD,
+ 3 => WHITESPACE,
+ 4 => ERROR,
+ _ => unreachable!(),
+ }
+ }
+
+ let lexer = m_lexer::LexerBuilder::new()
+ .error_token(tok(ERROR))
+ .tokens(&[
+ (tok(L_PAREN), r"\("),
+ (tok(R_PAREN), r"\)"),
+ (tok(WORD), r"[^\s()]+"),
+ (tok(WHITESPACE), r"\s+"),
+ ])
+ .build();
+
+ lexer
+ .tokenize(text)
+ .into_iter()
+ .map(|t| (t.len, kind(t.kind)))
+ .scan(0usize, |start_offset, (len, kind)| {
+ let s: String = text[*start_offset..*start_offset + len].into();
+ *start_offset += len;
+ Some((kind, s))
+ })
+ .collect()
+}
diff --git a/vendor/rowan/src/api.rs b/vendor/rowan/src/api.rs
new file mode 100644
index 000000000..605fcc50c
--- /dev/null
+++ b/vendor/rowan/src/api.rs
@@ -0,0 +1,487 @@
+use std::{borrow::Cow, fmt, iter, marker::PhantomData, ops::Range};
+
+use crate::{
+ cursor, green::GreenTokenData, Direction, GreenNode, GreenNodeData, GreenToken, NodeOrToken,
+ SyntaxKind, SyntaxText, TextRange, TextSize, TokenAtOffset, WalkEvent,
+};
+
+pub trait Language: Sized + Copy + fmt::Debug + Eq + Ord + std::hash::Hash {
+ type Kind: Sized + Copy + fmt::Debug + Eq + Ord + std::hash::Hash;
+
+ fn kind_from_raw(raw: SyntaxKind) -> Self::Kind;
+ fn kind_to_raw(kind: Self::Kind) -> SyntaxKind;
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct SyntaxNode<L: Language> {
+ raw: cursor::SyntaxNode,
+ _p: PhantomData<L>,
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct SyntaxToken<L: Language> {
+ raw: cursor::SyntaxToken,
+ _p: PhantomData<L>,
+}
+
+pub type SyntaxElement<L> = NodeOrToken<SyntaxNode<L>, SyntaxToken<L>>;
+
+impl<L: Language> fmt::Debug for SyntaxNode<L> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if f.alternate() {
+ let mut level = 0;
+ for event in self.preorder_with_tokens() {
+ match event {
+ WalkEvent::Enter(element) => {
+ for _ in 0..level {
+ write!(f, " ")?;
+ }
+ match element {
+ NodeOrToken::Node(node) => writeln!(f, "{:?}", node)?,
+ NodeOrToken::Token(token) => writeln!(f, "{:?}", token)?,
+ }
+ level += 1;
+ }
+ WalkEvent::Leave(_) => level -= 1,
+ }
+ }
+ assert_eq!(level, 0);
+ Ok(())
+ } else {
+ write!(f, "{:?}@{:?}", self.kind(), self.text_range())
+ }
+ }
+}
+
+impl<L: Language> fmt::Display for SyntaxNode<L> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.raw, f)
+ }
+}
+
+impl<L: Language> fmt::Debug for SyntaxToken<L> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}@{:?}", self.kind(), self.text_range())?;
+ if self.text().len() < 25 {
+ return write!(f, " {:?}", self.text());
+ }
+ let text = self.text();
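+        // The token is too long to show in full: truncate to at most 24 bytes,
+        // nudging the cut forward to a char boundary (UTF-8 guarantees one
+        // within any window of 4 bytes).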
+ for idx in 21..25 {
+ if text.is_char_boundary(idx) {
+ let text = format!("{} ...", &text[..idx]);
+ return write!(f, " {:?}", text);
+ }
+ }
+ unreachable!()
+ }
+}
+
+impl<L: Language> fmt::Display for SyntaxToken<L> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.raw, f)
+ }
+}
+
+impl<L: Language> From<SyntaxNode<L>> for SyntaxElement<L> {
+ fn from(node: SyntaxNode<L>) -> SyntaxElement<L> {
+ NodeOrToken::Node(node)
+ }
+}
+
+impl<L: Language> From<SyntaxToken<L>> for SyntaxElement<L> {
+ fn from(token: SyntaxToken<L>) -> SyntaxElement<L> {
+ NodeOrToken::Token(token)
+ }
+}
+
+impl<L: Language> SyntaxNode<L> {
+ pub fn new_root(green: GreenNode) -> SyntaxNode<L> {
+ SyntaxNode::from(cursor::SyntaxNode::new_root(green))
+ }
+    /// Returns a green tree, equal to the green tree this node
+    /// belongs to, except with this node substituted. The complexity
+    /// of the operation is proportional to the depth of the tree.
+ pub fn replace_with(&self, replacement: GreenNode) -> GreenNode {
+ self.raw.replace_with(replacement)
+ }
+
+ pub fn kind(&self) -> L::Kind {
+ L::kind_from_raw(self.raw.kind())
+ }
+
+ pub fn text_range(&self) -> TextRange {
+ self.raw.text_range()
+ }
+
+ pub fn index(&self) -> usize {
+ self.raw.index()
+ }
+
+ pub fn text(&self) -> SyntaxText {
+ self.raw.text()
+ }
+
+ pub fn green(&self) -> Cow<'_, GreenNodeData> {
+ self.raw.green()
+ }
+
+ pub fn parent(&self) -> Option<SyntaxNode<L>> {
+ self.raw.parent().map(Self::from)
+ }
+
+ pub fn ancestors(&self) -> impl Iterator<Item = SyntaxNode<L>> {
+ self.raw.ancestors().map(SyntaxNode::from)
+ }
+
+ pub fn children(&self) -> SyntaxNodeChildren<L> {
+ SyntaxNodeChildren { raw: self.raw.children(), _p: PhantomData }
+ }
+
+ pub fn children_with_tokens(&self) -> SyntaxElementChildren<L> {
+ SyntaxElementChildren { raw: self.raw.children_with_tokens(), _p: PhantomData }
+ }
+
+ pub fn first_child(&self) -> Option<SyntaxNode<L>> {
+ self.raw.first_child().map(Self::from)
+ }
+ pub fn last_child(&self) -> Option<SyntaxNode<L>> {
+ self.raw.last_child().map(Self::from)
+ }
+
+ pub fn first_child_or_token(&self) -> Option<SyntaxElement<L>> {
+ self.raw.first_child_or_token().map(NodeOrToken::from)
+ }
+ pub fn last_child_or_token(&self) -> Option<SyntaxElement<L>> {
+ self.raw.last_child_or_token().map(NodeOrToken::from)
+ }
+
+ pub fn next_sibling(&self) -> Option<SyntaxNode<L>> {
+ self.raw.next_sibling().map(Self::from)
+ }
+ pub fn prev_sibling(&self) -> Option<SyntaxNode<L>> {
+ self.raw.prev_sibling().map(Self::from)
+ }
+
+ pub fn next_sibling_or_token(&self) -> Option<SyntaxElement<L>> {
+ self.raw.next_sibling_or_token().map(NodeOrToken::from)
+ }
+ pub fn prev_sibling_or_token(&self) -> Option<SyntaxElement<L>> {
+ self.raw.prev_sibling_or_token().map(NodeOrToken::from)
+ }
+
+ /// Return the leftmost token in the subtree of this node.
+ pub fn first_token(&self) -> Option<SyntaxToken<L>> {
+ self.raw.first_token().map(SyntaxToken::from)
+ }
+ /// Return the rightmost token in the subtree of this node.
+ pub fn last_token(&self) -> Option<SyntaxToken<L>> {
+ self.raw.last_token().map(SyntaxToken::from)
+ }
+
+ pub fn siblings(&self, direction: Direction) -> impl Iterator<Item = SyntaxNode<L>> {
+ self.raw.siblings(direction).map(SyntaxNode::from)
+ }
+
+ pub fn siblings_with_tokens(
+ &self,
+ direction: Direction,
+ ) -> impl Iterator<Item = SyntaxElement<L>> {
+ self.raw.siblings_with_tokens(direction).map(SyntaxElement::from)
+ }
+
+ pub fn descendants(&self) -> impl Iterator<Item = SyntaxNode<L>> {
+ self.raw.descendants().map(SyntaxNode::from)
+ }
+
+ pub fn descendants_with_tokens(&self) -> impl Iterator<Item = SyntaxElement<L>> {
+ self.raw.descendants_with_tokens().map(NodeOrToken::from)
+ }
+
+ /// Traverse the subtree rooted at the current node (including the current
+ /// node) in preorder, excluding tokens.
+ pub fn preorder(&self) -> Preorder<L> {
+ Preorder { raw: self.raw.preorder(), _p: PhantomData }
+ }
+
+ /// Traverse the subtree rooted at the current node (including the current
+ /// node) in preorder, including tokens.
+ pub fn preorder_with_tokens(&self) -> PreorderWithTokens<L> {
+ PreorderWithTokens { raw: self.raw.preorder_with_tokens(), _p: PhantomData }
+ }
+
+ /// Find a token in the subtree corresponding to this node, which covers the offset.
+    /// Precondition: the offset must be within the node's range.
+ pub fn token_at_offset(&self, offset: TextSize) -> TokenAtOffset<SyntaxToken<L>> {
+ self.raw.token_at_offset(offset).map(SyntaxToken::from)
+ }
+
+ /// Return the deepest node or token in the current subtree that fully
+ /// contains the range. If the range is empty and is contained in two leaf
+    /// nodes, either one can be returned. Precondition: the range must be
+    /// contained within the current node.
+ pub fn covering_element(&self, range: TextRange) -> SyntaxElement<L> {
+ NodeOrToken::from(self.raw.covering_element(range))
+ }
+
+ /// Finds a [`SyntaxElement`] which intersects with a given `range`. If
+ /// there are several intersecting elements, any one can be returned.
+ ///
+    /// The method uses binary search internally, so its complexity is
+ /// `O(log(N))` where `N = self.children_with_tokens().count()`.
+ pub fn child_or_token_at_range(&self, range: TextRange) -> Option<SyntaxElement<L>> {
+ self.raw.child_or_token_at_range(range).map(SyntaxElement::from)
+ }
+
+ /// Returns an independent copy of the subtree rooted at this node.
+ ///
+ /// The parent of the returned node will be `None`, the start offset will be
+ /// zero, but, otherwise, it'll be equivalent to the source node.
+ pub fn clone_subtree(&self) -> SyntaxNode<L> {
+ SyntaxNode::from(self.raw.clone_subtree())
+ }
+
+ pub fn clone_for_update(&self) -> SyntaxNode<L> {
+ SyntaxNode::from(self.raw.clone_for_update())
+ }
+
+ pub fn detach(&self) {
+ self.raw.detach()
+ }
+
+ pub fn splice_children(&self, to_delete: Range<usize>, to_insert: Vec<SyntaxElement<L>>) {
+ let to_insert = to_insert.into_iter().map(cursor::SyntaxElement::from).collect::<Vec<_>>();
+ self.raw.splice_children(to_delete, to_insert)
+ }
+}
+
+impl<L: Language> SyntaxToken<L> {
+    /// Returns a green tree, equal to the green tree this token
+    /// belongs to, except with this token substituted. The complexity
+    /// of the operation is proportional to the depth of the tree.
+ pub fn replace_with(&self, new_token: GreenToken) -> GreenNode {
+ self.raw.replace_with(new_token)
+ }
+
+ pub fn kind(&self) -> L::Kind {
+ L::kind_from_raw(self.raw.kind())
+ }
+
+ pub fn text_range(&self) -> TextRange {
+ self.raw.text_range()
+ }
+
+ pub fn index(&self) -> usize {
+ self.raw.index()
+ }
+
+ pub fn text(&self) -> &str {
+ self.raw.text()
+ }
+
+ pub fn green(&self) -> &GreenTokenData {
+ self.raw.green()
+ }
+
+ pub fn parent(&self) -> Option<SyntaxNode<L>> {
+ self.raw.parent().map(SyntaxNode::from)
+ }
+
+ /// Iterator over all the ancestors of this token excluding itself.
+ #[deprecated = "use `SyntaxToken::parent_ancestors` instead"]
+ pub fn ancestors(&self) -> impl Iterator<Item = SyntaxNode<L>> {
+ self.parent_ancestors()
+ }
+
+ /// Iterator over all the ancestors of this token excluding itself.
+ pub fn parent_ancestors(&self) -> impl Iterator<Item = SyntaxNode<L>> {
+ self.raw.ancestors().map(SyntaxNode::from)
+ }
+
+ pub fn next_sibling_or_token(&self) -> Option<SyntaxElement<L>> {
+ self.raw.next_sibling_or_token().map(NodeOrToken::from)
+ }
+ pub fn prev_sibling_or_token(&self) -> Option<SyntaxElement<L>> {
+ self.raw.prev_sibling_or_token().map(NodeOrToken::from)
+ }
+
+ pub fn siblings_with_tokens(
+ &self,
+ direction: Direction,
+ ) -> impl Iterator<Item = SyntaxElement<L>> {
+ self.raw.siblings_with_tokens(direction).map(SyntaxElement::from)
+ }
+
+    /// Next token in the tree (i.e., not necessarily a sibling).
+ pub fn next_token(&self) -> Option<SyntaxToken<L>> {
+ self.raw.next_token().map(SyntaxToken::from)
+ }
+    /// Previous token in the tree (i.e., not necessarily a sibling).
+ pub fn prev_token(&self) -> Option<SyntaxToken<L>> {
+ self.raw.prev_token().map(SyntaxToken::from)
+ }
+
+ pub fn detach(&self) {
+ self.raw.detach()
+ }
+}
+
+impl<L: Language> SyntaxElement<L> {
+ pub fn text_range(&self) -> TextRange {
+ match self {
+ NodeOrToken::Node(it) => it.text_range(),
+ NodeOrToken::Token(it) => it.text_range(),
+ }
+ }
+
+ pub fn index(&self) -> usize {
+ match self {
+ NodeOrToken::Node(it) => it.index(),
+ NodeOrToken::Token(it) => it.index(),
+ }
+ }
+
+ pub fn kind(&self) -> L::Kind {
+ match self {
+ NodeOrToken::Node(it) => it.kind(),
+ NodeOrToken::Token(it) => it.kind(),
+ }
+ }
+
+ pub fn parent(&self) -> Option<SyntaxNode<L>> {
+ match self {
+ NodeOrToken::Node(it) => it.parent(),
+ NodeOrToken::Token(it) => it.parent(),
+ }
+ }
+
+ pub fn ancestors(&self) -> impl Iterator<Item = SyntaxNode<L>> {
+ let first = match self {
+ NodeOrToken::Node(it) => Some(it.clone()),
+ NodeOrToken::Token(it) => it.parent(),
+ };
+ iter::successors(first, SyntaxNode::parent)
+ }
+
+ pub fn next_sibling_or_token(&self) -> Option<SyntaxElement<L>> {
+ match self {
+ NodeOrToken::Node(it) => it.next_sibling_or_token(),
+ NodeOrToken::Token(it) => it.next_sibling_or_token(),
+ }
+ }
+ pub fn prev_sibling_or_token(&self) -> Option<SyntaxElement<L>> {
+ match self {
+ NodeOrToken::Node(it) => it.prev_sibling_or_token(),
+ NodeOrToken::Token(it) => it.prev_sibling_or_token(),
+ }
+ }
+ pub fn detach(&self) {
+ match self {
+ NodeOrToken::Node(it) => it.detach(),
+ NodeOrToken::Token(it) => it.detach(),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct SyntaxNodeChildren<L: Language> {
+ raw: cursor::SyntaxNodeChildren,
+ _p: PhantomData<L>,
+}
+
+impl<L: Language> Iterator for SyntaxNodeChildren<L> {
+ type Item = SyntaxNode<L>;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.raw.next().map(SyntaxNode::from)
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct SyntaxElementChildren<L: Language> {
+ raw: cursor::SyntaxElementChildren,
+ _p: PhantomData<L>,
+}
+
+impl<L: Language> Iterator for SyntaxElementChildren<L> {
+ type Item = SyntaxElement<L>;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.raw.next().map(NodeOrToken::from)
+ }
+}
+
+pub struct Preorder<L: Language> {
+ raw: cursor::Preorder,
+ _p: PhantomData<L>,
+}
+
+impl<L: Language> Preorder<L> {
+ pub fn skip_subtree(&mut self) {
+ self.raw.skip_subtree()
+ }
+}
+
+impl<L: Language> Iterator for Preorder<L> {
+ type Item = WalkEvent<SyntaxNode<L>>;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.raw.next().map(|it| it.map(SyntaxNode::from))
+ }
+}
+
+pub struct PreorderWithTokens<L: Language> {
+ raw: cursor::PreorderWithTokens,
+ _p: PhantomData<L>,
+}
+
+impl<L: Language> PreorderWithTokens<L> {
+ pub fn skip_subtree(&mut self) {
+ self.raw.skip_subtree()
+ }
+}
+
+impl<L: Language> Iterator for PreorderWithTokens<L> {
+ type Item = WalkEvent<SyntaxElement<L>>;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.raw.next().map(|it| it.map(SyntaxElement::from))
+ }
+}
+
+impl<L: Language> From<cursor::SyntaxNode> for SyntaxNode<L> {
+ fn from(raw: cursor::SyntaxNode) -> SyntaxNode<L> {
+ SyntaxNode { raw, _p: PhantomData }
+ }
+}
+
+impl<L: Language> From<SyntaxNode<L>> for cursor::SyntaxNode {
+ fn from(node: SyntaxNode<L>) -> cursor::SyntaxNode {
+ node.raw
+ }
+}
+
+impl<L: Language> From<cursor::SyntaxToken> for SyntaxToken<L> {
+ fn from(raw: cursor::SyntaxToken) -> SyntaxToken<L> {
+ SyntaxToken { raw, _p: PhantomData }
+ }
+}
+
+impl<L: Language> From<SyntaxToken<L>> for cursor::SyntaxToken {
+ fn from(token: SyntaxToken<L>) -> cursor::SyntaxToken {
+ token.raw
+ }
+}
+
+impl<L: Language> From<cursor::SyntaxElement> for SyntaxElement<L> {
+ fn from(raw: cursor::SyntaxElement) -> SyntaxElement<L> {
+ match raw {
+ NodeOrToken::Node(it) => NodeOrToken::Node(it.into()),
+ NodeOrToken::Token(it) => NodeOrToken::Token(it.into()),
+ }
+ }
+}
+
+impl<L: Language> From<SyntaxElement<L>> for cursor::SyntaxElement {
+ fn from(element: SyntaxElement<L>) -> cursor::SyntaxElement {
+ match element {
+ NodeOrToken::Node(it) => NodeOrToken::Node(it.into()),
+ NodeOrToken::Token(it) => NodeOrToken::Token(it.into()),
+ }
+ }
+}
diff --git a/vendor/rowan/src/arc.rs b/vendor/rowan/src/arc.rs
new file mode 100644
index 000000000..348831290
--- /dev/null
+++ b/vendor/rowan/src/arc.rs
@@ -0,0 +1,463 @@
+//! Vendored and stripped down version of triomphe
+use std::{
+ alloc::{self, Layout},
+ cmp::Ordering,
+ hash::{Hash, Hasher},
+ marker::PhantomData,
+ mem::{self, ManuallyDrop},
+ ops::Deref,
+ ptr,
+ sync::atomic::{
+ self,
+ Ordering::{Acquire, Relaxed, Release},
+ },
+};
+
+use memoffset::offset_of;
+
+/// A soft limit on the number of references that may be made to an `Arc`.
+///
+/// Going above this limit will abort your program, although not
+/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+/// The object allocated by an Arc<T>
+#[repr(C)]
+pub(crate) struct ArcInner<T: ?Sized> {
+ pub(crate) count: atomic::AtomicUsize,
+ pub(crate) data: T,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
+
+/// An atomically reference counted shared pointer
+///
+/// See the documentation for [`Arc`] in the standard library. Unlike the
+/// standard library `Arc`, this `Arc` does not support weak reference counting.
+///
+/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
+#[repr(transparent)]
+pub(crate) struct Arc<T: ?Sized> {
+ pub(crate) p: ptr::NonNull<ArcInner<T>>,
+ pub(crate) phantom: PhantomData<T>,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+impl<T> Arc<T> {
+ /// Reconstruct the Arc<T> from a raw pointer obtained from into_raw()
+ ///
+ /// Note: This raw pointer will be offset in the allocation and must be preceded
+ /// by the atomic count.
+ ///
+ /// It is recommended to use OffsetArc for this
+ #[inline]
+ pub(crate) unsafe fn from_raw(ptr: *const T) -> Self {
+ // To find the corresponding pointer to the `ArcInner` we need
+ // to subtract the offset of the `data` field from the pointer.
+ let ptr = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
+ Arc { p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>), phantom: PhantomData }
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ #[inline]
+ fn inner(&self) -> &ArcInner<T> {
+ // This unsafety is ok because while this arc is alive we're guaranteed
+ // that the inner pointer is valid. Furthermore, we know that the
+ // `ArcInner` structure itself is `Sync` because the inner data is
+ // `Sync` as well, so we're ok loaning out an immutable pointer to these
+ // contents.
+ unsafe { &*self.ptr() }
+ }
+
+ // Non-inlined part of `drop`. Just invokes the destructor.
+ #[inline(never)]
+ unsafe fn drop_slow(&mut self) {
+ let _ = Box::from_raw(self.ptr());
+ }
+
+ /// Test pointer equality between the two Arcs, i.e. they must be the _same_
+ /// allocation
+ #[inline]
+ pub(crate) fn ptr_eq(this: &Self, other: &Self) -> bool {
+ this.ptr() == other.ptr()
+ }
+
+ pub(crate) fn ptr(&self) -> *mut ArcInner<T> {
+ self.p.as_ptr()
+ }
+}
+
+impl<T: ?Sized> Clone for Arc<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+        // As explained in the [Boost documentation][1], increasing the
+ // reference counter can always be done with memory_order_relaxed: New
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ let old_size = self.inner().count.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone
+ // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+        // and users will use-after-free. We racily saturate to `isize::MAX` on
+ // the assumption that there aren't ~2 billion threads incrementing
+ // the reference count at once. This branch will never be taken in
+ // any realistic program.
+ //
+ // We abort because such a program is incredibly degenerate, and we
+ // don't care to support it.
+ if old_size > MAX_REFCOUNT {
+ std::process::abort();
+ }
+
+ unsafe { Arc { p: ptr::NonNull::new_unchecked(self.ptr()), phantom: PhantomData } }
+ }
+}
+
+impl<T: ?Sized> Deref for Arc<T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ &self.inner().data
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
+ #[inline]
+ pub(crate) fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if this.is_unique() {
+ unsafe {
+ // See make_mut() for documentation of the threadsafety here.
+ Some(&mut (*this.ptr()).data)
+ }
+ } else {
+ None
+ }
+ }
+
+ /// Whether or not the `Arc` is uniquely owned (is the refcount 1?).
+ pub(crate) fn is_unique(&self) -> bool {
+ // See the extensive discussion in [1] for why this needs to be Acquire.
+ //
+ // [1] https://github.com/servo/servo/issues/21186
+ self.inner().count.load(Acquire) == 1
+ }
+}
+
+impl<T: ?Sized> Drop for Arc<T> {
+ #[inline]
+ fn drop(&mut self) {
+ // Because `fetch_sub` is already atomic, we do not need to synchronize
+ // with other threads unless we are going to delete the object.
+ if self.inner().count.fetch_sub(1, Release) != 1 {
+ return;
+ }
+
+ // FIXME(bholley): Use the updated comment when [2] is merged.
+ //
+ // This load is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` load. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this load, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously have happened before), and an
+ // > "acquire" operation before deleting the object.
+ //
+ // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ // [2]: https://github.com/rust-lang/rust/pull/41714
+ self.inner().count.load(Acquire);
+
+ unsafe {
+ self.drop_slow();
+ }
+ }
+}
+
+impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
+ fn eq(&self, other: &Arc<T>) -> bool {
+ Self::ptr_eq(self, other) || *(*self) == *(*other)
+ }
+
+ fn ne(&self, other: &Arc<T>) -> bool {
+ !Self::ptr_eq(self, other) && *(*self) != *(*other)
+ }
+}
+
+impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
+ fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
+ (**self).partial_cmp(&**other)
+ }
+
+ fn lt(&self, other: &Arc<T>) -> bool {
+ *(*self) < *(*other)
+ }
+
+ fn le(&self, other: &Arc<T>) -> bool {
+ *(*self) <= *(*other)
+ }
+
+ fn gt(&self, other: &Arc<T>) -> bool {
+ *(*self) > *(*other)
+ }
+
+ fn ge(&self, other: &Arc<T>) -> bool {
+ *(*self) >= *(*other)
+ }
+}
+
+impl<T: ?Sized + Ord> Ord for Arc<T> {
+ fn cmp(&self, other: &Arc<T>) -> Ordering {
+ (**self).cmp(&**other)
+ }
+}
+
+impl<T: ?Sized + Eq> Eq for Arc<T> {}
+
+impl<T: ?Sized + Hash> Hash for Arc<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state)
+ }
+}
+
+#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
+#[repr(C)]
+pub(crate) struct HeaderSlice<H, T: ?Sized> {
+ pub(crate) header: H,
+ length: usize,
+ slice: T,
+}
+
+impl<H, T> HeaderSlice<H, [T]> {
+ pub(crate) fn slice(&self) -> &[T] {
+ &self.slice
+ }
+}
+
+impl<H, T> Deref for HeaderSlice<H, [T; 0]> {
+ type Target = HeaderSlice<H, [T]>;
+
+ fn deref(&self) -> &Self::Target {
+ unsafe {
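+ // Synthesize a fat pointer: the slice metadata applies only to the
+ // unsized tail, so a pointer to `self` with `self.length` as the
+ // length reinterprets this allocation as a `HeaderSlice<H, [T]>`.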
+ let len = self.length;
+ let fake_slice: *const [T] =
+ ptr::slice_from_raw_parts(self as *const _ as *const T, len);
+ &*(fake_slice as *const HeaderSlice<H, [T]>)
+ }
+ }
+}
+
+/// A "thin" `Arc` containing dynamically sized data
+///
+/// This is functionally equivalent to `Arc<(H, [T])>`
+///
+/// When you create an `Arc` containing a dynamically sized type
+/// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack
+/// as a "fat pointer", where the length of the slice is stored
+/// alongside the `Arc`'s pointer. In some situations you may wish to
+/// have a thin pointer instead, perhaps for FFI compatibility
+/// or space efficiency.
+///
+/// `ThinArc` solves this by storing the length in the allocation itself,
+/// via `HeaderSlice`.
+///
+/// Note that we use `[T; 0]` in order to have the right alignment for `T`.
+#[repr(transparent)]
+pub(crate) struct ThinArc<H, T> {
+ ptr: ptr::NonNull<ArcInner<HeaderSlice<H, [T; 0]>>>,
+ phantom: PhantomData<(H, T)>,
+}
+
+unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
+unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}
+
+// Synthesize a fat pointer from a thin pointer.
+fn thin_to_thick<H, T>(
+ thin: *mut ArcInner<HeaderSlice<H, [T; 0]>>,
+) -> *mut ArcInner<HeaderSlice<H, [T]>> {
+ let len = unsafe { (*thin).data.length };
+ let fake_slice: *mut [T] = ptr::slice_from_raw_parts_mut(thin as *mut T, len);
+ // Transplants metadata.
+ fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
+}
+
+impl<H, T> ThinArc<H, T> {
+ /// Temporarily converts `self` into a bona fide Arc and exposes it to the
+ /// provided callback. The refcount is not modified.
+ #[inline]
+ pub(crate) fn with_arc<F, U>(&self, f: F) -> U
+ where
+ F: FnOnce(&Arc<HeaderSlice<H, [T]>>) -> U,
+ {
+ // Synthesize transient Arc, which never touches the refcount of the ArcInner.
+ let transient = unsafe {
+ ManuallyDrop::new(Arc {
+ p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())),
+ phantom: PhantomData,
+ })
+ };
+
+ // Expose the transient Arc to the callback, which may clone it if it
+ // wants; `ManuallyDrop` ensures the transient's own refcount is never
+ // decremented.
+ f(&transient)
+ }
+
+ /// Creates a `ThinArc` for a `HeaderSlice` using the given header struct
+ /// and an iterator to generate the slice.
+ pub(crate) fn from_header_and_iter<I>(header: H, mut items: I) -> Self
+ where
+ I: Iterator<Item = T> + ExactSizeIterator,
+ {
+ assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
+
+ let num_items = items.len();
+
+ // Offset of the start of the slice in the allocation.
+ let inner_to_data_offset = offset_of!(ArcInner<HeaderSlice<H, [T; 0]>>, data);
+ let data_to_slice_offset = offset_of!(HeaderSlice<H, [T; 0]>, slice);
+ let slice_offset = inner_to_data_offset + data_to_slice_offset;
+
+ // Compute the size of the real payload.
+ let slice_size = mem::size_of::<T>().checked_mul(num_items).expect("size overflows");
+ let usable_size = slice_offset.checked_add(slice_size).expect("size overflows");
+
+ // Round up size to alignment.
+ let align = mem::align_of::<ArcInner<HeaderSlice<H, [T; 0]>>>();
+ let size = usable_size.wrapping_add(align - 1) & !(align - 1);
+ assert!(size >= usable_size, "size overflows");
+ let layout = Layout::from_size_align(size, align).expect("invalid layout");
+
+ let ptr: *mut ArcInner<HeaderSlice<H, [T; 0]>>;
+ unsafe {
+ let buffer = alloc::alloc(layout);
+
+ if buffer.is_null() {
+ alloc::handle_alloc_error(layout);
+ }
+
+ // `buffer` is a thin pointer to the `[T; 0]`-flavored inner type. The
+ // real slice length is written into the `length` field below, and
+ // `thin_to_thick` re-synthesizes the fat pointer on every access.
+ ptr = buffer as *mut _;
+
+ let count = atomic::AtomicUsize::new(1);
+
+ // Write the data.
+ //
+ // Note that any panics here (i.e. from the iterator) are safe, since
+ // we'll just leak the uninitialized memory.
+ ptr::write(ptr::addr_of_mut!((*ptr).count), count);
+ ptr::write(ptr::addr_of_mut!((*ptr).data.header), header);
+ ptr::write(ptr::addr_of_mut!((*ptr).data.length), num_items);
+ if num_items != 0 {
+ let mut current = ptr::addr_of_mut!((*ptr).data.slice) as *mut T;
+ debug_assert_eq!(current as usize - buffer as usize, slice_offset);
+ for _ in 0..num_items {
+ ptr::write(
+ current,
+ items.next().expect("ExactSizeIterator over-reported length"),
+ );
+ current = current.offset(1);
+ }
+
+ // We should have consumed the buffer exactly.
+ debug_assert_eq!(current as *mut u8, buffer.add(usable_size));
+ }
+ assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
+ }
+
+ ThinArc { ptr: unsafe { ptr::NonNull::new_unchecked(ptr) }, phantom: PhantomData }
+ }
+}
+
+impl<H, T> Deref for ThinArc<H, T> {
+ type Target = HeaderSlice<H, [T]>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data }
+ }
+}
+
+impl<H, T> Clone for ThinArc<H, T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
+ }
+}
+
+impl<H, T> Drop for ThinArc<H, T> {
+ #[inline]
+ fn drop(&mut self) {
+ let _ = Arc::from_thin(ThinArc { ptr: self.ptr, phantom: PhantomData });
+ }
+}
+
+impl<H, T> Arc<HeaderSlice<H, [T]>> {
+ /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount
+ /// is not modified.
+ #[inline]
+ pub(crate) fn into_thin(a: Self) -> ThinArc<H, T> {
+ assert_eq!(a.length, a.slice.len(), "Length needs to be correct for ThinArc to work");
+ let fat_ptr: *mut ArcInner<HeaderSlice<H, [T]>> = a.ptr();
+ mem::forget(a);
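+ // Only the data address survives this cast chain; the fat pointer's
+ // length metadata is discarded, since `length` lives in the allocation.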
+ let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
+ ThinArc {
+ ptr: unsafe {
+ ptr::NonNull::new_unchecked(thin_ptr as *mut ArcInner<HeaderSlice<H, [T; 0]>>)
+ },
+ phantom: PhantomData,
+ }
+ }
+
+ /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount
+ /// is not modified.
+ #[inline]
+ pub(crate) fn from_thin(a: ThinArc<H, T>) -> Self {
+ let ptr = thin_to_thick(a.ptr.as_ptr());
+ mem::forget(a);
+ unsafe { Arc { p: ptr::NonNull::new_unchecked(ptr), phantom: PhantomData } }
+ }
+}
+
+impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
+ #[inline]
+ fn eq(&self, other: &ThinArc<H, T>) -> bool {
+ **self == **other
+ }
+}
+
+impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}
+
+impl<H: Hash, T: Hash> Hash for ThinArc<H, T> {
+ fn hash<HSR: Hasher>(&self, state: &mut HSR) {
+ (**self).hash(state)
+ }
+}
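+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // A minimal usage sketch (an illustrative addition, not part of the
+ // upstream sources): exercises `from_header_and_iter`, `Deref` access to
+ // the header and slice, and the thin<->thick round-trip, which must not
+ // touch the refcount.
+ #[test]
+ fn thin_arc_round_trip() {
+ let thin: ThinArc<u32, u8> = ThinArc::from_header_and_iter(42, 0u8..3);
+ assert_eq!(thin.header, 42);
+ assert_eq!(thin.slice(), &[0, 1, 2]);
+
+ let fat = Arc::from_thin(thin);
+ assert_eq!(fat.header, 42);
+ let thin_again = Arc::into_thin(fat);
+ assert_eq!(thin_again.slice().len(), 3);
+ }
+}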
diff --git a/vendor/rowan/src/ast.rs b/vendor/rowan/src/ast.rs
new file mode 100644
index 000000000..ea848e08a
--- /dev/null
+++ b/vendor/rowan/src/ast.rs
@@ -0,0 +1,206 @@
+//! Working with abstract syntax trees.
+//!
+//! In rowan, syntax trees are transient objects. That means that we create
+//! trees when we need them, and tear them down to save memory. In this
+//! architecture, hanging on to a particular syntax node for a long time is
+//! ill-advised, as that keeps the whole tree resident.
+//!
+//! Instead, we provide a [`SyntaxNodePtr`] type, which stores information about
+//! the _location_ of a particular syntax node in a tree. It's a small type
+//! which can be cheaply stored, and which can be resolved to a real
+//! [`SyntaxNode`] when necessary.
+//!
+//! We also provide an [`AstNode`] trait for typed AST wrapper APIs over rowan
+//! nodes.
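+//!
+//! An illustrative sketch of the intended flow (assumes some language `L`
+//! and a parsed `root: SyntaxNode<L>`; `node` and `root` are placeholders,
+//! not APIs from this file):
+//!
+//! ```text
+//! let ptr = SyntaxNodePtr::new(&node); // small, cheap to store
+//! drop(node);                          // the tree may now be torn down
+//! let node = ptr.to_node(&root);       // re-resolve in an equivalent tree
+//! ```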
+
+use std::{
+ fmt,
+ hash::{Hash, Hasher},
+ iter::successors,
+ marker::PhantomData,
+};
+
+use crate::{Language, SyntaxNode, SyntaxNodeChildren, TextRange};
+
+/// The main trait to go from untyped [`SyntaxNode`] to a typed AST. The
+/// conversion itself has zero runtime cost: AST and syntax nodes have exactly
+/// the same representation: a pointer to the tree root and a pointer to the
+/// node itself.
+pub trait AstNode {
+ type Language: Language;
+
+ fn can_cast(kind: <Self::Language as Language>::Kind) -> bool
+ where
+ Self: Sized;
+
+ fn cast(node: SyntaxNode<Self::Language>) -> Option<Self>
+ where
+ Self: Sized;
+
+ fn syntax(&self) -> &SyntaxNode<Self::Language>;
+
+ fn clone_for_update(&self) -> Self
+ where
+ Self: Sized,
+ {
+ Self::cast(self.syntax().clone_for_update()).unwrap()
+ }
+
+ fn clone_subtree(&self) -> Self
+ where
+ Self: Sized,
+ {
+ Self::cast(self.syntax().clone_subtree()).unwrap()
+ }
+}
+
+/// A "pointer" to a [`SyntaxNode`], via location in the source code.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct SyntaxNodePtr<L: Language> {
+ kind: L::Kind,
+ range: TextRange,
+}
+
+impl<L: Language> SyntaxNodePtr<L> {
+ /// Returns a [`SyntaxNodePtr`] for the node.
+ pub fn new(node: &SyntaxNode<L>) -> Self {
+ Self { kind: node.kind(), range: node.text_range() }
+ }
+
+ /// "Dereferences" the pointer to get the [`SyntaxNode`] it points to.
+ ///
+ /// Panics if the node is not found, so make sure that the `root` syntax
+ /// tree is equivalent (is built from the same text) to the tree which was
+ /// originally used to get this [`SyntaxNodePtr`].
+ ///
+ /// Also panics if `root` is not actually a root (i.e. it has a parent).
+ ///
+ /// The complexity is linear in the depth of the tree and logarithmic in
+ /// tree width. As most trees are shallow, thinking about this as
+ /// `O(log(N))` in the size of the tree is not too wrong!
+ pub fn to_node(&self, root: &SyntaxNode<L>) -> SyntaxNode<L> {
+ assert!(root.parent().is_none());
+ successors(Some(root.clone()), |node| {
+ node.child_or_token_at_range(self.range).and_then(|it| it.into_node())
+ })
+ .find(|it| it.text_range() == self.range && it.kind() == self.kind)
+ .unwrap_or_else(|| panic!("can't resolve local ptr to SyntaxNode: {:?}", self))
+ }
+
+ /// Casts this to an [`AstPtr`] to the given node type if possible.
+ pub fn cast<N: AstNode<Language = L>>(self) -> Option<AstPtr<N>> {
+ if !N::can_cast(self.kind) {
+ return None;
+ }
+ Some(AstPtr { raw: self })
+ }
+
+ /// Returns the kind of the syntax node this points to.
+ pub fn kind(&self) -> L::Kind {
+ self.kind
+ }
+
+ /// Returns the range of the syntax node this points to.
+ pub fn text_range(&self) -> TextRange {
+ self.range
+ }
+}
+
+/// Like [`SyntaxNodePtr`], but remembers the type of node.
+pub struct AstPtr<N: AstNode> {
+ raw: SyntaxNodePtr<N::Language>,
+}
+
+impl<N: AstNode> AstPtr<N> {
+ /// Returns an [`AstPtr`] for the node.
+ pub fn new(node: &N) -> Self {
+ Self { raw: SyntaxNodePtr::new(node.syntax()) }
+ }
+
+ /// Given the root node containing the node `n` that `self` is a pointer to,
+ /// returns `n`. See [`SyntaxNodePtr::to_node`].
+ pub fn to_node(&self, root: &SyntaxNode<N::Language>) -> N {
+ N::cast(self.raw.to_node(root)).unwrap()
+ }
+
+ /// Returns the underlying [`SyntaxNodePtr`].
+ pub fn syntax_node_ptr(&self) -> SyntaxNodePtr<N::Language> {
+ self.raw.clone()
+ }
+
+ /// Casts this to an [`AstPtr`] to the given node type if possible.
+ pub fn cast<U: AstNode<Language = N::Language>>(self) -> Option<AstPtr<U>> {
+ if !U::can_cast(self.raw.kind) {
+ return None;
+ }
+ Some(AstPtr { raw: self.raw })
+ }
+}
+
+impl<N: AstNode> fmt::Debug for AstPtr<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AstPtr").field("raw", &self.raw).finish()
+ }
+}
+
+impl<N: AstNode> Clone for AstPtr<N> {
+ fn clone(&self) -> Self {
+ Self { raw: self.raw.clone() }
+ }
+}
+
+impl<N: AstNode> PartialEq for AstPtr<N> {
+ fn eq(&self, other: &AstPtr<N>) -> bool {
+ self.raw == other.raw
+ }
+}
+
+impl<N: AstNode> Eq for AstPtr<N> {}
+
+impl<N: AstNode> Hash for AstPtr<N> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.raw.hash(state)
+ }
+}
+
+impl<N: AstNode> From<AstPtr<N>> for SyntaxNodePtr<N::Language> {
+ fn from(ptr: AstPtr<N>) -> SyntaxNodePtr<N::Language> {
+ ptr.raw
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct AstChildren<N: AstNode> {
+ inner: SyntaxNodeChildren<N::Language>,
+ ph: PhantomData<N>,
+}
+
+impl<N: AstNode> AstChildren<N> {
+ fn new(parent: &SyntaxNode<N::Language>) -> Self {
+ AstChildren { inner: parent.children(), ph: PhantomData }
+ }
+}
+
+impl<N: AstNode> Iterator for AstChildren<N> {
+ type Item = N;
+ fn next(&mut self) -> Option<N> {
+ self.inner.find_map(N::cast)
+ }
+}
+
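+/// Generic helpers for implementing accessors on typed [`AstNode`] wrappers,
+/// e.g. (illustrative, `Expr` being a hypothetical wrapper type)
+/// `support::child::<Expr>(self.syntax())` inside a `fn expr(&self) -> Option<Expr>`.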
+pub mod support {
+ use super::{AstChildren, AstNode};
+ use crate::{Language, SyntaxNode, SyntaxToken};
+
+ pub fn child<N: AstNode>(parent: &SyntaxNode<N::Language>) -> Option<N> {
+ parent.children().find_map(N::cast)
+ }
+
+ pub fn children<N: AstNode>(parent: &SyntaxNode<N::Language>) -> AstChildren<N> {
+ AstChildren::new(parent)
+ }
+
+ pub fn token<L: Language>(parent: &SyntaxNode<L>, kind: L::Kind) -> Option<SyntaxToken<L>> {
+ parent.children_with_tokens().filter_map(|it| it.into_token()).find(|it| it.kind() == kind)
+ }
+}
diff --git a/vendor/rowan/src/cow_mut.rs b/vendor/rowan/src/cow_mut.rs
new file mode 100644
index 000000000..c50e25b70
--- /dev/null
+++ b/vendor/rowan/src/cow_mut.rs
@@ -0,0 +1,30 @@
+#[derive(Debug)]
+pub(crate) enum CowMut<'a, T> {
+ Owned(T),
+ Borrowed(&'a mut T),
+}
+
+impl<T> std::ops::Deref for CowMut<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ match self {
+ CowMut::Owned(it) => it,
+ CowMut::Borrowed(it) => *it,
+ }
+ }
+}
+
+impl<T> std::ops::DerefMut for CowMut<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ match self {
+ CowMut::Owned(it) => it,
+ CowMut::Borrowed(it) => *it,
+ }
+ }
+}
+
+impl<T: Default> Default for CowMut<'_, T> {
+ fn default() -> Self {
+ CowMut::Owned(T::default())
+ }
+}
diff --git a/vendor/rowan/src/cursor.rs b/vendor/rowan/src/cursor.rs
new file mode 100644
index 000000000..39b0c9624
--- /dev/null
+++ b/vendor/rowan/src/cursor.rs
@@ -0,0 +1,1284 @@
+//! Implementation of the cursors -- API for convenient access to syntax trees.
+//!
+//! Functional programmers will recognize that this module implements a zipper
+//! for a purely functional (green) tree.
+//!
+//! A cursor node (`SyntaxNode`) points to a `GreenNode` and a parent
+//! `SyntaxNode`. This allows the cursor to provide iteration over both
+//! ancestors and descendants, as well as cheap access to the absolute offset
+//! of the node in the file.
+//!
+//! By default `SyntaxNode`s are immutable, but you can get a mutable copy of
+//! the tree by calling `clone_for_update`. Mutation is based on interior
+//! mutability and doesn't need `&mut`. You can have two `SyntaxNode`s pointing
+//! at different parts of the same tree; mutations via the first node will be
+//! reflected in the other.
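+//!
+//! An illustrative sketch (assumes a `green: GreenNode` built elsewhere; not
+//! a doctest, since this module is internal):
+//!
+//! ```text
+//! let root = SyntaxNode::new_root(green).clone_for_update();
+//! let child = root.first_child().unwrap();
+//! child.detach(); // no `&mut` needed: mutation uses interior mutability
+//! assert!(child.parent().is_none()); // visible through every handle
+//! ```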
+
+// Implementation notes:
+//
+// The implementation is utterly and horribly unsafe. This whole module is an
+// unsafety boundary. It is believed that the API here is, in principle, sound,
+// but the implementation might have bugs.
+//
+// The core type is `NodeData` -- a heap-allocated reference counted object,
+// which points to a green node or a green token, and to the parent `NodeData`.
+// Publicly-exposed `SyntaxNode` and `SyntaxToken` own a reference to
+// `NodeData`.
+//
+// `NodeData`s are transient, and are created and destroyed during tree
+// traversals. In general, only currently referenced nodes and their ancestors
+// are alive at any given moment.
+//
+// More specifically, `NodeData`'s ref count is equal to the number of
+// outstanding `SyntaxNode`s and `SyntaxToken`s plus the number of children with
+// non-zero ref counts. For example, if the user has only a single `SyntaxNode`
+// pointing somewhere in the middle of the tree, then all `NodeData` on the path
+// from that point towards the root have ref count equal to one.
+//
+// `NodeData` which doesn't have a parent (is a root) owns the corresponding
+// green node or token, and is responsible for freeing it.
+//
+// That's mostly it for the immutable subset of the API. Mutation is fun though,
+// you'll like it!
+//
+// Mutability is a run-time property of a tree of `NodeData`. The whole tree is
+// either mutable or immutable. `clone_for_update` clones the whole tree of
+// `NodeData`s, making it mutable (note that the green tree is re-used).
+//
+// If the tree is mutable, then all live `NodeData` are additionally linked to
+// each other via intrusive linked lists. Specifically, there are two pointers to
+// siblings, as well as a pointer to the first child. Note that only live nodes
+// are considered. If the user only has `SyntaxNode`s for the first and last
+// children of some particular node, then their `NodeData` will point at each
+// other.
+//
+// The links are used to propagate mutations across the tree. Specifically, each
+// `NodeData` remembers its index in its parent. When the node is detached from or
+// attached to the tree, we need to adjust the indices of all subsequent
+// siblings. That's what makes the `for c in node.children() { c.detach() }`
+// pattern work despite the apparent iterator invalidation.
+//
+// This code is encapsulated into the sorted linked list (`sll`) module.
+//
+// The actual mutation consists of functionally "mutating" (creating a
+// structurally shared copy of) the green node, and then re-spinning the tree.
+// This is a delicate process: `NodeData`s point directly to the green nodes, so
+// we must make sure that those nodes don't move. Additionally, during mutation a
+// node might become or might stop being a root, so we must take care to not
+// double free / leak its green node.
+//
+// Because we can change green nodes using only shared references, handing out
+// references into green nodes in the public API would be unsound. We don't do
+// that, but we do use such references internally a lot. Additionally, for
+// tokens the underlying green token actually is immutable, so we can, and do
+// return `&str`.
+//
+// Invariants [must not leak outside of the module]:
+// - Mutability is the property of the whole tree. Intermixing elements that
+// differ in mutability is not allowed.
+// - Mutability property is persistent.
+// - References to the green elements' data are not exposed into public API
+// when the tree is mutable.
+// - TBD
+
+use std::{
+ borrow::Cow,
+ cell::Cell,
+ fmt,
+ hash::{Hash, Hasher},
+ iter,
+ mem::{self, ManuallyDrop},
+ ops::Range,
+ ptr, slice,
+};
+
+use countme::Count;
+
+use crate::{
+ green::{GreenChild, GreenElementRef, GreenNodeData, GreenTokenData, SyntaxKind},
+ sll,
+ utility_types::Delta,
+ Direction, GreenNode, GreenToken, NodeOrToken, SyntaxText, TextRange, TextSize, TokenAtOffset,
+ WalkEvent,
+};
+
+enum Green {
+ Node { ptr: Cell<ptr::NonNull<GreenNodeData>> },
+ Token { ptr: ptr::NonNull<GreenTokenData> },
+}
+
+struct _SyntaxElement;
+
+struct NodeData {
+ _c: Count<_SyntaxElement>,
+
+ rc: Cell<u32>,
+ parent: Cell<Option<ptr::NonNull<NodeData>>>,
+ index: Cell<u32>,
+ green: Green,
+
+ /// Invariant: never changes after NodeData is created.
+ mutable: bool,
+ /// Absolute offset for immutable nodes, unused for mutable nodes.
+ offset: TextSize,
+ // The following links only have meaning when `mutable` is true.
+ first: Cell<*const NodeData>,
+ /// Invariant: never null if mutable.
+ next: Cell<*const NodeData>,
+ /// Invariant: never null if mutable.
+ prev: Cell<*const NodeData>,
+}
+
+unsafe impl sll::Elem for NodeData {
+ fn prev(&self) -> &Cell<*const Self> {
+ &self.prev
+ }
+ fn next(&self) -> &Cell<*const Self> {
+ &self.next
+ }
+ fn key(&self) -> &Cell<u32> {
+ &self.index
+ }
+}
+
+pub type SyntaxElement = NodeOrToken<SyntaxNode, SyntaxToken>;
+
+pub struct SyntaxNode {
+ ptr: ptr::NonNull<NodeData>,
+}
+
+impl Clone for SyntaxNode {
+ #[inline]
+ fn clone(&self) -> Self {
+ self.data().inc_rc();
+ SyntaxNode { ptr: self.ptr }
+ }
+}
+
+impl Drop for SyntaxNode {
+ #[inline]
+ fn drop(&mut self) {
+ if self.data().dec_rc() {
+ unsafe { free(self.ptr) }
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct SyntaxToken {
+ ptr: ptr::NonNull<NodeData>,
+}
+
+impl Clone for SyntaxToken {
+ #[inline]
+ fn clone(&self) -> Self {
+ self.data().inc_rc();
+ SyntaxToken { ptr: self.ptr }
+ }
+}
+
+impl Drop for SyntaxToken {
+ #[inline]
+ fn drop(&mut self) {
+ if self.data().dec_rc() {
+ unsafe { free(self.ptr) }
+ }
+ }
+}
+
+#[inline(never)]
+unsafe fn free(mut data: ptr::NonNull<NodeData>) {
+ loop {
+ debug_assert_eq!(data.as_ref().rc.get(), 0);
+ debug_assert!(data.as_ref().first.get().is_null());
+ let node = Box::from_raw(data.as_ptr());
+ match node.parent.take() {
+ Some(parent) => {
+ debug_assert!(parent.as_ref().rc.get() > 0);
+ if node.mutable {
+ sll::unlink(&parent.as_ref().first, &*node)
+ }
+ if parent.as_ref().dec_rc() {
+ data = parent;
+ } else {
+ break;
+ }
+ }
+ None => {
+ match &node.green {
+ Green::Node { ptr } => {
+ let _ = GreenNode::from_raw(ptr.get());
+ }
+ Green::Token { ptr } => {
+ let _ = GreenToken::from_raw(*ptr);
+ }
+ }
+ break;
+ }
+ }
+ }
+}
+
+impl NodeData {
+ #[inline]
+ fn new(
+ parent: Option<SyntaxNode>,
+ index: u32,
+ offset: TextSize,
+ green: Green,
+ mutable: bool,
+ ) -> ptr::NonNull<NodeData> {
+ let parent = ManuallyDrop::new(parent);
+ let res = NodeData {
+ _c: Count::new(),
+ rc: Cell::new(1),
+ parent: Cell::new(parent.as_ref().map(|it| it.ptr)),
+ index: Cell::new(index),
+ green,
+
+ mutable,
+ offset,
+ first: Cell::new(ptr::null()),
+ next: Cell::new(ptr::null()),
+ prev: Cell::new(ptr::null()),
+ };
+ unsafe {
+ if mutable {
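+ // In a mutable tree an equivalent NodeData may already be linked
+ // into the parent's child list; if so, bump its refcount and reuse
+ // it instead of allocating a duplicate.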
+ let res_ptr: *const NodeData = &res;
+ match sll::init((*res_ptr).parent().map(|it| &it.first), res_ptr.as_ref().unwrap())
+ {
+ sll::AddToSllResult::AlreadyInSll(node) => {
+ if cfg!(debug_assertions) {
+ assert_eq!((*node).index(), (*res_ptr).index());
+ match ((*node).green(), (*res_ptr).green()) {
+ (NodeOrToken::Node(lhs), NodeOrToken::Node(rhs)) => {
+ assert!(ptr::eq(lhs, rhs))
+ }
+ (NodeOrToken::Token(lhs), NodeOrToken::Token(rhs)) => {
+ assert!(ptr::eq(lhs, rhs))
+ }
+ it => {
+ panic!("node/token confusion: {:?}", it)
+ }
+ }
+ }
+
+ ManuallyDrop::into_inner(parent);
+ let res = node as *mut NodeData;
+ (*res).inc_rc();
+ return ptr::NonNull::new_unchecked(res);
+ }
+ it => {
+ let res = Box::into_raw(Box::new(res));
+ it.add_to_sll(res);
+ return ptr::NonNull::new_unchecked(res);
+ }
+ }
+ }
+ ptr::NonNull::new_unchecked(Box::into_raw(Box::new(res)))
+ }
+ }
+
+ #[inline]
+ fn inc_rc(&self) {
+ let rc = match self.rc.get().checked_add(1) {
+ Some(it) => it,
+ None => std::process::abort(),
+ };
+ self.rc.set(rc)
+ }
+
+ #[inline]
+ fn dec_rc(&self) -> bool {
+ let rc = self.rc.get() - 1;
+ self.rc.set(rc);
+ rc == 0
+ }
+
+ #[inline]
+ fn key(&self) -> (ptr::NonNull<()>, TextSize) {
+ let ptr = match &self.green {
+ Green::Node { ptr } => ptr.get().cast(),
+ Green::Token { ptr } => ptr.cast(),
+ };
+ (ptr, self.offset())
+ }
+
+ #[inline]
+ fn parent_node(&self) -> Option<SyntaxNode> {
+ let parent = self.parent()?;
+ debug_assert!(matches!(parent.green, Green::Node { .. }));
+ parent.inc_rc();
+ Some(SyntaxNode { ptr: ptr::NonNull::from(parent) })
+ }
+
+ #[inline]
+ fn parent(&self) -> Option<&NodeData> {
+ self.parent.get().map(|it| unsafe { &*it.as_ptr() })
+ }
+
+ #[inline]
+ fn green(&self) -> GreenElementRef<'_> {
+ match &self.green {
+ Green::Node { ptr } => GreenElementRef::Node(unsafe { &*ptr.get().as_ptr() }),
+ Green::Token { ptr } => GreenElementRef::Token(unsafe { &*ptr.as_ref() }),
+ }
+ }
+ #[inline]
+ fn green_siblings(&self) -> slice::Iter<GreenChild> {
+ match &self.parent().map(|it| &it.green) {
+ Some(Green::Node { ptr }) => unsafe { &*ptr.get().as_ptr() }.children().raw,
+ Some(Green::Token { .. }) => {
+ debug_assert!(false);
+ [].iter()
+ }
+ None => [].iter(),
+ }
+ }
+ #[inline]
+ fn index(&self) -> u32 {
+ self.index.get()
+ }
+
+ #[inline]
+ fn offset(&self) -> TextSize {
+ if self.mutable {
+ self.offset_mut()
+ } else {
+ self.offset
+ }
+ }
+
+ #[cold]
+ fn offset_mut(&self) -> TextSize {
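+ // Mutable trees cannot rely on a cached absolute offset (edits would
+ // invalidate it), so recompute it by summing relative offsets up the
+ // parent chain.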
+ let mut res = TextSize::from(0);
+
+ let mut node = self;
+ while let Some(parent) = node.parent() {
+ let green = parent.green().into_node().unwrap();
+ res += green.children().raw.nth(node.index() as usize).unwrap().rel_offset();
+ node = parent;
+ }
+
+ res
+ }
+
+ #[inline]
+ fn text_range(&self) -> TextRange {
+ let offset = self.offset();
+ let len = self.green().text_len();
+ TextRange::at(offset, len)
+ }
+
+ #[inline]
+ fn kind(&self) -> SyntaxKind {
+ self.green().kind()
+ }
+
+ fn next_sibling(&self) -> Option<SyntaxNode> {
+ let mut siblings = self.green_siblings().enumerate();
+ let index = self.index() as usize;
+
+ siblings.nth(index);
+ siblings.find_map(|(index, child)| {
+ child.as_ref().into_node().and_then(|green| {
+ let parent = self.parent_node()?;
+ let offset = parent.offset() + child.rel_offset();
+ Some(SyntaxNode::new_child(green, parent, index as u32, offset))
+ })
+ })
+ }
+ fn prev_sibling(&self) -> Option<SyntaxNode> {
+ let mut rev_siblings = self.green_siblings().enumerate().rev();
+ let index = rev_siblings.len() - (self.index() as usize);
+
+ rev_siblings.nth(index);
+ rev_siblings.find_map(|(index, child)| {
+ child.as_ref().into_node().and_then(|green| {
+ let parent = self.parent_node()?;
+ let offset = parent.offset() + child.rel_offset();
+ Some(SyntaxNode::new_child(green, parent, index as u32, offset))
+ })
+ })
+ }
+
+ fn next_sibling_or_token(&self) -> Option<SyntaxElement> {
+ let mut siblings = self.green_siblings().enumerate();
+ let index = self.index() as usize + 1;
+
+ siblings.nth(index).and_then(|(index, child)| {
+ let parent = self.parent_node()?;
+ let offset = parent.offset() + child.rel_offset();
+ Some(SyntaxElement::new(child.as_ref(), parent, index as u32, offset))
+ })
+ }
+ fn prev_sibling_or_token(&self) -> Option<SyntaxElement> {
+ let mut siblings = self.green_siblings().enumerate();
+ let index = self.index().checked_sub(1)? as usize;
+
+ siblings.nth(index).and_then(|(index, child)| {
+ let parent = self.parent_node()?;
+ let offset = parent.offset() + child.rel_offset();
+ Some(SyntaxElement::new(child.as_ref(), parent, index as u32, offset))
+ })
+ }
+
+ fn detach(&self) {
+ assert!(self.mutable);
+ assert!(self.rc.get() > 0);
+ let parent_ptr = match self.parent.take() {
+ Some(parent) => parent,
+ None => return,
+ };
+
+ unsafe {
+ sll::adjust(self, self.index() + 1, Delta::Sub(1));
+ let parent = parent_ptr.as_ref();
+ sll::unlink(&parent.first, self);
+
+ // Add strong ref to green
+ match self.green().to_owned() {
+ NodeOrToken::Node(it) => {
+ GreenNode::into_raw(it);
+ }
+ NodeOrToken::Token(it) => {
+ GreenToken::into_raw(it);
+ }
+ }
+
+ match parent.green() {
+ NodeOrToken::Node(green) => {
+ let green = green.remove_child(self.index() as usize);
+ parent.respine(green)
+ }
+ NodeOrToken::Token(_) => unreachable!(),
+ }
+
+ if parent.dec_rc() {
+ free(parent_ptr)
+ }
+ }
+ }
+ fn attach_child(&self, index: usize, child: &NodeData) {
+ assert!(self.mutable && child.mutable && child.parent().is_none());
+ assert!(self.rc.get() > 0 && child.rc.get() > 0);
+
+ unsafe {
+ child.index.set(index as u32);
+ child.parent.set(Some(self.into()));
+ self.inc_rc();
+
+ if !self.first.get().is_null() {
+ sll::adjust(&*self.first.get(), index as u32, Delta::Add(1));
+ }
+
+ match sll::link(&self.first, child) {
+ sll::AddToSllResult::AlreadyInSll(_) => {
+ panic!("Child already in sorted linked list")
+ }
+ it => it.add_to_sll(child),
+ }
+
+ match self.green() {
+ NodeOrToken::Node(green) => {
+ // Child is root, so it owns the green node. Steal it!
+ let child_green = match &child.green {
+ Green::Node { ptr } => GreenNode::from_raw(ptr.get()).into(),
+ Green::Token { ptr } => GreenToken::from_raw(*ptr).into(),
+ };
+
+ let green = green.insert_child(index, child_green);
+ self.respine(green);
+ }
+ NodeOrToken::Token(_) => unreachable!(),
+ }
+ }
+ }
+ unsafe fn respine(&self, mut new_green: GreenNode) {
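+ // Walk up the spine, swapping each ancestor's green pointer for a
+ // structurally shared copy that contains `new_green`. The root owns its
+ // green node, so there we release the old node and keep the new one.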
+ let mut node = self;
+ loop {
+ let old_green = match &node.green {
+ Green::Node { ptr } => ptr.replace(ptr::NonNull::from(&*new_green)),
+ Green::Token { .. } => unreachable!(),
+ };
+ match node.parent() {
+ Some(parent) => match parent.green() {
+ NodeOrToken::Node(parent_green) => {
+ new_green =
+ parent_green.replace_child(node.index() as usize, new_green.into());
+ node = parent;
+ }
+ _ => unreachable!(),
+ },
+ None => {
+ mem::forget(new_green);
+ let _ = GreenNode::from_raw(old_green);
+ break;
+ }
+ }
+ }
+ }
+}
+
+impl SyntaxNode {
+ pub fn new_root(green: GreenNode) -> SyntaxNode {
+ let green = GreenNode::into_raw(green);
+ let green = Green::Node { ptr: Cell::new(green) };
+ SyntaxNode { ptr: NodeData::new(None, 0, 0.into(), green, false) }
+ }
+
+ pub fn new_root_mut(green: GreenNode) -> SyntaxNode {
+ let green = GreenNode::into_raw(green);
+ let green = Green::Node { ptr: Cell::new(green) };
+ SyntaxNode { ptr: NodeData::new(None, 0, 0.into(), green, true) }
+ }
+
+ fn new_child(
+ green: &GreenNodeData,
+ parent: SyntaxNode,
+ index: u32,
+ offset: TextSize,
+ ) -> SyntaxNode {
+ let mutable = parent.data().mutable;
+ let green = Green::Node { ptr: Cell::new(green.into()) };
+ SyntaxNode { ptr: NodeData::new(Some(parent), index, offset, green, mutable) }
+ }
+
+ pub fn clone_for_update(&self) -> SyntaxNode {
+ assert!(!self.data().mutable);
+ match self.parent() {
+ Some(parent) => {
+ let parent = parent.clone_for_update();
+ SyntaxNode::new_child(self.green_ref(), parent, self.data().index(), self.offset())
+ }
+ None => SyntaxNode::new_root_mut(self.green_ref().to_owned()),
+ }
+ }
+
+ pub fn clone_subtree(&self) -> SyntaxNode {
+ SyntaxNode::new_root(self.green().into())
+ }
+
+ #[inline]
+ fn data(&self) -> &NodeData {
+ unsafe { self.ptr.as_ref() }
+ }
+
+ pub fn replace_with(&self, replacement: GreenNode) -> GreenNode {
+ assert_eq!(self.kind(), replacement.kind());
+ match &self.parent() {
+ None => replacement,
+ Some(parent) => {
+ let new_parent = parent
+ .green_ref()
+ .replace_child(self.data().index() as usize, replacement.into());
+ parent.replace_with(new_parent)
+ }
+ }
+ }
+
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ self.data().kind()
+ }
+
+ #[inline]
+ fn offset(&self) -> TextSize {
+ self.data().offset()
+ }
+
+ #[inline]
+ pub fn text_range(&self) -> TextRange {
+ self.data().text_range()
+ }
+
+ #[inline]
+ pub fn index(&self) -> usize {
+ self.data().index() as usize
+ }
+
+ #[inline]
+ pub fn text(&self) -> SyntaxText {
+ SyntaxText::new(self.clone())
+ }
+
+ #[inline]
+ pub fn green(&self) -> Cow<'_, GreenNodeData> {
+ let green_ref = self.green_ref();
+ match self.data().mutable {
+ false => Cow::Borrowed(green_ref),
+ true => Cow::Owned(green_ref.to_owned()),
+ }
+ }
+ #[inline]
+ fn green_ref(&self) -> &GreenNodeData {
+ self.data().green().into_node().unwrap()
+ }
+
+ #[inline]
+ pub fn parent(&self) -> Option<SyntaxNode> {
+ self.data().parent_node()
+ }
+
+ #[inline]
+ pub fn ancestors(&self) -> impl Iterator<Item = SyntaxNode> {
+ iter::successors(Some(self.clone()), SyntaxNode::parent)
+ }
+
+ #[inline]
+ pub fn children(&self) -> SyntaxNodeChildren {
+ SyntaxNodeChildren::new(self.clone())
+ }
+
+ #[inline]
+ pub fn children_with_tokens(&self) -> SyntaxElementChildren {
+ SyntaxElementChildren::new(self.clone())
+ }
+
+ pub fn first_child(&self) -> Option<SyntaxNode> {
+ self.green_ref().children().raw.enumerate().find_map(|(index, child)| {
+ child.as_ref().into_node().map(|green| {
+ SyntaxNode::new_child(
+ green,
+ self.clone(),
+ index as u32,
+ self.offset() + child.rel_offset(),
+ )
+ })
+ })
+ }
+ pub fn last_child(&self) -> Option<SyntaxNode> {
+ self.green_ref().children().raw.enumerate().rev().find_map(|(index, child)| {
+ child.as_ref().into_node().map(|green| {
+ SyntaxNode::new_child(
+ green,
+ self.clone(),
+ index as u32,
+ self.offset() + child.rel_offset(),
+ )
+ })
+ })
+ }
+
+ pub fn first_child_or_token(&self) -> Option<SyntaxElement> {
+ self.green_ref().children().raw.next().map(|child| {
+ SyntaxElement::new(child.as_ref(), self.clone(), 0, self.offset() + child.rel_offset())
+ })
+ }
+ pub fn last_child_or_token(&self) -> Option<SyntaxElement> {
+ self.green_ref().children().raw.enumerate().next_back().map(|(index, child)| {
+ SyntaxElement::new(
+ child.as_ref(),
+ self.clone(),
+ index as u32,
+ self.offset() + child.rel_offset(),
+ )
+ })
+ }
+
+ pub fn next_sibling(&self) -> Option<SyntaxNode> {
+ self.data().next_sibling()
+ }
+ pub fn prev_sibling(&self) -> Option<SyntaxNode> {
+ self.data().prev_sibling()
+ }
+
+ pub fn next_sibling_or_token(&self) -> Option<SyntaxElement> {
+ self.data().next_sibling_or_token()
+ }
+ pub fn prev_sibling_or_token(&self) -> Option<SyntaxElement> {
+ self.data().prev_sibling_or_token()
+ }
+
+ pub fn first_token(&self) -> Option<SyntaxToken> {
+ self.first_child_or_token()?.first_token()
+ }
+ pub fn last_token(&self) -> Option<SyntaxToken> {
+ self.last_child_or_token()?.last_token()
+ }
+
+ #[inline]
+ pub fn siblings(&self, direction: Direction) -> impl Iterator<Item = SyntaxNode> {
+ iter::successors(Some(self.clone()), move |node| match direction {
+ Direction::Next => node.next_sibling(),
+ Direction::Prev => node.prev_sibling(),
+ })
+ }
+
+ #[inline]
+ pub fn siblings_with_tokens(
+ &self,
+ direction: Direction,
+ ) -> impl Iterator<Item = SyntaxElement> {
+ let me: SyntaxElement = self.clone().into();
+ iter::successors(Some(me), move |el| match direction {
+ Direction::Next => el.next_sibling_or_token(),
+ Direction::Prev => el.prev_sibling_or_token(),
+ })
+ }
+
+ #[inline]
+ pub fn descendants(&self) -> impl Iterator<Item = SyntaxNode> {
+ self.preorder().filter_map(|event| match event {
+ WalkEvent::Enter(node) => Some(node),
+ WalkEvent::Leave(_) => None,
+ })
+ }
+
+ #[inline]
+ pub fn descendants_with_tokens(&self) -> impl Iterator<Item = SyntaxElement> {
+ self.preorder_with_tokens().filter_map(|event| match event {
+ WalkEvent::Enter(it) => Some(it),
+ WalkEvent::Leave(_) => None,
+ })
+ }
+
+ #[inline]
+ pub fn preorder(&self) -> Preorder {
+ Preorder::new(self.clone())
+ }
+
+ #[inline]
+ pub fn preorder_with_tokens(&self) -> PreorderWithTokens {
+ PreorderWithTokens::new(self.clone())
+ }
+
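+ /// Finds the token(s) at `offset`. Exactly at a token boundary two tokens
+ /// touch the offset, hence [`TokenAtOffset::Between`]: e.g. (illustrative)
+ /// in `a+b`, offset 1 is both the end of `a` and the start of `+`.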
+ pub fn token_at_offset(&self, offset: TextSize) -> TokenAtOffset<SyntaxToken> {
+ // TODO: this could be faster if we first drill down to the node, and
+ // only then switch to the token search. We should also replace explicit
+ // recursion with a loop.
+ let range = self.text_range();
+ assert!(
+ range.start() <= offset && offset <= range.end(),
+ "Bad offset: range {:?} offset {:?}",
+ range,
+ offset
+ );
+ if range.is_empty() {
+ return TokenAtOffset::None;
+ }
+
+ let mut children = self.children_with_tokens().filter(|child| {
+ let child_range = child.text_range();
+ !child_range.is_empty()
+ && (child_range.start() <= offset && offset <= child_range.end())
+ });
+
+ let left = children.next().unwrap();
+ let right = children.next();
+ assert!(children.next().is_none());
+
+ if let Some(right) = right {
+ match (left.token_at_offset(offset), right.token_at_offset(offset)) {
+ (TokenAtOffset::Single(left), TokenAtOffset::Single(right)) => {
+ TokenAtOffset::Between(left, right)
+ }
+ _ => unreachable!(),
+ }
+ } else {
+ left.token_at_offset(offset)
+ }
+ }
+
+ pub fn covering_element(&self, range: TextRange) -> SyntaxElement {
+ let mut res: SyntaxElement = self.clone().into();
+ loop {
+ assert!(
+ res.text_range().contains_range(range),
+ "Bad range: node range {:?}, range {:?}",
+ res.text_range(),
+ range,
+ );
+ res = match &res {
+ NodeOrToken::Token(_) => return res,
+ NodeOrToken::Node(node) => match node.child_or_token_at_range(range) {
+ Some(it) => it,
+ None => return res,
+ },
+ };
+ }
+ }
+
+ pub fn child_or_token_at_range(&self, range: TextRange) -> Option<SyntaxElement> {
+ let rel_range = range - self.offset();
+ self.green_ref().child_at_range(rel_range).map(|(index, rel_offset, green)| {
+ SyntaxElement::new(green, self.clone(), index as u32, self.offset() + rel_offset)
+ })
+ }
+
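+ /// Replaces the children in the `to_delete` range with `to_insert`: every
+ /// child whose index falls in `to_delete` is detached, then the new
+ /// elements are attached starting at `to_delete.start`. Illustrative:
+ /// `node.splice_children(1..3, vec![el])` drops children 1 and 2 and puts
+ /// `el` at index 1. Requires a mutable tree (see `clone_for_update`).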
+ pub fn splice_children(&self, to_delete: Range<usize>, to_insert: Vec<SyntaxElement>) {
+ assert!(self.data().mutable, "immutable tree: {}", self);
+ for (i, child) in self.children_with_tokens().enumerate() {
+ if to_delete.contains(&i) {
+ child.detach();
+ }
+ }
+ let mut index = to_delete.start;
+ for child in to_insert {
+ self.attach_child(index, child);
+ index += 1;
+ }
+ }
+
+ pub fn detach(&self) {
+ assert!(self.data().mutable, "immutable tree: {}", self);
+ self.data().detach()
+ }
+
+ fn attach_child(&self, index: usize, child: SyntaxElement) {
+ assert!(self.data().mutable, "immutable tree: {}", self);
+ child.detach();
+ let data = match &child {
+ NodeOrToken::Node(it) => it.data(),
+ NodeOrToken::Token(it) => it.data(),
+ };
+ self.data().attach_child(index, data)
+ }
+}
+
+impl SyntaxToken {
+ fn new(
+ green: &GreenTokenData,
+ parent: SyntaxNode,
+ index: u32,
+ offset: TextSize,
+ ) -> SyntaxToken {
+ let mutable = parent.data().mutable;
+ let green = Green::Token { ptr: green.into() };
+ SyntaxToken { ptr: NodeData::new(Some(parent), index, offset, green, mutable) }
+ }
+
+ #[inline]
+ fn data(&self) -> &NodeData {
+ unsafe { self.ptr.as_ref() }
+ }
+
+ pub fn replace_with(&self, replacement: GreenToken) -> GreenNode {
+ assert_eq!(self.kind(), replacement.kind());
+ let parent = self.parent().unwrap();
+ let me: u32 = self.data().index();
+
+ let new_parent = parent.green_ref().replace_child(me as usize, replacement.into());
+ parent.replace_with(new_parent)
+ }
+
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ self.data().kind()
+ }
+
+ #[inline]
+ pub fn text_range(&self) -> TextRange {
+ self.data().text_range()
+ }
+
+ #[inline]
+ pub fn index(&self) -> usize {
+ self.data().index() as usize
+ }
+
+ #[inline]
+ pub fn text(&self) -> &str {
+ match self.data().green().as_token() {
+ Some(it) => it.text(),
+ None => {
+ debug_assert!(
+ false,
+ "corrupted tree: a node thinks it is a token: {:?}",
+ self.data().green().as_node().unwrap().to_string()
+ );
+ ""
+ }
+ }
+ }
+
+ #[inline]
+ pub fn green(&self) -> &GreenTokenData {
+ self.data().green().into_token().unwrap()
+ }
+
+ #[inline]
+ pub fn parent(&self) -> Option<SyntaxNode> {
+ self.data().parent_node()
+ }
+
+ #[inline]
+ pub fn ancestors(&self) -> impl Iterator<Item = SyntaxNode> {
+ std::iter::successors(self.parent(), SyntaxNode::parent)
+ }
+
+ pub fn next_sibling_or_token(&self) -> Option<SyntaxElement> {
+ self.data().next_sibling_or_token()
+ }
+ pub fn prev_sibling_or_token(&self) -> Option<SyntaxElement> {
+ self.data().prev_sibling_or_token()
+ }
+
+ #[inline]
+ pub fn siblings_with_tokens(
+ &self,
+ direction: Direction,
+ ) -> impl Iterator<Item = SyntaxElement> {
+ let me: SyntaxElement = self.clone().into();
+ iter::successors(Some(me), move |el| match direction {
+ Direction::Next => el.next_sibling_or_token(),
+ Direction::Prev => el.prev_sibling_or_token(),
+ })
+ }
+
+ pub fn next_token(&self) -> Option<SyntaxToken> {
+ match self.next_sibling_or_token() {
+ Some(element) => element.first_token(),
+ None => self
+ .ancestors()
+ .find_map(|it| it.next_sibling_or_token())
+ .and_then(|element| element.first_token()),
+ }
+ }
+ pub fn prev_token(&self) -> Option<SyntaxToken> {
+ match self.prev_sibling_or_token() {
+ Some(element) => element.last_token(),
+ None => self
+ .ancestors()
+ .find_map(|it| it.prev_sibling_or_token())
+ .and_then(|element| element.last_token()),
+ }
+ }
+
+ pub fn detach(&self) {
+ assert!(self.data().mutable, "immutable tree: {}", self);
+ self.data().detach()
+ }
+}
+
+impl SyntaxElement {
+ fn new(
+ element: GreenElementRef<'_>,
+ parent: SyntaxNode,
+ index: u32,
+ offset: TextSize,
+ ) -> SyntaxElement {
+ match element {
+ NodeOrToken::Node(node) => {
+ SyntaxNode::new_child(node, parent, index as u32, offset).into()
+ }
+ NodeOrToken::Token(token) => {
+ SyntaxToken::new(token, parent, index as u32, offset).into()
+ }
+ }
+ }
+
+ #[inline]
+ pub fn text_range(&self) -> TextRange {
+ match self {
+ NodeOrToken::Node(it) => it.text_range(),
+ NodeOrToken::Token(it) => it.text_range(),
+ }
+ }
+
+ #[inline]
+ pub fn index(&self) -> usize {
+ match self {
+ NodeOrToken::Node(it) => it.index(),
+ NodeOrToken::Token(it) => it.index(),
+ }
+ }
+
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ match self {
+ NodeOrToken::Node(it) => it.kind(),
+ NodeOrToken::Token(it) => it.kind(),
+ }
+ }
+
+ #[inline]
+ pub fn parent(&self) -> Option<SyntaxNode> {
+ match self {
+ NodeOrToken::Node(it) => it.parent(),
+ NodeOrToken::Token(it) => it.parent(),
+ }
+ }
+
+ #[inline]
+ pub fn ancestors(&self) -> impl Iterator<Item = SyntaxNode> {
+ let first = match self {
+ NodeOrToken::Node(it) => Some(it.clone()),
+ NodeOrToken::Token(it) => it.parent(),
+ };
+ iter::successors(first, SyntaxNode::parent)
+ }
+
+ pub fn first_token(&self) -> Option<SyntaxToken> {
+ match self {
+ NodeOrToken::Node(it) => it.first_token(),
+ NodeOrToken::Token(it) => Some(it.clone()),
+ }
+ }
+ pub fn last_token(&self) -> Option<SyntaxToken> {
+ match self {
+ NodeOrToken::Node(it) => it.last_token(),
+ NodeOrToken::Token(it) => Some(it.clone()),
+ }
+ }
+
+ pub fn next_sibling_or_token(&self) -> Option<SyntaxElement> {
+ match self {
+ NodeOrToken::Node(it) => it.next_sibling_or_token(),
+ NodeOrToken::Token(it) => it.next_sibling_or_token(),
+ }
+ }
+ pub fn prev_sibling_or_token(&self) -> Option<SyntaxElement> {
+ match self {
+ NodeOrToken::Node(it) => it.prev_sibling_or_token(),
+ NodeOrToken::Token(it) => it.prev_sibling_or_token(),
+ }
+ }
+
+ fn token_at_offset(&self, offset: TextSize) -> TokenAtOffset<SyntaxToken> {
+ assert!(self.text_range().start() <= offset && offset <= self.text_range().end());
+ match self {
+ NodeOrToken::Token(token) => TokenAtOffset::Single(token.clone()),
+ NodeOrToken::Node(node) => node.token_at_offset(offset),
+ }
+ }
+
+ pub fn detach(&self) {
+ match self {
+ NodeOrToken::Node(it) => it.detach(),
+ NodeOrToken::Token(it) => it.detach(),
+ }
+ }
+}
+
+// region: impls
+
+// Identity semantics for hash & eq
+impl PartialEq for SyntaxNode {
+ #[inline]
+ fn eq(&self, other: &SyntaxNode) -> bool {
+ self.data().key() == other.data().key()
+ }
+}
+
+impl Eq for SyntaxNode {}
+
+impl Hash for SyntaxNode {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.data().key().hash(state);
+ }
+}
+
+impl fmt::Debug for SyntaxNode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SyntaxNode")
+ .field("kind", &self.kind())
+ .field("text_range", &self.text_range())
+ .finish()
+ }
+}
+
+impl fmt::Display for SyntaxNode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.preorder_with_tokens()
+ .filter_map(|event| match event {
+ WalkEvent::Enter(NodeOrToken::Token(token)) => Some(token),
+ _ => None,
+ })
+ .try_for_each(|it| fmt::Display::fmt(&it, f))
+ }
+}
+
+// Identity semantics for hash & eq
+impl PartialEq for SyntaxToken {
+ #[inline]
+ fn eq(&self, other: &SyntaxToken) -> bool {
+ self.data().key() == other.data().key()
+ }
+}
+
+impl Eq for SyntaxToken {}
+
+impl Hash for SyntaxToken {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.data().key().hash(state);
+ }
+}
+
+impl fmt::Display for SyntaxToken {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.text(), f)
+ }
+}
+
+impl From<SyntaxNode> for SyntaxElement {
+ #[inline]
+ fn from(node: SyntaxNode) -> SyntaxElement {
+ NodeOrToken::Node(node)
+ }
+}
+
+impl From<SyntaxToken> for SyntaxElement {
+ #[inline]
+ fn from(token: SyntaxToken) -> SyntaxElement {
+ NodeOrToken::Token(token)
+ }
+}
+
+// endregion
+
+// region: iterators
+
+#[derive(Clone, Debug)]
+pub struct SyntaxNodeChildren {
+ next: Option<SyntaxNode>,
+}
+
+impl SyntaxNodeChildren {
+ fn new(parent: SyntaxNode) -> SyntaxNodeChildren {
+ SyntaxNodeChildren { next: parent.first_child() }
+ }
+}
+
+impl Iterator for SyntaxNodeChildren {
+ type Item = SyntaxNode;
+ fn next(&mut self) -> Option<SyntaxNode> {
+ self.next.take().map(|next| {
+ self.next = next.next_sibling();
+ next
+ })
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct SyntaxElementChildren {
+ next: Option<SyntaxElement>,
+}
+
+impl SyntaxElementChildren {
+ fn new(parent: SyntaxNode) -> SyntaxElementChildren {
+ SyntaxElementChildren { next: parent.first_child_or_token() }
+ }
+}
+
+impl Iterator for SyntaxElementChildren {
+ type Item = SyntaxElement;
+ fn next(&mut self) -> Option<SyntaxElement> {
+ self.next.take().map(|next| {
+ self.next = next.next_sibling_or_token();
+ next
+ })
+ }
+}
+
+pub struct Preorder {
+ start: SyntaxNode,
+ next: Option<WalkEvent<SyntaxNode>>,
+ skip_subtree: bool,
+}
+
+impl Preorder {
+ fn new(start: SyntaxNode) -> Preorder {
+ let next = Some(WalkEvent::Enter(start.clone()));
+ Preorder { start, next, skip_subtree: false }
+ }
+
+ pub fn skip_subtree(&mut self) {
+ self.skip_subtree = true;
+ }
+
+ #[cold]
+ fn do_skip(&mut self) {
+ self.next = self.next.take().map(|next| match next {
+ WalkEvent::Enter(first_child) => WalkEvent::Leave(first_child.parent().unwrap()),
+ WalkEvent::Leave(parent) => WalkEvent::Leave(parent),
+ })
+ }
+}
+
+impl Iterator for Preorder {
+ type Item = WalkEvent<SyntaxNode>;
+
+ fn next(&mut self) -> Option<WalkEvent<SyntaxNode>> {
+ if self.skip_subtree {
+ self.do_skip();
+ self.skip_subtree = false;
+ }
+ let next = self.next.take();
+ self.next = next.as_ref().and_then(|next| {
+ Some(match next {
+ WalkEvent::Enter(node) => match node.first_child() {
+ Some(child) => WalkEvent::Enter(child),
+ None => WalkEvent::Leave(node.clone()),
+ },
+ WalkEvent::Leave(node) => {
+ if node == &self.start {
+ return None;
+ }
+ match node.next_sibling() {
+ Some(sibling) => WalkEvent::Enter(sibling),
+ None => WalkEvent::Leave(node.parent()?),
+ }
+ }
+ })
+ });
+ next
+ }
+}
+
+pub struct PreorderWithTokens {
+ start: SyntaxElement,
+ next: Option<WalkEvent<SyntaxElement>>,
+ skip_subtree: bool,
+}
+
+impl PreorderWithTokens {
+ fn new(start: SyntaxNode) -> PreorderWithTokens {
+ let next = Some(WalkEvent::Enter(start.clone().into()));
+ PreorderWithTokens { start: start.into(), next, skip_subtree: false }
+ }
+
+ pub fn skip_subtree(&mut self) {
+ self.skip_subtree = true;
+ }
+
+ #[cold]
+ fn do_skip(&mut self) {
+ self.next = self.next.take().map(|next| match next {
+ WalkEvent::Enter(first_child) => WalkEvent::Leave(first_child.parent().unwrap().into()),
+ WalkEvent::Leave(parent) => WalkEvent::Leave(parent),
+ })
+ }
+}
+
+impl Iterator for PreorderWithTokens {
+ type Item = WalkEvent<SyntaxElement>;
+
+ fn next(&mut self) -> Option<WalkEvent<SyntaxElement>> {
+ if self.skip_subtree {
+ self.do_skip();
+ self.skip_subtree = false;
+ }
+ let next = self.next.take();
+ self.next = next.as_ref().and_then(|next| {
+ Some(match next {
+ WalkEvent::Enter(el) => match el {
+ NodeOrToken::Node(node) => match node.first_child_or_token() {
+ Some(child) => WalkEvent::Enter(child),
+ None => WalkEvent::Leave(node.clone().into()),
+ },
+ NodeOrToken::Token(token) => WalkEvent::Leave(token.clone().into()),
+ },
+ WalkEvent::Leave(el) if el == &self.start => return None,
+ WalkEvent::Leave(el) => match el.next_sibling_or_token() {
+ Some(sibling) => WalkEvent::Enter(sibling),
+ None => WalkEvent::Leave(el.parent()?.into()),
+ },
+ })
+ });
+ next
+ }
+}
+// endregion
diff --git a/vendor/rowan/src/green.rs b/vendor/rowan/src/green.rs
new file mode 100644
index 000000000..e2b5b4a4e
--- /dev/null
+++ b/vendor/rowan/src/green.rs
@@ -0,0 +1,42 @@
+mod node;
+mod token;
+mod element;
+mod builder;
+mod node_cache;
+
+use self::element::GreenElement;
+
+pub(crate) use self::{element::GreenElementRef, node::GreenChild};
+
+pub use self::{
+ builder::{Checkpoint, GreenNodeBuilder},
+ node::{Children, GreenNode, GreenNodeData},
+ node_cache::NodeCache,
+ token::{GreenToken, GreenTokenData},
+};
+
+/// `SyntaxKind` is a type tag for each token or node.
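+///
+/// Consumers usually define their own enum of kinds and convert it into this
+/// raw tag (an illustrative sketch; `MyKind` is not part of this crate):
+///
+/// ```rust
+/// # use rowan::SyntaxKind;
+/// #[derive(Clone, Copy)]
+/// enum MyKind { Whitespace, Ident }
+///
+/// impl From<MyKind> for SyntaxKind {
+///     fn from(kind: MyKind) -> SyntaxKind {
+///         SyntaxKind(kind as u16)
+///     }
+/// }
+/// ```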
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct SyntaxKind(pub u16);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_send_sync() {
+ fn f<T: Send + Sync>() {}
+ f::<GreenNode>();
+ f::<GreenToken>();
+ f::<GreenElement>();
+ }
+
+ #[test]
+ fn test_size_of() {
+ use std::mem::size_of;
+
+ eprintln!("GreenNode {}", size_of::<GreenNode>());
+ eprintln!("GreenToken {}", size_of::<GreenToken>());
+ eprintln!("GreenElement {}", size_of::<GreenElement>());
+ }
+}
diff --git a/vendor/rowan/src/green/builder.rs b/vendor/rowan/src/green/builder.rs
new file mode 100644
index 000000000..1dbc8bb08
--- /dev/null
+++ b/vendor/rowan/src/green/builder.rs
@@ -0,0 +1,119 @@
+use crate::{
+ cow_mut::CowMut,
+ green::{node_cache::NodeCache, GreenElement, GreenNode, SyntaxKind},
+ NodeOrToken,
+};
+
+/// A checkpoint for maybe wrapping a node. See `GreenNodeBuilder::checkpoint` for details.
+#[derive(Clone, Copy, Debug)]
+pub struct Checkpoint(usize);
+
+/// A builder for a green tree.
+#[derive(Default, Debug)]
+pub struct GreenNodeBuilder<'cache> {
+ cache: CowMut<'cache, NodeCache>,
+ parents: Vec<(SyntaxKind, usize)>,
+ children: Vec<(u64, GreenElement)>,
+}
+
+impl GreenNodeBuilder<'_> {
+ /// Creates a new builder.
+ pub fn new() -> GreenNodeBuilder<'static> {
+ GreenNodeBuilder::default()
+ }
+
+ /// Reusing a `NodeCache` between different `GreenNodeBuilder`s saves memory.
+ /// It allows structural sharing of the underlying trees.
+ pub fn with_cache(cache: &mut NodeCache) -> GreenNodeBuilder<'_> {
+ GreenNodeBuilder {
+ cache: CowMut::Borrowed(cache),
+ parents: Vec::new(),
+ children: Vec::new(),
+ }
+ }
+
+ /// Adds a new token to the current branch.
+ #[inline]
+ pub fn token(&mut self, kind: SyntaxKind, text: &str) {
+ let (hash, token) = self.cache.token(kind, text);
+ self.children.push((hash, token.into()));
+ }
+
+ /// Starts a new node and makes it current.
+ #[inline]
+ pub fn start_node(&mut self, kind: SyntaxKind) {
+ let len = self.children.len();
+ self.parents.push((kind, len));
+ }
+
+ /// Finishes the current branch and restores the previous
+ /// branch as current.
+ #[inline]
+ pub fn finish_node(&mut self) {
+ let (kind, first_child) = self.parents.pop().unwrap();
+ let (hash, node) = self.cache.node(kind, &mut self.children, first_child);
+ self.children.push((hash, node.into()));
+ }
+
+ /// Prepares for maybe wrapping the next node.
+ /// The way wrapping works is that you first get a checkpoint, then you
+ /// place all the tokens you want to wrap, and then *maybe* call
+ /// `start_node_at`.
+ /// Example:
+ /// ```rust
+ /// # use rowan::{GreenNodeBuilder, SyntaxKind};
+ /// # const PLUS: SyntaxKind = SyntaxKind(0);
+ /// # const OPERATION: SyntaxKind = SyntaxKind(1);
+ /// # struct Parser;
+ /// # impl Parser {
+ /// # fn peek(&self) -> Option<SyntaxKind> { None }
+ /// # fn parse_expr(&mut self) {}
+ /// # }
+ /// # let mut builder = GreenNodeBuilder::new();
+ /// # let mut parser = Parser;
+ /// let checkpoint = builder.checkpoint();
+ /// parser.parse_expr();
+ /// if parser.peek() == Some(PLUS) {
+ /// // 1 + 2 = Add(1, 2)
+ /// builder.start_node_at(checkpoint, OPERATION);
+ /// parser.parse_expr();
+ /// builder.finish_node();
+ /// }
+ /// ```
+ #[inline]
+ pub fn checkpoint(&self) -> Checkpoint {
+ Checkpoint(self.children.len())
+ }
+
+ /// Wraps the previous branch marked by `checkpoint` in a new branch and
+ /// makes it current.
+ #[inline]
+ pub fn start_node_at(&mut self, checkpoint: Checkpoint, kind: SyntaxKind) {
+ let Checkpoint(checkpoint) = checkpoint;
+ assert!(
+ checkpoint <= self.children.len(),
+ "checkpoint no longer valid, was finish_node called early?"
+ );
+
+ if let Some(&(_, first_child)) = self.parents.last() {
+ assert!(
+ checkpoint >= first_child,
+ "checkpoint no longer valid, was an unmatched start_node_at called?"
+ );
+ }
+
+ self.parents.push((kind, checkpoint));
+ }
+
+ /// Completes tree building. Make sure that
+ /// `start_node` / `start_node_at` and `finish_node` calls
+ /// are paired!
+ #[inline]
+ pub fn finish(mut self) -> GreenNode {
+ assert_eq!(self.children.len(), 1);
+ match self.children.pop().unwrap().1 {
+ NodeOrToken::Node(node) => node,
+ NodeOrToken::Token(_) => panic!(),
+ }
+ }
+}
diff --git a/vendor/rowan/src/green/element.rs b/vendor/rowan/src/green/element.rs
new file mode 100644
index 000000000..2d1ce1f61
--- /dev/null
+++ b/vendor/rowan/src/green/element.rs
@@ -0,0 +1,89 @@
+use std::borrow::Cow;
+
+use crate::{
+ green::{GreenNode, GreenToken, SyntaxKind},
+ GreenNodeData, NodeOrToken, TextSize,
+};
+
+use super::GreenTokenData;
+
+pub(super) type GreenElement = NodeOrToken<GreenNode, GreenToken>;
+pub(crate) type GreenElementRef<'a> = NodeOrToken<&'a GreenNodeData, &'a GreenTokenData>;
+
+impl From<GreenNode> for GreenElement {
+ #[inline]
+ fn from(node: GreenNode) -> GreenElement {
+ NodeOrToken::Node(node)
+ }
+}
+
+impl<'a> From<&'a GreenNode> for GreenElementRef<'a> {
+ #[inline]
+ fn from(node: &'a GreenNode) -> GreenElementRef<'a> {
+ NodeOrToken::Node(node)
+ }
+}
+
+impl From<GreenToken> for GreenElement {
+ #[inline]
+ fn from(token: GreenToken) -> GreenElement {
+ NodeOrToken::Token(token)
+ }
+}
+
+impl From<Cow<'_, GreenNodeData>> for GreenElement {
+ #[inline]
+ fn from(cow: Cow<'_, GreenNodeData>) -> Self {
+ NodeOrToken::Node(cow.into_owned())
+ }
+}
+
+impl<'a> From<&'a GreenToken> for GreenElementRef<'a> {
+ #[inline]
+ fn from(token: &'a GreenToken) -> GreenElementRef<'a> {
+ NodeOrToken::Token(token)
+ }
+}
+
+impl GreenElementRef<'_> {
+ pub fn to_owned(self) -> GreenElement {
+ match self {
+ NodeOrToken::Node(it) => NodeOrToken::Node(it.to_owned()),
+ NodeOrToken::Token(it) => NodeOrToken::Token(it.to_owned()),
+ }
+ }
+}
+
+impl GreenElement {
+ /// Returns the kind of this element.
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ self.as_deref().kind()
+ }
+
+ /// Returns the length of the text covered by this element.
+ #[inline]
+ pub fn text_len(&self) -> TextSize {
+ self.as_deref().text_len()
+ }
+}
+
+impl GreenElementRef<'_> {
+ /// Returns the kind of this element.
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ match self {
+ NodeOrToken::Node(it) => it.kind(),
+ NodeOrToken::Token(it) => it.kind(),
+ }
+ }
+
+ /// Returns the length of the text covered by this element.
+ #[inline]
+ pub fn text_len(self) -> TextSize {
+ match self {
+ NodeOrToken::Node(it) => it.text_len(),
+ NodeOrToken::Token(it) => it.text_len(),
+ }
+ }
+}
diff --git a/vendor/rowan/src/green/node.rs b/vendor/rowan/src/green/node.rs
new file mode 100644
index 000000000..e94dc01cf
--- /dev/null
+++ b/vendor/rowan/src/green/node.rs
@@ -0,0 +1,361 @@
+use std::{
+ borrow::{Borrow, Cow},
+ fmt,
+ iter::{self, FusedIterator},
+ mem::{self, ManuallyDrop},
+ ops, ptr, slice,
+};
+
+use countme::Count;
+
+use crate::{
+ arc::{Arc, HeaderSlice, ThinArc},
+ green::{GreenElement, GreenElementRef, SyntaxKind},
+ utility_types::static_assert,
+ GreenToken, NodeOrToken, TextRange, TextSize,
+};
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(super) struct GreenNodeHead {
+ kind: SyntaxKind,
+ text_len: TextSize,
+ _c: Count<GreenNode>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) enum GreenChild {
+ Node { rel_offset: TextSize, node: GreenNode },
+ Token { rel_offset: TextSize, token: GreenToken },
+}
+#[cfg(target_pointer_width = "64")]
+static_assert!(mem::size_of::<GreenChild>() == mem::size_of::<usize>() * 2);
+
+type Repr = HeaderSlice<GreenNodeHead, [GreenChild]>;
+type ReprThin = HeaderSlice<GreenNodeHead, [GreenChild; 0]>;
+#[repr(transparent)]
+pub struct GreenNodeData {
+ data: ReprThin,
+}
+
+impl PartialEq for GreenNodeData {
+ fn eq(&self, other: &Self) -> bool {
+ self.header() == other.header() && self.slice() == other.slice()
+ }
+}
+
+/// Internal node in the immutable tree.
+/// It has other nodes and tokens as children.
+#[derive(Clone, PartialEq, Eq, Hash)]
+#[repr(transparent)]
+pub struct GreenNode {
+ ptr: ThinArc<GreenNodeHead, GreenChild>,
+}
+
+impl ToOwned for GreenNodeData {
+ type Owned = GreenNode;
+
+ #[inline]
+ fn to_owned(&self) -> GreenNode {
+ unsafe {
+ let green = GreenNode::from_raw(ptr::NonNull::from(self));
+ let green = ManuallyDrop::new(green);
+ GreenNode::clone(&green)
+ }
+ }
+}
+
+impl Borrow<GreenNodeData> for GreenNode {
+ #[inline]
+ fn borrow(&self) -> &GreenNodeData {
+ &*self
+ }
+}
+
+impl From<Cow<'_, GreenNodeData>> for GreenNode {
+ #[inline]
+ fn from(cow: Cow<'_, GreenNodeData>) -> Self {
+ cow.into_owned()
+ }
+}
+
+impl fmt::Debug for GreenNodeData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("GreenNode")
+ .field("kind", &self.kind())
+ .field("text_len", &self.text_len())
+ .field("n_children", &self.children().len())
+ .finish()
+ }
+}
+
+impl fmt::Debug for GreenNode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let data: &GreenNodeData = &*self;
+ fmt::Debug::fmt(data, f)
+ }
+}
+
+impl fmt::Display for GreenNode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let data: &GreenNodeData = &*self;
+ fmt::Display::fmt(data, f)
+ }
+}
+
+impl fmt::Display for GreenNodeData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for child in self.children() {
+ write!(f, "{}", child)?;
+ }
+ Ok(())
+ }
+}
+
+impl GreenNodeData {
+ #[inline]
+ fn header(&self) -> &GreenNodeHead {
+ &self.data.header
+ }
+
+ #[inline]
+ fn slice(&self) -> &[GreenChild] {
+ self.data.slice()
+ }
+
+ /// Kind of this node.
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ self.header().kind
+ }
+
+ /// Returns the length of the text covered by this node.
+ #[inline]
+ pub fn text_len(&self) -> TextSize {
+ self.header().text_len
+ }
+
+ /// Children of this node.
+ #[inline]
+ pub fn children(&self) -> Children<'_> {
+ Children { raw: self.slice().iter() }
+ }
+
+ pub(crate) fn child_at_range(
+ &self,
+ rel_range: TextRange,
+ ) -> Option<(usize, TextSize, GreenElementRef<'_>)> {
+ let idx = self
+ .slice()
+ .binary_search_by(|it| {
+ let child_range = it.rel_range();
+ TextRange::ordering(child_range, rel_range)
+ })
+ // XXX: this handles empty ranges
+ .unwrap_or_else(|it| it.saturating_sub(1));
+ let child = &self.slice().get(idx).filter(|it| it.rel_range().contains_range(rel_range))?;
+ Some((idx, child.rel_offset(), child.as_ref()))
+ }
+
+ #[must_use]
+ pub fn replace_child(&self, index: usize, new_child: GreenElement) -> GreenNode {
+ let mut replacement = Some(new_child);
+ let children = self.children().enumerate().map(|(i, child)| {
+ if i == index {
+ replacement.take().unwrap()
+ } else {
+ child.to_owned()
+ }
+ });
+ GreenNode::new(self.kind(), children)
+ }
+ #[must_use]
+ pub fn insert_child(&self, index: usize, new_child: GreenElement) -> GreenNode {
+ // https://github.com/rust-lang/rust/issues/34433
+ self.splice_children(index..index, iter::once(new_child))
+ }
+ #[must_use]
+ pub fn remove_child(&self, index: usize) -> GreenNode {
+ self.splice_children(index..=index, iter::empty())
+ }
+ #[must_use]
+ pub fn splice_children<R, I>(&self, range: R, replace_with: I) -> GreenNode
+ where
+ R: ops::RangeBounds<usize>,
+ I: IntoIterator<Item = GreenElement>,
+ {
+ let mut children: Vec<_> = self.children().map(|it| it.to_owned()).collect();
+ children.splice(range, replace_with);
+ GreenNode::new(self.kind(), children)
+ }
+}
+
+impl ops::Deref for GreenNode {
+ type Target = GreenNodeData;
+
+ #[inline]
+ fn deref(&self) -> &GreenNodeData {
+ unsafe {
+ let repr: &Repr = &self.ptr;
+ let repr: &ReprThin = &*(repr as *const Repr as *const ReprThin);
+ mem::transmute::<&ReprThin, &GreenNodeData>(repr)
+ }
+ }
+}
+
+impl GreenNode {
+ /// Creates a new node.
+ #[inline]
+ pub fn new<I>(kind: SyntaxKind, children: I) -> GreenNode
+ where
+ I: IntoIterator<Item = GreenElement>,
+ I::IntoIter: ExactSizeIterator,
+ {
+ let mut text_len: TextSize = 0.into();
+ let children = children.into_iter().map(|el| {
+ let rel_offset = text_len;
+ text_len += el.text_len();
+ match el {
+ NodeOrToken::Node(node) => GreenChild::Node { rel_offset, node },
+ NodeOrToken::Token(token) => GreenChild::Token { rel_offset, token },
+ }
+ });
+
+ let data = ThinArc::from_header_and_iter(
+ GreenNodeHead { kind, text_len: 0.into(), _c: Count::new() },
+ children,
+ );
+
+ // XXX: fixup `text_len` after construction, because we can't iterate
+ // `children` twice.
+ let data = {
+ let mut data = Arc::from_thin(data);
+ Arc::get_mut(&mut data).unwrap().header.text_len = text_len;
+ Arc::into_thin(data)
+ };
+
+ GreenNode { ptr: data }
+ }
+
+ #[inline]
+ pub(crate) fn into_raw(this: GreenNode) -> ptr::NonNull<GreenNodeData> {
+ let green = ManuallyDrop::new(this);
+ let green: &GreenNodeData = &*green;
+ ptr::NonNull::from(&*green)
+ }
+
+ #[inline]
+ pub(crate) unsafe fn from_raw(ptr: ptr::NonNull<GreenNodeData>) -> GreenNode {
+ let arc = Arc::from_raw(&ptr.as_ref().data as *const ReprThin);
+ let arc = mem::transmute::<Arc<ReprThin>, ThinArc<GreenNodeHead, GreenChild>>(arc);
+ GreenNode { ptr: arc }
+ }
+}
+
+impl GreenChild {
+ #[inline]
+ pub(crate) fn as_ref(&self) -> GreenElementRef {
+ match self {
+ GreenChild::Node { node, .. } => NodeOrToken::Node(node),
+ GreenChild::Token { token, .. } => NodeOrToken::Token(token),
+ }
+ }
+ #[inline]
+ pub(crate) fn rel_offset(&self) -> TextSize {
+ match self {
+ GreenChild::Node { rel_offset, .. } | GreenChild::Token { rel_offset, .. } => {
+ *rel_offset
+ }
+ }
+ }
+ #[inline]
+ fn rel_range(&self) -> TextRange {
+ let len = self.as_ref().text_len();
+ TextRange::at(self.rel_offset(), len)
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct Children<'a> {
+ pub(crate) raw: slice::Iter<'a, GreenChild>,
+}
+
+// NB: forward everything stable that slice::Iter specializes as of Rust 1.39.0
+impl ExactSizeIterator for Children<'_> {
+ #[inline(always)]
+ fn len(&self) -> usize {
+ self.raw.len()
+ }
+}
+
+impl<'a> Iterator for Children<'a> {
+ type Item = GreenElementRef<'a>;
+
+ #[inline]
+ fn next(&mut self) -> Option<GreenElementRef<'a>> {
+ self.raw.next().map(GreenChild::as_ref)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.raw.size_hint()
+ }
+
+ #[inline]
+ fn count(self) -> usize
+ where
+ Self: Sized,
+ {
+ self.raw.count()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.raw.nth(n).map(GreenChild::as_ref)
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<Self::Item>
+ where
+ Self: Sized,
+ {
+ self.next_back()
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, mut f: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+}
+
+impl<'a> DoubleEndedIterator for Children<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.raw.next_back().map(GreenChild::as_ref)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.raw.nth_back(n).map(GreenChild::as_ref)
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(mut self, init: Acc, mut f: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+}
+
+impl FusedIterator for Children<'_> {}
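
Since green nodes are immutable, the `replace_child` / `insert_child` / `remove_child` / `splice_children` methods above each return a fresh `GreenNode`; the original is untouched, and unchanged children (each behind an `Arc`) are shared rather than copied. A small sketch with placeholder kinds:

```rust
use rowan::{GreenNode, GreenToken, NodeOrToken, SyntaxKind};

fn main() {
    let one = GreenToken::new(SyntaxKind(1), "1");
    let two = GreenToken::new(SyntaxKind(1), "2");
    let node = GreenNode::new(SyntaxKind(0), vec![NodeOrToken::Token(one)]);

    // `replace_child` builds a new tree; `node` itself is untouched.
    let edited = node.replace_child(0, NodeOrToken::Token(two));
    assert_eq!(node.to_string(), "1");
    assert_eq!(edited.to_string(), "2");
}
```
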
diff --git a/vendor/rowan/src/green/node_cache.rs b/vendor/rowan/src/green/node_cache.rs
new file mode 100644
index 000000000..c73f3e696
--- /dev/null
+++ b/vendor/rowan/src/green/node_cache.rs
@@ -0,0 +1,157 @@
+use hashbrown::hash_map::RawEntryMut;
+use rustc_hash::FxHasher;
+use std::hash::{BuildHasherDefault, Hash, Hasher};
+
+use crate::{
+ green::GreenElementRef, GreenNode, GreenNodeData, GreenToken, GreenTokenData, NodeOrToken,
+ SyntaxKind,
+};
+
+use super::element::GreenElement;
+
+type HashMap<K, V> = hashbrown::HashMap<K, V, BuildHasherDefault<FxHasher>>;
+
+#[derive(Debug)]
+struct NoHash<T>(T);
+
+/// Interner for GreenTokens and GreenNodes
+// XXX: the impl is a bit tricky. As usual when writing interners, we want to
+// store all values in one HashSet.
+//
+// However, hashing trees is fun: the hash of a tree is recursively defined. We
+// maintain an invariant -- if the tree is interned, then all of its children
+// are interned as well.
+//
+// That means that computing the hash naively is wasteful -- we just *know*
+// hashes of children, and we can re-use those.
+//
+// So here we use *raw* API of hashbrown and provide the hashes manually,
+// instead of going via a `Hash` impl. Our manual `Hash` and the
+// `#[derive(Hash)]` are actually different! At some point we had a fun bug,
+// where we accidentally mixed the two hashes, which made the cache much less
+// efficient.
+//
+// To fix that, we additionally wrap the data in `NoHash` wrapper, to make sure
+// we don't accidentally use the wrong hash!
+#[derive(Default, Debug)]
+pub struct NodeCache {
+ nodes: HashMap<NoHash<GreenNode>, ()>,
+ tokens: HashMap<NoHash<GreenToken>, ()>,
+}
+
+fn token_hash(token: &GreenTokenData) -> u64 {
+ let mut h = FxHasher::default();
+ token.kind().hash(&mut h);
+ token.text().hash(&mut h);
+ h.finish()
+}
+
+fn node_hash(node: &GreenNodeData) -> u64 {
+ let mut h = FxHasher::default();
+ node.kind().hash(&mut h);
+ for child in node.children() {
+ match child {
+ NodeOrToken::Node(it) => node_hash(it),
+ NodeOrToken::Token(it) => token_hash(it),
+ }
+ .hash(&mut h)
+ }
+ h.finish()
+}
+
+fn element_id(elem: GreenElementRef<'_>) -> *const () {
+ match elem {
+ NodeOrToken::Node(it) => it as *const GreenNodeData as *const (),
+ NodeOrToken::Token(it) => it as *const GreenTokenData as *const (),
+ }
+}
+
+impl NodeCache {
+ pub(crate) fn node(
+ &mut self,
+ kind: SyntaxKind,
+ children: &mut Vec<(u64, GreenElement)>,
+ first_child: usize,
+ ) -> (u64, GreenNode) {
+ let build_node = move |children: &mut Vec<(u64, GreenElement)>| {
+ GreenNode::new(kind, children.drain(first_child..).map(|(_, it)| it))
+ };
+
+ let children_ref = &children[first_child..];
+ // Nodes with more than three children are never interned; a zero hash
+ // marks an element as uncached.
+ if children_ref.len() > 3 {
+ let node = build_node(children);
+ return (0, node);
+ }
+
+ let hash = {
+ let mut h = FxHasher::default();
+ kind.hash(&mut h);
+ for &(hash, _) in children_ref {
+ if hash == 0 {
+ let node = build_node(children);
+ return (0, node);
+ }
+ hash.hash(&mut h);
+ }
+ h.finish()
+ };
+
+ // Green nodes are fully immutable, so it's ok to deduplicate them.
+ // This is the same optimization that Roslyn does
+ // https://github.com/KirillOsenkov/Bliki/wiki/Roslyn-Immutable-Trees
+ //
+ // For example, all `#[inline]` in this file share the same green node!
+ // For `libsyntax/parse/parser.rs`, measurements show that deduping saves
+ // 17% of the memory for green nodes!
+ let entry = self.nodes.raw_entry_mut().from_hash(hash, |node| {
+ node.0.kind() == kind && node.0.children().len() == children_ref.len() && {
+ let lhs = node.0.children();
+ let rhs = children_ref.iter().map(|(_, it)| it.as_deref());
+
+ let lhs = lhs.map(element_id);
+ let rhs = rhs.map(element_id);
+
+ lhs.eq(rhs)
+ }
+ });
+
+ let node = match entry {
+ RawEntryMut::Occupied(entry) => {
+ drop(children.drain(first_child..));
+ entry.key().0.clone()
+ }
+ RawEntryMut::Vacant(entry) => {
+ let node = build_node(children);
+ entry.insert_with_hasher(hash, NoHash(node.clone()), (), |n| node_hash(&n.0));
+ node
+ }
+ };
+
+ (hash, node)
+ }
+
+ pub(crate) fn token(&mut self, kind: SyntaxKind, text: &str) -> (u64, GreenToken) {
+ let hash = {
+ let mut h = FxHasher::default();
+ kind.hash(&mut h);
+ text.hash(&mut h);
+ h.finish()
+ };
+
+ let entry = self
+ .tokens
+ .raw_entry_mut()
+ .from_hash(hash, |token| token.0.kind() == kind && token.0.text() == text);
+
+ let token = match entry {
+ RawEntryMut::Occupied(entry) => entry.key().0.clone(),
+ RawEntryMut::Vacant(entry) => {
+ let token = GreenToken::new(kind, text);
+ entry.insert_with_hasher(hash, NoHash(token.clone()), (), |t| token_hash(&t.0));
+ token
+ }
+ };
+
+ (hash, token)
+ }
+}
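
The comment above describes the crucial trick: because every interned child already carries its hash, a parent's hash is computed from child hashes alone instead of re-walking the subtree. A standalone sketch of that idea using plain `std` types (the names here are illustrative, not the crate's internals):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Hash of a leaf, derived from its actual content.
fn leaf_hash(kind: u16, text: &str) -> u64 {
    let mut h = DefaultHasher::new();
    kind.hash(&mut h);
    text.hash(&mut h);
    h.finish()
}

/// Hash of an interior node, derived from the *precomputed* child
/// hashes, so the cost is O(direct children), not O(subtree size).
fn node_hash(kind: u16, child_hashes: &[u64]) -> u64 {
    let mut h = DefaultHasher::new();
    kind.hash(&mut h);
    for &child in child_hashes {
        child.hash(&mut h);
    }
    h.finish()
}

fn main() {
    let a = leaf_hash(1, "a");
    let b = leaf_hash(1, "b");
    // Same kind and same child hashes: guaranteed cache hit.
    assert_eq!(node_hash(0, &[a, b]), node_hash(0, &[a, b]));
}
```
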
diff --git a/vendor/rowan/src/green/token.rs b/vendor/rowan/src/green/token.rs
new file mode 100644
index 000000000..1a4548a43
--- /dev/null
+++ b/vendor/rowan/src/green/token.rs
@@ -0,0 +1,145 @@
+use std::{
+ borrow::Borrow,
+ fmt,
+ mem::{self, ManuallyDrop},
+ ops, ptr,
+};
+
+use countme::Count;
+
+use crate::{
+ arc::{Arc, HeaderSlice, ThinArc},
+ green::SyntaxKind,
+ TextSize,
+};
+
+#[derive(PartialEq, Eq, Hash)]
+struct GreenTokenHead {
+ kind: SyntaxKind,
+ _c: Count<GreenToken>,
+}
+
+type Repr = HeaderSlice<GreenTokenHead, [u8]>;
+type ReprThin = HeaderSlice<GreenTokenHead, [u8; 0]>;
+#[repr(transparent)]
+pub struct GreenTokenData {
+ data: ReprThin,
+}
+
+impl PartialEq for GreenTokenData {
+ fn eq(&self, other: &Self) -> bool {
+ self.kind() == other.kind() && self.text() == other.text()
+ }
+}
+
+/// Leaf node in the immutable tree.
+#[derive(PartialEq, Eq, Hash, Clone)]
+#[repr(transparent)]
+pub struct GreenToken {
+ ptr: ThinArc<GreenTokenHead, u8>,
+}
+
+impl ToOwned for GreenTokenData {
+ type Owned = GreenToken;
+
+ #[inline]
+ fn to_owned(&self) -> GreenToken {
+ unsafe {
+ let green = GreenToken::from_raw(ptr::NonNull::from(self));
+ let green = ManuallyDrop::new(green);
+ GreenToken::clone(&green)
+ }
+ }
+}
+
+impl Borrow<GreenTokenData> for GreenToken {
+ #[inline]
+ fn borrow(&self) -> &GreenTokenData {
+ &*self
+ }
+}
+
+impl fmt::Debug for GreenTokenData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("GreenToken")
+ .field("kind", &self.kind())
+ .field("text", &self.text())
+ .finish()
+ }
+}
+
+impl fmt::Debug for GreenToken {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let data: &GreenTokenData = &*self;
+ fmt::Debug::fmt(data, f)
+ }
+}
+
+impl fmt::Display for GreenToken {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let data: &GreenTokenData = &*self;
+ fmt::Display::fmt(data, f)
+ }
+}
+
+impl fmt::Display for GreenTokenData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.text())
+ }
+}
+
+impl GreenTokenData {
+ /// Kind of this Token.
+ #[inline]
+ pub fn kind(&self) -> SyntaxKind {
+ self.data.header.kind
+ }
+
+ /// Text of this Token.
+ #[inline]
+ pub fn text(&self) -> &str {
+ unsafe { std::str::from_utf8_unchecked(self.data.slice()) }
+ }
+
+ /// Returns the length of the text covered by this token.
+ #[inline]
+ pub fn text_len(&self) -> TextSize {
+ TextSize::of(self.text())
+ }
+}
+
+impl GreenToken {
+ /// Creates a new token.
+ #[inline]
+ pub fn new(kind: SyntaxKind, text: &str) -> GreenToken {
+ let head = GreenTokenHead { kind, _c: Count::new() };
+ let ptr = ThinArc::from_header_and_iter(head, text.bytes());
+ GreenToken { ptr }
+ }
+ #[inline]
+ pub(crate) fn into_raw(this: GreenToken) -> ptr::NonNull<GreenTokenData> {
+ let green = ManuallyDrop::new(this);
+ let green: &GreenTokenData = &*green;
+ ptr::NonNull::from(&*green)
+ }
+
+ #[inline]
+ pub(crate) unsafe fn from_raw(ptr: ptr::NonNull<GreenTokenData>) -> GreenToken {
+ let arc = Arc::from_raw(&ptr.as_ref().data as *const ReprThin);
+ let arc = mem::transmute::<Arc<ReprThin>, ThinArc<GreenTokenHead, u8>>(arc);
+ GreenToken { ptr: arc }
+ }
+}
+
+impl ops::Deref for GreenToken {
+ type Target = GreenTokenData;
+
+ #[inline]
+ fn deref(&self) -> &GreenTokenData {
+ unsafe {
+ let repr: &Repr = &self.ptr;
+ let repr: &ReprThin = &*(repr as *const Repr as *const ReprThin);
+ mem::transmute::<&ReprThin, &GreenTokenData>(repr)
+ }
+ }
+}
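
A `GreenToken` stores its text inline after the header in a single `ThinArc` allocation, so construction and inspection are cheap. A quick sketch with a placeholder kind:

```rust
use rowan::{GreenToken, SyntaxKind, TextSize};

fn main() {
    let token = GreenToken::new(SyntaxKind(7), "hello");
    assert_eq!(token.kind(), SyntaxKind(7));
    assert_eq!(token.text(), "hello");
    assert_eq!(token.text_len(), TextSize::of("hello"));
}
```
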
diff --git a/vendor/rowan/src/lib.rs b/vendor/rowan/src/lib.rs
new file mode 100644
index 000000000..bb8f30d02
--- /dev/null
+++ b/vendor/rowan/src/lib.rs
@@ -0,0 +1,41 @@
+//! A generic library for lossless syntax trees.
+//! See `examples/s_expressions.rs` for a tutorial.
+#![forbid(
+ // missing_debug_implementations,
+ unconditional_recursion,
+ future_incompatible,
+ // missing_docs,
+)]
+#![deny(unsafe_code)]
+
+#[allow(unsafe_code)]
+mod green;
+#[allow(unsafe_code)]
+pub mod cursor;
+
+pub mod api;
+mod syntax_text;
+mod utility_types;
+
+mod cow_mut;
+#[allow(unsafe_code)]
+mod sll;
+#[allow(unsafe_code)]
+mod arc;
+#[cfg(feature = "serde1")]
+mod serde_impls;
+pub mod ast;
+
+pub use text_size::{TextLen, TextRange, TextSize};
+
+pub use crate::{
+ api::{
+ Language, SyntaxElement, SyntaxElementChildren, SyntaxNode, SyntaxNodeChildren, SyntaxToken,
+ },
+ green::{
+ Checkpoint, Children, GreenNode, GreenNodeBuilder, GreenNodeData, GreenToken,
+ GreenTokenData, NodeCache, SyntaxKind,
+ },
+ syntax_text::SyntaxText,
+ utility_types::{Direction, NodeOrToken, TokenAtOffset, WalkEvent},
+};
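
Tying the re-exports together: a green tree is built once, then viewed through a cursor node that adds offsets and parent pointers. A minimal end-to-end sketch (real users would typically define a `Language` and go through `api::SyntaxNode`; `cursor::SyntaxNode` keeps the example dependency-free, and the kinds are arbitrary):

```rust
use rowan::{cursor::SyntaxNode, GreenNodeBuilder, SyntaxKind};

fn main() {
    let mut builder = GreenNodeBuilder::new();
    builder.start_node(SyntaxKind(0));
    builder.token(SyntaxKind(1), "hello ");
    builder.token(SyntaxKind(1), "world");
    builder.finish_node();

    // The green tree is pure data; the cursor layer adds identity,
    // offsets, and parent pointers on top of it.
    let root = SyntaxNode::new_root(builder.finish());
    assert_eq!(root.text().to_string(), "hello world");
}
```
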
diff --git a/vendor/rowan/src/serde_impls.rs b/vendor/rowan/src/serde_impls.rs
new file mode 100644
index 000000000..aea5d88b7
--- /dev/null
+++ b/vendor/rowan/src/serde_impls.rs
@@ -0,0 +1,66 @@
+use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer};
+use std::fmt;
+
+use crate::{
+ api::{Language, SyntaxNode, SyntaxToken},
+ NodeOrToken,
+};
+
+struct SerDisplay<T>(T);
+impl<T: fmt::Display> Serialize for SerDisplay<T> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ serializer.collect_str(&self.0)
+ }
+}
+
+struct DisplayDebug<T>(T);
+impl<T: fmt::Debug> fmt::Display for DisplayDebug<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+}
+
+impl<L: Language> Serialize for SyntaxNode<L> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_map(Some(3))?;
+ state.serialize_entry("kind", &SerDisplay(DisplayDebug(self.kind())))?;
+ state.serialize_entry("text_range", &self.text_range())?;
+ state.serialize_entry("children", &Children(self))?;
+ state.end()
+ }
+}
+
+impl<L: Language> Serialize for SyntaxToken<L> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_map(Some(3))?;
+ state.serialize_entry("kind", &SerDisplay(DisplayDebug(self.kind())))?;
+ state.serialize_entry("text_range", &self.text_range())?;
+ state.serialize_entry("text", &self.text())?;
+ state.end()
+ }
+}
+
+struct Children<T>(T);
+
+impl<L: Language> Serialize for Children<&'_ SyntaxNode<L>> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let mut state = serializer.serialize_seq(None)?;
+ self.0.children_with_tokens().try_for_each(|element| match element {
+ NodeOrToken::Node(it) => state.serialize_element(&it),
+ NodeOrToken::Token(it) => state.serialize_element(&it),
+ })?;
+ state.end()
+ }
+}
diff --git a/vendor/rowan/src/sll.rs b/vendor/rowan/src/sll.rs
new file mode 100644
index 000000000..87d2f1f31
--- /dev/null
+++ b/vendor/rowan/src/sll.rs
@@ -0,0 +1,129 @@
+//! A sorted, circular, doubly-linked list.
+
+use std::{cell::Cell, cmp::Ordering, ptr};
+
+use crate::utility_types::Delta;
+pub(crate) unsafe trait Elem {
+ fn prev(&self) -> &Cell<*const Self>;
+ fn next(&self) -> &Cell<*const Self>;
+ fn key(&self) -> &Cell<u32>;
+}
+
+pub(crate) enum AddToSllResult<'a, E: Elem> {
+ NoHead,
+ EmptyHead(&'a Cell<*const E>),
+ SmallerThanHead(&'a Cell<*const E>),
+ SmallerThanNotHead(*const E),
+ AlreadyInSll(*const E),
+}
+
+impl<'a, E: Elem> AddToSllResult<'a, E> {
+ pub(crate) fn add_to_sll(&self, elem_ptr: *const E) {
+ unsafe {
+ (*elem_ptr).prev().set(elem_ptr);
+ (*elem_ptr).next().set(elem_ptr);
+
+ match self {
+ // Case 1: empty head, replace it.
+ AddToSllResult::EmptyHead(head) => head.set(elem_ptr),
+
+ // Case 2: we are smaller than the head, replace it.
+ AddToSllResult::SmallerThanHead(head) => {
+ let old_head = head.get();
+ let prev = (*old_head).prev().replace(elem_ptr);
+ (*prev).next().set(elem_ptr);
+ (*elem_ptr).next().set(old_head);
+ (*elem_ptr).prev().set(prev);
+ head.set(elem_ptr);
+ }
+
+ // Case 3: insert at the position found by the backward scan.
+ AddToSllResult::SmallerThanNotHead(curr) => {
+ let next = (**curr).next().replace(elem_ptr);
+ (*next).prev().set(elem_ptr);
+ (*elem_ptr).prev().set(*curr);
+ (*elem_ptr).next().set(next);
+ }
+ AddToSllResult::NoHead | AddToSllResult::AlreadyInSll(_) => (),
+ }
+ }
+ }
+}
+
+#[cold]
+pub(crate) fn init<'a, E: Elem>(
+ head: Option<&'a Cell<*const E>>,
+ elem: &E,
+) -> AddToSllResult<'a, E> {
+ if let Some(head) = head {
+ link(head, elem)
+ } else {
+ AddToSllResult::NoHead
+ }
+}
+
+#[cold]
+pub(crate) fn unlink<E: Elem>(head: &Cell<*const E>, elem: &E) {
+ debug_assert!(!head.get().is_null(), "invalid linked list head");
+
+ let elem_ptr: *const E = elem;
+
+ let prev = elem.prev().replace(elem_ptr);
+ let next = elem.next().replace(elem_ptr);
+ unsafe {
+ debug_assert_eq!((*prev).next().get(), elem_ptr, "invalid linked list links");
+ debug_assert_eq!((*next).prev().get(), elem_ptr, "invalid linked list links");
+ (*prev).next().set(next);
+ (*next).prev().set(prev);
+ }
+
+ if head.get() == elem_ptr {
+ head.set(if next == elem_ptr { ptr::null() } else { next })
+ }
+}
+
+#[cold]
+pub(crate) fn link<'a, E: Elem>(head: &'a Cell<*const E>, elem: &E) -> AddToSllResult<'a, E> {
+ unsafe {
+ let old_head = head.get();
+ // Case 1: empty head, replace it.
+ if old_head.is_null() {
+ return AddToSllResult::EmptyHead(head);
+ }
+
+ // Case 2: we are smaller than the head, replace it.
+ if elem.key() < (*old_head).key() {
+ return AddToSllResult::SmallerThanHead(head);
+ }
+
+ // Case 3: loop *backward* until we find the insertion point. Because of
+ // Case 2, we can't loop beyond the head.
+ let mut curr = (*old_head).prev().get();
+ loop {
+ match (*curr).key().cmp(elem.key()) {
+ Ordering::Less => return AddToSllResult::SmallerThanNotHead(curr),
+ Ordering::Equal => return AddToSllResult::AlreadyInSll(curr),
+ Ordering::Greater => curr = (*curr).prev().get(),
+ }
+ }
+ }
+}
+
+pub(crate) fn adjust<E: Elem>(elem: &E, from: u32, by: Delta<u32>) {
+ let elem_ptr: *const E = elem;
+
+ unsafe {
+ let mut curr = elem_ptr;
+ loop {
+ let mut key = (*curr).key().get();
+ if key >= from {
+ key += by;
+ (*curr).key().set(key);
+ }
+ curr = (*curr).next().get();
+ if curr == elem_ptr {
+ break;
+ }
+ }
+ }
+}
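
The three insertion cases in `link` above map onto the three non-trivial `AddToSllResult` variants. A simplified, safe sketch of the same decision logic over a sorted `Vec<u32>` (purely illustrative -- the real module needs raw pointers and `Cell`s so elements can unlink themselves):

```rust
/// Where a new key would go in an ascending, head-first list.
enum Insert {
    EmptyHead,
    SmallerThanHead,
    After(usize),          // insert after this index
    AlreadyPresent(usize), // key already in the list
}

fn classify(keys: &[u32], key: u32) -> Insert {
    // Case 1: empty head.
    let head = match keys.first() {
        None => return Insert::EmptyHead,
        Some(&h) => h,
    };
    // Case 2: smaller than the head.
    if key < head {
        return Insert::SmallerThanHead;
    }
    // Case 3: scan *backward* from the tail; case 2 guarantees the
    // scan cannot run past the head.
    for i in (0..keys.len()).rev() {
        if keys[i] < key {
            return Insert::After(i);
        }
        if keys[i] == key {
            return Insert::AlreadyPresent(i);
        }
    }
    unreachable!("key >= head, so an insertion point must exist")
}

fn main() {
    assert!(matches!(classify(&[], 5), Insert::EmptyHead));
    assert!(matches!(classify(&[3, 7], 1), Insert::SmallerThanHead));
    assert!(matches!(classify(&[3, 7], 5), Insert::After(0)));
    assert!(matches!(classify(&[3, 7], 7), Insert::AlreadyPresent(1)));
}
```
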
diff --git a/vendor/rowan/src/syntax_text.rs b/vendor/rowan/src/syntax_text.rs
new file mode 100644
index 000000000..3ab3f6cb0
--- /dev/null
+++ b/vendor/rowan/src/syntax_text.rs
@@ -0,0 +1,311 @@
+use std::fmt;
+
+use crate::{
+ cursor::{SyntaxNode, SyntaxToken},
+ TextRange, TextSize,
+};
+
+#[derive(Clone)]
+pub struct SyntaxText {
+ node: SyntaxNode,
+ range: TextRange,
+}
+
+impl SyntaxText {
+ pub(crate) fn new(node: SyntaxNode) -> SyntaxText {
+ let range = node.text_range();
+ SyntaxText { node, range }
+ }
+
+ pub fn len(&self) -> TextSize {
+ self.range.len()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.range.is_empty()
+ }
+
+ pub fn contains_char(&self, c: char) -> bool {
+ self.try_for_each_chunk(|chunk| if chunk.contains(c) { Err(()) } else { Ok(()) }).is_err()
+ }
+
+ pub fn find_char(&self, c: char) -> Option<TextSize> {
+ let mut acc: TextSize = 0.into();
+ let res = self.try_for_each_chunk(|chunk| {
+ if let Some(pos) = chunk.find(c) {
+ let pos: TextSize = (pos as u32).into();
+ return Err(acc + pos);
+ }
+ acc += TextSize::of(chunk);
+ Ok(())
+ });
+ found(res)
+ }
+
+ pub fn char_at(&self, offset: TextSize) -> Option<char> {
+ let offset = offset.into();
+ let mut start: TextSize = 0.into();
+ let res = self.try_for_each_chunk(|chunk| {
+ let end = start + TextSize::of(chunk);
+ if start <= offset && offset < end {
+ let off: usize = u32::from(offset - start) as usize;
+ return Err(chunk[off..].chars().next().unwrap());
+ }
+ start = end;
+ Ok(())
+ });
+ found(res)
+ }
+
+ pub fn slice<R: private::SyntaxTextRange>(&self, range: R) -> SyntaxText {
+ let start = range.start().unwrap_or_default();
+ let end = range.end().unwrap_or(self.len());
+ assert!(start <= end);
+ let len = end - start;
+ let start = self.range.start() + start;
+ let end = start + len;
+ assert!(
+ start <= end,
+ "invalid slice, range: {:?}, slice: {:?}",
+ self.range,
+ (range.start(), range.end()),
+ );
+ let range = TextRange::new(start, end);
+ assert!(
+ self.range.contains_range(range),
+ "invalid slice, range: {:?}, slice: {:?}",
+ self.range,
+ range,
+ );
+ SyntaxText { node: self.node.clone(), range }
+ }
+
+ pub fn try_fold_chunks<T, F, E>(&self, init: T, mut f: F) -> Result<T, E>
+ where
+ F: FnMut(T, &str) -> Result<T, E>,
+ {
+ self.tokens_with_ranges()
+ .try_fold(init, move |acc, (token, range)| f(acc, &token.text()[range]))
+ }
+
+ pub fn try_for_each_chunk<F: FnMut(&str) -> Result<(), E>, E>(
+ &self,
+ mut f: F,
+ ) -> Result<(), E> {
+ self.try_fold_chunks((), move |(), chunk| f(chunk))
+ }
+
+ pub fn for_each_chunk<F: FnMut(&str)>(&self, mut f: F) {
+ enum Void {}
+ match self.try_for_each_chunk(|chunk| Ok::<(), Void>(f(chunk))) {
+ Ok(()) => (),
+ Err(void) => match void {},
+ }
+ }
+
+ fn tokens_with_ranges(&self) -> impl Iterator<Item = (SyntaxToken, TextRange)> {
+ let text_range = self.range;
+ self.node.descendants_with_tokens().filter_map(|element| element.into_token()).filter_map(
+ move |token| {
+ let token_range = token.text_range();
+ let range = text_range.intersect(token_range)?;
+ Some((token, range - token_range.start()))
+ },
+ )
+ }
+}
+
+fn found<T>(res: Result<(), T>) -> Option<T> {
+ match res {
+ Ok(()) => None,
+ Err(it) => Some(it),
+ }
+}
+
+impl fmt::Debug for SyntaxText {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.to_string(), f)
+ }
+}
+
+impl fmt::Display for SyntaxText {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.try_for_each_chunk(|chunk| fmt::Display::fmt(chunk, f))
+ }
+}
+
+impl From<SyntaxText> for String {
+ fn from(text: SyntaxText) -> String {
+ text.to_string()
+ }
+}
+
+impl PartialEq<str> for SyntaxText {
+ fn eq(&self, mut rhs: &str) -> bool {
+ self.try_for_each_chunk(|chunk| {
+ if !rhs.starts_with(chunk) {
+ return Err(());
+ }
+ rhs = &rhs[chunk.len()..];
+ Ok(())
+ })
+ .is_ok()
+ && rhs.is_empty()
+ }
+}
+
+impl PartialEq<SyntaxText> for str {
+ fn eq(&self, rhs: &SyntaxText) -> bool {
+ rhs == self
+ }
+}
+
+impl PartialEq<&'_ str> for SyntaxText {
+ fn eq(&self, rhs: &&str) -> bool {
+ self == *rhs
+ }
+}
+
+impl PartialEq<SyntaxText> for &'_ str {
+ fn eq(&self, rhs: &SyntaxText) -> bool {
+ rhs == self
+ }
+}
+
+impl PartialEq for SyntaxText {
+ fn eq(&self, other: &SyntaxText) -> bool {
+ if self.range.len() != other.range.len() {
+ return false;
+ }
+ let mut lhs = self.tokens_with_ranges();
+ let mut rhs = other.tokens_with_ranges();
+ zip_texts(&mut lhs, &mut rhs).is_none()
+ && lhs.all(|it| it.1.is_empty())
+ && rhs.all(|it| it.1.is_empty())
+ }
+}
+
+fn zip_texts<I: Iterator<Item = (SyntaxToken, TextRange)>>(xs: &mut I, ys: &mut I) -> Option<()> {
+ let mut x = xs.next()?;
+ let mut y = ys.next()?;
+ loop {
+ while x.1.is_empty() {
+ x = xs.next()?;
+ }
+ while y.1.is_empty() {
+ y = ys.next()?;
+ }
+ let x_text = &x.0.text()[x.1];
+ let y_text = &y.0.text()[y.1];
+ if !(x_text.starts_with(y_text) || y_text.starts_with(x_text)) {
+ return Some(());
+ }
+ let advance = std::cmp::min(x.1.len(), y.1.len());
+ x.1 = TextRange::new(x.1.start() + advance, x.1.end());
+ y.1 = TextRange::new(y.1.start() + advance, y.1.end());
+ }
+}
+
+impl Eq for SyntaxText {}
+
+mod private {
+ use std::ops;
+
+ use crate::{TextRange, TextSize};
+
+ pub trait SyntaxTextRange {
+ fn start(&self) -> Option<TextSize>;
+ fn end(&self) -> Option<TextSize>;
+ }
+
+ impl SyntaxTextRange for TextRange {
+ fn start(&self) -> Option<TextSize> {
+ Some(TextRange::start(*self))
+ }
+ fn end(&self) -> Option<TextSize> {
+ Some(TextRange::end(*self))
+ }
+ }
+
+ impl SyntaxTextRange for ops::Range<TextSize> {
+ fn start(&self) -> Option<TextSize> {
+ Some(self.start)
+ }
+ fn end(&self) -> Option<TextSize> {
+ Some(self.end)
+ }
+ }
+
+ impl SyntaxTextRange for ops::RangeFrom<TextSize> {
+ fn start(&self) -> Option<TextSize> {
+ Some(self.start)
+ }
+ fn end(&self) -> Option<TextSize> {
+ None
+ }
+ }
+
+ impl SyntaxTextRange for ops::RangeTo<TextSize> {
+ fn start(&self) -> Option<TextSize> {
+ None
+ }
+ fn end(&self) -> Option<TextSize> {
+ Some(self.end)
+ }
+ }
+
+ impl SyntaxTextRange for ops::RangeFull {
+ fn start(&self) -> Option<TextSize> {
+ None
+ }
+ fn end(&self) -> Option<TextSize> {
+ None
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{green::SyntaxKind, GreenNodeBuilder};
+
+ use super::*;
+
+ fn build_tree(chunks: &[&str]) -> SyntaxNode {
+ let mut builder = GreenNodeBuilder::new();
+ builder.start_node(SyntaxKind(62));
+ for &chunk in chunks.iter() {
+ builder.token(SyntaxKind(92), chunk.into())
+ }
+ builder.finish_node();
+ SyntaxNode::new_root(builder.finish())
+ }
+
+ #[test]
+ fn test_text_equality() {
+ fn do_check(t1: &[&str], t2: &[&str]) {
+ let t1 = build_tree(t1).text();
+ let t2 = build_tree(t2).text();
+ let expected = t1.to_string() == t2.to_string();
+ let actual = t1 == t2;
+ assert_eq!(expected, actual, "`{}` (SyntaxText) `{}` (SyntaxText)", t1, t2);
+ let actual = t1 == &*t2.to_string();
+ assert_eq!(expected, actual, "`{}` (SyntaxText) `{}` (&str)", t1, t2);
+ }
+ fn check(t1: &[&str], t2: &[&str]) {
+ do_check(t1, t2);
+ do_check(t2, t1)
+ }
+
+ check(&[""], &[""]);
+ check(&["a"], &[""]);
+ check(&["a"], &["a"]);
+ check(&["abc"], &["def"]);
+ check(&["hello", "world"], &["hello", "world"]);
+ check(&["hellowo", "rld"], &["hell", "oworld"]);
+ check(&["hel", "lowo", "rld"], &["helloworld"]);
+ check(&["{", "abc", "}"], &["{", "123", "}"]);
+ check(&["{", "abc", "}", "{"], &["{", "123", "}"]);
+ check(&["{", "abc", "}"], &["{", "123", "}", "{"]);
+ check(&["{", "abc", "}ab"], &["{", "abc", "}", "ab"]);
+ }
+}
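
Because `SyntaxText` is a lazy view over token chunks, searching and comparing never allocate a contiguous string. A short sketch, reusing the same builder pattern as the tests above (kinds are placeholders):

```rust
use rowan::{cursor::SyntaxNode, GreenNodeBuilder, SyntaxKind, TextSize};

fn main() {
    let mut builder = GreenNodeBuilder::new();
    builder.start_node(SyntaxKind(0));
    builder.token(SyntaxKind(1), "hello");
    builder.token(SyntaxKind(1), "world");
    builder.finish_node();
    let text = SyntaxNode::new_root(builder.finish()).text();

    // All of these walk the two chunks without building a `String`.
    assert_eq!(text.len(), TextSize::from(10));
    assert!(text.contains_char('w'));
    assert_eq!(text.find_char('w'), Some(TextSize::from(5)));
    assert!(text == "helloworld");
}
```
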
diff --git a/vendor/rowan/src/utility_types.rs b/vendor/rowan/src/utility_types.rs
new file mode 100644
index 000000000..817add72e
--- /dev/null
+++ b/vendor/rowan/src/utility_types.rs
@@ -0,0 +1,180 @@
+use std::{
+ fmt,
+ ops::{AddAssign, Deref},
+};
+use text_size::TextSize;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum NodeOrToken<N, T> {
+ Node(N),
+ Token(T),
+}
+
+impl<N, T> NodeOrToken<N, T> {
+ pub fn into_node(self) -> Option<N> {
+ match self {
+ NodeOrToken::Node(node) => Some(node),
+ NodeOrToken::Token(_) => None,
+ }
+ }
+
+ pub fn into_token(self) -> Option<T> {
+ match self {
+ NodeOrToken::Node(_) => None,
+ NodeOrToken::Token(token) => Some(token),
+ }
+ }
+
+ pub fn as_node(&self) -> Option<&N> {
+ match self {
+ NodeOrToken::Node(node) => Some(node),
+ NodeOrToken::Token(_) => None,
+ }
+ }
+
+ pub fn as_token(&self) -> Option<&T> {
+ match self {
+ NodeOrToken::Node(_) => None,
+ NodeOrToken::Token(token) => Some(token),
+ }
+ }
+}
+
+impl<N: Deref, T: Deref> NodeOrToken<N, T> {
+ pub(crate) fn as_deref(&self) -> NodeOrToken<&N::Target, &T::Target> {
+ match self {
+ NodeOrToken::Node(node) => NodeOrToken::Node(&*node),
+ NodeOrToken::Token(token) => NodeOrToken::Token(&*token),
+ }
+ }
+}
+
+impl<N: fmt::Display, T: fmt::Display> fmt::Display for NodeOrToken<N, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ NodeOrToken::Node(node) => fmt::Display::fmt(node, f),
+ NodeOrToken::Token(token) => fmt::Display::fmt(token, f),
+ }
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum Direction {
+ Next,
+ Prev,
+}
+
+/// `WalkEvent` describes the tree walking process.
+#[derive(Debug, Copy, Clone)]
+pub enum WalkEvent<T> {
+ /// Fired before traversing the node.
+ Enter(T),
+ /// Fired after the node is traversed.
+ Leave(T),
+}
+
+impl<T> WalkEvent<T> {
+ pub fn map<F: FnOnce(T) -> U, U>(self, f: F) -> WalkEvent<U> {
+ match self {
+ WalkEvent::Enter(it) => WalkEvent::Enter(f(it)),
+ WalkEvent::Leave(it) => WalkEvent::Leave(f(it)),
+ }
+ }
+}
+
+/// There might be zero, one or two leaves at a given offset.
+#[derive(Clone, Debug)]
+pub enum TokenAtOffset<T> {
+ /// No leaves at the offset -- possible for an empty file.
+ None,
+ /// Only a single leaf at offset.
+ Single(T),
+ /// Offset is exactly between two leaves.
+ Between(T, T),
+}
+
+impl<T> TokenAtOffset<T> {
+ pub fn map<F: Fn(T) -> U, U>(self, f: F) -> TokenAtOffset<U> {
+ match self {
+ TokenAtOffset::None => TokenAtOffset::None,
+ TokenAtOffset::Single(it) => TokenAtOffset::Single(f(it)),
+ TokenAtOffset::Between(l, r) => TokenAtOffset::Between(f(l), f(r)),
+ }
+ }
+
+ /// Convert to option, preferring the right leaf in case of a tie.
+ pub fn right_biased(self) -> Option<T> {
+ match self {
+ TokenAtOffset::None => None,
+ TokenAtOffset::Single(node) => Some(node),
+ TokenAtOffset::Between(_, right) => Some(right),
+ }
+ }
+
+ /// Convert to option, preferring the left leaf in case of a tie.
+ pub fn left_biased(self) -> Option<T> {
+ match self {
+ TokenAtOffset::None => None,
+ TokenAtOffset::Single(node) => Some(node),
+ TokenAtOffset::Between(left, _) => Some(left),
+ }
+ }
+}
+
+impl<T> Iterator for TokenAtOffset<T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ match std::mem::replace(self, TokenAtOffset::None) {
+ TokenAtOffset::None => None,
+ TokenAtOffset::Single(node) => {
+ *self = TokenAtOffset::None;
+ Some(node)
+ }
+ TokenAtOffset::Between(left, right) => {
+ *self = TokenAtOffset::Single(right);
+ Some(left)
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self {
+ TokenAtOffset::None => (0, Some(0)),
+ TokenAtOffset::Single(_) => (1, Some(1)),
+ TokenAtOffset::Between(_, _) => (2, Some(2)),
+ }
+ }
+}
+
+impl<T> ExactSizeIterator for TokenAtOffset<T> {}
+
+macro_rules! _static_assert {
+ ($expr:expr) => {
+ const _: i32 = 0 / $expr as i32;
+ };
+}
+
+pub(crate) use _static_assert as static_assert;
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum Delta<T> {
+ Add(T),
+ Sub(T),
+}
+
+// This won't be coherent :-(
+// impl<T: AddAssign + SubAssign> AddAssign<Delta<T>> for T
+macro_rules! impls {
+ ($($ty:ident)*) => {$(
+ impl AddAssign<Delta<$ty>> for $ty {
+ fn add_assign(&mut self, rhs: Delta<$ty>) {
+ match rhs {
+ Delta::Add(amt) => *self += amt,
+ Delta::Sub(amt) => *self -= amt,
+ }
+ }
+ }
+ )*};
+}
+impls!(u32 TextSize);
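
`TokenAtOffset` also acts as an iterator, yielding the left leaf before the right one, while the biased helpers pick one side directly. For example:

```rust
use rowan::TokenAtOffset;

fn main() {
    let between = TokenAtOffset::Between("left", "right");
    assert_eq!(between.clone().left_biased(), Some("left"));
    assert_eq!(between.clone().right_biased(), Some("right"));
    // Iterating yields the left leaf first, then the right one.
    assert_eq!(between.collect::<Vec<_>>(), vec!["left", "right"]);
}
```
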
diff --git a/vendor/rowan/tests/tidy.rs b/vendor/rowan/tests/tidy.rs
new file mode 100644
index 000000000..a716e35b2
--- /dev/null
+++ b/vendor/rowan/tests/tidy.rs
@@ -0,0 +1,46 @@
+use std::{
+ env,
+ path::{Path, PathBuf},
+ process::{Command, Stdio},
+};
+
+fn project_root() -> PathBuf {
+ PathBuf::from(
+ env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| env!("CARGO_MANIFEST_DIR").to_owned()),
+ )
+}
+
+fn run(cmd: &str, dir: impl AsRef<Path>) -> Result<(), ()> {
+ let mut args: Vec<_> = cmd.split_whitespace().collect();
+ let bin = args.remove(0);
+ println!("> {}", cmd);
+ let output = Command::new(bin)
+ .args(args)
+ .current_dir(dir)
+ .stdin(Stdio::null())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::inherit())
+ .output()
+ .map_err(drop)?;
+ if output.status.success() {
+ Ok(())
+ } else {
+ let stdout = String::from_utf8(output.stdout).map_err(drop)?;
+ print!("{}", stdout);
+ Err(())
+ }
+}
+
+#[test]
+fn check_code_formatting() {
+ let dir = project_root();
+ if run("rustfmt +stable --version", &dir).is_err() {
+ panic!(
+ "failed to run rustfmt from toolchain 'stable'; \
+ please run `rustup component add rustfmt --toolchain stable` to install it.",
+ );
+ }
+ if run("cargo +stable fmt -- --check", &dir).is_err() {
+ panic!("code is not properly formatted; please format the code by running `cargo fmt`")
+ }
+}