author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /vendor/chalk-engine
parent    Initial commit. (diff)
download  rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
          rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip

Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/chalk-engine')
-rw-r--r--  vendor/chalk-engine/.cargo-checksum.json   |    1
-rw-r--r--  vendor/chalk-engine/Cargo.toml             |   40
-rw-r--r--  vendor/chalk-engine/README.md              |    3
-rw-r--r--  vendor/chalk-engine/src/README.md          |  405
-rw-r--r--  vendor/chalk-engine/src/context.rs         |   72
-rw-r--r--  vendor/chalk-engine/src/derived.rs         |   34
-rw-r--r--  vendor/chalk-engine/src/forest.rs          |  116
-rw-r--r--  vendor/chalk-engine/src/lib.rs             |  334
-rw-r--r--  vendor/chalk-engine/src/logic.rs           | 1648
-rw-r--r--  vendor/chalk-engine/src/normalize_deep.rs  |  172
-rw-r--r--  vendor/chalk-engine/src/simplify.rs        |  141
-rw-r--r--  vendor/chalk-engine/src/slg.rs             |  378
-rw-r--r--  vendor/chalk-engine/src/slg/aggregate.rs   |  642
-rw-r--r--  vendor/chalk-engine/src/slg/resolvent.rs   |  731
-rw-r--r--  vendor/chalk-engine/src/solve.rs           |   89
-rw-r--r--  vendor/chalk-engine/src/stack.rs           |  175
-rw-r--r--  vendor/chalk-engine/src/strand.rs          |   51
-rw-r--r--  vendor/chalk-engine/src/table.rs           |  173
-rw-r--r--  vendor/chalk-engine/src/tables.rs          |   72
19 files changed, 5277 insertions, 0 deletions
diff --git a/vendor/chalk-engine/.cargo-checksum.json b/vendor/chalk-engine/.cargo-checksum.json
new file mode 100644
index 000000000..93c5b694a
--- /dev/null
+++ b/vendor/chalk-engine/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"f0bf3c13da236bb4ee2b8f5f35ae43dbcc155258f2aeba875fc0891b90dcc822","README.md":"d8d9a21a6700b554e110030288fb94ca98fa6c6398fa6a5a1dafa52b3f7dd545","src/README.md":"6606b446db6a271e99f1019cd6e59a44af50e7b1ee995718d30fde11c09bbc80","src/context.rs":"89dcf1a74cb22604f32265bcce7f93376cf299101337368d0191f4d1cd598de7","src/derived.rs":"a59fe981325e664865bf21407e4a24e317b00cc2545b380d1140b4fddc86ede3","src/forest.rs":"d6d59557b8ff4fbdcdd3474d214b6e455993996224a14773929ee0e295c02984","src/lib.rs":"3c1a331575ede17623bbbb9eb4e6bb49497b41f805b317d91895718cb886fcaf","src/logic.rs":"45747ba7d9a7b14f8c652659ac2e31854f962c613346c23ba55b972fdd548c75","src/normalize_deep.rs":"d1663c7bb5fc16be8b414f188a982cd6b21ad7fa9185f72a0476dba9e4756472","src/simplify.rs":"fbc006c2974dfef17d8849b6cd21b9510805bafce1217721eb43479cf02572d1","src/slg.rs":"2fbdce2aaebe3342f3af5d7b4ccbdef969009c0e0d302c9201bf30b7efb1ae5f","src/slg/aggregate.rs":"52b51f86e837b8219bb84935f870d47f6c5c0c1765cfea3e32a75b1055fa0761","src/slg/resolvent.rs":"476b5efa99711fd1739e2e21ca6d7918c5546d849c96b26e4bd6260852ad1fc9","src/solve.rs":"ed054d4a9b132fa8c5a6ff1c8c4a435207412eaee30fc992ed0d4a668543584c","src/stack.rs":"0eb11b6f40575aff6784ab10d5f027a9e52ee05e818d7c77fdccf9a59fd18a60","src/strand.rs":"3006dab840a6a3cf48bf45a14fa9d117d06dfeb53d8e36301b5b43cef07b648e","src/table.rs":"253197ca4a700cdc8132e04171a00603c65d5db6794e7779073a8412131dd7cf","src/tables.rs":"c140ea3b7ada35c097f578dc81873164c2c36f41adfb5f7d0d71cd9978974a23"},"package":"c44ee96f2d67cb5193d1503f185db1abad9933a1c6e6b4169c176f90baecd393"} \ No newline at end of file
diff --git a/vendor/chalk-engine/Cargo.toml b/vendor/chalk-engine/Cargo.toml
new file mode 100644
index 000000000..413af5387
--- /dev/null
+++ b/vendor/chalk-engine/Cargo.toml
@@ -0,0 +1,40 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "chalk-engine"
+version = "0.80.0"
+authors = ["Rust Compiler Team", "Chalk developers"]
+description = "Core trait engine from Chalk project"
+readme = "README.md"
+keywords = ["compiler", "traits", "prolog"]
+license = "Apache-2.0/MIT"
+repository = "https://github.com/rust-lang/chalk"
+[dependencies.chalk-derive]
+version = "=0.80.0"
+
+[dependencies.chalk-ir]
+version = "=0.80.0"
+
+[dependencies.chalk-solve]
+version = "=0.80.0"
+
+[dependencies.rustc-hash]
+version = "1.1.0"
+
+[dependencies.tracing]
+version = "0.1"
+
+[dev-dependencies]
+
+[features]
+default = []
diff --git a/vendor/chalk-engine/README.md b/vendor/chalk-engine/README.md
new file mode 100644
index 000000000..b396aa456
--- /dev/null
+++ b/vendor/chalk-engine/README.md
@@ -0,0 +1,3 @@
+The core crate for Chalk.
+
+See [Github](https://github.com/rust-lang/chalk) for up-to-date information.
diff --git a/vendor/chalk-engine/src/README.md b/vendor/chalk-engine/src/README.md
new file mode 100644
index 000000000..b7cf2d453
--- /dev/null
+++ b/vendor/chalk-engine/src/README.md
@@ -0,0 +1,405 @@
+# The on-demand SLG solver
+
+The basis of the solver is the `Forest` type. A *forest* stores a
+collection of *tables* as well as a *stack*. Each *table* represents
+the stored results of a particular query that is being performed, as
+well as the various *strands*, which are basically suspended
+computations that may be used to find more answers. Tables are
+interdependent: solving one query may require solving others.
+
+## Walkthrough
+
+Perhaps the easiest way to explain how the solver works is to walk
+through an example. Let's imagine that we have the following program:
+
+```rust
+trait Debug { }
+
+struct u32 { }
+impl Debug for u32 { }
+
+struct Rc<T> { }
+impl<T: Debug> Debug for Rc<T> { }
+
+struct Vec<T> { }
+impl<T: Debug> Debug for Vec<T> { }
+```
+
+Now imagine that we want to find answers for the query `exists<T> {
+Rc<T>: Debug }`. The first step would be to u-canonicalize this query; this
+is the act of giving canonical names to all the unbound inference variables based on the
+order of their left-most appearance, as well as canonicalizing the universes of any
+universally bound names (e.g., the `T` in `forall<T> { ... }`). In this case, there are no
+universally bound names, but the canonical form Q of the query might look something like:
+
+ Rc<?0>: Debug
+
+where `?0` is a variable in the root universe U0. We would then go and
+look for a table with this as the key: since the forest is empty, this
+lookup will fail, and we will create a new table T0, corresponding to
+the u-canonical goal Q.
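+
+For example (an invented goal, purely to illustrate the naming scheme),
+u-canonicalizing `exists<A, B> { (B, A, B): Debug }` would produce
+
+    (?0, ?1, ?0): Debug
+
+since `B` is the left-most variable to appear (becoming `?0`) and `A`
+appears next (becoming `?1`).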
+
+### Ignoring negative reasoning and regions
+
+To start, we'll ignore the possibility of negative goals like `not {
+Foo }`. We'll phase them in later, as they bring several
+complications.
+
+### Creating a table
+
+When we first create a table, we also initialize it with a set of
+*initial strands*. A "strand" is kind of like a "thread" for the
+solver: it contains a particular way to produce an answer. The initial
+set of strands for a goal like `Rc<?0>: Debug` (i.e., a "domain goal")
+is determined by looking for *clauses* in the environment. In Rust,
+these clauses derive from impls, but also from where-clauses that are
+in scope. In the case of our example, there would be three clauses,
+each coming from the program. Using a Prolog-like notation, these look
+like:
+
+```
+(u32: Debug).
+(Rc<T>: Debug) :- (T: Debug).
+(Vec<T>: Debug) :- (T: Debug).
+```
+
+To create our initial strands, then, we will try to apply each of
+these clauses to our goal of `Rc<?0>: Debug`. The first and third
+clauses are inapplicable because `u32` and `Vec<?0>` cannot be unified
+with `Rc<?0>`. The second clause, however, will work.
+
+### What is a strand?
+
+Let's talk a bit more about what a strand *is*. In the code, a strand
+is the combination of an inference table, an X-clause, and (possibly)
+a selected subgoal from that X-clause. But what is an X-clause
+(`ExClause`, in the code)? An X-clause pulls together a few things:
+
+- The current state of the goal we are trying to prove;
+- A set of subgoals that have yet to be proven;
+- A set of floundered subgoals (see the section on floundering below);
+- There are also a few things we're ignoring for now:
+ - delayed literals, region constraints
+
+The general form of an X-clause is written much like a Prolog clause,
+but with somewhat different semantics. Since we're ignoring delayed
+literals and region constraints, an X-clause just looks like this:
+
+ G :- L
+
+where G is a goal and L is a set of subgoals that must be proven.
+(The L stands for *literal* -- when we address negative reasoning, a
+literal will be either a positive or negative subgoal.) The idea is
+that if we are able to prove L then the goal G can be considered true.
+
+In the case of our example, we would wind up creating one strand, with
+an X-clause like so:
+
+ (Rc<?T>: Debug) :- (?T: Debug)
+
+Here, the `?T` refers to one of the inference variables created in the
+inference table that accompanies the strand. (I'll use named variables
+to refer to inference variables, and numbered variables like `?0` to
+refer to variables in a canonicalized goal; in the code, however, they
+are both represented with an index.)
+
+For each strand, we also optionally store a *selected subgoal*. This
+is the subgoal after the turnstile (`:-`) that we are currently trying
+to prove in this strand. Initially, when a strand is first created,
+there is no selected subgoal.
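+
+To make the shape of this data concrete, here is a minimal Rust sketch
+of a strand and its X-clause. The real definitions live in `lib.rs` and
+`strand.rs` and carry more bookkeeping (an inference table, region
+constraints, time stamps), and they store the goal as a substitution
+against the table's goal rather than as a full goal; the stub type
+aliases below exist only so the sketch stands alone:
+
+```rust
+// Stub types so the sketch is self-contained; the real crate uses
+// interned chalk-ir types for all of these.
+type Goal = String;
+type TableIndex = usize;
+type AnswerIndex = usize;
+
+/// Either a positive subgoal (`G`) or a negative one (`not { G }`).
+enum Literal {
+    Positive(Goal),
+    Negative(Goal),
+}
+
+/// The `G :- L` form described above.
+struct ExClause {
+    goal: Goal,                        // the G before the turnstile
+    subgoals: Vec<Literal>,            // the literals L still to prove
+    floundered_subgoals: Vec<Literal>, // see the floundering section
+}
+
+/// A strand: an X-clause plus (possibly) a selected subgoal.
+struct Strand {
+    ex_clause: ExClause,
+    selected_subgoal: Option<SelectedSubgoal>,
+}
+
+/// Which table answers the selected subgoal, and which answer we
+/// want from it next.
+struct SelectedSubgoal {
+    subgoal_table: TableIndex,
+    answer_index: AnswerIndex,
+}
+```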
+
+### Activating a strand
+
+Now that we have created the table T0 and initialized it with strands,
+we have to actually try and produce an answer. We do this by invoking
+the `ensure_answer` operation on the table: specifically, we say
+`ensure_answer(T0, A0)`, meaning "ensure that there is a 0th answer".
+
+Remember that tables store not only strands, but also a vector of
+cached answers. The first thing that `ensure_answer` does is to check
+whether answer 0 is in this vector. If so, we can just return
+immediately. In this case, the vector will be empty, and hence that
+does not apply (this becomes important for cyclic checks later on).
+
+When there is no cached answer, `ensure_answer` will try to produce
+one. It does this by selecting a strand from the set of active
+strands -- the strands are stored in a `VecDeque` and hence processed
+in a round-robin fashion. Right now, we have only one strand, storing
+the following X-clause with no selected subgoal:
+
+ (Rc<?T>: Debug) :- (?T: Debug)
+
+When we activate the strand, we see that we have no selected subgoal,
+and so we first pick one of the subgoals to process. Here, there is only
+one (`?T: Debug`), so that becomes the selected subgoal, changing
+the state of the strand to:
+
+ (Rc<?T>: Debug) :- selected(?T: Debug, A0)
+
+Here, we write `selected(L, An)` to indicate (a) that the literal `L`
+is the selected subgoal and (b) that we are looking for answer `An`. We
+start out looking for `A0`.
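+
+In compilable-but-simplified Rust, the `ensure_answer` loop described in
+this section and the next looks roughly like the sketch below. All of
+the types here are stand-ins, and strand activation is elided to a stub;
+the real logic lives in `logic.rs` and additionally handles cycles,
+floundering, and work quanta:
+
+```rust
+use std::collections::VecDeque;
+
+type TableIndex = usize;
+type AnswerIndex = usize;
+
+struct Answer; // stand-in for a cached answer
+struct Strand; // stand-in for a suspended computation
+
+struct Table {
+    answers: Vec<Answer>,
+    strands: VecDeque<Strand>,
+}
+
+struct Forest {
+    tables: Vec<Table>,
+}
+
+struct NoMoreAnswers;
+
+impl Forest {
+    fn ensure_answer(
+        &mut self,
+        table: TableIndex,
+        answer: AnswerIndex,
+    ) -> Result<(), NoMoreAnswers> {
+        loop {
+            // 1. If the requested answer is already cached, we are done.
+            if answer < self.tables[table].answers.len() {
+                return Ok(());
+            }
+            // 2. Otherwise, take the next strand in round-robin order.
+            let strand = match self.tables[table].strands.pop_front() {
+                Some(strand) => strand,
+                // No strands left: the table can produce no more answers.
+                None => return Err(NoMoreAnswers),
+            };
+            // 3. Activate it: select a subgoal if none is selected,
+            //    recursively `ensure_answer` on that subgoal's table and
+            //    merge the result in, or -- if no subgoals remain --
+            //    record the strand's current state as a new answer.
+            self.activate(table, strand);
+        }
+    }
+
+    fn activate(&mut self, _table: TableIndex, _strand: Strand) {
+        // Elided: subgoal selection, table creation, answer merging.
+    }
+}
+```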
+
+### Processing the selected subgoal
+
+Next, we have to try and find an answer to this selected goal. To do
+that, we will u-canonicalize it and try to find an associated
+table. In this case, the u-canonical form of the subgoal is `?0:
+Debug`: we don't have a table yet for that, so we can create a new
+one, T1. As before, we'll initialize T1 with strands. In this case,
+there will be three strands, because all the program clauses are
+potentially applicable. Those three strands will be:
+
+- `(u32: Debug) :-`, derived from the program clause `(u32: Debug).`.
+ - Note: This strand has no subgoals.
+- `(Vec<?U>: Debug) :- (?U: Debug)`, derived from the `Vec` impl.
+- `(Rc<?U>: Debug) :- (?U: Debug)`, derived from the `Rc` impl.
+
+We can thus summarize the state of the whole forest at this point as
+follows:
+
+```
+Table T0 [Rc<?0>: Debug]
+ Strands:
+ (Rc<?T>: Debug) :- selected(?T: Debug, A0)
+
+Table T1 [?0: Debug]
+ Strands:
+ (u32: Debug) :-
+ (Vec<?U>: Debug) :- (?U: Debug)
+ (Rc<?V>: Debug) :- (?V: Debug)
+```
+
+### Delegation between tables
+
+Now that the active strand from T0 has created the table T1, it can
+try to extract an answer. It does this via that same `ensure_answer`
+operation we saw before. In this case, the strand would invoke
+`ensure_answer(T1, A0)`, since we will start with the first
+answer. This will cause T1 to activate its first strand, `u32: Debug
+:-`.
+
+This strand is somewhat special: it has no subgoals at all. This means
+that the goal is proven. We can therefore add `u32: Debug` to the set
+of *answers* for our table, calling it answer A0 (it is the first
+answer). The strand is then removed from the list of strands.
+
+The state of table T1 is therefore:
+
+```
+Table T1 [?0: Debug]
+ Answers:
+ A0 = [?0 = u32]
+ Strands:
+ (Vec<?U>: Debug) :- (?U: Debug)
+ (Rc<?V>: Debug) :- (?V: Debug)
+```
+
+Note that I am writing out the answer A0 as a substitution that can be
+applied to the table goal; actually, in the code, the goals for each
+X-clause are also represented as substitutions, but in this exposition
+I've chosen to write them as full goals, following NFTD.
+
+Since we now have an answer, `ensure_answer(T1, A0)` will return `Ok`
+to the table T0, indicating that answer A0 is available. T0 now has
+the job of incorporating that result into its active strand. It does
+this in two ways. First, it creates a new strand that is looking for
+the next possible answer of T1. Next, it incorporates the answer from
+A0 and removes the subgoal. The resulting state of table T0 is:
+
+```
+Table T0 [Rc<?0>: Debug]
+ Strands:
+ (Rc<?T>: Debug) :- selected(?T: Debug, A1)
+ (Rc<u32>: Debug) :-
+```
+
+We then immediately activate the strand that incorporated the answer
+(the `Rc<u32>: Debug` one). In this case, that strand has no further
+subgoals, so it becomes an answer to the table T0. This answer can
+then be returned up to our caller, and the whole forest goes quiescent
+at this point (remember, we only do enough work to generate *one*
+answer). The ending state of the forest at this point will be:
+
+```
+Table T0 [Rc<?0>: Debug]
+ Answer:
+ A0 = [?0 = u32]
+ Strands:
+ (Rc<?T>: Debug) :- selected(?T: Debug, A1)
+
+Table T1 [?0: Debug]
+ Answers:
+ A0 = [?0 = u32]
+ Strands:
+ (Vec<?U>: Debug) :- (?U: Debug)
+ (Rc<?V>: Debug) :- (?V: Debug)
+```
+
+Here you can see how the forest captures both the answers we have
+created thus far *and* the strands that will let us try to produce
+more answers later on.
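+
+In the crate's API, this one-answer-at-a-time behavior is surfaced
+through the `AnswerStream` trait (defined in `context.rs`) and
+`Forest::iter_answers` (defined in `forest.rs`). The sketch below shows
+roughly how a caller inside the crate can drive it; `drain_answers` is a
+hypothetical helper written for illustration, not something defined by
+the crate:
+
+```rust
+use crate::context::{AnswerResult, AnswerStream};
+use crate::forest::Forest;
+use crate::slg::SlgContextOps;
+use chalk_ir::interner::Interner;
+use chalk_ir::{Goal, InEnvironment, UCanonical};
+
+/// Crate-internal sketch: pull answers for `goal` one at a time until
+/// the table is exhausted, printing each one.
+fn drain_answers<'a, I: Interner>(
+    forest: &'a mut Forest<I>,
+    context: &'a SlgContextOps<'a, I>,
+    goal: &UCanonical<InEnvironment<Goal<I>>>,
+) {
+    let mut answers = forest.iter_answers(context, goal);
+    loop {
+        // The closure bounds how much work each call may do; returning
+        // `true` means "keep going until something definitive happens".
+        match answers.next_answer(|| true) {
+            // One more answer was produced on demand.
+            AnswerResult::Answer(answer) => println!("answer: {:?}", answer),
+            // The goal was too vague to enumerate (see the next section).
+            AnswerResult::Floundered => break,
+            // The table is exhausted; no further answers exist.
+            AnswerResult::NoMoreSolutions => break,
+            // Only reachable if the closure returns `false`.
+            AnswerResult::QuantumExceeded => continue,
+        }
+    }
+}
+```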
+
+### Floundering
+
+The first thing we do when we create a table is to initialize it with
+a set of strands. These strands represent all the ways that one can
+solve the table's associated goal. For an ordinary trait, we would
+effectively create one strand per "relevant impl". But sometimes the
+goals are too vague for this to be possible; other times, it may be possible
+but just really inefficient, since all of those strands must be explored.
+
+As an example of when it may not be possible, consider a goal like
+`?T: Sized`. This goal is in principle satisfied by **every sized type**
+that exists -- that includes not only struct/enum types, but also
+closure types, fn types with arbitrary arity, tuple types with
+arbitrary arity, and so forth. In other words, there are not only an
+infinite set of **answers** but actually an infinite set of
+**strands**. The same applies to auto traits like `Send` as well as
+"hybrid" traits like `Clone`, which contain *some* auto-generated sets
+of impls.
+
+Another example of floundering comes from negative logic. In general,
+we cannot process negative subgoals if they have unbound existential
+variables, such as `not { Vec<?T>: Foo }`. This is because we can only
+enumerate things that *do* match a given trait (or which *are*
+provable, more generally). We cannot enumerate the possible types `?T`
+that are *not* provable (there is an infinite set of them, to be sure).
+
+To handle this, we allow tables to enter a **floundered** state. This
+state begins when we try to create the program clauses for a table --
+if that is not possible (e.g., in one of the cases above) then the
+table enters a floundered state. Attempts to get an answer from a
+floundered table result in an error (e.g.,
+`RecursiveSearchFail::Floundered`).
+
+Whenever a subgoal produces a floundered result, that subgoal is placed
+into a distinct list (the "floundered subgoals"). We then go on and
+process the rest of the subgoals. Once all the normal subgoals have
+completed, floundered subgoals are removed from the floundered list
+and re-attempted: the idea is that we may have accumulated more type
+information in the meantime. If they continue to flounder, then we
+stop.
+
+Let's look at an example. Imagine that we have:
+
+```rust
+trait Foo { }
+trait Bar { }
+
+impl<T: Send + Bar> Foo for T { }
+
+impl Bar for u32 { }
+impl Bar for i32 { }
+```
+
+Now imagine we are trying to prove `?T: Foo`. There is only one impl,
+so we will create a state like:
+
+```
+(?T: Foo) :- (?T: Send), (?T: Bar)
+```
+
+When we attempt to solve `?T: Send`, however, that subgoal will
+flounder, because `Send` is an auto-trait. So it will be put into a
+floundered list:
+
+```
+(?T: Foo) :- (?T: Bar) [ floundered: (?T: Send) ]
+```
+
+and we will go on to solve `?T: Bar`. `Bar` is an ordinary trait -- so
+we can enumerate two strands (one for `u32` and one for `i32`). When
+we process the first answer, we wind up with:
+
+```
+(u32: Foo) :- [ floundered: (u32: Send) ]
+```
+
+At this point we can move the floundered subgoal back into the main
+list and process:
+
+```
+(u32: Foo) :- (u32: Send)
+```
+
+This time, the goal does not flounder.
+
+But how do we detect when it makes sense to move a floundered subgoal
+into the main list? To handle this, we use a timestamp scheme. We
+keep a counter as part of the strand -- each time we succeed in
+solving some subgoal, we increment the counter, as that *may* have
+provided more information about some type variables. When we move a
+goal to the floundered list, we also track the current value of the
+timestamp. Then, when it comes time to move things *from* the
+floundered list, we can check whether the timestamp has changed
+since the goal was marked as floundering. If not, then no new
+information can have been gained, and we can mark the current table
+as floundered itself.
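+
+In terms of the types in `lib.rs`, the bookkeeping looks roughly like
+the following sketch. The field names (`answer_time`,
+`floundered_subgoals`, `floundered_time`) are the real ones from
+`ExClause` and `FlounderedSubgoal`, but the function itself is a
+simplified illustration written as if inside the crate, not the actual
+code in `logic.rs`:
+
+```rust
+use crate::{ExClause, Literal};
+use chalk_ir::interner::Interner;
+
+/// Move floundered subgoals back into the main subgoal list, but only
+/// if we have learned something (i.e. bumped `answer_time` by solving
+/// some other subgoal) since they floundered.
+fn reconsider_floundered_subgoals<I: Interner>(ex_clause: &mut ExClause<I>) {
+    let now = ex_clause.answer_time;
+    let mut retry: Vec<Literal<I>> = Vec::new();
+    ex_clause.floundered_subgoals.retain(|floundered| {
+        if floundered.floundered_time < now {
+            // The clock moved since this subgoal floundered: retry it.
+            retry.push(floundered.floundered_literal.clone());
+            false
+        } else {
+            // Nothing new was learned: leave it floundered. If this is
+            // true of every floundered subgoal, the table itself ends
+            // up marked as floundered.
+            true
+        }
+    });
+    ex_clause.subgoals.extend(retry);
+}
+```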
+
+This mechanism allows floundering to propagate up many levels, e.g.
+in an example like this:
+
+```rust
+trait Foo { }
+trait Bar { }
+trait Baz { }
+
+impl<T: Baz + Bar> Foo for T { }
+
+impl Bar for u32 { }
+impl Bar for i32 { }
+
+impl<T: Sized> Baz for T { }
+```
+
+Here, solving `?T: Baz` will in turn require solving `?T: Sized`, which
+flounders -- and this floundering state will be propagated up to the
+`?T: Foo` table.
+
+## Heritage and acronyms
+
+This solver implements the SLG solving technique, though extended to
+accommodate hereditary Harrop (HH) predicates, as well as the needs of
+lazy normalization.
+
+Its design is kind of a fusion of [MiniKanren] and the following
+papers, which I will refer to as EWFS and NFTD respectively:
+
+> Efficient Top-Down Computation of Queries Under the Well-founded Semantics
+> (Chen, Swift, and Warren; Journal of Logic Programming '95)
+
+> A New Formulation of Tabled Resolution with Delay
+> (Swift; EPIA '99)
+
+[MiniKanren]: http://minikanren.org/
+
+In addition, I incorporated extensions from the following papers,
+which I will refer to as SA and RR respectively; they describe how to
+introduce approximation when processing subgoals and so forth:
+
+> Terminating Evaluation of Logic Programs with Finite Three-Valued Models
+> Riguzzi and Swift; ACM Transactions on Computational Logic 2013
+> (Introduces "subgoal abstraction", hence the name SA)
+>
+> Radial Restraint
+> Grosof and Swift; 2013
+
+Another useful paper that gives a kind of high-level overview of
+concepts at play is the following:
+
+> XSB: Extending Prolog with Tabled Logic Programming
+> (Swift and Warren; Theory and Practice of Logic Programming '10)
+
+There are a few places where I intentionally diverged from the semantics
+as described in the papers -- e.g. by more aggressively approximating --
+and I have marked them with a DIVERGENCE comment. Those places may
+want to be re-evaluated in the future.
+
+A few other acronyms that I use:
+
+- WAM: Warren abstract machine, an efficient way to evaluate Prolog programs.
+ See <http://wambook.sourceforge.net/>.
+- HH: Hereditary Harrop predicates. What Chalk deals in.
+ Popularized by Lambda Prolog.
+
+
diff --git a/vendor/chalk-engine/src/context.rs b/vendor/chalk-engine/src/context.rs
new file mode 100644
index 000000000..fa418dfd4
--- /dev/null
+++ b/vendor/chalk-engine/src/context.rs
@@ -0,0 +1,72 @@
+//! Defines traits used to embed the chalk-engine in another crate.
+//!
+//! chalk and rustc both define types which implement the traits in this
+//! module. This allows each user of chalk-engine to define their own
+//! `DomainGoal` type, add arena lifetime parameters, and more. See
+//! [`Context`] trait for a list of types.
+
+use crate::CompleteAnswer;
+use chalk_ir::interner::Interner;
+use chalk_ir::Substitution;
+use std::fmt::Debug;
+
+pub enum AnswerResult<I: Interner> {
+ /// The next available answer.
+ Answer(CompleteAnswer<I>),
+
+ /// No answer could be returned because there are no more solutions.
+ NoMoreSolutions,
+
+ /// No answer could be returned because the goal has floundered.
+ Floundered,
+
+ /// No answer could be returned *yet*, because we exceeded our
+ /// quantum (`should_continue` returned false).
+ QuantumExceeded,
+}
+
+impl<I: Interner> AnswerResult<I> {
+ pub fn is_answer(&self) -> bool {
+ matches!(self, Self::Answer(_))
+ }
+
+ pub fn answer(self) -> CompleteAnswer<I> {
+ match self {
+ Self::Answer(answer) => answer,
+ _ => panic!("Not an answer."),
+ }
+ }
+
+ pub fn is_no_more_solutions(&self) -> bool {
+ matches!(self, Self::NoMoreSolutions)
+ }
+
+ pub fn is_quantum_exceeded(&self) -> bool {
+ matches!(self, Self::QuantumExceeded)
+ }
+}
+
+impl<I: Interner> Debug for AnswerResult<I> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ AnswerResult::Answer(answer) => write!(fmt, "{:?}", answer),
+ AnswerResult::Floundered => write!(fmt, "Floundered"),
+ AnswerResult::NoMoreSolutions => write!(fmt, "None"),
+ AnswerResult::QuantumExceeded => write!(fmt, "QuantumExceeded"),
+ }
+ }
+}
+
+pub trait AnswerStream<I: Interner> {
+ /// Gets the next answer for a given goal, but doesn't increment the answer index.
+ /// Calling this or `next_answer` again will give the same answer.
+ fn peek_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult<I>;
+
+ /// Gets the next answer for a given goal, incrementing the answer index.
+ /// Calling this or `peek_answer` again will give the next answer.
+ fn next_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult<I>;
+
+ /// Invokes `test` with each possible future answer, returning true immediately
+ /// if we find any answer for which `test` returns true.
+ fn any_future_answer(&self, test: impl Fn(&Substitution<I>) -> bool) -> bool;
+}
diff --git a/vendor/chalk-engine/src/derived.rs b/vendor/chalk-engine/src/derived.rs
new file mode 100644
index 000000000..b3cdc3d0d
--- /dev/null
+++ b/vendor/chalk-engine/src/derived.rs
@@ -0,0 +1,34 @@
+// These impls for PartialEq, Eq, etc are written by hand. This is
+// because the `#[derive()]` would add requirements onto the context
+// object that are not needed.
+
+use super::*;
+use std::cmp::{Eq, PartialEq};
+use std::hash::{Hash, Hasher};
+use std::mem;
+
+///////////////////////////////////////////////////////////////////////////
+
+impl<I: Interner> PartialEq for Literal<I> {
+ fn eq(&self, other: &Literal<I>) -> bool {
+ match (self, other) {
+ (Literal::Positive(goal1), Literal::Positive(goal2))
+ | (Literal::Negative(goal1), Literal::Negative(goal2)) => goal1 == goal2,
+
+ _ => false,
+ }
+ }
+}
+
+impl<I: Interner> Eq for Literal<I> {}
+
+impl<I: Interner> Hash for Literal<I> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ mem::discriminant(self).hash(state);
+ match self {
+ Literal::Positive(goal) | Literal::Negative(goal) => {
+ goal.hash(state);
+ }
+ }
+ }
+}
diff --git a/vendor/chalk-engine/src/forest.rs b/vendor/chalk-engine/src/forest.rs
new file mode 100644
index 000000000..410b9cd0a
--- /dev/null
+++ b/vendor/chalk-engine/src/forest.rs
@@ -0,0 +1,116 @@
+use crate::context::{AnswerResult, AnswerStream};
+use crate::logic::RootSearchFail;
+use crate::slg::SlgContextOps;
+use crate::table::AnswerIndex;
+use crate::tables::Tables;
+use crate::{TableIndex, TimeStamp};
+
+use chalk_ir::interner::Interner;
+use chalk_ir::{Goal, InEnvironment, Substitution, UCanonical};
+use tracing::debug;
+
+pub(crate) struct Forest<I: Interner> {
+ pub(crate) tables: Tables<I>,
+
+ /// This is a clock which always increases. It is
+ /// incremented every time a new subgoal is followed.
+ /// This effectively gives us a way to track at what depth
+ /// and in which iteration a table or strand was last followed.
+ pub(crate) clock: TimeStamp,
+}
+
+impl<I: Interner> Forest<I> {
+ pub fn new() -> Self {
+ Forest {
+ tables: Tables::new(),
+ clock: TimeStamp::default(),
+ }
+ }
+
+ // Gets the next clock TimeStamp. This will never decrease.
+ pub(crate) fn increment_clock(&mut self) -> TimeStamp {
+ self.clock.increment();
+ self.clock
+ }
+
+ /// Returns a "solver" for a given goal in the form of an
+ /// iterator. Each time you invoke `next`, it will do the work to
+ /// extract one more answer. These answers are cached in between
+ /// invocations. Invoking `next` fewer times is preferable =)
+ pub fn iter_answers<'f>(
+ &'f mut self,
+ context: &'f SlgContextOps<'f, I>,
+ goal: &UCanonical<InEnvironment<Goal<I>>>,
+ ) -> impl AnswerStream<I> + 'f {
+ let table = self.get_or_create_table_for_ucanonical_goal(context, goal.clone());
+ let answer = AnswerIndex::ZERO;
+ ForestSolver {
+ forest: self,
+ context,
+ table,
+ answer,
+ }
+ }
+}
+
+struct ForestSolver<'me, I: Interner> {
+ forest: &'me mut Forest<I>,
+ context: &'me SlgContextOps<'me, I>,
+ table: TableIndex,
+ answer: AnswerIndex,
+}
+
+impl<'me, I: Interner> AnswerStream<I> for ForestSolver<'me, I> {
+ /// # Panics
+ ///
+ /// Panics if a negative cycle was detected.
+ fn peek_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult<I> {
+ loop {
+ match self
+ .forest
+ .root_answer(self.context, self.table, self.answer)
+ {
+ Ok(answer) => {
+ debug!(answer = ?(&answer));
+ return AnswerResult::Answer(answer);
+ }
+
+ Err(RootSearchFail::InvalidAnswer) => {
+ self.answer.increment();
+ }
+ Err(RootSearchFail::Floundered) => {
+ return AnswerResult::Floundered;
+ }
+
+ Err(RootSearchFail::NoMoreSolutions) => {
+ return AnswerResult::NoMoreSolutions;
+ }
+
+ Err(RootSearchFail::QuantumExceeded) => {
+ if !should_continue() {
+ return AnswerResult::QuantumExceeded;
+ }
+ }
+
+ Err(RootSearchFail::NegativeCycle) => {
+ // Negative cycles *ought* to be avoided by construction. Hence panic
+ // if we find one, as that likely indicates a problem in the chalk-solve
+ // lowering rules. (In principle, we could propagate this error out,
+ // and let chalk-solve do the asserting, but that seemed like it would
+ // complicate the function signature more than it's worth.)
+ panic!("negative cycle was detected");
+ }
+ }
+ }
+ }
+
+ fn next_answer(&mut self, should_continue: impl Fn() -> bool) -> AnswerResult<I> {
+ let answer = self.peek_answer(should_continue);
+ self.answer.increment();
+ answer
+ }
+
+ fn any_future_answer(&self, test: impl Fn(&Substitution<I>) -> bool) -> bool {
+ self.forest.any_future_answer(self.table, self.answer, test)
+ }
+}
diff --git a/vendor/chalk-engine/src/lib.rs b/vendor/chalk-engine/src/lib.rs
new file mode 100644
index 000000000..499b33e4f
--- /dev/null
+++ b/vendor/chalk-engine/src/lib.rs
@@ -0,0 +1,334 @@
+//! An alternative solver based around the SLG algorithm, which
+//! implements the well-founded semantics. For an overview of how the solver
+//! works, see [The On-Demand SLG Solver][guide] in the chalk book.
+//!
+//! [guide]: https://rust-lang.github.io/chalk/book/engine/slg.html
+//!
+//! This algorithm is very closely based on the description found in the
+//! following paper, which I will refer to in the comments as EWFS:
+//!
+//! > Efficient Top-Down Computation of Queries Under the Well-founded Semantics
+//! > (Chen, Swift, and Warren; Journal of Logic Programming '95)
+//!
+//! However, to understand that paper, I would recommend first
+//! starting with the following paper, which I will refer to in the
+//! comments as NFTD:
+//!
+//! > A New Formulation of Tabled Resolution with Delay
+//! > (Swift; EPIA '99)
+//!
+//! In addition, I incorporated extensions from the following papers,
+//! which I will refer to as SA and RR respectively; they
+//! describe how to introduce approximation when processing
+//! subgoals and so forth:
+//!
+//! > Terminating Evaluation of Logic Programs with Finite Three-Valued Models
+//! > Riguzzi and Swift; ACM Transactions on Computational Logic 2013
+//! > (Introduces "subgoal abstraction", hence the name SA)
+//! >
+//! > Radial Restraint
+//! > Grosof and Swift; 2013
+//!
+//! Another useful paper that gives a kind of high-level overview of
+//! concepts at play is the following, which I will refer to as XSB:
+//!
+//! > XSB: Extending Prolog with Tabled Logic Programming
+//! > (Swift and Warren; Theory and Practice of Logic Programming '10)
+//!
+//! While this code is adapted from the algorithms described in those
+//! papers, it is not the same. For one thing, the approaches there
+//! had to be extended to our context, and in particular to coping
+//! with hereditary Harrop predicates and our version of unification
+//! (which produces subgoals). I believe those to be largely faithful
+//! extensions. However, there are some other places where I
+//! intentionally diverged from the semantics as described in the
+//! papers -- e.g. by more aggressively approximating -- and I have
+//! marked them with a DIVERGENCE comment. Those places may want to be
+//! re-evaluated in the future.
+//!
+//! Glossary of other terms:
+//!
+//! - WAM: Warren abstract machine, an efficient way to evaluate Prolog programs.
+//! See <http://wambook.sourceforge.net/>.
+//! - HH: Hereditary Harrop predicates. What Chalk deals in.
+//! Popularized by Lambda Prolog.
+
+use std::cmp::min;
+use std::usize;
+
+use chalk_derive::{Fold, HasInterner, Visit};
+use chalk_ir::interner::Interner;
+use chalk_ir::{
+ AnswerSubst, Canonical, ConstrainedSubst, Constraint, DebruijnIndex, Goal, InEnvironment,
+ Substitution,
+};
+use std::ops::ControlFlow;
+
+pub mod context;
+mod derived;
+pub mod forest;
+mod logic;
+mod normalize_deep;
+mod simplify;
+pub mod slg;
+pub mod solve;
+mod stack;
+mod strand;
+mod table;
+mod tables;
+
+index_struct! {
+ pub struct TableIndex { // FIXME: pub b/c Fold
+ value: usize,
+ }
+}
+
+/// The paper describes these as `A :- D | G`.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Fold, Visit, HasInterner)]
+pub struct ExClause<I: Interner> {
+ /// The substitution which, applied to the goal of our table,
+ /// would yield A.
+ pub subst: Substitution<I>,
+
+ /// True if any subgoals were depended upon negatively and
+ /// were not fully evaluated, or if we encountered a `CannotProve`
+ /// goal. (In the full SLG algorithm, we would use delayed literals here,
+ /// but we don't bother, as we don't need that support.)
+ pub ambiguous: bool,
+
+ /// Region constraints we have accumulated.
+ pub constraints: Vec<InEnvironment<Constraint<I>>>,
+
+ /// Subgoals: literals that must be proven
+ pub subgoals: Vec<Literal<I>>,
+
+ /// We assume that negative literals cannot have coinductive cycles.
+ pub delayed_subgoals: Vec<InEnvironment<Goal<I>>>,
+
+ /// Time stamp that is incremented each time we find an answer to
+ /// some subgoal. This is used to figure out whether any of the
+ /// floundered subgoals may no longer be floundered: we record the
+ /// current time when we add something to the list of floundered
+ /// subgoals, and then we can compare whether its value has
+ /// changed since then. This is not the same as the `TimeStamp` of
+ /// `Forest`'s clock.
+ pub answer_time: TimeStamp,
+
+ /// List of subgoals that have floundered. See `FlounderedSubgoal`
+ /// for more information.
+ pub floundered_subgoals: Vec<FlounderedSubgoal<I>>,
+}
+
+/// The "time stamp" is a simple clock that gets incremented each time
+/// we encounter a positive answer in processing a particular
+/// strand. This is used as an optimization to help us figure out when
+/// we *may* have changed inference variables.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct TimeStamp {
+ clock: u64,
+}
+
+impl TimeStamp {
+ const MAX: TimeStamp = TimeStamp {
+ clock: ::std::u64::MAX,
+ };
+
+ fn increment(&mut self) {
+ self.clock += 1;
+ }
+}
+
+/// A "floundered" subgoal is one that contains unbound existential
+/// variables for which it cannot produce a value. The classic example
+/// of floundering is a negative subgoal:
+///
+/// ```notrust
+/// not { Implemented(?T: Foo) }
+/// ```
+///
+/// The way the Prolog solver works, it basically enumerates all the
+/// ways that a given goal can be *true*. But we can't use this
+/// technique to find all the ways that `?T: Foo` can be *false* -- so
+/// we call it floundered. In other words, we can evaluate a negative
+/// goal, but only if we know what `?T` is -- we can't use the
+/// negative goal to help us figure out `?T`.
+///
+/// In addition to negative goals, we use floundering to prevent the
+/// trait solver from trying to enumerate very large goals with tons
+/// of answers. For example, we consider a goal like `?T: Sized` to
+/// "flounder", since we can't hope to enumerate all types that are
+/// `Sized`. The same is true for other special traits like `Clone`.
+///
+/// Floundering can also occur indirectly. For example:
+///
+/// ```notrust
+/// trait Foo { }
+/// impl<T> Foo for T { }
+/// ```
+///
+/// trying to solve `?T: Foo` would immediately require solving `?T:
+/// Sized` (because of the implicit `Sized` bound on `T`), and hence
+/// would flounder.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Fold, Visit)]
+pub struct FlounderedSubgoal<I: Interner> {
+ /// Literal that floundered.
+ pub floundered_literal: Literal<I>,
+
+ /// Current value of the strand's clock at the time of
+ /// floundering.
+ pub floundered_time: TimeStamp,
+}
+
+/// An "answer" in the on-demand solver corresponds to a fully solved
+/// goal for a particular table (modulo delayed literals). It contains
+/// a substitution.
+#[derive(Clone, Debug)]
+pub struct Answer<I: Interner> {
+ /// Contains values for the unbound inference variables for which
+ /// the table is true, along with any delayed subgoals (which must
+ /// still be proven) and region constraints (which must still be
+ /// proven, but not by chalk).
+ pub subst: Canonical<AnswerSubst<I>>,
+
+ /// If this flag is set, then the answer could be neither proven
+ /// nor disproven. This could be because the size of the answer exceeded
+ /// `max_size` or because of a negative loop (e.g., `P :- not { P }`).
+ pub ambiguous: bool,
+}
+
+#[derive(Clone, Debug)]
+pub struct CompleteAnswer<I: Interner> {
+ /// Contains values for the unbound inference variables for which
+ /// the table is true, along with any region constraints (which must still be
+ /// proven, but not by chalk).
+ pub subst: Canonical<ConstrainedSubst<I>>,
+
+ /// If this flag is set, then the answer could be neither proven
+ /// nor disproven. This could be because the size of the answer exceeded
+ /// `max_size` or because of a negative loop (e.g., `P :- not { P }`).
+ pub ambiguous: bool,
+}
+
+/// Either `A` or `~A`, where `A` is a `Env |- Goal`.
+#[derive(Clone, Debug, Fold, Visit)]
+pub enum Literal<I: Interner> {
+ // FIXME: pub b/c fold
+ Positive(InEnvironment<Goal<I>>),
+ Negative(InEnvironment<Goal<I>>),
+}
+
+/// The `Minimums` structure is used to track the dependencies of
+/// some item E on the evaluation stack. In particular, it tracks
+/// cases where the success of E depends (or may depend) on items
+/// deeper in the stack than E (i.e., with lower DFNs).
+///
+/// `positive` tracks the lowest index on the stack to which we had a
+/// POSITIVE dependency (e.g. `foo(X) :- bar(X)`) -- meaning that in
+/// order for E to succeed, the dependency must succeed. It is
+/// initialized with the index of the predicate on the stack. So
+/// imagine we have a stack like this:
+///
+/// ```notrust
+/// // 0 foo(X) <-- bottom of stack
+/// // 1 bar(X)
+/// // 2 baz(X) <-- top of stack
+/// ```
+///
+/// In this case, `positive` would be initially 0, 1, and 2 for `foo`,
+/// `bar`, and `baz` respectively. This reflects the fact that the
+/// answers for `foo(X)` depend on the answers for `foo(X)`. =)
+///
+/// Now imagine that we had a clause `baz(X) :- foo(X)`, inducing a
+/// cycle. In this case, we would update `positive` for `baz(X)` to be
+/// 0, reflecting the fact that its answers depend on the answers for
+/// `foo(X)`. Similarly, the minimum for `bar` would (eventually) be
+/// updated, since it too transitively depends on `foo`. `foo` is
+/// unaffected.
+///
+/// `negative` tracks the lowest index on the stack to which we had a
+/// NEGATIVE dependency (e.g., `foo(X) :- not { bar(X) }`) -- meaning
+/// that for E to succeed, the dependency must fail. This is initially
+/// `usize::MAX`, reflecting the fact that the answers for `foo(X)` do
+/// not depend on `not(foo(X))`. When negative cycles are encountered,
+/// however, this value must be updated.
+#[derive(Copy, Clone, Debug)]
+struct Minimums {
+ positive: TimeStamp,
+ negative: TimeStamp,
+}
+
+impl Minimums {
+ const MAX: Minimums = Minimums {
+ positive: TimeStamp::MAX,
+ negative: TimeStamp::MAX,
+ };
+
+ /// Update our fields to be the minimum of our current value
+ /// and the values from other.
+ fn take_minimums(&mut self, other: &Minimums) {
+ self.positive = min(self.positive, other.positive);
+ self.negative = min(self.negative, other.negative);
+ }
+
+ fn minimum_of_pos_and_neg(&self) -> TimeStamp {
+ min(self.positive, self.negative)
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum AnswerMode {
+ Complete,
+ Ambiguous,
+}
+
+chalk_ir::copy_fold!(TableIndex);
+chalk_ir::copy_fold!(TimeStamp);
+
+chalk_ir::const_visit!(TableIndex);
+chalk_ir::const_visit!(TimeStamp);
+
+#[macro_export]
+macro_rules! index_struct {
+ ($(#[$m:meta])* $v:vis struct $n:ident {
+ $vf:vis value: usize,
+ }) => {
+ #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+ $(#[$m])*
+ $v struct $n {
+ $vf value: usize,
+ }
+
+ impl $n {
+ // Not all index structs need this, so allow it to be dead
+ // code.
+ #[allow(dead_code)]
+ $v fn get_and_increment(&mut self) -> Self {
+ let old_value = *self;
+ self.increment();
+ old_value
+ }
+
+ #[allow(dead_code)]
+ $v fn increment(&mut self) {
+ self.value += 1;
+ }
+
+ // TODO: Once the Step trait is stabilized (https://github.com/rust-lang/rust/issues/42168), instead implement it and use the Iterator implementation of Range
+ #[allow(dead_code)]
+ pub fn iterate_range(range: ::std::ops::Range<Self>) -> impl Iterator<Item = $n> {
+ (range.start.value..range.end.value).into_iter().map(|i| Self { value: i })
+ }
+ }
+
+ impl ::std::fmt::Debug for $n {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ write!(fmt, "{}({})", stringify!($n), self.value)
+ }
+ }
+
+ impl From<usize> for $n {
+ fn from(value: usize) -> Self {
+ Self { value }
+ }
+ }
+ }
+}
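+
+// Illustrative note (comment only, not part of the macro): the
+// `index_struct!` invocation for `TableIndex` near the top of this file
+// gives it a `From<usize>` impl plus `increment`, `get_and_increment`,
+// and `iterate_range` helpers, so fresh indices can be minted like this:
+//
+//     let mut next = TableIndex::from(0);
+//     let first = next.get_and_increment();  // TableIndex(0)
+//     let second = next.get_and_increment(); // TableIndex(1)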
diff --git a/vendor/chalk-engine/src/logic.rs b/vendor/chalk-engine/src/logic.rs
new file mode 100644
index 000000000..681f6e7ed
--- /dev/null
+++ b/vendor/chalk-engine/src/logic.rs
@@ -0,0 +1,1648 @@
+use crate::forest::Forest;
+use crate::normalize_deep::DeepNormalizer;
+use crate::slg::{ResolventOps, SlgContext, SlgContextOps};
+use crate::stack::{Stack, StackIndex};
+use crate::strand::{CanonicalStrand, SelectedSubgoal, Strand};
+use crate::table::{AnswerIndex, Table};
+use crate::{
+ Answer, AnswerMode, CompleteAnswer, ExClause, FlounderedSubgoal, Literal, Minimums, TableIndex,
+ TimeStamp,
+};
+
+use chalk_ir::could_match::CouldMatch;
+use chalk_ir::interner::Interner;
+use chalk_ir::{
+ AnswerSubst, Canonical, ConstrainedSubst, Constraints, FallibleOrFloundered, Floundered, Goal,
+ GoalData, InEnvironment, NoSolution, ProgramClause, Substitution, UCanonical, UniverseMap,
+};
+use chalk_solve::clauses::program_clauses_that_could_match;
+use chalk_solve::coinductive_goal::IsCoinductive;
+use chalk_solve::infer::ucanonicalize::UCanonicalized;
+use chalk_solve::infer::InferenceTable;
+use chalk_solve::solve::truncate;
+use tracing::{debug, debug_span, info, instrument};
+
+type RootSearchResult<T> = Result<T, RootSearchFail>;
+
+/// The different ways that a *root* search (which potentially pursues
+/// many strands) can fail. A root search is one that begins with an
+/// empty stack.
+#[derive(Debug)]
+pub(super) enum RootSearchFail {
+ /// The table we were trying to solve cannot succeed.
+ NoMoreSolutions,
+
+ /// The table cannot be solved without more type information.
+ Floundered,
+
+ /// We did not find a solution, but we still have things to try.
+ /// Repeat the request, and we'll give one of those a spin.
+ ///
+ /// (In a purely depth-first-based solver, like Prolog, this
+ /// doesn't appear.)
+ QuantumExceeded,
+
+ /// A negative cycle was found. This is fail-fast, so even if there was
+ /// possibly a solution (ambiguous or not), it may not have been found.
+ NegativeCycle,
+
+ /// The current answer index is not useful. Currently, this is returned
+ /// because the current answer needs refining.
+ InvalidAnswer,
+}
+
+/// This is returned when we try to select a subgoal for a strand.
+#[derive(PartialEq)]
+enum SubGoalSelection {
+ /// A subgoal was successfully selected. It has already been checked
+ /// to not be floundering. However, it may have an answer already, be
+ /// coinductive, or create a cycle.
+ Selected,
+
+ /// This strand has no remaining subgoals, but there may still be
+ /// floundered subgoals.
+ NotSelected,
+}
+
+/// This is returned by `on_no_remaining_subgoals`.
+enum NoRemainingSubgoalsResult {
+ /// There is an answer available for the root table
+ RootAnswerAvailable,
+
+ /// There was a `RootSearchFail`
+ RootSearchFail(RootSearchFail),
+
+ /// This was a success.
+ Success,
+}
+
+impl<I: Interner> Forest<I> {
+ /// Returns an answer with a given index for the given table. This
+ /// may require activating a strand and following it. It returns
+ /// `Ok(answer)` if the answer is available and otherwise a
+ /// `RootSearchFail` result.
+ pub(super) fn root_answer(
+ &mut self,
+ context: &SlgContextOps<I>,
+ table: TableIndex,
+ answer_index: AnswerIndex,
+ ) -> RootSearchResult<CompleteAnswer<I>> {
+ let stack = Stack::default();
+
+ let mut state = SolveState {
+ forest: self,
+ context,
+ stack,
+ };
+
+ match state.ensure_root_answer(table, answer_index) {
+ Ok(()) => {
+ assert!(state.stack.is_empty());
+ let answer = state.forest.answer(table, answer_index);
+ if !answer.subst.value.delayed_subgoals.is_empty() {
+ return Err(RootSearchFail::InvalidAnswer);
+ }
+ Ok(CompleteAnswer {
+ subst: Canonical {
+ binders: answer.subst.binders.clone(),
+ value: ConstrainedSubst {
+ subst: answer.subst.value.subst.clone(),
+ constraints: answer.subst.value.constraints.clone(),
+ },
+ },
+ ambiguous: answer.ambiguous,
+ })
+ }
+ Err(err) => Err(err),
+ }
+ }
+
+ pub(super) fn any_future_answer(
+ &self,
+ table: TableIndex,
+ mut answer_index: AnswerIndex,
+ mut test: impl FnMut(&Substitution<I>) -> bool,
+ ) -> bool {
+ // Check any cached answers, starting at `answer_index`.
+ while let Some(answer) = self.tables[table].answer(answer_index) {
+ info!("answer cached = {:?}", answer);
+ if test(&answer.subst.value.subst) {
+ return true;
+ }
+ answer_index.increment();
+ }
+
+ // Check any unsolved strands, which may give further answers.
+ self.tables[table]
+ .strands()
+ .any(|strand| test(&strand.value.ex_clause.subst))
+ }
+
+ pub(crate) fn answer(&self, table: TableIndex, answer: AnswerIndex) -> &Answer<I> {
+ self.tables[table].answer(answer).unwrap()
+ }
+
+ fn canonicalize_strand_from(
+ context: &SlgContextOps<I>,
+ infer: &mut InferenceTable<I>,
+ strand: &Strand<I>,
+ ) -> CanonicalStrand<I> {
+ infer
+ .canonicalize(context.program().interner(), strand.clone())
+ .quantified
+ }
+
+ /// Given a subgoal, converts the literal into u-canonical form
+ /// and searches for an existing table. If one is found, it is
+ /// returned, but otherwise a new table is created (and populated
+ /// with its initial set of strands).
+ ///
+ /// Returns `None` if the literal cannot be converted into a table
+ /// -- for example, this can occur when we have selected a
+ /// negative literal with free existential variables, in which
+ /// case the execution is said to "flounder".
+ ///
+ /// In terms of the NFTD paper, creating a new table corresponds
+ /// to the *New Subgoal* step as well as the *Program Clause
+ /// Resolution* steps.
+ #[instrument(level = "debug", skip(self, context, infer))]
+ fn get_or_create_table_for_subgoal(
+ &mut self,
+ context: &SlgContextOps<I>,
+ infer: &mut InferenceTable<I>,
+ subgoal: &Literal<I>,
+ ) -> Option<(TableIndex, UniverseMap)> {
+ // Subgoal abstraction:
+ let (ucanonical_subgoal, universe_map) = match subgoal {
+ Literal::Positive(subgoal) => {
+ Forest::abstract_positive_literal(context, infer, subgoal.clone())?
+ }
+ Literal::Negative(subgoal) => {
+ Forest::abstract_negative_literal(context, infer, subgoal.clone())?
+ }
+ };
+
+ debug!(?ucanonical_subgoal, ?universe_map);
+
+ let table = self.get_or_create_table_for_ucanonical_goal(context, ucanonical_subgoal);
+
+ Some((table, universe_map))
+ }
+
+ /// Given a u-canonical goal, searches for an existing table. If
+ /// one is found, it is returned, but otherwise a new table is
+ /// created (and populated with its initial set of strands).
+ ///
+ /// In terms of the NFTD paper, creating a new table corresponds
+ /// to the *New Subgoal* step as well as the *Program Clause
+ /// Resolution* steps.
+ #[instrument(level = "debug", skip(self, context))]
+ pub(crate) fn get_or_create_table_for_ucanonical_goal(
+ &mut self,
+ context: &SlgContextOps<I>,
+ goal: UCanonical<InEnvironment<Goal<I>>>,
+ ) -> TableIndex {
+ if let Some(table) = self.tables.index_of(&goal) {
+ debug!(?table, "found existing table");
+ return table;
+ }
+
+ info!(
+ table = ?self.tables.next_index(),
+ "creating new table with goal = {:#?}",
+ goal,
+ );
+ let table = Self::build_table(context, self.tables.next_index(), goal);
+ self.tables.insert(table)
+ }
+
+ /// When a table is first created, this function is invoked to
+ /// create the initial set of strands. If the table represents a
+ /// domain goal, these strands are created from the program
+ /// clauses as well as the clauses found in the environment. If
+ /// the table represents a non-domain goal, such as `for<T> G`
+ /// etc, then `simplify_goal` is invoked to create a strand
+ /// that breaks the goal down.
+ ///
+ /// In terms of the NFTD paper, this corresponds to the *Program
+ /// Clause Resolution* step being applied eagerly, as many times
+ /// as possible.
+ fn build_table(
+ context: &SlgContextOps<I>,
+ table_idx: TableIndex,
+ goal: UCanonical<InEnvironment<Goal<I>>>,
+ ) -> Table<I> {
+ let coinductive = goal.is_coinductive(context.program());
+ let mut table = Table::new(goal.clone(), coinductive);
+
+ let goal_data = goal.canonical.value.goal.data(context.program().interner());
+ match goal_data {
+ GoalData::DomainGoal(domain_goal) => {
+ let canon_domain_goal = UCanonical {
+ canonical: Canonical {
+ binders: goal.canonical.binders,
+ value: InEnvironment::new(
+ &goal.canonical.value.environment,
+ domain_goal.clone(),
+ ),
+ },
+ universes: goal.universes,
+ };
+
+ let db = context.program();
+ let canon_goal = canon_domain_goal.canonical.value.goal.clone();
+ let could_match = |c: &ProgramClause<I>| {
+ c.could_match(db.interner(), db.unification_database(), &canon_goal)
+ };
+
+ match program_clauses_that_could_match(db, &canon_domain_goal) {
+ Ok(mut clauses) => {
+ clauses.retain(could_match);
+ clauses.extend(db.custom_clauses().into_iter().filter(could_match));
+
+ let (infer, subst, goal) =
+ chalk_solve::infer::InferenceTable::from_canonical(
+ context.program().interner(),
+ canon_domain_goal.universes,
+ canon_domain_goal.canonical,
+ );
+
+ clauses.extend(
+ db.program_clauses_for_env(&goal.environment)
+ .iter(db.interner())
+ .cloned()
+ .filter(could_match),
+ );
+
+ let InEnvironment { environment, goal } = goal;
+
+ for clause in clauses {
+ info!("program clause = {:#?}", clause);
+ let mut infer = infer.clone();
+ if let Ok(resolvent) = infer.resolvent_clause(
+ context.unification_database(),
+ context.program().interner(),
+ &environment,
+ &goal,
+ &subst,
+ &clause,
+ ) {
+ info!("pushing initial strand with ex-clause: {:#?}", &resolvent,);
+ let strand = Strand {
+ ex_clause: resolvent,
+ selected_subgoal: None,
+ last_pursued_time: TimeStamp::default(),
+ };
+ let canonical_strand =
+ Self::canonicalize_strand_from(context, &mut infer, &strand);
+ table.enqueue_strand(canonical_strand);
+ }
+ }
+ }
+ Err(Floundered) => {
+ debug!(
+ table = ?table_idx,
+ "Marking table {:?} as floundered! (failed to create program clauses)",
+ table_idx
+ );
+ table.mark_floundered();
+ }
+ }
+ }
+
+ _ => {
+ let (mut infer, subst, InEnvironment { environment, goal }) =
+ chalk_solve::infer::InferenceTable::from_canonical(
+ context.program().interner(),
+ goal.universes,
+ goal.canonical,
+ );
+ // The goal for this table is not a domain goal, so we instead
+ // simplify it into a series of *literals*, all of which must be
+ // true. Thus, in EWFS terms, we are effectively creating a
+ // single child of the `A :- A` goal that is like `A :- B, C, D`
+ // where B, C, and D are the simplified subgoals. You can think
+ // of this as applying built-in "meta program clauses" that
+ // reduce goals into Domain goals.
+ match Self::simplify_goal(context, &mut infer, subst, environment, goal) {
+ FallibleOrFloundered::Ok(ex_clause) => {
+ info!(
+ ex_clause = ?DeepNormalizer::normalize_deep(
+ &mut infer,
+ context.program().interner(),
+ ex_clause.clone(),
+ ),
+ "pushing initial strand"
+ );
+ let strand = Strand {
+ ex_clause,
+ selected_subgoal: None,
+ last_pursued_time: TimeStamp::default(),
+ };
+ let canonical_strand =
+ Self::canonicalize_strand_from(context, &mut infer, &strand);
+ table.enqueue_strand(canonical_strand);
+ }
+ FallibleOrFloundered::NoSolution => {}
+ FallibleOrFloundered::Floundered => table.mark_floundered(),
+ }
+ }
+ }
+
+ table
+ }
+
+ /// Given a selected positive subgoal, applies the subgoal
+ /// abstraction function to yield the canonical form that will be
+ /// used to pick a table. Typically, this abstraction has no
+ /// effect, and hence we are simply returning the canonical form
+ /// of `subgoal`; but if the subgoal is getting too big, we return
+ /// `None`, which causes the subgoal to flounder.
+ fn abstract_positive_literal(
+ context: &SlgContextOps<I>,
+ infer: &mut InferenceTable<I>,
+ subgoal: InEnvironment<Goal<I>>,
+ ) -> Option<(UCanonical<InEnvironment<Goal<I>>>, UniverseMap)> {
+ if truncate::needs_truncation(
+ context.program().interner(),
+ infer,
+ context.max_size(),
+ &subgoal,
+ ) {
+ None
+ } else {
+ let canonicalized_goal = infer
+ .canonicalize(context.program().interner(), subgoal)
+ .quantified;
+ let UCanonicalized {
+ quantified,
+ universes,
+ } = InferenceTable::u_canonicalize(context.program().interner(), &canonicalized_goal);
+ Some((quantified, universes))
+ }
+ }
+
+ /// Given a selected negative subgoal, the subgoal is "inverted"
+ /// (see `InferenceTable<I, C>::invert`) and then potentially truncated
+ /// (see `abstract_positive_literal`). The result subgoal is
+ /// canonicalized. In some cases, this may return `None` and hence
+ /// fail to yield a useful result, for example if free existential
+ /// variables appear in `subgoal` (in which case the execution is
+ /// said to "flounder").
+ fn abstract_negative_literal(
+ context: &SlgContextOps<I>,
+ infer: &mut InferenceTable<I>,
+ subgoal: InEnvironment<Goal<I>>,
+ ) -> Option<(UCanonical<InEnvironment<Goal<I>>>, UniverseMap)> {
+ // First, we have to check that the selected negative literal
+ // is ground, and invert any universally quantified variables.
+ //
+ // DIVERGENCE -- In the RR paper, to ensure completeness, they
+ // permit non-ground negative literals, but only consider
+ // them to succeed when the target table has no answers at
+ // all. This is equivalent to inverting those free existentials
+ // into universals, as discussed in the comments of
+ // `invert`. This is clearly *sound*, but the completeness is
+ // a subtle point. In particular, it can cause **us** to reach
+ // false conclusions, because e.g. given a program like
+ // (selected left-to-right):
+ //
+ // not { ?T: Copy }, ?T = Vec<u32>
+ //
+ // we would select `not { ?T: Copy }` first. For this goal to
+ // succeed we would require that -- effectively -- `forall<T>
+ // { not { T: Copy } }`, which clearly doesn't hold. (In the
+ // terms of RR, we would require that the table for `?T: Copy`
+ // has failed before we can continue.)
+ //
+ // In the RR paper, this is acceptable because they assume all
+ // of their input programs are both **normal** (negative
+ // literals are selected after positive ones) and **safe**
+ // (all free variables in negative literals occur in positive
+ // literals). It is plausible for us to guarantee "normal"
+ // form, since we can reorder clauses as we need. I suspect we can
+ // guarantee safety too, but I have to think about it.
+ //
+ // For now, we opt for the safer route of terming such
+ // executions as floundering, because I think our use of
+ // negative goals is sufficiently limited we can get away with
+ // it. The practical effect is that we will judge more
+ // executions as floundering than we ought to (i.e., where we
+ // could instead generate an (imprecise) result). As you can
+ // see a bit later, we also diverge in some other aspects that
+ // affect completeness when it comes to subgoal abstraction.
+ let inverted_subgoal = infer.invert(context.program().interner(), subgoal)?;
+
+ if truncate::needs_truncation(
+ context.program().interner(),
+ infer,
+ context.max_size(),
+ &inverted_subgoal,
+ ) {
+ None
+ } else {
+ let canonicalized_goal = infer
+ .canonicalize(context.program().interner(), inverted_subgoal)
+ .quantified;
+ let UCanonicalized {
+ quantified,
+ universes,
+ } = InferenceTable::u_canonicalize(context.program().interner(), &canonicalized_goal);
+ Some((quantified, universes))
+ }
+ }
+}
+
+pub(crate) struct SolveState<'forest, I: Interner> {
+ forest: &'forest mut Forest<I>,
+ context: &'forest SlgContextOps<'forest, I>,
+ stack: Stack<I>,
+}
+
+impl<'forest, I: Interner> Drop for SolveState<'forest, I> {
+ fn drop(&mut self) {
+ if !self.stack.is_empty() {
+ if let Some(active_strand) = self.stack.top().active_strand.take() {
+ let table = self.stack.top().table;
+ self.forest.tables[table].enqueue_strand(active_strand);
+ }
+ self.unwind_stack();
+ }
+ }
+}
+
+impl<'forest, I: Interner> SolveState<'forest, I> {
+ /// Ensures that answer with the given index is available from the
+ /// given table. Returns `Ok` if there is an answer.
+ ///
+ /// This function first attempts to fetch an answer that is cached in
+ /// the table. If none is found, then it will recursively search
+ /// to find an answer.
+ #[instrument(level = "info", skip(self))]
+ fn ensure_root_answer(
+ &mut self,
+ initial_table: TableIndex,
+ initial_answer: AnswerIndex,
+ ) -> RootSearchResult<()> {
+ info!(
+ "table goal = {:#?}",
+ self.forest.tables[initial_table].table_goal
+ );
+ // Check if this table has floundered.
+ if self.forest.tables[initial_table].is_floundered() {
+ return Err(RootSearchFail::Floundered);
+ }
+ // Check for a tabled answer.
+ if let Some(answer) = self.forest.tables[initial_table].answer(initial_answer) {
+ info!("answer cached = {:?}", answer);
+ return Ok(());
+ }
+
+ // If no tabled answer is present, we ought to be requesting
+ // the next available index.
+ assert_eq!(
+ self.forest.tables[initial_table].next_answer_index(),
+ initial_answer
+ );
+
+ self.stack
+ .push(initial_table, Minimums::MAX, self.forest.increment_clock());
+ loop {
+ let clock = self.stack.top().clock;
+ // If we had an active strand, continue to pursue it
+ let table = self.stack.top().table;
+ let table_answer_mode = self.forest.tables[table].answer_mode;
+
+ // We track when we last pursued each strand. If all the strands have been
+ // pursued at this depth, then that means they all encountered a cycle.
+ // We also know that if the first strand has been pursued at this depth,
+ // then all have. Otherwise, an answer to any strand would have provided an
+ // answer for the table.
+ let forest = &mut self.forest;
+ let next_strand = self.stack.top().active_strand.take().or_else(|| {
+ forest.tables[table].dequeue_next_strand_that(|strand| {
+                    let time_eligible = strand.value.last_pursued_time < clock;
+                    let mode_eligible = match (table_answer_mode, strand.value.ex_clause.ambiguous) {
+ (AnswerMode::Complete, false) => true,
+ (AnswerMode::Complete, true) => false,
+ (AnswerMode::Ambiguous, _) => true,
+ };
+                    time_eligible && mode_eligible
+ })
+ });
+ match next_strand {
+ Some(mut canonical_strand) => {
+ debug!("starting next strand = {:#?}", canonical_strand);
+
+ canonical_strand.value.last_pursued_time = clock;
+ match self.select_subgoal(&mut canonical_strand) {
+ SubGoalSelection::Selected => {
+ // A subgoal has been selected. We now check this subgoal
+ // table for an existing answer or if it's in a cycle.
+ // If neither of those are the case, a strand is selected
+ // and the next loop iteration happens.
+ self.on_subgoal_selected(canonical_strand)?;
+ continue;
+ }
+ SubGoalSelection::NotSelected => {
+ match self.on_no_remaining_subgoals(canonical_strand) {
+ NoRemainingSubgoalsResult::RootAnswerAvailable => return Ok(()),
+ NoRemainingSubgoalsResult::RootSearchFail(e) => return Err(e),
+ NoRemainingSubgoalsResult::Success => continue,
+ };
+ }
+ }
+ }
+ None => {
+ self.on_no_strands_left()?;
+ continue;
+ }
+ }
+ }
+ }
+
+ /// This is called when an answer is available for the selected subgoal
+    /// of the strand. If the selected subgoal is a `Positive` subgoal,
+    /// it first clones the strand so that the next answer can be pursued
+    /// later. Then, it merges the answer into the provided `Strand`.
+    /// On success, `Ok` is returned and the `Strand` can continue to be processed.
+    /// On failure, `Err` is returned and the `Strand` should be discarded.
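+    ///
+    /// For example (illustrative sketch): if the strand's selected
+    /// subgoal is `?T: Clone` and the answer provides the substitution
+    /// `?T := u32`, merging removes that subgoal from the ex-clause and
+    /// applies `?T := u32` to what remains, so a later subgoal such as
+    /// `Vec<?T>: Debug` effectively becomes `Vec<u32>: Debug`.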
+ fn merge_answer_into_strand(
+ &mut self,
+ infer: &mut InferenceTable<I>,
+ strand: &mut Strand<I>,
+ ) -> RootSearchResult<()> {
+ // At this point, we know we have an answer for
+ // the selected subgoal of the strand.
+ // Now, we have to unify that answer onto the strand.
+
+ // If this answer is ambiguous and we don't want ambiguous answers
+ // yet, then we act like this is a floundered subgoal.
+ let ambiguous = {
+ let selected_subgoal = strand.selected_subgoal.as_ref().unwrap();
+ let answer = self.forest.answer(
+ selected_subgoal.subgoal_table,
+ selected_subgoal.answer_index,
+ );
+ answer.ambiguous
+ };
+ if let AnswerMode::Complete = self.forest.tables[self.stack.top().table].answer_mode {
+ if ambiguous {
+ // FIXME: we could try to be a little bit smarter here. This can
+ // really be split into cases:
+ // 1) Cases where no amount of solving will cause this ambiguity to change.
+            //    (e.g. `CannotProve`)
+ // 2) Cases where we may be able to get a better answer if we
+ // solve other subgoals first.
+ // (e.g. the `non_enumerable_traits_reorder` test)
+ // We really only need to delay merging an ambiguous answer for
+            // case 2. Do note, though, that even if we *do* merge the answer
+            // in case 1, we should stop solving this strand when in
+            // `AnswerMode::Complete` since we wouldn't use this answer yet
+            // *anyway*.
+
+ // The selected subgoal returned an ambiguous answer, but we don't want that.
+ // So, we treat this subgoal as floundered.
+ let selected_subgoal = strand.selected_subgoal.take().unwrap();
+ self.flounder_subgoal(&mut strand.ex_clause, selected_subgoal.subgoal_index);
+ return Ok(());
+ }
+ }
+
+ // If this subgoal was a `Positive` one, whichever way this
+ // particular answer turns out, there may yet be *more* answers,
+ // if this isn't a trivial substitution.
+ // Enqueue that alternative for later.
+ // NOTE: this is separate from the match below because we `take` the selected_subgoal
+ // below, but here we keep it for the new `Strand`.
+ let selected_subgoal = strand.selected_subgoal.as_ref().unwrap();
+ if let Literal::Positive(_) = strand.ex_clause.subgoals[selected_subgoal.subgoal_index] {
+ let answer = self.forest.answer(
+ selected_subgoal.subgoal_table,
+ selected_subgoal.answer_index,
+ );
+ if !self.forest.tables[selected_subgoal.subgoal_table]
+ .table_goal
+ .is_trivial_substitution(self.context.program().interner(), &answer.subst)
+ {
+ let mut next_subgoal = selected_subgoal.clone();
+ next_subgoal.answer_index.increment();
+ let next_strand = Strand {
+ ex_clause: strand.ex_clause.clone(),
+ selected_subgoal: Some(next_subgoal),
+ last_pursued_time: strand.last_pursued_time,
+ };
+ let table = self.stack.top().table;
+ let canonical_next_strand =
+ Forest::canonicalize_strand_from(self.context, infer, &next_strand);
+ self.forest.tables[table].enqueue_strand(canonical_next_strand);
+ }
+ }
+
+ // Deselect and remove the selected subgoal, now that we have an answer for it.
+ let selected_subgoal = strand.selected_subgoal.take().unwrap();
+ let subgoal = strand
+ .ex_clause
+ .subgoals
+ .remove(selected_subgoal.subgoal_index);
+ match subgoal {
+ Literal::Positive(subgoal) => {
+ let SelectedSubgoal {
+ subgoal_index: _,
+ subgoal_table,
+ answer_index,
+ ref universe_map,
+ } = selected_subgoal;
+ use chalk_solve::infer::ucanonicalize::UniverseMapExt;
+ let table_goal = universe_map.map_from_canonical(
+ self.context.program().interner(),
+ &self.forest.tables[subgoal_table].table_goal.canonical,
+ );
+ let answer_subst = universe_map.map_from_canonical(
+ self.context.program().interner(),
+ &self.forest.answer(subgoal_table, answer_index).subst,
+ );
+ match infer.apply_answer_subst(
+ self.context.program().interner(),
+ self.context.unification_database(),
+ &mut strand.ex_clause,
+ &subgoal,
+ &table_goal,
+ answer_subst,
+ ) {
+ Ok(()) => {
+ let ex_clause = &mut strand.ex_clause;
+
+                        // If the answer was ambiguous, we have to
+ // ensure that `ex_clause` is also ambiguous. This is
+ // the SLG FACTOR operation, though NFTD just makes it
+ // part of computing the SLG resolvent.
+ if self.forest.answer(subgoal_table, answer_index).ambiguous {
+ debug!("Marking Strand as ambiguous because answer to (positive) subgoal was ambiguous");
+ ex_clause.ambiguous = true;
+ }
+
+ // Increment the answer time for the `ex_clause`. Floundered
+                        // subgoals may be eligible to be pursued again.
+ ex_clause.answer_time.increment();
+
+ // Ok, we've applied the answer to this Strand.
+ Ok(())
+ }
+
+ // This answer led nowhere. Give up for now, but of course
+ // there may still be other strands to pursue, so return
+ // `QuantumExceeded`.
+ Err(NoSolution) => {
+ info!("answer not unifiable -> NoSolution");
+                        // This strand has no solution. It is no longer active,
+                        // so it is dropped at the end of this scope.
+
+                        // Now we want to propagate back up with `QuantumExceeded`
+ self.unwind_stack();
+ Err(RootSearchFail::QuantumExceeded)
+ }
+ }
+ }
+ Literal::Negative(_) => {
+ let SelectedSubgoal {
+ subgoal_index: _,
+ subgoal_table,
+ answer_index,
+ universe_map: _,
+ } = selected_subgoal;
+ // We got back an answer. This is bad, because we want
+ // to disprove the subgoal, but it may be
+ // "conditional" (maybe true, maybe not).
+ let answer = self.forest.answer(subgoal_table, answer_index);
+
+ // By construction, we do not expect negative subgoals
+ // to have delayed subgoals. This is because we do not
+ // need to permit `not { L }` where `L` is a
+ // coinductive goal. We could improve this if needed,
+ // but it keeps things simple.
+ if !answer.subst.value.delayed_subgoals.is_empty() {
+ panic!("Negative subgoal had delayed_subgoals");
+ }
+
+ if !answer.ambiguous {
+                    // We want to disprove the subgoal, but we
+                    // have an unconditional answer for it,
+                    // so we have failed to disprove it.
+ info!("found unconditional answer to neg literal -> NoSolution");
+
+                    // This strand has no solution. By returning an Err,
+                    // the caller should discard this `Strand`.
+
+                    // Now we want to propagate back up with `QuantumExceeded`
+ self.unwind_stack();
+ return Err(RootSearchFail::QuantumExceeded);
+ }
+
+ // Otherwise, the answer is ambiguous. We can keep going,
+ // but we have to mark our strand, too, as ambiguous.
+ //
+                // We wanted to disprove the subgoal, but we only have a
+                // conditional (ambiguous) answer for it, so we can neither
+                // prove nor disprove it outright.
+ debug!(?strand, "Marking Strand as ambiguous because answer to (negative) subgoal was ambiguous");
+ strand.ex_clause.ambiguous = true;
+
+                // Strand is ambiguous.
+ Ok(())
+ }
+ }
+ }
+
+ /// This is called when the selected subgoal for a strand has floundered.
+ /// We have to decide what this means for the strand.
+    /// - If the strand was positively dependent on the subgoal, we flounder
+    ///   the subgoal, then return `false`. This strand may be able to be
+    ///   retried later.
+    /// - If the strand was negatively dependent on the subgoal, then the strand
+ /// has led nowhere of interest and we return `true`. This strand should
+ /// be discarded.
+ ///
+ /// In other words, we return whether this strand flounders.
+ fn propagate_floundered_subgoal(&mut self, strand: &mut CanonicalStrand<I>) -> bool {
+ // This subgoal selection for the strand is finished, so take it
+ let selected_subgoal = strand.value.selected_subgoal.take().unwrap();
+ match strand.value.ex_clause.subgoals[selected_subgoal.subgoal_index] {
+ Literal::Positive(_) => {
+ // If this strand depends on this positively, then we can
+ // come back to it later. So, we mark that subgoal as
+ // floundered and yield `QuantumExceeded` up the stack
+
+ // If this subgoal floundered, push it onto the
+ // floundered list, along with the time that it
+ // floundered. We'll try to solve some other subgoals
+ // and maybe come back to it.
+ self.flounder_subgoal(&mut strand.value.ex_clause, selected_subgoal.subgoal_index);
+
+ false
+ }
+ Literal::Negative(_) => {
+ // Floundering on a negative literal isn't like a
+ // positive search: we only pursue negative literals
+ // when we already know precisely the type we are
+ // looking for. So there's no point waiting for other
+ // subgoals, we'll never recover more information.
+ //
+ // In fact, floundering on negative searches shouldn't
+ // normally happen, since there are no uninferred
+ // variables in the goal, but it can with forall
+ // goals:
+ //
+ // forall<T> { not { T: Debug } }
+ //
+ // Here, the table we will be searching for answers is
+ // `?T: Debug`, so it could well flounder.
+
+ // This strand has no solution. It is no longer active,
+                // so it is dropped at the end of this scope.
+
+ true
+ }
+ }
+ }
+
+ /// This is called if the selected subgoal for a `Strand` is
+ /// a coinductive cycle.
+ fn on_coinductive_subgoal(
+ &mut self,
+ mut canonical_strand: CanonicalStrand<I>,
+ ) -> Result<(), RootSearchFail> {
+ // This is a co-inductive cycle. That is, this table
+ // appears somewhere higher on the stack, and has now
+ // recursively requested an answer for itself. This
+ // means that we have to delay this subgoal until we
+ // reach a trivial self-cycle.
+
+ // This subgoal selection for the strand is finished, so take it
+ let selected_subgoal = canonical_strand.value.selected_subgoal.take().unwrap();
+ match canonical_strand
+ .value
+ .ex_clause
+ .subgoals
+ .remove(selected_subgoal.subgoal_index)
+ {
+ Literal::Positive(subgoal) => {
+ // We delay this subgoal
+ let table = self.stack.top().table;
+ assert!(
+ self.forest.tables[table].coinductive_goal
+ && self.forest.tables[selected_subgoal.subgoal_table].coinductive_goal
+ );
+
+ canonical_strand
+ .value
+ .ex_clause
+ .delayed_subgoals
+ .push(subgoal);
+
+ self.stack.top().active_strand = Some(canonical_strand);
+ Ok(())
+ }
+ Literal::Negative(_) => {
+ // We don't allow coinduction for negative literals
+ info!("found coinductive answer to negative literal");
+ panic!("Coinductive cycle with negative literal");
+ }
+ }
+ }
+
+ /// This is called if the selected subgoal for `strand` is
+ /// a positive, non-coinductive cycle.
+ ///
+ /// # Parameters
+ ///
+ /// * `strand` the strand from the top of the stack we are pursuing
+ /// * `minimums` is the collected minimum clock times
+ fn on_positive_cycle(
+ &mut self,
+ canonical_strand: CanonicalStrand<I>,
+ minimums: Minimums,
+ ) -> Result<(), RootSearchFail> {
+ // We can't take this because we might need it later to clear the cycle
+ let selected_subgoal = canonical_strand.value.selected_subgoal.as_ref().unwrap();
+
+ match canonical_strand.value.ex_clause.subgoals[selected_subgoal.subgoal_index] {
+ Literal::Positive(_) => {
+ self.stack.top().cyclic_minimums.take_minimums(&minimums);
+ }
+ Literal::Negative(_) => {
+ // We depend on `not(subgoal)`. For us to continue,
+ // `subgoal` must be completely evaluated. Therefore,
+ // we depend (negatively) on the minimum link of
+ // `subgoal` as a whole -- it doesn't matter whether
+ // it's pos or neg.
+ let mins = Minimums {
+ positive: self.stack.top().clock,
+ negative: minimums.minimum_of_pos_and_neg(),
+ };
+ self.stack.top().cyclic_minimums.take_minimums(&mins);
+ }
+ }
+
+ // Ok, we've taken the minimums from this cycle above. Now,
+ // we just return the strand to the table. The table only
+ // pulls strands if they have not been checked at this
+ // depth.
+ //
+ // We also can't mark these and return early from this
+ // because the stack above us might change.
+ let table = self.stack.top().table;
+ self.forest.tables[table].enqueue_strand(canonical_strand);
+
+ // The strand isn't active, but the table is, so just continue
+ Ok(())
+ }
+
+ /// Invoked after we've selected a (new) subgoal for the top-most
+ /// strand. Attempts to pursue this selected subgoal.
+ ///
+ /// Returns:
+ ///
+ /// * `Ok` if we should keep searching.
+ /// * `Err` if the subgoal failed in some way such that the strand can be abandoned.
+ fn on_subgoal_selected(
+ &mut self,
+ mut canonical_strand: CanonicalStrand<I>,
+ ) -> Result<(), RootSearchFail> {
+ // This may be a newly selected subgoal or an existing selected subgoal.
+
+ let SelectedSubgoal {
+ subgoal_index: _,
+ subgoal_table,
+ answer_index,
+ universe_map: _,
+ } = *canonical_strand.value.selected_subgoal.as_ref().unwrap();
+
+ debug!(
+ ?subgoal_table,
+ goal = ?self.forest.tables[subgoal_table].table_goal,
+ "table selection {:?} with goal: {:?}",
+ subgoal_table, self.forest.tables[subgoal_table].table_goal
+ );
+
+ // This is checked inside select_subgoal
+ assert!(!self.forest.tables[subgoal_table].is_floundered());
+
+ // Check for a tabled answer.
+ if let Some(answer) = self.forest.tables[subgoal_table].answer(answer_index) {
+ info!("answer cached = {:?}", answer);
+
+            // There was a previous answer available for this table.
+ // We need to check if we can merge it into the current `Strand`.
+ let num_universes = self.forest.tables[self.stack.top().table]
+ .table_goal
+ .universes;
+ let (mut infer, _, mut strand) = chalk_solve::infer::InferenceTable::from_canonical(
+ self.context.program().interner(),
+ num_universes,
+ canonical_strand.clone(),
+ );
+ match self.merge_answer_into_strand(&mut infer, &mut strand) {
+ Err(e) => {
+ debug!(?strand, "could not merge into current strand");
+ drop(strand);
+ return Err(e);
+ }
+ Ok(_) => {
+ debug!(?strand, "merged answer into current strand");
+ canonical_strand =
+ Forest::canonicalize_strand_from(self.context, &mut infer, &strand);
+ self.stack.top().active_strand = Some(canonical_strand);
+ return Ok(());
+ }
+ }
+ }
+
+ // If no tabled answer is present, we ought to be requesting
+ // the next available index.
+ assert_eq!(
+ self.forest.tables[subgoal_table].next_answer_index(),
+ answer_index
+ );
+
+ // Next, check if the table is already active. If so, then we
+ // have a recursive attempt.
+ if let Some(cyclic_depth) = self.stack.is_active(subgoal_table) {
+ info!("cycle detected at depth {:?}", cyclic_depth);
+ let minimums = Minimums {
+ positive: self.stack[cyclic_depth].clock,
+ negative: TimeStamp::MAX,
+ };
+
+ if self.top_of_stack_is_coinductive_from(cyclic_depth) {
+ debug!("table is coinductive");
+ return self.on_coinductive_subgoal(canonical_strand);
+ }
+
+ debug!("table encountered a positive cycle");
+ return self.on_positive_cycle(canonical_strand, minimums);
+ }
+
+ // We don't know anything about the selected subgoal table.
+ // Set this strand as active and push it onto the stack.
+ self.stack.top().active_strand = Some(canonical_strand);
+
+ let cyclic_minimums = Minimums::MAX;
+ self.stack.push(
+ subgoal_table,
+ cyclic_minimums,
+ self.forest.increment_clock(),
+ );
+ Ok(())
+ }
+
+ /// This is called when there are no remaining subgoals for a strand, so
+ /// it represents an answer. If the strand is ambiguous and we don't want
+ /// it yet, we just enqueue it again to pick it up later. Otherwise, we
+ /// add the answer from the strand onto the table.
+ fn on_no_remaining_subgoals(
+ &mut self,
+ canonical_strand: CanonicalStrand<I>,
+ ) -> NoRemainingSubgoalsResult {
+ let ambiguous = canonical_strand.value.ex_clause.ambiguous;
+ if let AnswerMode::Complete = self.forest.tables[self.stack.top().table].answer_mode {
+ if ambiguous {
+ // The strand can only return an ambiguous answer, but we don't
+ // want that right now, so requeue and we'll deal with it later.
+ self.forest.tables[self.stack.top().table].enqueue_strand(canonical_strand);
+ return NoRemainingSubgoalsResult::RootSearchFail(RootSearchFail::QuantumExceeded);
+ }
+ }
+ let floundered = !canonical_strand
+ .value
+ .ex_clause
+ .floundered_subgoals
+ .is_empty();
+ if floundered {
+ debug!("all remaining subgoals floundered for the table");
+ } else {
+ debug!("no remaining subgoals for the table");
+ };
+ match self.pursue_answer(canonical_strand) {
+ Some(answer_index) => {
+ debug!("answer is available");
+
+ // We found an answer for this strand, and therefore an
+ // answer for this table. Now, this table was either a
+ // subgoal for another strand, or was the root table.
+ let table = self.stack.top().table;
+ match self.stack.pop_and_take_caller_strand() {
+ Some(caller_strand) => {
+ self.stack.top().active_strand = Some(caller_strand);
+ NoRemainingSubgoalsResult::Success
+ }
+ None => {
+ // That was the root table, so we are done --
+ // *well*, unless there were delayed
+ // subgoals. In that case, we want to evaluate
+ // those delayed subgoals to completion, so we
+ // have to create a fresh strand that will
+ // take them as goals. Note that we *still
+ // need the original answer in place*, because
+ // we might have to build on it (see the
+ // Delayed Trivial Self Cycle, Variant 3
+ // example).
+
+ let answer = self.forest.answer(table, answer_index);
+ if let Some(strand) = self.create_refinement_strand(table, answer) {
+ self.forest.tables[table].enqueue_strand(strand);
+ }
+
+ NoRemainingSubgoalsResult::RootAnswerAvailable
+ }
+ }
+ }
+ None => {
+ debug!("answer is not available (or not new)");
+
+ // This strand led nowhere of interest. There might be *other*
+ // answers on this table, but we don't care right now, we'll
+ // try again at another time.
+
+ // Now we yield with `QuantumExceeded`
+ self.unwind_stack();
+ NoRemainingSubgoalsResult::RootSearchFail(RootSearchFail::QuantumExceeded)
+ }
+ }
+ }
+
+ /// A "refinement" strand is used in coinduction. When the root
+    /// table on the stack publishes an answer that has delayed subgoals,
+    /// we create a new strand that will attempt to prove out those
+    /// delayed subgoals (the root answer here is not *special* except
+    /// insofar as there is nothing above it, and hence we know that
+    /// the delayed subgoals (which resulted in some cycle) must be
+    /// referring to a table that has now completed).
+ ///
+ /// Note that it is important for this to be a *refinement* strand
+ /// -- meaning that the answer with delayed subgoals has been
+ /// published. This is necessary because sometimes the strand must
+ /// build on that very answer that it is refining. See Delayed
+ /// Trivial Self Cycle, Variant 3.
+ fn create_refinement_strand(
+ &self,
+ table: TableIndex,
+ answer: &Answer<I>,
+ ) -> Option<CanonicalStrand<I>> {
+ // If there are no delayed subgoals, then there is no need for
+ // a refinement strand.
+ if answer.subst.value.delayed_subgoals.is_empty() {
+ return None;
+ }
+
+ let num_universes = self.forest.tables[table].table_goal.universes;
+ let (
+ mut infer,
+ _,
+ AnswerSubst {
+ subst,
+ constraints,
+ delayed_subgoals,
+ },
+ ) = chalk_solve::infer::InferenceTable::from_canonical(
+ self.context.program().interner(),
+ num_universes,
+ answer.subst.clone(),
+ );
+
+ let delayed_subgoals = delayed_subgoals
+ .into_iter()
+ .map(Literal::Positive)
+ .collect();
+
+ let strand = Strand {
+ ex_clause: ExClause {
+ subst,
+ ambiguous: answer.ambiguous,
+ constraints: constraints
+ .as_slice(self.context.program().interner())
+ .to_vec(),
+ subgoals: delayed_subgoals,
+ delayed_subgoals: Vec::new(),
+ answer_time: TimeStamp::default(),
+ floundered_subgoals: Vec::new(),
+ },
+ selected_subgoal: None,
+ last_pursued_time: TimeStamp::default(),
+ };
+
+ Some(Forest::canonicalize_strand_from(
+ self.context,
+ &mut infer,
+ &strand,
+ ))
+ }
+
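+    /// Invoked when the table on top of the stack has no strands left
+    /// that are eligible to be pursued. Depending on the state of the
+    /// table, this either reports a definitive failure, switches the
+    /// table to `AnswerMode::Ambiguous` so ambiguous strands can be
+    /// retried, or records that the table is part of a (positive or
+    /// negative) cycle.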
+ fn on_no_strands_left(&mut self) -> Result<(), RootSearchFail> {
+ let table = self.stack.top().table;
+ debug!("no more strands available (or all cycles) for {:?}", table);
+
+ // No more strands left to try! This is either because all
+ // strands have failed, because all strands encountered a
+        // cycle, or because all strands would give ambiguous answers.
+
+ if self.forest.tables[table].strands_mut().count() == 0 {
+ // All strands for the table T on the top of the stack
+ // have **failed**. Hence we can pop it off the stack and
+ // check what this means for the table T' that was just
+ // below T on the stack (if any).
+ debug!("no more strands available");
+ let caller_strand = match self.stack.pop_and_borrow_caller_strand() {
+ Some(s) => s,
+ None => {
+ // T was the root table, so we are done.
+ debug!("no more solutions");
+ return Err(RootSearchFail::NoMoreSolutions);
+ }
+ };
+
+ // This subgoal selection for the strand is finished, so take it
+ let caller_selected_subgoal = caller_strand.value.selected_subgoal.take().unwrap();
+ return match caller_strand.value.ex_clause.subgoals
+ [caller_selected_subgoal.subgoal_index]
+ {
+ // T' wanted an answer from T, but none is
+ // forthcoming. Therefore, the active strand from T'
+ // has failed and can be discarded.
+ Literal::Positive(_) => {
+ debug!("discarding strand because positive literal");
+ self.stack.top().active_strand.take();
+ self.unwind_stack();
+ Err(RootSearchFail::QuantumExceeded)
+ }
+
+ // T' wanted there to be no answer from T, but none is forthcoming.
+ Literal::Negative(_) => {
+ debug!("subgoal was proven because negative literal");
+
+ // There is no solution for this strand. But, this
+                    // is what we want, so we can remove this subgoal and
+ // keep going.
+ caller_strand
+ .value
+ .ex_clause
+ .subgoals
+ .remove(caller_selected_subgoal.subgoal_index);
+
+ // This strand is still active, so continue
+ Ok(())
+ }
+ };
+ }
+
+ // We can't consider this table as part of a cycle unless we've handled
+ // all strands, not just non-ambiguous ones. See chalk#571.
+ if let AnswerMode::Complete = self.forest.tables[table].answer_mode {
+ debug!("Allowing ambiguous answers.");
+ self.forest.tables[table].answer_mode = AnswerMode::Ambiguous;
+ return Err(RootSearchFail::QuantumExceeded);
+ }
+
+ let clock = self.stack.top().clock;
+ let cyclic_minimums = self.stack.top().cyclic_minimums;
+ if cyclic_minimums.positive >= clock && cyclic_minimums.negative >= clock {
+ debug!("cycle with no new answers");
+
+ if cyclic_minimums.negative < TimeStamp::MAX {
+ // This is a negative cycle.
+ self.unwind_stack();
+ return Err(RootSearchFail::NegativeCycle);
+ }
+
+ // If all the things that we recursively depend on have
+ // positive dependencies on things below us in the stack,
+ // then no more answers are forthcoming. We can clear all
+ // the strands for those things recursively.
+ let table = self.stack.top().table;
+ let cyclic_strands = self.forest.tables[table].take_strands();
+ self.clear_strands_after_cycle(cyclic_strands);
+
+ // Now we yield with `QuantumExceeded`
+ self.unwind_stack();
+ Err(RootSearchFail::QuantumExceeded)
+ } else {
+ debug!("table part of a cycle");
+
+ // This table resulted in a positive cycle, so we have
+            // to check what this means for the caller strand whose
+            // selected subgoal is this table.
+ let caller_strand = match self.stack.pop_and_borrow_caller_strand() {
+ Some(s) => s,
+ None => {
+ panic!("nothing on the stack but cyclic result");
+ }
+ };
+
+ // We can't take this because we might need it later to clear the cycle
+ let caller_selected_subgoal = caller_strand.value.selected_subgoal.as_ref().unwrap();
+ match caller_strand.value.ex_clause.subgoals[caller_selected_subgoal.subgoal_index] {
+ Literal::Positive(_) => {
+ self.stack
+ .top()
+ .cyclic_minimums
+ .take_minimums(&cyclic_minimums);
+ }
+ Literal::Negative(_) => {
+ // We depend on `not(subgoal)`. For us to continue,
+ // `subgoal` must be completely evaluated. Therefore,
+ // we depend (negatively) on the minimum link of
+ // `subgoal` as a whole -- it doesn't matter whether
+ // it's pos or neg.
+ let mins = Minimums {
+ positive: self.stack.top().clock,
+ negative: cyclic_minimums.minimum_of_pos_and_neg(),
+ };
+ self.stack.top().cyclic_minimums.take_minimums(&mins);
+ }
+ }
+
+ // We can't pursue this strand anymore, so push it back onto the table
+ let active_strand = self.stack.top().active_strand.take().unwrap();
+ let table = self.stack.top().table;
+ self.forest.tables[table].enqueue_strand(active_strand);
+
+ // The strand isn't active, but the table is, so just continue
+ Ok(())
+ }
+ }
+
+ /// Unwinds the entire stack, returning all active strands back to
+ /// their tables (this time at the end of the queue).
+ fn unwind_stack(&mut self) {
+ loop {
+ match self.stack.pop_and_take_caller_strand() {
+ Some(active_strand) => {
+ let table = self.stack.top().table;
+ self.forest.tables[table].enqueue_strand(active_strand);
+ }
+
+ None => return,
+ }
+ }
+ }
+
+ /// Invoked after we have determined that every strand in `table`
+ /// encounters a cycle; `strands` is the set of strands (which
+ /// have been moved out of the table). This method then
+ /// recursively clears the active strands from the tables
+ /// referenced in `strands`, since all of them must encounter
+ /// cycles too.
+ fn clear_strands_after_cycle(&mut self, strands: impl IntoIterator<Item = CanonicalStrand<I>>) {
+ for strand in strands {
+ let selected_subgoal = strand.value.selected_subgoal;
+ let ex_clause = strand.value.ex_clause;
+ let selected_subgoal = selected_subgoal.unwrap_or_else(|| {
+ panic!(
+ "clear_strands_after_cycle invoked on strand in table \
+ without a selected subgoal: {:?}",
+ ex_clause,
+ )
+ });
+
+ let strand_table = selected_subgoal.subgoal_table;
+ let strands = self.forest.tables[strand_table].take_strands();
+ self.clear_strands_after_cycle(strands);
+ }
+ }
+
+ fn select_subgoal(
+ &mut self,
+ mut canonical_strand: &mut CanonicalStrand<I>,
+ ) -> SubGoalSelection {
+ loop {
+ while canonical_strand.value.selected_subgoal.is_none() {
+ if canonical_strand.value.ex_clause.subgoals.is_empty() {
+ if canonical_strand
+ .value
+ .ex_clause
+ .floundered_subgoals
+ .is_empty()
+ {
+ return SubGoalSelection::NotSelected;
+ }
+
+ self.reconsider_floundered_subgoals(&mut canonical_strand.value.ex_clause);
+
+ if canonical_strand.value.ex_clause.subgoals.is_empty() {
+ // All the subgoals of this strand floundered. We may be able
+ // to get helpful information from this strand still, but it
+                    // will *always* be ambiguous, so mark it as such.
+ assert!(!canonical_strand
+ .value
+ .ex_clause
+ .floundered_subgoals
+ .is_empty());
+ canonical_strand.value.ex_clause.ambiguous = true;
+ return SubGoalSelection::NotSelected;
+ }
+
+ continue;
+ }
+
+ let subgoal_index =
+ SlgContext::next_subgoal_index(&canonical_strand.value.ex_clause);
+
+ // Get or create table for this subgoal.
+ let num_universes = self.forest.tables[self.stack.top().table]
+ .table_goal
+ .universes;
+ let (mut infer, _, strand) = chalk_solve::infer::InferenceTable::from_canonical(
+ self.context.program().interner(),
+ num_universes,
+ canonical_strand.clone(),
+ );
+ match self.forest.get_or_create_table_for_subgoal(
+ self.context,
+ &mut infer,
+ &strand.ex_clause.subgoals[subgoal_index],
+ ) {
+ Some((subgoal_table, universe_map)) => {
+ canonical_strand.value.selected_subgoal = Some(SelectedSubgoal {
+ subgoal_index,
+ subgoal_table,
+ universe_map,
+ answer_index: AnswerIndex::ZERO,
+ });
+ }
+
+ None => {
+ // If we failed to create a table for the subgoal,
+ // that is because we have a floundered negative
+ // literal.
+ self.flounder_subgoal(&mut canonical_strand.value.ex_clause, subgoal_index);
+ }
+ }
+ }
+
+ let selected_subgoal_table = canonical_strand
+ .value
+ .selected_subgoal
+ .as_ref()
+ .unwrap()
+ .subgoal_table;
+ if self.forest.tables[selected_subgoal_table].is_floundered() {
+ if self.propagate_floundered_subgoal(canonical_strand) {
+ // This strand will never lead anywhere of interest.
+ return SubGoalSelection::NotSelected;
+ } else {
+ // This subgoal has floundered and has been marked.
+ // We previously would immediately mark the table as
+ // floundered too, and maybe come back to it. Now, we
+ // try to see if any other subgoals can be pursued first.
+ continue;
+ }
+ } else {
+ return SubGoalSelection::Selected;
+ }
+ }
+ }
+
+ /// Invoked when a strand represents an **answer**. This means
+ /// that the strand has no subgoals left. There are two possibilities:
+ ///
+ /// - the strand may represent an answer we have already found; in
+ /// that case, we can return `None`, as this
+ /// strand led nowhere of interest.
+ /// - the strand may represent a new answer, in which case it is
+    ///   added to the table and `Some(answer_index)` is returned.
+ fn pursue_answer(&mut self, canonical_strand: CanonicalStrand<I>) -> Option<AnswerIndex> {
+ let table = self.stack.top().table;
+ let Canonical {
+ binders,
+ value: strand,
+ } = canonical_strand;
+ let ExClause {
+ subst,
+ constraints,
+ ambiguous,
+ subgoals,
+ delayed_subgoals,
+ answer_time: _,
+ floundered_subgoals,
+ } = strand.ex_clause;
+ // If there are subgoals left, they should be followed
+ assert!(subgoals.is_empty());
+ // We can still try to get an ambiguous answer if there are floundered subgoals
+ let floundered = !floundered_subgoals.is_empty();
+ // So let's make sure that it *really* is an ambiguous answer (this should be set previously)
+ assert!(!floundered || ambiguous);
+
+ // FIXME: If there are floundered subgoals, we *could* potentially
+ // actually check if the partial answers to any of these subgoals
+ // conflict. But this requires that we think about whether they are
+ // positive or negative subgoals. This duplicates some of the logic
+ // in `merge_answer_into_strand`, so a bit of refactoring is needed.
+
+ // If the answer gets too large, mark the table as floundered.
+ // This is the *most conservative* course. There are a few alternatives:
+ // 1) Replace the answer with a truncated version of it. (This was done
+        //    previously, but turned out to be more complicated than we wanted
+        //    and a source of multiple bugs.)
+ // 2) Mark this *strand* as floundered. We don't currently have a mechanism
+ // for this (only floundered subgoals), so implementing this is more
+ // difficult because we don't want to just *remove* this strand from the
+ // table, because that might make the table give `NoMoreSolutions`, which
+ // is *wrong*.
+        // 3) Do something fancy with delayed subgoals, effectively delaying the
+ // truncated bits to a different strand (and a more "refined" answer).
+ // (This one probably needs more thought, but is here for "completeness")
+ //
+ // Ultimately, the current decision to flounder the entire table mostly boils
+ // down to "it works as we expect for the current tests". And, we likely don't
+ // even *need* the added complexity just for potentially more answers.
+ if truncate::needs_truncation(
+ self.context.program().interner(),
+ &mut InferenceTable::new(),
+ self.context.max_size(),
+ &subst,
+ ) {
+ self.forest.tables[table].mark_floundered();
+ return None;
+ }
+
+ let table_goal = &self.forest.tables[table].table_goal;
+
+ let filtered_delayed_subgoals = delayed_subgoals
+ .into_iter()
+ .filter(|delayed_subgoal| {
+ let canonicalized = InferenceTable::u_canonicalize(
+ self.context.program().interner(),
+ &chalk_ir::Canonical {
+ binders: binders.clone(),
+ value: delayed_subgoal.clone(),
+ },
+ )
+ .quantified;
+ *table_goal != canonicalized
+ })
+ .collect();
+
+ let subst = Canonical {
+ binders,
+ value: AnswerSubst {
+ subst,
+ constraints: Constraints::from_iter(self.context.program().interner(), constraints),
+ delayed_subgoals: filtered_delayed_subgoals,
+ },
+ };
+ debug!(?table, ?subst, ?floundered, "found answer");
+
+ let answer = Answer { subst, ambiguous };
+
+ // A "trivial" answer is one that is 'just true for all cases'
+ // -- in other words, it gives no information back to the
+ // caller. For example, `Vec<u32>: Sized` is "just true".
+ // Such answers are important because they are the most
+ // general case, and after we provide a trivial answer, no
+ // further answers are useful -- therefore we can clear any
+ // further pending strands (this is a "green cut", in
+ // Prolog parlance).
+ //
+ // This optimization is *crucial* for performance: for
+ // example, `projection_from_env_slow` fails miserably without
+ // it. The reason is that we wind up (thanks to implied bounds)
+ // with a clause like this:
+ //
+ // ```ignore
+ // forall<T> { (<T as SliceExt>::Item: Clone) :- WF(T: SliceExt) }
+ // ```
+ //
+ // we then apply that clause to `!1: Clone`, resulting in the
+ // table goal `!1: Clone :- <?0 as SliceExt>::Item = !1,
+        // WF(?0: SliceExt)`. This causes us to **enumerate all types
+        // `?0`** where `<?0 as SliceExt>::Item` normalizes to `!1` -- this is
+ // an infinite set of types, effectively. Interestingly,
+ // though, we only need one and we are done, because (if you
+ // look) our goal (`!1: Clone`) doesn't have any output
+ // parameters.
+ //
+ // This is actually a kind of general case. Due to Rust's rule
+ // about constrained impl type parameters, generally speaking
+ // when we have some free inference variable (like `?0`)
+ // within our clause, it must appear in the head of the
+ // clause. This means that the values we create for it will
+ // propagate up to the caller, and they will quickly surmise
+ // that there is ambiguity and stop requesting more answers.
+ // Indeed, the only exception to this rule about constrained
+        // type parameters is with associated type projections, as in
+ // the case above!
+ //
+ // (Actually, because of the trivial answer cut off rule, we
+ // never even get to the point of asking the query above in
+ // `projection_from_env_slow`.)
+ //
+ // However, there is one fly in the ointment: answers include
+ // region constraints, and you might imagine that we could
+ // find future answers that are also trivial but with distinct
+ // sets of region constraints. **For this reason, we only
+ // apply this green cut rule if the set of generated
+ // constraints is empty.**
+ //
+ // The limitation on region constraints is quite a drag! We
+ // can probably do better, though: for example, coherence
+ // guarantees that, for any given set of types, only a single
+ // impl ought to be applicable, and that impl can only impose
+ // one set of region constraints. However, it's not quite that
+ // simple, thanks to specialization as well as the possibility
+ // of proving things from the environment (though the latter
+ // is a *bit* suspect; e.g., those things in the environment
+ // must be backed by an impl *eventually*).
+ let is_trivial_answer = {
+ self.forest.tables[table]
+ .table_goal
+ .is_trivial_substitution(self.context.program().interner(), &answer.subst)
+ && answer
+ .subst
+ .value
+ .constraints
+ .is_empty(self.context.program().interner())
+ };
+
+ if let Some(answer_index) = self.forest.tables[table].push_answer(answer) {
+            // See above: if we have a *complete* and trivial answer, we don't
+ // want to follow any more strands
+ if !ambiguous && is_trivial_answer {
+ self.forest.tables[table].take_strands();
+ }
+
+ Some(answer_index)
+ } else {
+ info!("answer: not a new answer, returning None");
+ None
+ }
+ }
+
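+    /// Moves any floundered subgoal whose `floundered_time` predates the
+    /// current `answer_time` back into the active subgoal list, so that
+    /// it can be retried now that new information may be available.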
+ fn reconsider_floundered_subgoals(&mut self, ex_clause: &mut ExClause<I>) {
+ info!("reconsider_floundered_subgoals(ex_clause={:#?})", ex_clause,);
+ let ExClause {
+ answer_time,
+ subgoals,
+ floundered_subgoals,
+ ..
+ } = ex_clause;
+ for i in (0..floundered_subgoals.len()).rev() {
+ if floundered_subgoals[i].floundered_time < *answer_time {
+ let floundered_subgoal = floundered_subgoals.swap_remove(i);
+ subgoals.push(floundered_subgoal.floundered_literal);
+ }
+ }
+ }
+
+ /// Removes the subgoal at `subgoal_index` from the strand's
+ /// subgoal list and adds it to the strand's floundered subgoal
+ /// list.
+ fn flounder_subgoal(&self, ex_clause: &mut ExClause<I>, subgoal_index: usize) {
+ let _s = debug_span!(
+ "flounder_subgoal",
+ answer_time = ?ex_clause.answer_time,
+ subgoal = ?ex_clause.subgoals[subgoal_index],
+ );
+ let _s = _s.enter();
+
+ let floundered_time = ex_clause.answer_time;
+ let floundered_literal = ex_clause.subgoals.remove(subgoal_index);
+ ex_clause.floundered_subgoals.push(FlounderedSubgoal {
+ floundered_literal,
+ floundered_time,
+ });
+ debug!(?ex_clause);
+ }
+
+ /// True if all the tables on the stack starting from `depth` and
+ /// continuing until the top of the stack are coinductive.
+ ///
+ /// Example: Given a program like:
+ ///
+ /// ```notrust
+ /// struct Foo { a: Option<Box<Bar>> }
+ /// struct Bar { a: Option<Box<Foo>> }
+ /// trait XXX { }
+ /// impl<T: Send> XXX for T { }
+ /// ```
+ ///
+ /// and then a goal of `Foo: XXX`, we would eventually wind up
+ /// with a stack like this:
+ ///
+ /// | StackIndex | Table Goal |
+ /// | ---------- | ----------- |
+ /// | 0 | `Foo: XXX` |
+ /// | 1 | `Foo: Send` |
+ /// | 2 | `Bar: Send` |
+ ///
+ /// Here, the top of the stack is `Bar: Send`. And now we are
+ /// asking `top_of_stack_is_coinductive_from(1)` -- the answer
+ /// would be true, since `Send` is an auto trait, which yields a
+ /// coinductive goal. But `top_of_stack_is_coinductive_from(0)` is
+ /// false, since `XXX` is not an auto trait.
+ pub(super) fn top_of_stack_is_coinductive_from(&self, depth: StackIndex) -> bool {
+ StackIndex::iterate_range(self.stack.top_of_stack_from(depth)).all(|d| {
+ let table = self.stack[d].table;
+ self.forest.tables[table].coinductive_goal
+ })
+ }
+}
diff --git a/vendor/chalk-engine/src/normalize_deep.rs b/vendor/chalk-engine/src/normalize_deep.rs
new file mode 100644
index 000000000..0c4a01ccb
--- /dev/null
+++ b/vendor/chalk-engine/src/normalize_deep.rs
@@ -0,0 +1,172 @@
+use chalk_ir::fold::shift::Shift;
+use chalk_ir::fold::{Fold, Folder};
+use chalk_ir::interner::Interner;
+use chalk_ir::*;
+use chalk_solve::infer::InferenceTable;
+
+pub(crate) struct DeepNormalizer<'table, I: Interner> {
+ table: &'table mut InferenceTable<I>,
+ interner: I,
+}
+
+impl<I: Interner> DeepNormalizer<'_, I> {
+ /// Given a value `value` with variables in it, replaces those variables
+ /// with their instantiated values (if any). Uninstantiated variables are
+ /// left as-is.
+ ///
+ /// This is mainly intended for getting final values to dump to
+ /// the user and its use should otherwise be avoided, particularly
+ /// given the possibility of snapshots and rollbacks.
+ ///
+ /// See also `InferenceTable::canonicalize`, which -- during real
+ /// processing -- is often used to capture the "current state" of
+ /// variables.
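+    ///
+    /// For example (illustrative sketch): if `?0` has been unified with
+    /// `Vec<?1>` and `?1` with `u32`, normalizing `?0` yields `Vec<u32>`,
+    /// while a still-unbound variable is simply replaced by its "root"
+    /// variable and otherwise left uninstantiated.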
+ pub fn normalize_deep<T: Fold<I>>(
+ table: &mut InferenceTable<I>,
+ interner: I,
+ value: T,
+ ) -> T::Result {
+ value
+ .fold_with(
+ &mut DeepNormalizer { interner, table },
+ DebruijnIndex::INNERMOST,
+ )
+ .unwrap()
+ }
+}
+
+impl<I: Interner> Folder<I> for DeepNormalizer<'_, I> {
+ type Error = NoSolution;
+
+ fn as_dyn(&mut self) -> &mut dyn Folder<I, Error = Self::Error> {
+ self
+ }
+
+ fn fold_inference_ty(
+ &mut self,
+ var: InferenceVar,
+ kind: TyVariableKind,
+ _outer_binder: DebruijnIndex,
+ ) -> Fallible<Ty<I>> {
+ let interner = self.interner;
+ match self.table.probe_var(var) {
+ Some(ty) => Ok(ty
+ .assert_ty_ref(interner)
+ .clone()
+ .fold_with(self, DebruijnIndex::INNERMOST)?
+ .shifted_in(interner)), // FIXME shift
+ None => {
+ // Normalize all inference vars which have been unified into a
+ // single variable. Ena calls this the "root" variable.
+ Ok(self.table.inference_var_root(var).to_ty(interner, kind))
+ }
+ }
+ }
+
+ fn fold_inference_lifetime(
+ &mut self,
+ var: InferenceVar,
+ _outer_binder: DebruijnIndex,
+ ) -> Fallible<Lifetime<I>> {
+ let interner = self.interner;
+ match self.table.probe_var(var) {
+ Some(l) => Ok(l
+ .assert_lifetime_ref(interner)
+ .clone()
+ .fold_with(self, DebruijnIndex::INNERMOST)?
+ .shifted_in(interner)),
+ None => Ok(var.to_lifetime(interner)), // FIXME shift
+ }
+ }
+
+ fn fold_inference_const(
+ &mut self,
+ ty: Ty<I>,
+ var: InferenceVar,
+ _outer_binder: DebruijnIndex,
+ ) -> Fallible<Const<I>> {
+ let interner = self.interner;
+ match self.table.probe_var(var) {
+ Some(c) => Ok(c
+ .assert_const_ref(interner)
+ .clone()
+ .fold_with(self, DebruijnIndex::INNERMOST)?
+ .shifted_in(interner)),
+ None => Ok(var.to_const(interner, ty)), // FIXME shift
+ }
+ }
+
+ fn forbid_free_vars(&self) -> bool {
+ true
+ }
+
+ fn interner(&self) -> I {
+ self.interner
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use chalk_integration::interner::ChalkIr;
+ use chalk_integration::{arg, ty};
+
+ const U0: UniverseIndex = UniverseIndex { counter: 0 };
+
+    // We just use 20 `Invariant` variances, since these are zipped against
+    // the substitution and no substitution here is longer than that.
+ #[derive(Debug)]
+ struct TestDatabase;
+ impl UnificationDatabase<ChalkIr> for TestDatabase {
+ fn fn_def_variance(&self, _fn_def_id: FnDefId<ChalkIr>) -> Variances<ChalkIr> {
+ Variances::from_iter(ChalkIr, [Variance::Invariant; 20].iter().copied())
+ }
+
+ fn adt_variance(&self, _adt_id: AdtId<ChalkIr>) -> Variances<ChalkIr> {
+ Variances::from_iter(ChalkIr, [Variance::Invariant; 20].iter().copied())
+ }
+ }
+
+ #[test]
+ fn infer() {
+ let interner = ChalkIr;
+ let mut table: InferenceTable<ChalkIr> = InferenceTable::new();
+ let environment0 = Environment::new(interner);
+ let a = table.new_variable(U0).to_ty(interner);
+ let b = table.new_variable(U0).to_ty(interner);
+ table
+ .relate(
+ interner,
+ &TestDatabase,
+ &environment0,
+ Variance::Invariant,
+ &a,
+ &ty!(apply (item 0) (expr b)),
+ )
+ .unwrap();
+ // a is unified to Adt<#0>(c), where 'c' is a new inference var
+ // created by the generalizer to generalize 'b'. It then unifies 'b'
+ // and 'c', and when we normalize them, they'll both be output as
+ // the same "root" variable. However, there are no guarantees for
+ // _which_ of 'b' and 'c' becomes the root. We need to normalize
+ // "b" too, then, to ensure we get a consistent result.
+ assert_eq!(
+ DeepNormalizer::normalize_deep(&mut table, interner, a.clone()),
+ ty!(apply (item 0) (expr DeepNormalizer::normalize_deep(&mut table, interner, b.clone()))),
+ );
+ table
+ .relate(
+ interner,
+ &TestDatabase,
+ &environment0,
+ Variance::Invariant,
+ &b,
+ &ty!(apply (item 1)),
+ )
+ .unwrap();
+ assert_eq!(
+ DeepNormalizer::normalize_deep(&mut table, interner, a),
+ ty!(apply (item 0) (apply (item 1)))
+ );
+ }
+}
diff --git a/vendor/chalk-engine/src/simplify.rs b/vendor/chalk-engine/src/simplify.rs
new file mode 100644
index 000000000..1c594af01
--- /dev/null
+++ b/vendor/chalk-engine/src/simplify.rs
@@ -0,0 +1,141 @@
+use crate::forest::Forest;
+use crate::slg::SlgContextOps;
+use crate::{ExClause, Literal, TimeStamp};
+
+use chalk_ir::cast::{Cast, Caster};
+use chalk_ir::interner::Interner;
+use chalk_ir::{
+ Environment, FallibleOrFloundered, Goal, GoalData, InEnvironment, QuantifierKind, Substitution,
+ TyKind, TyVariableKind, Variance,
+};
+use chalk_solve::infer::InferenceTable;
+use tracing::debug;
+
+impl<I: Interner> Forest<I> {
+ /// Simplifies a goal into a series of positive domain goals
+ /// and negative goals. This operation may fail if the goal
+ /// includes unifications that cannot be completed.
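+    ///
+    /// For example (illustrative sketch): the goal
+    ///
+    /// ```notrust
+    /// exists<T> { not { T: Foo } }
+    /// ```
+    ///
+    /// is instantiated with a fresh inference variable `?0` and yields
+    /// an ex-clause whose only subgoal is the negative literal
+    /// `not { ?0: Foo }`; a plain domain goal such as `?0: Bar` would
+    /// instead be pushed as a positive literal.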
+ pub(super) fn simplify_goal(
+ context: &SlgContextOps<I>,
+ infer: &mut InferenceTable<I>,
+ subst: Substitution<I>,
+ initial_environment: Environment<I>,
+ initial_goal: Goal<I>,
+ ) -> FallibleOrFloundered<ExClause<I>> {
+ let mut ex_clause = ExClause {
+ subst,
+ ambiguous: false,
+ constraints: vec![],
+ subgoals: vec![],
+ delayed_subgoals: vec![],
+ answer_time: TimeStamp::default(),
+ floundered_subgoals: vec![],
+ };
+
+ // A stack of higher-level goals to process.
+ let mut pending_goals = vec![(initial_environment, initial_goal)];
+
+ while let Some((environment, goal)) = pending_goals.pop() {
+ match goal.data(context.program().interner()) {
+ GoalData::Quantified(QuantifierKind::ForAll, subgoal) => {
+ let subgoal = infer.instantiate_binders_universally(
+ context.program().interner(),
+ subgoal.clone(),
+ );
+ pending_goals.push((environment, subgoal.clone()));
+ }
+ GoalData::Quantified(QuantifierKind::Exists, subgoal) => {
+ let subgoal = infer.instantiate_binders_existentially(
+ context.program().interner(),
+ subgoal.clone(),
+ );
+ pending_goals.push((environment, subgoal.clone()));
+ }
+ GoalData::Implies(wc, subgoal) => {
+ let new_environment = environment.add_clauses(
+ context.program().interner(),
+ wc.iter(context.program().interner()).cloned(),
+ );
+ pending_goals.push((new_environment, subgoal.clone()));
+ }
+ GoalData::All(subgoals) => {
+ for subgoal in subgoals.iter(context.program().interner()) {
+ pending_goals.push((environment.clone(), subgoal.clone()));
+ }
+ }
+ GoalData::Not(subgoal) => {
+ ex_clause
+ .subgoals
+ .push(Literal::Negative(InEnvironment::new(
+ &environment,
+ subgoal.clone(),
+ )));
+ }
+ GoalData::EqGoal(goal) => {
+ let interner = context.program().interner();
+ let db = context.unification_database();
+ let a = &goal.a;
+ let b = &goal.b;
+
+ let result =
+ match infer.relate(interner, db, &environment, Variance::Invariant, a, b) {
+ Ok(r) => r,
+ Err(_) => return FallibleOrFloundered::NoSolution,
+ };
+ ex_clause.subgoals.extend(
+ result
+ .goals
+ .into_iter()
+ .casted(interner)
+ .map(Literal::Positive),
+ );
+ }
+ GoalData::SubtypeGoal(goal) => {
+ let interner = context.program().interner();
+ let db = context.unification_database();
+ let a_norm = infer.normalize_ty_shallow(interner, &goal.a);
+ let a = a_norm.as_ref().unwrap_or(&goal.a);
+ let b_norm = infer.normalize_ty_shallow(interner, &goal.b);
+ let b = b_norm.as_ref().unwrap_or(&goal.b);
+
+ if matches!(
+ a.kind(interner),
+ TyKind::InferenceVar(_, TyVariableKind::General)
+ ) && matches!(
+ b.kind(interner),
+ TyKind::InferenceVar(_, TyVariableKind::General)
+ ) {
+ return FallibleOrFloundered::Floundered;
+ }
+
+ let result =
+ match infer.relate(interner, db, &environment, Variance::Covariant, a, b) {
+ Ok(r) => r,
+ Err(_) => return FallibleOrFloundered::Floundered,
+ };
+ ex_clause.subgoals.extend(
+ result
+ .goals
+ .into_iter()
+ .casted(interner)
+ .map(Literal::Positive),
+ );
+ }
+ GoalData::DomainGoal(domain_goal) => {
+ ex_clause
+ .subgoals
+ .push(Literal::Positive(InEnvironment::new(
+ &environment,
+ domain_goal.clone().cast(context.program().interner()),
+ )));
+ }
+ GoalData::CannotProve => {
+ debug!("Marking Strand as ambiguous because of a `CannotProve` subgoal");
+ ex_clause.ambiguous = true;
+ }
+ }
+ }
+
+ FallibleOrFloundered::Ok(ex_clause)
+ }
+}
diff --git a/vendor/chalk-engine/src/slg.rs b/vendor/chalk-engine/src/slg.rs
new file mode 100644
index 000000000..f3522e777
--- /dev/null
+++ b/vendor/chalk-engine/src/slg.rs
@@ -0,0 +1,378 @@
+use crate::ExClause;
+
+use chalk_derive::HasInterner;
+use chalk_ir::interner::Interner;
+use chalk_ir::*;
+use chalk_solve::infer::InferenceTable;
+use chalk_solve::RustIrDatabase;
+
+use std::fmt::Debug;
+use std::marker::PhantomData;
+
+pub(crate) mod aggregate;
+mod resolvent;
+
+#[derive(Clone, Debug, HasInterner)]
+pub(crate) struct SlgContext<I: Interner> {
+ phantom: PhantomData<I>,
+}
+
+impl<I: Interner> SlgContext<I> {
+ pub(crate) fn next_subgoal_index(ex_clause: &ExClause<I>) -> usize {
+ // For now, we always pick the last subgoal in the
+ // list.
+ //
+ // FIXME(rust-lang-nursery/chalk#80) -- we should be more
+ // selective. For example, we don't want to pick a
+ // negative literal that will flounder, and we don't want
+ // to pick things like `?T: Sized` if we can help it.
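+        //
+        // For example, if `ex_clause.subgoals` currently holds
+        // `[A, B, C]`, index 2 (i.e. `C`, the most recently pushed
+        // subgoal) is selected.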
+ ex_clause.subgoals.len() - 1
+ }
+}
+#[derive(Clone, Debug)]
+pub(crate) struct SlgContextOps<'me, I: Interner> {
+ program: &'me dyn RustIrDatabase<I>,
+ max_size: usize,
+ expected_answers: Option<usize>,
+}
+
+impl<I: Interner> SlgContextOps<'_, I> {
+ pub(crate) fn new(
+ program: &dyn RustIrDatabase<I>,
+ max_size: usize,
+ expected_answers: Option<usize>,
+ ) -> SlgContextOps<'_, I> {
+ SlgContextOps {
+ program,
+ max_size,
+ expected_answers,
+ }
+ }
+
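+    /// Builds the "identity" constrained substitution for `goal`: each
+    /// canonical variable of the goal is mapped to itself and the
+    /// constraint set is empty, with the result re-canonicalized.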
+ fn identity_constrained_subst(
+ &self,
+ goal: &UCanonical<InEnvironment<Goal<I>>>,
+ ) -> Canonical<ConstrainedSubst<I>> {
+ let (mut infer, subst, _) = InferenceTable::from_canonical(
+ self.program.interner(),
+ goal.universes,
+ goal.canonical.clone(),
+ );
+ infer
+ .canonicalize(
+ self.program.interner(),
+ ConstrainedSubst {
+ subst,
+ constraints: Constraints::empty(self.program.interner()),
+ },
+ )
+ .quantified
+ }
+
+ pub(crate) fn program(&self) -> &dyn RustIrDatabase<I> {
+ self.program
+ }
+
+ pub(crate) fn max_size(&self) -> usize {
+ self.max_size
+ }
+
+ pub(crate) fn unification_database(&self) -> &dyn UnificationDatabase<I> {
+ self.program.unification_database()
+ }
+}
+
+pub trait ResolventOps<I: Interner> {
+ /// Combines the `goal` (instantiated within `infer`) with the
+ /// given program clause to yield the start of a new strand (a
+ /// canonical ex-clause).
+ ///
+ /// The bindings in `infer` are unaffected by this operation.
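+    ///
+    /// For example (illustrative sketch): resolving the goal
+    /// `Vec<u32>: Clone` against the program clause
+    ///
+    /// ```notrust
+    /// forall<T> { Vec<T>: Clone :- T: Clone }
+    /// ```
+    ///
+    /// unifies the clause head with the goal (binding `T := u32`) and
+    /// yields an ex-clause with the single subgoal `u32: Clone`.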
+ fn resolvent_clause(
+ &mut self,
+ ops: &dyn UnificationDatabase<I>,
+ interner: I,
+ environment: &Environment<I>,
+ goal: &DomainGoal<I>,
+ subst: &Substitution<I>,
+ clause: &ProgramClause<I>,
+ ) -> Fallible<ExClause<I>>;
+
+ fn apply_answer_subst(
+ &mut self,
+ interner: I,
+ unification_database: &dyn UnificationDatabase<I>,
+ ex_clause: &mut ExClause<I>,
+ selected_goal: &InEnvironment<Goal<I>>,
+ answer_table_goal: &Canonical<InEnvironment<Goal<I>>>,
+ canonical_answer_subst: Canonical<AnswerSubst<I>>,
+ ) -> Fallible<()>;
+}
+
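+// Used when aggregating answers: `may_invalidate` returns `true` if `self`
+// (the substitution from a newly discovered answer) could be unequal to
+// `subst` (the canonical substitution backing the current aggregate),
+// meaning that aggregating it might change the combined result.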
+trait SubstitutionExt<I: Interner> {
+ fn may_invalidate(&self, interner: I, subst: &Canonical<Substitution<I>>) -> bool;
+}
+
+impl<I: Interner> SubstitutionExt<I> for Substitution<I> {
+ fn may_invalidate(&self, interner: I, subst: &Canonical<Substitution<I>>) -> bool {
+ self.iter(interner)
+ .zip(subst.value.iter(interner))
+ .any(|(new, current)| MayInvalidate { interner }.aggregate_generic_args(new, current))
+ }
+}
+
+// This is a struct in case we need to add state at some point, as in `AntiUnifier`.
+struct MayInvalidate<I> {
+ interner: I,
+}
+
+impl<I: Interner> MayInvalidate<I> {
+ fn aggregate_generic_args(&mut self, new: &GenericArg<I>, current: &GenericArg<I>) -> bool {
+ let interner = self.interner;
+ match (new.data(interner), current.data(interner)) {
+ (GenericArgData::Ty(ty1), GenericArgData::Ty(ty2)) => self.aggregate_tys(ty1, ty2),
+ (GenericArgData::Lifetime(l1), GenericArgData::Lifetime(l2)) => {
+ self.aggregate_lifetimes(l1, l2)
+ }
+ (GenericArgData::Const(c1), GenericArgData::Const(c2)) => self.aggregate_consts(c1, c2),
+ (GenericArgData::Ty(_), _)
+ | (GenericArgData::Lifetime(_), _)
+ | (GenericArgData::Const(_), _) => panic!(
+ "mismatched parameter kinds: new={:?} current={:?}",
+ new, current
+ ),
+ }
+ }
+
+ /// Returns true if the two types could be unequal.
+ fn aggregate_tys(&mut self, new: &Ty<I>, current: &Ty<I>) -> bool {
+ let interner = self.interner;
+ match (new.kind(interner), current.kind(interner)) {
+ (_, TyKind::BoundVar(_)) => {
+ // If the aggregate solution already has an inference
+ // variable here, then no matter what type we produce,
+ // the aggregate cannot get 'more generalized' than it
+                // already is. So return false; we cannot invalidate.
+ //
+ // (Note that "inference variables" show up as *bound
+ // variables* here, because we are looking at the
+ // canonical form.)
+ false
+ }
+
+ (TyKind::BoundVar(_), _) => {
+ // If we see a type variable in the potential future
+ // solution, we have to be conservative. We don't know
+ // what type variable will wind up being! Remember
+ // that the future solution could be any instantiation
+ // of `ty0` -- or it could leave this variable
+ // unbound, if the result is true for all types.
+ //
+ // (Note that "inference variables" show up as *bound
+ // variables* here, because we are looking at the
+ // canonical form.)
+ true
+ }
+
+ (TyKind::InferenceVar(_, _), _) | (_, TyKind::InferenceVar(_, _)) => {
+ panic!(
+ "unexpected free inference variable in may-invalidate: {:?} vs {:?}",
+ new, current,
+ );
+ }
+
+ (TyKind::Placeholder(p1), TyKind::Placeholder(p2)) => {
+ self.aggregate_placeholders(p1, p2)
+ }
+
+ (
+ TyKind::Alias(AliasTy::Projection(proj1)),
+ TyKind::Alias(AliasTy::Projection(proj2)),
+ ) => self.aggregate_projection_tys(proj1, proj2),
+
+ (
+ TyKind::Alias(AliasTy::Opaque(opaque_ty1)),
+ TyKind::Alias(AliasTy::Opaque(opaque_ty2)),
+ ) => self.aggregate_opaque_ty_tys(opaque_ty1, opaque_ty2),
+
+ (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => {
+ self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ }
+ (
+ TyKind::AssociatedType(id_a, substitution_a),
+ TyKind::AssociatedType(id_b, substitution_b),
+ ) => self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b),
+ (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => scalar_a != scalar_b,
+ (TyKind::Str, TyKind::Str) => false,
+ (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => {
+ self.aggregate_name_and_substs(arity_a, substitution_a, arity_b, substitution_b)
+ }
+ (
+ TyKind::OpaqueType(id_a, substitution_a),
+ TyKind::OpaqueType(id_b, substitution_b),
+ ) => self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b),
+ (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => self.aggregate_tys(ty_a, ty_b),
+ (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => {
+ self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ }
+ (TyKind::Ref(id_a, lifetime_a, ty_a), TyKind::Ref(id_b, lifetime_b, ty_b)) => {
+ id_a != id_b
+ || self.aggregate_lifetimes(lifetime_a, lifetime_b)
+ || self.aggregate_tys(ty_a, ty_b)
+ }
+ (TyKind::Raw(id_a, ty_a), TyKind::Raw(id_b, ty_b)) => {
+ id_a != id_b || self.aggregate_tys(ty_a, ty_b)
+ }
+ (TyKind::Never, TyKind::Never) => false,
+ (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => {
+ self.aggregate_tys(ty_a, ty_b) || self.aggregate_consts(const_a, const_b)
+ }
+ (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => {
+ self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ }
+ (TyKind::Generator(id_a, substitution_a), TyKind::Generator(id_b, substitution_b)) => {
+ self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ }
+ (
+ TyKind::GeneratorWitness(id_a, substitution_a),
+ TyKind::GeneratorWitness(id_b, substitution_b),
+ ) => self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b),
+ (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => id_a != id_b,
+ (TyKind::Error, TyKind::Error) => false,
+
+ (_, _) => true,
+ }
+ }
+
+    /// Returns true if the two lifetimes could be unequal.
+ fn aggregate_lifetimes(&mut self, _: &Lifetime<I>, _: &Lifetime<I>) -> bool {
+ true
+ }
+
+ /// Returns true if the two consts could be unequal.
+ fn aggregate_consts(&mut self, new: &Const<I>, current: &Const<I>) -> bool {
+ let interner = self.interner;
+ let ConstData {
+ ty: new_ty,
+ value: new_value,
+ } = new.data(interner);
+ let ConstData {
+ ty: current_ty,
+ value: current_value,
+ } = current.data(interner);
+
+ if self.aggregate_tys(new_ty, current_ty) {
+ return true;
+ }
+
+ match (new_value, current_value) {
+ (_, ConstValue::BoundVar(_)) => {
+ // see comment in aggregate_tys
+ false
+ }
+
+ (ConstValue::BoundVar(_), _) => {
+ // see comment in aggregate_tys
+ true
+ }
+
+ (ConstValue::InferenceVar(_), _) | (_, ConstValue::InferenceVar(_)) => {
+ panic!(
+ "unexpected free inference variable in may-invalidate: {:?} vs {:?}",
+ new, current,
+ );
+ }
+
+ (ConstValue::Placeholder(p1), ConstValue::Placeholder(p2)) => {
+ self.aggregate_placeholders(p1, p2)
+ }
+
+ (ConstValue::Concrete(c1), ConstValue::Concrete(c2)) => {
+ !c1.const_eq(new_ty, c2, interner)
+ }
+
+ // The only variants left are placeholder vs. concrete, which can never be equal
+ (ConstValue::Placeholder(_), _) | (ConstValue::Concrete(_), _) => true,
+ }
+ }
+
+ fn aggregate_placeholders(
+ &mut self,
+ new: &PlaceholderIndex,
+ current: &PlaceholderIndex,
+ ) -> bool {
+ new != current
+ }
+
+ fn aggregate_projection_tys(
+ &mut self,
+ new: &ProjectionTy<I>,
+ current: &ProjectionTy<I>,
+ ) -> bool {
+ let ProjectionTy {
+ associated_ty_id: new_name,
+ substitution: new_substitution,
+ } = new;
+ let ProjectionTy {
+ associated_ty_id: current_name,
+ substitution: current_substitution,
+ } = current;
+
+ self.aggregate_name_and_substs(
+ new_name,
+ new_substitution,
+ current_name,
+ current_substitution,
+ )
+ }
+
+ fn aggregate_opaque_ty_tys(&mut self, new: &OpaqueTy<I>, current: &OpaqueTy<I>) -> bool {
+ let OpaqueTy {
+ opaque_ty_id: new_name,
+ substitution: new_substitution,
+ } = new;
+ let OpaqueTy {
+ opaque_ty_id: current_name,
+ substitution: current_substitution,
+ } = current;
+
+ self.aggregate_name_and_substs(
+ new_name,
+ new_substitution,
+ current_name,
+ current_substitution,
+ )
+ }
+
+ fn aggregate_name_and_substs<N>(
+ &mut self,
+ new_name: N,
+ new_substitution: &Substitution<I>,
+ current_name: N,
+ current_substitution: &Substitution<I>,
+ ) -> bool
+ where
+ N: Copy + Eq + Debug,
+ {
+ let interner = self.interner;
+ if new_name != current_name {
+ return true;
+ }
+
+ let name = new_name;
+
+ assert_eq!(
+ new_substitution.len(interner),
+ current_substitution.len(interner),
+ "does {:?} take {} substitution or {}? can't both be right",
+ name,
+ new_substitution.len(interner),
+ current_substitution.len(interner)
+ );
+
+ new_substitution
+ .iter(interner)
+ .zip(current_substitution.iter(interner))
+ .any(|(new, current)| self.aggregate_generic_args(new, current))
+ }
+}
diff --git a/vendor/chalk-engine/src/slg/aggregate.rs b/vendor/chalk-engine/src/slg/aggregate.rs
new file mode 100644
index 000000000..cce4e14dd
--- /dev/null
+++ b/vendor/chalk-engine/src/slg/aggregate.rs
@@ -0,0 +1,642 @@
+use crate::context::{self, AnswerResult};
+use crate::slg::SlgContextOps;
+use crate::slg::SubstitutionExt;
+use crate::CompleteAnswer;
+use chalk_ir::cast::Cast;
+use chalk_ir::interner::Interner;
+use chalk_ir::*;
+use chalk_solve::ext::*;
+use chalk_solve::infer::InferenceTable;
+use chalk_solve::solve::{Guidance, Solution};
+
+use std::fmt::Debug;
+
+/// Methods for combining solutions to yield an aggregate solution.
+pub trait AggregateOps<I: Interner> {
+ fn make_solution(
+ &self,
+ root_goal: &UCanonical<InEnvironment<Goal<I>>>,
+ answers: impl context::AnswerStream<I>,
+ should_continue: impl std::ops::Fn() -> bool,
+ ) -> Option<Solution<I>>;
+}
+
+/// Draws as many answers as it needs from `answers` (but
+/// no more!) in order to come up with a solution.
+impl<I: Interner> AggregateOps<I> for SlgContextOps<'_, I> {
+ fn make_solution(
+ &self,
+ root_goal: &UCanonical<InEnvironment<Goal<I>>>,
+ mut answers: impl context::AnswerStream<I>,
+ should_continue: impl std::ops::Fn() -> bool,
+ ) -> Option<Solution<I>> {
+ let interner = self.program.interner();
+ let CompleteAnswer { subst, ambiguous } = match answers.next_answer(&should_continue) {
+ AnswerResult::NoMoreSolutions => {
+ // No answers at all
+ return None;
+ }
+ AnswerResult::Answer(answer) => answer,
+ AnswerResult::Floundered => CompleteAnswer {
+ subst: self.identity_constrained_subst(root_goal),
+ ambiguous: true,
+ },
+ AnswerResult::QuantumExceeded => {
+ return Some(Solution::Ambig(Guidance::Unknown));
+ }
+ };
+
+ // Exactly 1 unconditional answer?
+ let next_answer = answers.peek_answer(&should_continue);
+ if next_answer.is_quantum_exceeded() {
+ if subst.value.subst.is_identity_subst(interner) {
+ return Some(Solution::Ambig(Guidance::Unknown));
+ } else {
+ return Some(Solution::Ambig(Guidance::Suggested(
+ subst.map(interner, |cs| cs.subst),
+ )));
+ }
+ }
+ if next_answer.is_no_more_solutions() && !ambiguous {
+ return Some(Solution::Unique(subst));
+ }
+
+ // Otherwise, we either have >1 answer, or else we have
+ // ambiguity. Either way, we are only going to be giving back
+ // **guidance**, and with guidance, the caller doesn't get
+ // back any region constraints. So drop them from our `subst`
+ // variable.
+ //
+ // FIXME-- there is actually a 3rd possibility. We could have
+ // >1 answer where all the answers have the same substitution,
+ // but different region constraints. We should collapse those
+ // cases into an `OR` region constraint at some point, but I
+ // leave that for future work. This is basically
+ // rust-lang/rust#21974.
+ let mut subst = subst.map(interner, |cs| cs.subst);
+
+ // Extract answers and merge them into `subst`. Stop once we have
+ // a trivial subst (or run out of answers).
+ let mut num_answers = 1;
+ let guidance = loop {
+ if subst.value.is_empty(interner) || is_trivial(interner, &subst) {
+ break Guidance::Unknown;
+ }
+
+ if !answers
+ .any_future_answer(|ref mut new_subst| new_subst.may_invalidate(interner, &subst))
+ {
+ break Guidance::Definite(subst);
+ }
+
+ if let Some(expected_answers) = self.expected_answers {
+ if num_answers >= expected_answers {
+ panic!("Too many answers for solution.");
+ }
+ }
+
+ let new_subst = match answers.next_answer(&should_continue) {
+ AnswerResult::Answer(answer1) => answer1.subst,
+ AnswerResult::Floundered => {
+ // FIXME: this doesn't trigger for any current tests
+ self.identity_constrained_subst(root_goal)
+ }
+ AnswerResult::NoMoreSolutions => {
+ break Guidance::Definite(subst);
+ }
+ AnswerResult::QuantumExceeded => {
+ break Guidance::Suggested(subst);
+ }
+ };
+ subst = merge_into_guidance(interner, &root_goal.canonical, subst, &new_subst);
+ num_answers += 1;
+ };
+
+ if let Some(expected_answers) = self.expected_answers {
+ assert_eq!(
+ expected_answers, num_answers,
+ "Not enough answers for solution."
+ );
+ }
+ Some(Solution::Ambig(guidance))
+ }
+}
+
+/// Given a current substitution used as guidance for `root_goal`, and
+/// a new possible answer to `root_goal`, returns a new set of
+/// guidance that encompasses both of them. This is often more general
+/// than the old guidance. For example, if we had a guidance of `?0 =
+/// u32` and the new answer is `?0 = i32`, then the guidance would
+/// become `?0 = ?X` (where `?X` is some fresh variable).
+fn merge_into_guidance<I: Interner>(
+ interner: I,
+ root_goal: &Canonical<InEnvironment<Goal<I>>>,
+ guidance: Canonical<Substitution<I>>,
+ answer: &Canonical<ConstrainedSubst<I>>,
+) -> Canonical<Substitution<I>> {
+ let mut infer = InferenceTable::new();
+ let Canonical {
+ value: ConstrainedSubst {
+ subst: subst1,
+ constraints: _,
+ },
+ binders: _,
+ } = answer;
+
+ // Collect the types that the two substitutions have in
+ // common.
+ let aggr_generic_args: Vec<_> = guidance
+ .value
+ .iter(interner)
+ .zip(subst1.iter(interner))
+ .enumerate()
+ .map(|(index, (p1, p2))| {
+ // We have two values for some variable X that
+ // appears in the root goal. Find out the universe
+ // of X.
+ let universe = *root_goal.binders.as_slice(interner)[index].skip_kind();
+
+ match p1.data(interner) {
+ GenericArgData::Ty(_) => (),
+ GenericArgData::Lifetime(_) => {
+ // Ignore the lifetimes from the substitution: we're just
+ // creating guidance here anyway.
+ return infer
+ .new_variable(universe)
+ .to_lifetime(interner)
+ .cast(interner);
+ }
+ GenericArgData::Const(_) => (),
+ };
+
+ // Combine the two types into a new type.
+ let mut aggr = AntiUnifier {
+ infer: &mut infer,
+ universe,
+ interner,
+ };
+ aggr.aggregate_generic_args(p1, p2)
+ })
+ .collect();
+
+ let aggr_subst = Substitution::from_iter(interner, aggr_generic_args);
+
+ infer.canonicalize(interner, aggr_subst).quantified
+}
+
+fn is_trivial<I: Interner>(interner: I, subst: &Canonical<Substitution<I>>) -> bool {
+ // A subst is trivial if it maps each variable back onto itself; specifically:
+ subst
+ .value
+ .iter(interner)
+ .enumerate()
+ .all(|(index, parameter)| {
+ let is_trivial = |b: Option<BoundVar>| match b {
+ None => false,
+ Some(bound_var) => {
+ if let Some(index1) = bound_var.index_if_innermost() {
+ index == index1
+ } else {
+ false
+ }
+ }
+ };
+
+ match parameter.data(interner) {
+ // All types and consts are mapped to distinct variables. Since this
+ // has been canonicalized, those will also be the first N
+ // variables.
+ GenericArgData::Ty(t) => is_trivial(t.bound_var(interner)),
+ GenericArgData::Const(t) => is_trivial(t.bound_var(interner)),
+
+ // And no lifetime mappings. (This is too strict, but we never
+ // produce substs with lifetimes.)
+ GenericArgData::Lifetime(_) => false,
+ }
+ })
+}
+
+/// [Anti-unification] is the act of taking two things that do not
+/// unify and finding a minimal generalization of them. So for
+/// example `Vec<u32>` anti-unified with `Vec<i32>` might be
+/// `Vec<?X>`. This is a **very simplistic** anti-unifier.
+///
+/// NOTE: The values here are canonicalized, but the output is not; this means
+/// that any escaping bound variables that we see have to be replaced with
+/// inference variables.
+///
+/// [Anti-unification]: https://en.wikipedia.org/wiki/Anti-unification_(computer_science)
+struct AntiUnifier<'infer, I: Interner> {
+ infer: &'infer mut InferenceTable<I>,
+ universe: UniverseIndex,
+ interner: I,
+}
+
+impl<I: Interner> AntiUnifier<'_, I> {
+ fn aggregate_tys(&mut self, ty0: &Ty<I>, ty1: &Ty<I>) -> Ty<I> {
+ let interner = self.interner;
+ match (ty0.kind(interner), ty1.kind(interner)) {
+ // If we see bound things on either side, just drop in a
+ // fresh variable. This means we will sometimes
+ // overgeneralize. So for example if we have two
+ // solutions that are both `(X, X)`, we just produce `(Y,
+ // Z)` in all cases.
+ (TyKind::InferenceVar(_, _), TyKind::InferenceVar(_, _)) => self.new_ty_variable(),
+
+ // Ugh. Aggregating two types like `for<'a> fn(&'a u32,
+ // &'a u32)` and `for<'a, 'b> fn(&'a u32, &'b u32)` seems
+ // kinda hard. Don't try to be smart for now, just plop a
+ // variable in there and be done with it.
+ // This also ensures that any bound variables we do see
+ // were bound by `Canonical`.
+ (TyKind::BoundVar(_), TyKind::BoundVar(_))
+ | (TyKind::Function(_), TyKind::Function(_))
+ | (TyKind::Dyn(_), TyKind::Dyn(_)) => self.new_ty_variable(),
+
+ (
+ TyKind::Alias(AliasTy::Projection(proj1)),
+ TyKind::Alias(AliasTy::Projection(proj2)),
+ ) => self.aggregate_projection_tys(proj1, proj2),
+
+ (
+ TyKind::Alias(AliasTy::Opaque(opaque_ty1)),
+ TyKind::Alias(AliasTy::Opaque(opaque_ty2)),
+ ) => self.aggregate_opaque_ty_tys(opaque_ty1, opaque_ty2),
+
+ (TyKind::Placeholder(placeholder1), TyKind::Placeholder(placeholder2)) => {
+ self.aggregate_placeholder_tys(placeholder1, placeholder2)
+ }
+
+ (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => self
+ .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| TyKind::Adt(name, substitution).intern(interner))
+ .unwrap_or_else(|| self.new_ty_variable()),
+ (
+ TyKind::AssociatedType(id_a, substitution_a),
+ TyKind::AssociatedType(id_b, substitution_b),
+ ) => self
+ .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| {
+ TyKind::AssociatedType(name, substitution).intern(interner)
+ })
+ .unwrap_or_else(|| self.new_ty_variable()),
+ (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => {
+ if scalar_a == scalar_b {
+ TyKind::Scalar(*scalar_a).intern(interner)
+ } else {
+ self.new_ty_variable()
+ }
+ }
+ (TyKind::Str, TyKind::Str) => TyKind::Str.intern(interner),
+ (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => {
+ self.aggregate_name_and_substs(arity_a, substitution_a, arity_b, substitution_b)
+ .map(|(&name, substitution)| TyKind::Tuple(name, substitution).intern(interner))
+ .unwrap_or_else(|| self.new_ty_variable())
+ }
+ (
+ TyKind::OpaqueType(id_a, substitution_a),
+ TyKind::OpaqueType(id_b, substitution_b),
+ ) => self
+ .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| {
+ TyKind::OpaqueType(name, substitution).intern(interner)
+ })
+ .unwrap_or_else(|| self.new_ty_variable()),
+ (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => {
+ TyKind::Slice(self.aggregate_tys(ty_a, ty_b)).intern(interner)
+ }
+ (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => self
+ .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| TyKind::FnDef(name, substitution).intern(interner))
+ .unwrap_or_else(|| self.new_ty_variable()),
+ (TyKind::Ref(id_a, lifetime_a, ty_a), TyKind::Ref(id_b, lifetime_b, ty_b)) => {
+ if id_a == id_b {
+ TyKind::Ref(
+ *id_a,
+ self.aggregate_lifetimes(lifetime_a, lifetime_b),
+ self.aggregate_tys(ty_a, ty_b),
+ )
+ .intern(interner)
+ } else {
+ self.new_ty_variable()
+ }
+ }
+ (TyKind::Raw(id_a, ty_a), TyKind::Raw(id_b, ty_b)) => {
+ if id_a == id_b {
+ TyKind::Raw(*id_a, self.aggregate_tys(ty_a, ty_b)).intern(interner)
+ } else {
+ self.new_ty_variable()
+ }
+ }
+ (TyKind::Never, TyKind::Never) => TyKind::Never.intern(interner),
+ (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => TyKind::Array(
+ self.aggregate_tys(ty_a, ty_b),
+ self.aggregate_consts(const_a, const_b),
+ )
+ .intern(interner),
+ (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => self
+ .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| TyKind::Closure(name, substitution).intern(interner))
+ .unwrap_or_else(|| self.new_ty_variable()),
+ (TyKind::Generator(id_a, substitution_a), TyKind::Generator(id_b, substitution_b)) => {
+ self.aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| {
+ TyKind::Generator(name, substitution).intern(interner)
+ })
+ .unwrap_or_else(|| self.new_ty_variable())
+ }
+ (
+ TyKind::GeneratorWitness(id_a, substitution_a),
+ TyKind::GeneratorWitness(id_b, substitution_b),
+ ) => self
+ .aggregate_name_and_substs(id_a, substitution_a, id_b, substitution_b)
+ .map(|(&name, substitution)| {
+ TyKind::GeneratorWitness(name, substitution).intern(interner)
+ })
+ .unwrap_or_else(|| self.new_ty_variable()),
+ (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => {
+ if id_a == id_b {
+ TyKind::Foreign(*id_a).intern(interner)
+ } else {
+ self.new_ty_variable()
+ }
+ }
+ (TyKind::Error, TyKind::Error) => TyKind::Error.intern(interner),
+
+ (_, _) => self.new_ty_variable(),
+ }
+ }
+
+ fn aggregate_placeholder_tys(
+ &mut self,
+ index1: &PlaceholderIndex,
+ index2: &PlaceholderIndex,
+ ) -> Ty<I> {
+ let interner = self.interner;
+ if index1 != index2 {
+ self.new_ty_variable()
+ } else {
+ TyKind::Placeholder(*index1).intern(interner)
+ }
+ }
+
+ fn aggregate_projection_tys(
+ &mut self,
+ proj1: &ProjectionTy<I>,
+ proj2: &ProjectionTy<I>,
+ ) -> Ty<I> {
+ let interner = self.interner;
+ let ProjectionTy {
+ associated_ty_id: name1,
+ substitution: substitution1,
+ } = proj1;
+ let ProjectionTy {
+ associated_ty_id: name2,
+ substitution: substitution2,
+ } = proj2;
+
+ self.aggregate_name_and_substs(name1, substitution1, name2, substitution2)
+ .map(|(&associated_ty_id, substitution)| {
+ TyKind::Alias(AliasTy::Projection(ProjectionTy {
+ associated_ty_id,
+ substitution,
+ }))
+ .intern(interner)
+ })
+ .unwrap_or_else(|| self.new_ty_variable())
+ }
+
+ fn aggregate_opaque_ty_tys(
+ &mut self,
+ opaque_ty1: &OpaqueTy<I>,
+ opaque_ty2: &OpaqueTy<I>,
+ ) -> Ty<I> {
+ let OpaqueTy {
+ opaque_ty_id: name1,
+ substitution: substitution1,
+ } = opaque_ty1;
+ let OpaqueTy {
+ opaque_ty_id: name2,
+ substitution: substitution2,
+ } = opaque_ty2;
+
+ self.aggregate_name_and_substs(name1, substitution1, name2, substitution2)
+ .map(|(&opaque_ty_id, substitution)| {
+ TyKind::Alias(AliasTy::Opaque(OpaqueTy {
+ opaque_ty_id,
+ substitution,
+ }))
+ .intern(self.interner)
+ })
+ .unwrap_or_else(|| self.new_ty_variable())
+ }
+
+ fn aggregate_name_and_substs<N>(
+ &mut self,
+ name1: N,
+ substitution1: &Substitution<I>,
+ name2: N,
+ substitution2: &Substitution<I>,
+ ) -> Option<(N, Substitution<I>)>
+ where
+ N: Copy + Eq + Debug,
+ {
+ let interner = self.interner;
+ if name1 != name2 {
+ return None;
+ }
+
+ let name = name1;
+
+ assert_eq!(
+ substitution1.len(interner),
+ substitution2.len(interner),
+ "does {:?} take {} substitution or {}? can't both be right",
+ name,
+ substitution1.len(interner),
+ substitution2.len(interner)
+ );
+
+ let substitution = Substitution::from_iter(
+ interner,
+ substitution1
+ .iter(interner)
+ .zip(substitution2.iter(interner))
+ .map(|(p1, p2)| self.aggregate_generic_args(p1, p2)),
+ );
+
+ Some((name, substitution))
+ }
+
+ fn aggregate_generic_args(&mut self, p1: &GenericArg<I>, p2: &GenericArg<I>) -> GenericArg<I> {
+ let interner = self.interner;
+ match (p1.data(interner), p2.data(interner)) {
+ (GenericArgData::Ty(ty1), GenericArgData::Ty(ty2)) => {
+ self.aggregate_tys(ty1, ty2).cast(interner)
+ }
+ (GenericArgData::Lifetime(l1), GenericArgData::Lifetime(l2)) => {
+ self.aggregate_lifetimes(l1, l2).cast(interner)
+ }
+ (GenericArgData::Const(c1), GenericArgData::Const(c2)) => {
+ self.aggregate_consts(c1, c2).cast(interner)
+ }
+ (GenericArgData::Ty(_), _)
+ | (GenericArgData::Lifetime(_), _)
+ | (GenericArgData::Const(_), _) => {
+ panic!("mismatched parameter kinds: p1={:?} p2={:?}", p1, p2)
+ }
+ }
+ }
+
+ fn aggregate_lifetimes(&mut self, l1: &Lifetime<I>, l2: &Lifetime<I>) -> Lifetime<I> {
+ let interner = self.interner;
+ match (l1.data(interner), l2.data(interner)) {
+ (LifetimeData::Phantom(void, ..), _) | (_, LifetimeData::Phantom(void, ..)) => {
+ match *void {}
+ }
+ (LifetimeData::BoundVar(..), _) | (_, LifetimeData::BoundVar(..)) => {
+ self.new_lifetime_variable()
+ }
+ _ => {
+ if l1 == l2 {
+ l1.clone()
+ } else {
+ self.new_lifetime_variable()
+ }
+ }
+ }
+ }
+
+ fn aggregate_consts(&mut self, c1: &Const<I>, c2: &Const<I>) -> Const<I> {
+ let interner = self.interner;
+
+ // It would be nice to check that c1 and c2 have the same type, even though
+ // at this stage of solving they should already have the same type.
+
+ let ConstData {
+ ty: c1_ty,
+ value: c1_value,
+ } = c1.data(interner);
+ let ConstData {
+ ty: _c2_ty,
+ value: c2_value,
+ } = c2.data(interner);
+
+ let ty = c1_ty.clone();
+
+ match (c1_value, c2_value) {
+ (ConstValue::InferenceVar(_), _) | (_, ConstValue::InferenceVar(_)) => {
+ self.new_const_variable(ty)
+ }
+
+ (ConstValue::BoundVar(_), _) | (_, ConstValue::BoundVar(_)) => {
+ self.new_const_variable(ty)
+ }
+
+ (ConstValue::Placeholder(_), ConstValue::Placeholder(_)) => {
+ if c1 == c2 {
+ c1.clone()
+ } else {
+ self.new_const_variable(ty)
+ }
+ }
+ (ConstValue::Concrete(e1), ConstValue::Concrete(e2)) => {
+ if e1.const_eq(&ty, e2, interner) {
+ c1.clone()
+ } else {
+ self.new_const_variable(ty)
+ }
+ }
+
+ (ConstValue::Placeholder(_), _) | (_, ConstValue::Placeholder(_)) => {
+ self.new_const_variable(ty)
+ }
+ }
+ }
+
+ fn new_ty_variable(&mut self) -> Ty<I> {
+ let interner = self.interner;
+ self.infer.new_variable(self.universe).to_ty(interner)
+ }
+
+ fn new_lifetime_variable(&mut self) -> Lifetime<I> {
+ let interner = self.interner;
+ self.infer.new_variable(self.universe).to_lifetime(interner)
+ }
+
+ fn new_const_variable(&mut self, ty: Ty<I>) -> Const<I> {
+ let interner = self.interner;
+ self.infer
+ .new_variable(self.universe)
+ .to_const(interner, ty)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use crate::slg::aggregate::AntiUnifier;
+ use chalk_integration::{arg, ty};
+ use chalk_ir::UniverseIndex;
+ use chalk_solve::infer::InferenceTable;
+
+ /// Test the equivalent of `Vec<i32>` vs `Vec<u32>`
+ #[test]
+ fn vec_i32_vs_vec_u32() {
+ use chalk_integration::interner::ChalkIr;
+ let mut infer: InferenceTable<ChalkIr> = InferenceTable::new();
+ let mut anti_unifier = AntiUnifier {
+ infer: &mut infer,
+ universe: UniverseIndex::root(),
+ interner: ChalkIr,
+ };
+
+ let ty = anti_unifier.aggregate_tys(
+ &ty!(apply (item 0) (apply (item 1))),
+ &ty!(apply (item 0) (apply (item 2))),
+ );
+ assert_eq!(ty!(apply (item 0) (infer 0)), ty);
+ }
+
+ /// Test the equivalent of `Vec<i32>` vs `Vec<i32>`
+ #[test]
+ fn vec_i32_vs_vec_i32() {
+ use chalk_integration::interner::ChalkIr;
+ let interner = ChalkIr;
+ let mut infer: InferenceTable<ChalkIr> = InferenceTable::new();
+ let mut anti_unifier = AntiUnifier {
+ interner,
+ infer: &mut infer,
+ universe: UniverseIndex::root(),
+ };
+
+ let ty = anti_unifier.aggregate_tys(
+ &ty!(apply (item 0) (apply (item 1))),
+ &ty!(apply (item 0) (apply (item 1))),
+ );
+ assert_eq!(ty!(apply (item 0) (apply (item 1))), ty);
+ }
+
+ /// Test the equivalent of `Vec<X>` vs `Vec<Y>`
+ #[test]
+ fn vec_x_vs_vec_y() {
+ use chalk_integration::interner::ChalkIr;
+ let interner = ChalkIr;
+ let mut infer: InferenceTable<ChalkIr> = InferenceTable::new();
+ let mut anti_unifier = AntiUnifier {
+ interner,
+ infer: &mut infer,
+ universe: UniverseIndex::root(),
+ };
+
+ // Note that the `infer 0` and `infer 1` in these types would be
+ // referring to canonicalized free variables, not variables in
+ // `infer`.
+ let ty = anti_unifier.aggregate_tys(
+ &ty!(apply (item 0) (infer 0)),
+ &ty!(apply (item 0) (infer 1)),
+ );
+
+ // But this `infer 0` is from `infer`.
+ assert_eq!(ty!(apply (item 0) (infer 0)), ty);
+ }
+}
diff --git a/vendor/chalk-engine/src/slg/resolvent.rs b/vendor/chalk-engine/src/slg/resolvent.rs
new file mode 100644
index 000000000..c6d0f8d5c
--- /dev/null
+++ b/vendor/chalk-engine/src/slg/resolvent.rs
@@ -0,0 +1,731 @@
+use crate::normalize_deep::DeepNormalizer;
+use crate::slg::ResolventOps;
+use crate::{ExClause, Literal, TimeStamp};
+use chalk_ir::cast::Caster;
+use chalk_ir::fold::shift::Shift;
+use chalk_ir::fold::Fold;
+use chalk_ir::interner::{HasInterner, Interner};
+use chalk_ir::zip::{Zip, Zipper};
+use chalk_ir::*;
+use chalk_solve::infer::InferenceTable;
+use tracing::{debug, instrument};
+
+///////////////////////////////////////////////////////////////////////////
+// SLG RESOLVENTS
+//
+// The "SLG Resolvent" is used to combine a *goal* G with some
+// clause or answer *C*. It unifies the goal's selected literal
+// with the clause and then inserts the clause's conditions into
+// the goal's list of things to prove, basically. Although this is
+// one operation in EWFS, we have specialized variants for merging
+// a program clause and an answer (though they share some code in
+// common).
+//
+// Terminology note: The NFTD and RR papers use the term
+// "resolvent" to mean both the factor and the resolvent, but EWFS
+// distinguishes the two. We follow EWFS here since -- in the code
+// -- we tend to know whether there are delayed literals or not,
+// and hence to know which code path we actually want.
+//
+// From EWFS:
+//
+// Let G be an X-clause A :- D | L1,...Ln, where n > 0, and let Li be the selected atom.
+//
+// Let C be an X-clause with no delayed literals. Let
+//
+// C' = A' :- L'1...L'm
+//
+// be a variant of C such that G and C' have no variables in
+// common.
+//
+// Let Li and A' be unified with MGU S.
+//
+// Then:
+//
+// S(A :- D | L1...Li-1, L'1...L'm, Li+1...Ln)
+//
+// is the SLG resolvent of G with C.
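+//
+// As a rough, illustrative example: if the selected literal of G is
+// `Implemented(?T: Debug)` and C is the clause
+// `Implemented(Vec<X>: Debug) :- Implemented(X: Debug)`, then unifying the
+// selected literal with the clause head yields `?T = Vec<?X>`, and the
+// clause's condition `Implemented(?X: Debug)` is pushed onto G's list of
+// subgoals.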
+
+impl<I: Interner> ResolventOps<I> for InferenceTable<I> {
+ /// Applies the SLG resolvent algorithm to incorporate a program
+ /// clause into the main X-clause, producing a new X-clause that
+ /// must be solved.
+ ///
+ /// # Parameters
+ ///
+ /// - `goal` is the goal G that we are trying to solve
+ /// - `clause` is the program clause that may be useful to that end
+ #[instrument(level = "debug", skip(self, interner, environment, subst))]
+ fn resolvent_clause(
+ &mut self,
+ db: &dyn UnificationDatabase<I>,
+ interner: I,
+ environment: &Environment<I>,
+ goal: &DomainGoal<I>,
+ subst: &Substitution<I>,
+ clause: &ProgramClause<I>,
+ ) -> Fallible<ExClause<I>> {
+ // Relating the above description to our situation:
+ //
+ // - `goal` G, except with binders for any existential variables.
+ // - Also, we always select the first literal in `ex_clause.literals`, so `i` is 0.
+ // - `clause` is C, except with binders for any existential variables.
+
+ // C' in the description above is `consequence :- conditions`.
+ //
+ // Note that G and C' have no variables in common.
+ let ProgramClauseImplication {
+ consequence,
+ conditions,
+ constraints,
+ priority: _,
+ } = {
+ let ProgramClauseData(implication) = clause.data(interner);
+
+ self.instantiate_binders_existentially(interner, implication.clone())
+ };
+ debug!(?consequence, ?conditions, ?constraints);
+
+ // Unify the selected literal Li with C'.
+ let unification_result = self.relate(
+ interner,
+ db,
+ environment,
+ Variance::Invariant,
+ goal,
+ &consequence,
+ )?;
+
+ // Final X-clause that we will return.
+ let mut ex_clause = ExClause {
+ subst: subst.clone(),
+ ambiguous: false,
+ constraints: vec![],
+ subgoals: vec![],
+ delayed_subgoals: vec![],
+ answer_time: TimeStamp::default(),
+ floundered_subgoals: vec![],
+ };
+
+ // Add the subgoals/region-constraints that unification gave us.
+ ex_clause.subgoals.extend(
+ unification_result
+ .goals
+ .into_iter()
+ .casted(interner)
+ .map(Literal::Positive),
+ );
+
+ ex_clause
+ .constraints
+ .extend(constraints.as_slice(interner).to_owned());
+
+ // Add the `conditions` from the program clause into the result too.
+ ex_clause
+ .subgoals
+ .extend(conditions.iter(interner).map(|c| match c.data(interner) {
+ GoalData::Not(c1) => {
+ Literal::Negative(InEnvironment::new(environment, Goal::clone(c1)))
+ }
+ _ => Literal::Positive(InEnvironment::new(environment, Goal::clone(c))),
+ }));
+
+ Ok(ex_clause)
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // apply_answer_subst
+ //
+ // Apply answer subst has the job of "plugging in" the answer to a
+ // query into the pending ex-clause. To see how it works, it's worth stepping
+ // up one level. Imagine that first we are trying to prove a goal A:
+ //
+ // A :- T: Foo<Vec<?U>>, ?U: Bar
+ //
+ // this spawns a subgoal `T: Foo<Vec<?0>>`, and it's this subgoal that
+ // has now produced an answer `?0 = u32`. When the goal A spawned the
+ // subgoal, it will also have registered a `PendingExClause` with its
+ // current state. At the point where *this* method has been invoked,
+ // that pending ex-clause has been instantiated with fresh variables and setup,
+ // so we have four bits of incoming information:
+ //
+ // - `ex_clause`, which is the remaining stuff to prove for the goal A.
+ // Here, the inference variable `?U` has been instantiated with a fresh variable
+ // `?X`.
+ // - `A :- ?X: Bar`
+ // - `selected_goal`, which is the thing we were trying to prove when we
+ // spawned the subgoal. It shares inference variables with `ex_clause`.
+ // - `T: Foo<Vec<?X>>`
+ // - `answer_table_goal`, which is the subgoal in canonical form:
+ // - `for<type> T: Foo<Vec<?0>>`
+ // - `canonical_answer_subst`, which is an answer to `answer_table_goal`.
+ // - `[?0 = u32]`
+ //
+ // In this case, this function will (a) unify `u32` and `?X` and then
+ // (b) return `ex_clause` (extended possibly with new region constraints
+ // and subgoals).
+ //
+ // One way to do this would be to (a) substitute
+ // `canonical_answer_subst` into `answer_table_goal` (yielding `T:
+ // Foo<Vec<u32>>`) and then (b) instantiate the result with fresh
+ // variables (no effect in this instance) and then (c) unify that with
+ // `selected_goal` (yielding, indirectly, that `?X = u32`). But that
+ // is not what we do: it's inefficient, to start, but it also causes
+ // problems because unification of projections can make new
+ // sub-goals. That is, even if the answers don't involve any
+ // projections, the table goals might, and this can create an infinite
+ // loop (see also #74).
+ //
+ // What we do instead is to (a) instantiate the substitution, which
+ // may have free variables in it (in this case, it would not, and the
+ // instantiation would have no effect) and then (b) zip
+ // `answer_table_goal` and `selected_goal` without having done any
+ // substitution. After all, these ought to be basically the same,
+ // since `answer_table_goal` was created by canonicalizing (and
+ // possibly truncating, but we'll get to that later)
+ // `selected_goal`. Then, whenever we reach a "free variable" in
+ // `answer_table_goal`, say `?0`, we go to the instantiated answer
+ // substitution and lookup the result (in this case, `u32`). We take
+ // that result and unify it with whatever we find in `selected_goal`
+ // (in this case, `?X`).
+ //
+ // Let's cover then some corner cases. First off, what is this
+ // business of instantiating the answer? Well, the answer may not be a
+ // simple type like `u32`, it could be a "family" of types, like
+ // `for<type> Vec<?0>` -- i.e., `Vec<X>: Bar` for *any* `X`. In that
+ // case, the instantiation would produce a substitution `[?0 :=
+ // Vec<?Y>]` (note that the key is not affected, just the value). So
+ // when we do the unification, instead of unifying `?X = u32`, we
+ // would unify `?X = Vec<?Y>`.
+ //
+ // Next, truncation. One key thing is that the `answer_table_goal` may
+ // not be *exactly* the same as the `selected_goal` -- we will
+ // truncate it if it gets too deep. So, in our example, it may be that
+ // instead of `answer_table_goal` being `for<type> T: Foo<Vec<?0>>`,
+ // it could have been truncated to `for<type> T: Foo<?0>` (which is a
+ // more general goal). In that case, let's say that the answer is
+ // still `[?0 = u32]`, meaning that `T: Foo<u32>` is true (which isn't
+ // actually interesting to our original goal). When we do the zip
+ // then, we will encounter `?0` in the `answer_table_goal` and pair
+ // that with `Vec<?X>` from the pending goal. We will attempt to unify
+ // `Vec<?X>` with `u32` (from the substitution), which will fail. That
+ // failure will get propagated back up.
+
+ #[instrument(level = "debug", skip(self, interner))]
+ fn apply_answer_subst(
+ &mut self,
+ interner: I,
+ unification_database: &dyn UnificationDatabase<I>,
+ ex_clause: &mut ExClause<I>,
+ selected_goal: &InEnvironment<Goal<I>>,
+ answer_table_goal: &Canonical<InEnvironment<Goal<I>>>,
+ canonical_answer_subst: Canonical<AnswerSubst<I>>,
+ ) -> Fallible<()> {
+ debug!(selected_goal = ?DeepNormalizer::normalize_deep(self, interner, selected_goal.clone()));
+
+ // C' is now `answer`. No variables in common with G.
+ let AnswerSubst {
+ subst: answer_subst,
+
+ // Assuming unification succeeds, we incorporate the
+ // region constraints from the answer into the result;
+ // we'll need them if this answer (which is not yet known
+ // to be true) winds up being true, and otherwise (if the
+ // answer is false or unknown) it doesn't matter.
+ constraints: answer_constraints,
+
+ delayed_subgoals,
+ } = self.instantiate_canonical(interner, canonical_answer_subst);
+
+ AnswerSubstitutor::substitute(
+ interner,
+ unification_database,
+ self,
+ &selected_goal.environment,
+ &answer_subst,
+ ex_clause,
+ &answer_table_goal.value,
+ selected_goal,
+ )?;
+ ex_clause
+ .constraints
+ .extend(answer_constraints.as_slice(interner).to_vec());
+ // at that point we should only have goals that stemmed
+ // from non-trivial self cycles
+ ex_clause.delayed_subgoals.extend(delayed_subgoals);
+ Ok(())
+ }
+}
+
+struct AnswerSubstitutor<'t, I: Interner> {
+ table: &'t mut InferenceTable<I>,
+ environment: &'t Environment<I>,
+ answer_subst: &'t Substitution<I>,
+
+ /// Tracks the De Bruijn index of the first binder that is outside
+ /// the term we are traversing. This starts as `DebruijnIndex::INNERMOST`,
+ /// since we have not yet traversed *any* binders, but when we visit
+ /// the inside of a binder, it would be incremented.
+ ///
+ /// Example: If we are visiting `(for<type> A, B, C, for<type> for<type> D)`,
+ /// then this would be:
+ ///
+ /// * `1`, when visiting `A`,
+ /// * `0`, when visiting `B` and `C`,
+ /// * `2`, when visiting `D`.
+ outer_binder: DebruijnIndex,
+
+ ex_clause: &'t mut ExClause<I>,
+ interner: I,
+ unification_database: &'t dyn UnificationDatabase<I>,
+}
+
+impl<I: Interner> AnswerSubstitutor<'_, I> {
+ fn substitute<T: Zip<I>>(
+ interner: I,
+ unification_database: &dyn UnificationDatabase<I>,
+ table: &mut InferenceTable<I>,
+ environment: &Environment<I>,
+ answer_subst: &Substitution<I>,
+ ex_clause: &mut ExClause<I>,
+ answer: &T,
+ pending: &T,
+ ) -> Fallible<()> {
+ let mut this = AnswerSubstitutor {
+ interner,
+ unification_database,
+ table,
+ environment,
+ answer_subst,
+ ex_clause,
+ outer_binder: DebruijnIndex::INNERMOST,
+ };
+ Zip::zip_with(&mut this, Variance::Invariant, answer, pending)?;
+ Ok(())
+ }
+
+ fn unify_free_answer_var(
+ &mut self,
+ interner: I,
+ db: &dyn UnificationDatabase<I>,
+ variance: Variance,
+ answer_var: BoundVar,
+ pending: GenericArgData<I>,
+ ) -> Fallible<bool> {
+ let answer_index = match answer_var.index_if_bound_at(self.outer_binder) {
+ Some(index) => index,
+
+ // This variable is bound in the answer, not free, so it
+ // doesn't represent a reference into the answer substitution.
+ None => return Ok(false),
+ };
+
+ let answer_param = self.answer_subst.at(interner, answer_index);
+
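+ // The answer substitution lives outside any binders we have traversed,
+ // so shift the pending value out past those binders before relating it
+ // to the answer.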
+ let pending_shifted = pending
+ .shifted_out_to(interner, self.outer_binder)
+ .expect("truncate extracted a pending value that references internal binder");
+
+ let result = self.table.relate(
+ interner,
+ db,
+ self.environment,
+ variance,
+ answer_param,
+ &GenericArg::new(interner, pending_shifted),
+ )?;
+
+ self.ex_clause.subgoals.extend(
+ result
+ .goals
+ .into_iter()
+ .casted(interner)
+ .map(Literal::Positive),
+ );
+
+ Ok(true)
+ }
+
+ /// When we encounter a variable in the answer goal, we first try
+ /// `unify_free_answer_var`. Assuming that this fails, the
+ /// variable must be a bound variable in the answer goal -- in
+ /// that case, there should be a corresponding bound variable in
+ /// the pending goal. This bit of code just checks that latter
+ /// case.
+ fn assert_matching_vars(
+ &mut self,
+ answer_var: BoundVar,
+ pending_var: BoundVar,
+ ) -> Fallible<()> {
+ let BoundVar {
+ debruijn: answer_depth,
+ index: answer_index,
+ } = answer_var;
+ let BoundVar {
+ debruijn: pending_depth,
+ index: pending_index,
+ } = pending_var;
+
+ // Both bound variables are bound within the term we are matching
+ assert!(answer_depth.within(self.outer_binder));
+
+ // They are bound at the same (relative) depth
+ assert_eq!(answer_depth, pending_depth);
+
+ // They are bound at the same index within the binder
+ assert_eq!(answer_index, pending_index,);
+
+ Ok(())
+ }
+}
+
+impl<'i, I: Interner> Zipper<I> for AnswerSubstitutor<'i, I> {
+ fn zip_tys(&mut self, variance: Variance, answer: &Ty<I>, pending: &Ty<I>) -> Fallible<()> {
+ let interner = self.interner;
+
+ if let Some(pending) = self.table.normalize_ty_shallow(interner, pending) {
+ return Zip::zip_with(self, variance, answer, &pending);
+ }
+
+ // If the answer has a variable here, then this is one of the
+ // "inputs" to the subgoal table. We need to extract the
+ // resulting answer that the subgoal found and unify it with
+ // the value from our "pending subgoal".
+ if let TyKind::BoundVar(answer_depth) = answer.kind(interner) {
+ if self.unify_free_answer_var(
+ interner,
+ self.unification_database,
+ variance,
+ *answer_depth,
+ GenericArgData::Ty(pending.clone()),
+ )? {
+ return Ok(());
+ }
+ }
+
+ // Otherwise, the answer and the selected subgoal ought to be a perfect match for
+ // one another.
+ match (answer.kind(interner), pending.kind(interner)) {
+ (TyKind::BoundVar(answer_depth), TyKind::BoundVar(pending_depth)) => {
+ self.assert_matching_vars(*answer_depth, *pending_depth)
+ }
+
+ (TyKind::Dyn(answer), TyKind::Dyn(pending)) => {
+ Zip::zip_with(self, variance, answer, pending)
+ }
+
+ (TyKind::Alias(answer), TyKind::Alias(pending)) => {
+ Zip::zip_with(self, variance, answer, pending)
+ }
+
+ (TyKind::Placeholder(answer), TyKind::Placeholder(pending)) => {
+ Zip::zip_with(self, variance, answer, pending)
+ }
+
+ (TyKind::Function(answer), TyKind::Function(pending)) => Zip::zip_with(
+ self,
+ variance,
+ &answer.clone().into_binders(interner),
+ &pending.clone().into_binders(interner),
+ ),
+
+ (TyKind::InferenceVar(_, _), _) | (_, TyKind::InferenceVar(_, _)) => panic!(
+ "unexpected inference var in answer `{:?}` or pending goal `{:?}`",
+ answer, pending,
+ ),
+
+ (TyKind::Adt(id_a, substitution_a), TyKind::Adt(id_b, substitution_b)) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ Some(self.unification_database().adt_variance(*id_a)),
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (
+ TyKind::AssociatedType(id_a, substitution_a),
+ TyKind::AssociatedType(id_b, substitution_b),
+ ) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ None,
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (TyKind::Scalar(scalar_a), TyKind::Scalar(scalar_b)) => {
+ Zip::zip_with(self, variance, scalar_a, scalar_b)
+ }
+ (TyKind::Str, TyKind::Str) => Ok(()),
+ (TyKind::Tuple(arity_a, substitution_a), TyKind::Tuple(arity_b, substitution_b)) => {
+ if arity_a != arity_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ None,
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (
+ TyKind::OpaqueType(id_a, substitution_a),
+ TyKind::OpaqueType(id_b, substitution_b),
+ ) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ None,
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (TyKind::Slice(ty_a), TyKind::Slice(ty_b)) => Zip::zip_with(self, variance, ty_a, ty_b),
+ (TyKind::FnDef(id_a, substitution_a), TyKind::FnDef(id_b, substitution_b)) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ Some(self.unification_database().fn_def_variance(*id_a)),
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (
+ TyKind::Ref(mutability_a, lifetime_a, ty_a),
+ TyKind::Ref(mutability_b, lifetime_b, ty_b),
+ ) => {
+ if mutability_a != mutability_b {
+ return Err(NoSolution);
+ }
+ // The lifetime is `Contravariant`
+ Zip::zip_with(
+ self,
+ variance.xform(Variance::Contravariant),
+ lifetime_a,
+ lifetime_b,
+ )?;
+ // The type is `Covariant` when not mut, `Invariant` otherwise
+ let output_variance = match mutability_a {
+ Mutability::Not => Variance::Covariant,
+ Mutability::Mut => Variance::Invariant,
+ };
+ Zip::zip_with(self, variance.xform(output_variance), ty_a, ty_b)
+ }
+ (TyKind::Raw(mutability_a, ty_a), TyKind::Raw(mutability_b, ty_b)) => {
+ if mutability_a != mutability_b {
+ return Err(NoSolution);
+ }
+ let ty_variance = match mutability_a {
+ Mutability::Not => Variance::Covariant,
+ Mutability::Mut => Variance::Invariant,
+ };
+ Zip::zip_with(self, variance.xform(ty_variance), ty_a, ty_b)
+ }
+ (TyKind::Never, TyKind::Never) => Ok(()),
+ (TyKind::Array(ty_a, const_a), TyKind::Array(ty_b, const_b)) => {
+ Zip::zip_with(self, variance, ty_a, ty_b)?;
+ Zip::zip_with(self, variance, const_a, const_b)
+ }
+ (TyKind::Closure(id_a, substitution_a), TyKind::Closure(id_b, substitution_b)) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ None,
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (TyKind::Generator(id_a, substitution_a), TyKind::Generator(id_b, substitution_b)) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ None,
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (
+ TyKind::GeneratorWitness(id_a, substitution_a),
+ TyKind::GeneratorWitness(id_b, substitution_b),
+ ) => {
+ if id_a != id_b {
+ return Err(NoSolution);
+ }
+ self.zip_substs(
+ variance,
+ None,
+ substitution_a.as_slice(interner),
+ substitution_b.as_slice(interner),
+ )
+ }
+ (TyKind::Foreign(id_a), TyKind::Foreign(id_b)) => {
+ Zip::zip_with(self, variance, id_a, id_b)
+ }
+ (TyKind::Error, TyKind::Error) => Ok(()),
+
+ (_, _) => panic!(
+ "structural mismatch between answer `{:?}` and pending goal `{:?}`",
+ answer, pending,
+ ),
+ }
+ }
+
+ fn zip_lifetimes(
+ &mut self,
+ variance: Variance,
+ answer: &Lifetime<I>,
+ pending: &Lifetime<I>,
+ ) -> Fallible<()> {
+ let interner = self.interner;
+ if let Some(pending) = self.table.normalize_lifetime_shallow(interner, pending) {
+ return Zip::zip_with(self, variance, answer, &pending);
+ }
+
+ if let LifetimeData::BoundVar(answer_depth) = answer.data(interner) {
+ if self.unify_free_answer_var(
+ interner,
+ self.unification_database,
+ variance,
+ *answer_depth,
+ GenericArgData::Lifetime(pending.clone()),
+ )? {
+ return Ok(());
+ }
+ }
+
+ match (answer.data(interner), pending.data(interner)) {
+ (LifetimeData::BoundVar(answer_depth), LifetimeData::BoundVar(pending_depth)) => {
+ self.assert_matching_vars(*answer_depth, *pending_depth)
+ }
+
+ (LifetimeData::Static, LifetimeData::Static)
+ | (LifetimeData::Placeholder(_), LifetimeData::Placeholder(_))
+ | (LifetimeData::Erased, LifetimeData::Erased)
+ | (LifetimeData::Empty(_), LifetimeData::Empty(_)) => {
+ assert_eq!(answer, pending);
+ Ok(())
+ }
+
+ (LifetimeData::InferenceVar(_), _) | (_, LifetimeData::InferenceVar(_)) => panic!(
+ "unexpected inference var in answer `{:?}` or pending goal `{:?}`",
+ answer, pending,
+ ),
+
+ (LifetimeData::Static, _)
+ | (LifetimeData::BoundVar(_), _)
+ | (LifetimeData::Placeholder(_), _)
+ | (LifetimeData::Erased, _)
+ | (LifetimeData::Empty(_), _) => panic!(
+ "structural mismatch between answer `{:?}` and pending goal `{:?}`",
+ answer, pending,
+ ),
+
+ (LifetimeData::Phantom(void, _), _) => match *void {},
+ }
+ }
+
+ fn zip_consts(
+ &mut self,
+ variance: Variance,
+ answer: &Const<I>,
+ pending: &Const<I>,
+ ) -> Fallible<()> {
+ let interner = self.interner;
+ if let Some(pending) = self.table.normalize_const_shallow(interner, pending) {
+ return Zip::zip_with(self, variance, answer, &pending);
+ }
+
+ let ConstData {
+ ty: answer_ty,
+ value: answer_value,
+ } = answer.data(interner);
+ let ConstData {
+ ty: pending_ty,
+ value: pending_value,
+ } = pending.data(interner);
+
+ self.zip_tys(variance, answer_ty, pending_ty)?;
+
+ if let ConstValue::BoundVar(answer_depth) = answer_value {
+ if self.unify_free_answer_var(
+ interner,
+ self.unification_database,
+ variance,
+ *answer_depth,
+ GenericArgData::Const(pending.clone()),
+ )? {
+ return Ok(());
+ }
+ }
+
+ match (answer_value, pending_value) {
+ (ConstValue::BoundVar(answer_depth), ConstValue::BoundVar(pending_depth)) => {
+ self.assert_matching_vars(*answer_depth, *pending_depth)
+ }
+
+ (ConstValue::Placeholder(_), ConstValue::Placeholder(_)) => {
+ assert_eq!(answer, pending);
+ Ok(())
+ }
+
+ (ConstValue::Concrete(c1), ConstValue::Concrete(c2)) => {
+ assert!(c1.const_eq(answer_ty, c2, interner));
+ Ok(())
+ }
+
+ (ConstValue::InferenceVar(_), _) | (_, ConstValue::InferenceVar(_)) => panic!(
+ "unexpected inference var in answer `{:?}` or pending goal `{:?}`",
+ answer, pending,
+ ),
+
+ (ConstValue::BoundVar(_), _)
+ | (ConstValue::Placeholder(_), _)
+ | (ConstValue::Concrete(_), _) => panic!(
+ "structural mismatch between answer `{:?}` and pending goal `{:?}`",
+ answer, pending,
+ ),
+ }
+ }
+
+ fn zip_binders<T>(
+ &mut self,
+ variance: Variance,
+ answer: &Binders<T>,
+ pending: &Binders<T>,
+ ) -> Fallible<()>
+ where
+ T: HasInterner<Interner = I> + Zip<I> + Fold<I, Result = T>,
+ {
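+ // Entering a binder: bump `outer_binder` so that bound variables seen
+ // while zipping the contents are interpreted relative to this binder,
+ // then restore it afterwards.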
+ self.outer_binder.shift_in();
+ Zip::zip_with(
+ self,
+ variance,
+ answer.skip_binders(),
+ pending.skip_binders(),
+ )?;
+ self.outer_binder.shift_out();
+ Ok(())
+ }
+
+ fn interner(&self) -> I {
+ self.interner
+ }
+
+ fn unification_database(&self) -> &dyn UnificationDatabase<I> {
+ self.unification_database
+ }
+}
diff --git a/vendor/chalk-engine/src/solve.rs b/vendor/chalk-engine/src/solve.rs
new file mode 100644
index 000000000..fc35adb66
--- /dev/null
+++ b/vendor/chalk-engine/src/solve.rs
@@ -0,0 +1,89 @@
+use crate::context::{AnswerResult, AnswerStream};
+use crate::forest::Forest;
+use crate::slg::aggregate::AggregateOps;
+use crate::slg::SlgContextOps;
+use chalk_ir::interner::Interner;
+use chalk_ir::{Canonical, ConstrainedSubst, Goal, InEnvironment, UCanonical};
+use chalk_solve::{RustIrDatabase, Solution, Solver, SubstitutionResult};
+
+use std::fmt;
+
+pub struct SLGSolver<I: Interner> {
+ pub(crate) forest: Forest<I>,
+ pub(crate) max_size: usize,
+ pub(crate) expected_answers: Option<usize>,
+}
+
+impl<I: Interner> SLGSolver<I> {
+ pub fn new(max_size: usize, expected_answers: Option<usize>) -> Self {
+ Self {
+ forest: Forest::new(),
+ max_size,
+ expected_answers,
+ }
+ }
+}
+
+impl<I: Interner> fmt::Debug for SLGSolver<I> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "SLGSolver")
+ }
+}
+
+impl<I: Interner> Solver<I> for SLGSolver<I> {
+ fn solve(
+ &mut self,
+ program: &dyn RustIrDatabase<I>,
+ goal: &UCanonical<InEnvironment<Goal<I>>>,
+ ) -> Option<Solution<I>> {
+ let ops = SlgContextOps::new(program, self.max_size, self.expected_answers);
+ ops.make_solution(goal, self.forest.iter_answers(&ops, goal), || true)
+ }
+
+ fn solve_limited(
+ &mut self,
+ program: &dyn RustIrDatabase<I>,
+ goal: &UCanonical<InEnvironment<Goal<I>>>,
+ should_continue: &dyn std::ops::Fn() -> bool,
+ ) -> Option<Solution<I>> {
+ let ops = SlgContextOps::new(program, self.max_size, self.expected_answers);
+ ops.make_solution(goal, self.forest.iter_answers(&ops, goal), should_continue)
+ }
+
+ fn solve_multiple(
+ &mut self,
+ program: &dyn RustIrDatabase<I>,
+ goal: &UCanonical<InEnvironment<Goal<I>>>,
+ f: &mut dyn FnMut(SubstitutionResult<Canonical<ConstrainedSubst<I>>>, bool) -> bool,
+ ) -> bool {
+ let ops = SlgContextOps::new(program, self.max_size, self.expected_answers);
+ let mut answers = self.forest.iter_answers(&ops, goal);
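+ // Pull answers one at a time, reporting each to `f`; stop early if `f` returns false.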
+ loop {
+ let subst = match answers.next_answer(|| true) {
+ AnswerResult::Answer(answer) => {
+ if !answer.ambiguous {
+ SubstitutionResult::Definite(answer.subst)
+ } else if answer
+ .subst
+ .value
+ .subst
+ .is_identity_subst(ops.program().interner())
+ {
+ SubstitutionResult::Floundered
+ } else {
+ SubstitutionResult::Ambiguous(answer.subst)
+ }
+ }
+ AnswerResult::Floundered => SubstitutionResult::Floundered,
+ AnswerResult::NoMoreSolutions => {
+ return true;
+ }
+ AnswerResult::QuantumExceeded => continue,
+ };
+
+ if !f(subst, !answers.peek_answer(|| true).is_no_more_solutions()) {
+ return false;
+ }
+ }
+ }
+}
diff --git a/vendor/chalk-engine/src/stack.rs b/vendor/chalk-engine/src/stack.rs
new file mode 100644
index 000000000..66b53fa58
--- /dev/null
+++ b/vendor/chalk-engine/src/stack.rs
@@ -0,0 +1,175 @@
+use crate::index_struct;
+use crate::strand::CanonicalStrand;
+use crate::tables::Tables;
+use crate::{Minimums, TableIndex, TimeStamp};
+use std::fmt;
+use std::ops::{Index, IndexMut, Range};
+
+use chalk_ir::interner::Interner;
+
+/// See `Forest`.
+#[derive(Debug)]
+pub(crate) struct Stack<I: Interner> {
+ /// Stack: as described above, stores the in-progress goals.
+ stack: Vec<StackEntry<I>>,
+}
+
+impl<I: Interner> Stack<I> {
+ // This isn't actually used, but it can be helpful when debugging stack issues
+ #[allow(dead_code)]
+ pub(crate) fn debug_with<'a>(&'a self, tables: &'a Tables<I>) -> StackDebug<'_, I> {
+ StackDebug {
+ stack: self,
+ tables,
+ }
+ }
+}
+
+pub(crate) struct StackDebug<'a, I: Interner> {
+ stack: &'a Stack<I>,
+ tables: &'a Tables<I>,
+}
+
+impl<I: Interner> fmt::Debug for StackDebug<'_, I> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ writeln!(f, "---- Stack ----")?;
+ for entry in self.stack.stack.iter() {
+ writeln!(f, " --- StackEntry ---")?;
+ writeln!(
+ f,
+ " Table {:?} with goal {:?}",
+ entry.table, self.tables[entry.table].table_goal
+ )?;
+ writeln!(f, " Active strand: {:#?}", entry.active_strand)?;
+ writeln!(
+ f,
+ " Additional strands: {:#?}",
+ self.tables[entry.table].strands().collect::<Vec<_>>()
+ )?;
+ }
+ write!(f, "---- End Stack ----")?;
+ Ok(())
+ }
+}
+
+impl<I: Interner> Default for Stack<I> {
+ fn default() -> Self {
+ Stack { stack: vec![] }
+ }
+}
+
+index_struct! {
+ /// The StackIndex identifies the position of a table's goal in the
+ /// stack of goals that are actively being processed. Note that once a
+ /// table is completely evaluated, it may be popped from the stack,
+ /// and hence no longer have a stack index.
+ pub(crate) struct StackIndex {
+ value: usize,
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct StackEntry<I: Interner> {
+ /// The goal G from the stack entry `A :- G` represented here.
+ pub(super) table: TableIndex,
+
+ /// The clock TimeStamp of this stack entry.
+ pub(super) clock: TimeStamp,
+
+ pub(super) cyclic_minimums: Minimums,
+
+ // FIXME: should store this as an index.
+ // This would mean that if we unwind,
+ // we don't need to worry about losing a strand
+ pub(super) active_strand: Option<CanonicalStrand<I>>,
+}
+
+impl<I: Interner> Stack<I> {
+ pub(super) fn is_empty(&self) -> bool {
+ self.stack.is_empty()
+ }
+
+ /// Searches the stack to see if `table` is active. If so, returns
+ /// its stack index.
+ pub(super) fn is_active(&self, table: TableIndex) -> Option<StackIndex> {
+ self.stack
+ .iter()
+ .enumerate()
+ .filter_map(|(index, stack_entry)| {
+ if stack_entry.table == table {
+ Some(StackIndex::from(index))
+ } else {
+ None
+ }
+ })
+ .next()
+ }
+
+ pub(super) fn top_of_stack_from(&self, depth: StackIndex) -> Range<StackIndex> {
+ depth..StackIndex::from(self.stack.len())
+ }
+
+ pub(super) fn push(
+ &mut self,
+ table: TableIndex,
+ cyclic_minimums: Minimums,
+ clock: TimeStamp,
+ ) -> StackIndex {
+ let old_len = self.stack.len();
+ self.stack.push(StackEntry {
+ table,
+ clock,
+ cyclic_minimums,
+ active_strand: None,
+ });
+ StackIndex::from(old_len)
+ }
+
+ /// Pops the top-most entry from the stack:
+ /// * If the stack is now empty, returns false.
+ /// * Otherwise, returns true.
+ fn pop_and_adjust_depth(&mut self) -> bool {
+ self.stack.pop();
+ !self.stack.is_empty()
+ }
+
+ /// Pops the top-most entry from the stack:
+ /// * If the stack is now empty, returns None.
+ /// * Otherwise, `take`s the active strand from the new top and returns it.
+ pub(super) fn pop_and_take_caller_strand(&mut self) -> Option<CanonicalStrand<I>> {
+ if self.pop_and_adjust_depth() {
+ Some(self.top().active_strand.take().unwrap())
+ } else {
+ None
+ }
+ }
+
+ /// Pops the top-most entry from the stack:
+ /// * If the stack is now empty, returns None.
+ /// * Otherwise, borrows the active strand (mutably) from the new top and returns it.
+ pub(super) fn pop_and_borrow_caller_strand(&mut self) -> Option<&mut CanonicalStrand<I>> {
+ if self.pop_and_adjust_depth() {
+ Some(self.top().active_strand.as_mut().unwrap())
+ } else {
+ None
+ }
+ }
+
+ pub(super) fn top(&mut self) -> &mut StackEntry<I> {
+ self.stack.last_mut().unwrap()
+ }
+}
+
+impl<I: Interner> Index<StackIndex> for Stack<I> {
+ type Output = StackEntry<I>;
+
+ fn index(&self, index: StackIndex) -> &StackEntry<I> {
+ &self.stack[index.value]
+ }
+}
+
+impl<I: Interner> IndexMut<StackIndex> for Stack<I> {
+ fn index_mut(&mut self, index: StackIndex) -> &mut StackEntry<I> {
+ &mut self.stack[index.value]
+ }
+}
diff --git a/vendor/chalk-engine/src/strand.rs b/vendor/chalk-engine/src/strand.rs
new file mode 100644
index 000000000..91196ec94
--- /dev/null
+++ b/vendor/chalk-engine/src/strand.rs
@@ -0,0 +1,51 @@
+use crate::table::AnswerIndex;
+use crate::{ExClause, TableIndex, TimeStamp};
+use std::fmt::Debug;
+
+use chalk_derive::HasInterner;
+use chalk_ir::fold::{Fold, Folder};
+use chalk_ir::interner::Interner;
+use chalk_ir::{Canonical, DebruijnIndex, UniverseMap};
+
+#[derive(Clone, Debug, HasInterner)]
+pub(crate) struct Strand<I: Interner> {
+ pub(super) ex_clause: ExClause<I>,
+
+ /// Index into `ex_clause.subgoals`.
+ pub(crate) selected_subgoal: Option<SelectedSubgoal>,
+
+ pub(crate) last_pursued_time: TimeStamp,
+}
+
+pub(crate) type CanonicalStrand<I> = Canonical<Strand<I>>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct SelectedSubgoal {
+ /// The index of the subgoal in `ex_clause.subgoals`
+ pub(crate) subgoal_index: usize,
+
+ /// The index of the table that we created or found for this subgoal
+ pub(super) subgoal_table: TableIndex,
+
+ /// Index of the answer we should request next from the table
+ pub(crate) answer_index: AnswerIndex,
+
+ /// Maps the universes of the subgoal to the canonical universes
+ /// used in the table
+ pub(crate) universe_map: UniverseMap,
+}
+
+impl<I: Interner> Fold<I> for Strand<I> {
+ type Result = Strand<I>;
+ fn fold_with<E>(
+ self,
+ folder: &mut dyn Folder<I, Error = E>,
+ outer_binder: DebruijnIndex,
+ ) -> Result<Self::Result, E> {
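+ // Only the ex-clause contains bound variables; the selected subgoal and
+ // timestamp carry no binders and are passed through unchanged.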
+ Ok(Strand {
+ ex_clause: self.ex_clause.fold_with(folder, outer_binder)?,
+ last_pursued_time: self.last_pursued_time,
+ selected_subgoal: self.selected_subgoal,
+ })
+ }
+}
diff --git a/vendor/chalk-engine/src/table.rs b/vendor/chalk-engine/src/table.rs
new file mode 100644
index 000000000..e883cad32
--- /dev/null
+++ b/vendor/chalk-engine/src/table.rs
@@ -0,0 +1,173 @@
+use crate::index_struct;
+use crate::strand::CanonicalStrand;
+use crate::{Answer, AnswerMode};
+use rustc_hash::FxHashMap;
+use std::collections::hash_map::Entry;
+use std::collections::VecDeque;
+use std::mem;
+
+use chalk_ir::interner::Interner;
+use chalk_ir::{AnswerSubst, Canonical, Goal, InEnvironment, UCanonical};
+use tracing::{debug, info, instrument};
+
+#[derive(Debug)]
+pub(crate) struct Table<I: Interner> {
+ /// The goal this table is trying to solve (also the key to look
+ /// it up).
+ pub(crate) table_goal: UCanonical<InEnvironment<Goal<I>>>,
+
+ /// A goal is coinductive if it can assume itself to be true, more
+ /// or less. This is true for auto traits.
+ pub(crate) coinductive_goal: bool,
+
+ /// True if this table is floundered, meaning that its goal doesn't have
+ /// enough types specified for us to solve it.
+ floundered: bool,
+
+ /// Stores the answers that we have found thus far. When we get a request
+ /// for an answer N, we will first check this vector.
+ answers: Vec<Answer<I>>,
+
+ /// An alternative storage for the answers we have so far, used to
+ /// detect duplicates. Not every answer in `answers` will be
+ /// represented here -- we discard answers from `answers_hash`
+ /// (but not `answers`) when better answers arrive (in particular,
+ /// answers with no ambiguity).
+ ///
+ /// FIXME -- Ideally we would exclude the region constraints and
+ /// delayed subgoals from the hash, but that's a bit tricky to do
+ /// with the current canonicalization setup. It should be ok not
+ /// to do so, though it can result in more answers than we need.
+ answers_hash: FxHashMap<Canonical<AnswerSubst<I>>, bool>,
+
+ /// Stores the active strands that we can "pull on" to find more
+ /// answers.
+ strands: VecDeque<CanonicalStrand<I>>,
+
+ pub(crate) answer_mode: AnswerMode,
+}
+
+index_struct! {
+ pub(crate) struct AnswerIndex {
+ value: usize,
+ }
+}
+
+impl<I: Interner> Table<I> {
+ pub(crate) fn new(
+ table_goal: UCanonical<InEnvironment<Goal<I>>>,
+ coinductive_goal: bool,
+ ) -> Table<I> {
+ Table {
+ table_goal,
+ coinductive_goal,
+ answers: Vec::new(),
+ floundered: false,
+ answers_hash: FxHashMap::default(),
+ strands: VecDeque::new(),
+ answer_mode: AnswerMode::Complete,
+ }
+ }
+
+ /// Push a strand to the back of the queue of strands to be processed.
+ pub(crate) fn enqueue_strand(&mut self, strand: CanonicalStrand<I>) {
+ self.strands.push_back(strand);
+ }
+
+ pub(crate) fn strands_mut(&mut self) -> impl Iterator<Item = &mut CanonicalStrand<I>> {
+ self.strands.iter_mut()
+ }
+
+ pub(crate) fn strands(&self) -> impl Iterator<Item = &CanonicalStrand<I>> {
+ self.strands.iter()
+ }
+
+ pub(crate) fn take_strands(&mut self) -> VecDeque<CanonicalStrand<I>> {
+ mem::take(&mut self.strands)
+ }
+
+ /// Removes and returns the next strand in the queue that satisfies the given predicate.
+ pub(crate) fn dequeue_next_strand_that(
+ &mut self,
+ test: impl Fn(&CanonicalStrand<I>) -> bool,
+ ) -> Option<CanonicalStrand<I>> {
+ let first = self.strands.iter().position(test);
+ if let Some(first) = first {
+ self.strands.rotate_left(first);
+ self.strands.pop_front()
+ } else {
+ None
+ }
+ }
+
+ /// Mark the table as floundered -- this also discards all pre-existing answers,
+ /// as they are no longer relevant.
+ pub(crate) fn mark_floundered(&mut self) {
+ self.floundered = true;
+ self.strands = Default::default();
+ self.answers = Default::default();
+ }
+
+ /// Returns true if the table is floundered.
+ pub(crate) fn is_floundered(&self) -> bool {
+ self.floundered
+ }
+
+ /// Adds `answer` to our list of answers, unless it is already present.
+ ///
+ /// Returns `Some` with the index of the newly added answer, or `None`
+ /// if the answer was already present.
+ ///
+ /// # Panics
+ /// This will panic if a previous answer with the same substitution
+ /// was marked as ambiguous, but the new answer is not. No current
+ /// tests trigger this case, and callers upstream assume that a `Some`
+ /// return means a *new* answer was added, not an existing answer replaced.
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn push_answer(&mut self, answer: Answer<I>) -> Option<AnswerIndex> {
+ assert!(!self.floundered);
+ debug!(
+ "pre-existing entry: {:?}",
+ self.answers_hash.get(&answer.subst)
+ );
+
+ let added = match self.answers_hash.entry(answer.subst.clone()) {
+ Entry::Vacant(entry) => {
+ entry.insert(answer.ambiguous);
+ true
+ }
+
+ Entry::Occupied(entry) => {
+ let was_ambiguous = entry.get();
+ if *was_ambiguous && !answer.ambiguous {
+ panic!("New answer was not ambiguous whereas previous answer was.");
+ }
+ false
+ }
+ };
+
+ info!(
+ goal = ?self.table_goal, ?answer,
+ "new answer to table",
+ );
+ if !added {
+ return None;
+ }
+
+ let index = self.answers.len();
+ self.answers.push(answer);
+ Some(AnswerIndex::from(index))
+ }
+
+ pub(super) fn answer(&self, index: AnswerIndex) -> Option<&Answer<I>> {
+ self.answers.get(index.value)
+ }
+
+ pub(super) fn next_answer_index(&self) -> AnswerIndex {
+ AnswerIndex::from(self.answers.len())
+ }
+}
+
+impl AnswerIndex {
+ pub(crate) const ZERO: AnswerIndex = AnswerIndex { value: 0 };
+}
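
One detail of `Table` worth calling out is `dequeue_next_strand_that`: rather than removing a strand from the middle of the `VecDeque`, it rotates the queue until the first matching strand sits at the front and then pops it, which sends the skipped strands to the back of the queue. A minimal standalone sketch of the same trick, using hypothetical names and plain integers instead of strands:

use std::collections::VecDeque;

// Rotate the first element satisfying `test` to the front, then pop it.
// The elements rotated past end up at the back of the queue, which is
// acceptable for a work queue that will eventually visit every entry.
fn dequeue_next_that<T>(
    queue: &mut VecDeque<T>,
    test: impl Fn(&T) -> bool,
) -> Option<T> {
    let first = queue.iter().position(test)?;
    queue.rotate_left(first);
    queue.pop_front()
}

fn main() {
    let mut queue: VecDeque<i32> = (1..=5).collect(); // [1, 2, 3, 4, 5]
    let found = dequeue_next_that(&mut queue, |&x| x % 2 == 0);
    assert_eq!(found, Some(2));
    // 1 was rotated past and now sits at the back.
    assert_eq!(queue, VecDeque::from(vec![3, 4, 5, 1]));
}
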
diff --git a/vendor/chalk-engine/src/tables.rs b/vendor/chalk-engine/src/tables.rs
new file mode 100644
index 000000000..76508eaa8
--- /dev/null
+++ b/vendor/chalk-engine/src/tables.rs
@@ -0,0 +1,72 @@
+use crate::table::Table;
+use crate::TableIndex;
+use rustc_hash::FxHashMap;
+use std::ops::{Index, IndexMut};
+
+use chalk_ir::interner::Interner;
+use chalk_ir::{Goal, InEnvironment, UCanonical};
+
+/// See `Forest`.
+#[derive(Debug)]
+pub(crate) struct Tables<I: Interner> {
+ /// Maps from a canonical goal to the index of its table.
+ table_indices: FxHashMap<UCanonical<InEnvironment<Goal<I>>>, TableIndex>,
+
+ /// The tables themselves: as described above, each stores the key
+ /// information for one tree in the forest.
+ tables: Vec<Table<I>>,
+}
+
+impl<I: Interner> Tables<I> {
+ pub(crate) fn new() -> Tables<I> {
+ Tables {
+ table_indices: FxHashMap::default(),
+ tables: Vec::default(),
+ }
+ }
+
+ /// The index that will be given to the next table to be inserted.
+ pub(super) fn next_index(&self) -> TableIndex {
+ TableIndex {
+ value: self.tables.len(),
+ }
+ }
+
+ pub(super) fn insert(&mut self, table: Table<I>) -> TableIndex {
+ let goal = table.table_goal.clone();
+ let index = self.next_index();
+ self.tables.push(table);
+ self.table_indices.insert(goal, index);
+ index
+ }
+
+ pub(super) fn index_of(
+ &self,
+ literal: &UCanonical<InEnvironment<Goal<I>>>,
+ ) -> Option<TableIndex> {
+ self.table_indices.get(literal).cloned()
+ }
+}
+
+impl<I: Interner> Index<TableIndex> for Tables<I> {
+ type Output = Table<I>;
+
+ fn index(&self, index: TableIndex) -> &Table<I> {
+ &self.tables[index.value]
+ }
+}
+
+impl<I: Interner> IndexMut<TableIndex> for Tables<I> {
+ fn index_mut(&mut self, index: TableIndex) -> &mut Table<I> {
+ &mut self.tables[index.value]
+ }
+}
+
+impl<'a, I: Interner> IntoIterator for &'a mut Tables<I> {
+ type IntoIter = <&'a mut Vec<Table<I>> as IntoIterator>::IntoIter;
+ type Item = <&'a mut Vec<Table<I>> as IntoIterator>::Item;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIterator::into_iter(&mut self.tables)
+ }
+}
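
`Tables` combines dense `Vec` storage with a hash map from the canonical goal to a small, copyable `TableIndex`, and exposes the index through `Index`/`IndexMut`; indices never dangle because tables are only appended, never removed. The sketch below reproduces that pattern in a self-contained form, using a hypothetical `Registry` keyed by `String` rather than chalk's `UCanonical` goals:

use std::collections::HashMap;
use std::ops::{Index, IndexMut};

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TableIndex {
    value: usize,
}

struct Registry {
    // Maps the lookup key to the index of its entry.
    indices: HashMap<String, TableIndex>,
    // Dense storage; stands in for `Vec<Table<I>>`.
    entries: Vec<String>,
}

impl Registry {
    fn new() -> Self {
        Registry { indices: HashMap::new(), entries: Vec::new() }
    }

    // The index that the next inserted entry will receive.
    fn next_index(&self) -> TableIndex {
        TableIndex { value: self.entries.len() }
    }

    fn insert(&mut self, key: String, entry: String) -> TableIndex {
        let index = self.next_index();
        self.entries.push(entry);
        self.indices.insert(key, index);
        index
    }

    fn index_of(&self, key: &str) -> Option<TableIndex> {
        self.indices.get(key).copied()
    }
}

impl Index<TableIndex> for Registry {
    type Output = String;
    fn index(&self, index: TableIndex) -> &String {
        &self.entries[index.value]
    }
}

impl IndexMut<TableIndex> for Registry {
    fn index_mut(&mut self, index: TableIndex) -> &mut String {
        &mut self.entries[index.value]
    }
}

fn main() {
    let mut registry = Registry::new();
    let idx = registry.insert("goal-a".to_string(), "table for goal-a".to_string());
    assert_eq!(registry.index_of("goal-a"), Some(idx));
    registry[idx].push_str(" (updated)");
    assert_eq!(&registry[idx][..], "table for goal-a (updated)");
}
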