author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
commit    dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree      ab8ce99c4b255ce46f99ef402c27916055b899ee /src/tools/rust-analyzer/crates/hir-ty
parent    Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/tools/rust-analyzer/crates/hir-ty')
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/Cargo.toml | 12
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs | 84
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/builder.rs | 50
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs | 149
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs | 55
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs | 119
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs | 1435
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs | 377
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/db.rs | 115
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs | 43
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/expr.rs | 6
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check.rs | 51
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/deconstruct_pat.rs | 12
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/pat_util.rs | 2
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/usefulness.rs | 2
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/unsafe_check.rs | 11
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/display.rs | 671
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer.rs | 542
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs | 931
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs | 64
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs | 461
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs | 218
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs | 59
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/path.rs | 127
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs | 157
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/inhabitedness.rs | 30
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/interner.rs | 147
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/lang_items.rs | 68
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout.rs | 150
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs | 106
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs | 3
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs | 192
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout/tests/closure.rs | 257
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/lib.rs | 85
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/lower.rs | 325
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs | 305
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir.rs | 343
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs | 282
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs | 2209
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs | 792
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs | 676
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs | 1795
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs | 163
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs | 617
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs | 351
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs | 185
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/test_db.rs | 20
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests.rs | 45
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/coercion.rs | 111
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/incremental.rs | 8
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs | 59
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs | 74
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs | 47
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/patterns.rs | 43
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs | 234
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs | 398
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs | 383
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/tls.rs | 22
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/traits.rs | 55
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/utils.rs | 154
60 files changed, 13306 insertions(+), 3181 deletions(-)
diff --git a/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml b/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml
index 9b3296df2..c8bea3450 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml
+++ b/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml
@@ -15,19 +15,21 @@ doctest = false
cov-mark = "2.0.0-pre.1"
itertools = "0.10.5"
arrayvec = "0.7.2"
-bitflags = "1.3.2"
+bitflags = "2.1.0"
smallvec.workspace = true
ena = "0.14.0"
either = "1.7.0"
tracing = "0.1.35"
rustc-hash = "1.1.0"
scoped-tls = "1.0.0"
-chalk-solve = { version = "0.89.0", default-features = false }
-chalk-ir = "0.89.0"
-chalk-recursive = { version = "0.89.0", default-features = false }
-chalk-derive = "0.89.0"
+chalk-solve = { version = "0.91.0", default-features = false }
+chalk-ir = "0.91.0"
+chalk-recursive = { version = "0.91.0", default-features = false }
+chalk-derive = "0.91.0"
la-arena = { version = "0.3.0", path = "../../lib/la-arena" }
once_cell = "1.17.0"
+triomphe.workspace = true
+nohash-hasher.workspace = true
typed-arena = "2.0.1"
rustc_index = { version = "0.0.20221221", package = "hkalbasi-rustc-ap-rustc_index", default-features = false }
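
Most of the dependency bumps above are mechanical, but the bitflags 1.3 → 2.1 move is a semver-breaking API change: bitflags 2.x no longer implements traits like `Debug` and `PartialEq` automatically, so every `bitflags!` invocation has to derive them explicitly. A minimal sketch of the 2.x style (the flag constants here are illustrative, not taken from hir-ty):

```rust
use bitflags::bitflags;

bitflags! {
    // bitflags 2.x: common traits must now be derived explicitly on the
    // generated struct; in 1.x they were implemented automatically.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct TypeFlags: u32 {
        const HAS_ERROR = 1 << 0;
        const HAS_INFER = 1 << 1;
    }
}

fn main() {
    let flags = TypeFlags::HAS_ERROR | TypeFlags::HAS_INFER;
    assert!(flags.contains(TypeFlags::HAS_ERROR));
    println!("{flags:?}"); // derived Debug prints the named flags
}
```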
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs b/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs
index 58744dd0c..3860bccec 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs
@@ -3,12 +3,11 @@
//! reference to a type with the field `bar`. This is an approximation of the
//! logic in rustc (which lives in rustc_hir_analysis/check/autoderef.rs).
-use std::sync::Arc;
-
use chalk_ir::cast::Cast;
use hir_def::lang_item::LangItem;
use hir_expand::name::name;
use limit::Limit;
+use triomphe::Arc;
use crate::{
db::HirDatabase, infer::unify::InferenceTable, Canonical, Goal, Interner, ProjectionTyExt,
@@ -23,6 +22,41 @@ pub(crate) enum AutoderefKind {
Overloaded,
}
+/// Returns types that `ty` transitively dereferences to. This function is only meant to be used
+/// outside `hir-ty`.
+///
+/// It is guaranteed that:
+/// - the yielded types don't contain inference variables (but may contain `TyKind::Error`).
+/// - a type won't be yielded more than once; in other words, the returned iterator will stop if it
+/// detects a cycle in the deref chain.
+pub fn autoderef(
+ db: &dyn HirDatabase,
+ env: Arc<TraitEnvironment>,
+ ty: Canonical<Ty>,
+) -> impl Iterator<Item = Ty> {
+ let mut table = InferenceTable::new(db, env);
+ let ty = table.instantiate_canonical(ty);
+ let mut autoderef = Autoderef::new(&mut table, ty);
+ let mut v = Vec::new();
+ while let Some((ty, _steps)) = autoderef.next() {
+ // `ty` may contain unresolved inference variables. Since there's no chance they would be
+ // resolved, just replace with fallback type.
+ let resolved = autoderef.table.resolve_completely(ty);
+
+ // If the deref chain contains a cycle (e.g. `A` derefs to `B` and `B` derefs to `A`), we
+ // would revisit some already visited types. Stop here to avoid duplication.
+ //
+ // XXX: The recursion limit for `Autoderef` is currently 10, so `Vec::contains()` shouldn't
+ // be too expensive. Replace this duplicate check with `FxHashSet` if it proves to be more
+ // performant.
+ if v.contains(&resolved) {
+ break;
+ }
+ v.push(resolved);
+ }
+ v.into_iter()
+}
+
#[derive(Debug)]
pub(crate) struct Autoderef<'a, 'db> {
pub(crate) table: &'a mut InferenceTable<'db>,
@@ -76,49 +110,43 @@ pub(crate) fn autoderef_step(
table: &mut InferenceTable<'_>,
ty: Ty,
) -> Option<(AutoderefKind, Ty)> {
- if let Some(derefed) = builtin_deref(&ty) {
+ if let Some(derefed) = builtin_deref(table, &ty, false) {
Some((AutoderefKind::Builtin, table.resolve_ty_shallow(derefed)))
} else {
Some((AutoderefKind::Overloaded, deref_by_trait(table, ty)?))
}
}
-// FIXME: replace uses of this with Autoderef above
-pub fn autoderef(
- db: &dyn HirDatabase,
- env: Arc<TraitEnvironment>,
- ty: Canonical<Ty>,
-) -> impl Iterator<Item = Canonical<Ty>> + '_ {
- let mut table = InferenceTable::new(db, env);
- let ty = table.instantiate_canonical(ty);
- let mut autoderef = Autoderef::new(&mut table, ty);
- let mut v = Vec::new();
- while let Some((ty, _steps)) = autoderef.next() {
- v.push(autoderef.table.canonicalize(ty).value);
- }
- v.into_iter()
-}
-
-pub(crate) fn deref(table: &mut InferenceTable<'_>, ty: Ty) -> Option<Ty> {
- let _p = profile::span("deref");
- autoderef_step(table, ty).map(|(_, ty)| ty)
-}
-
-fn builtin_deref(ty: &Ty) -> Option<&Ty> {
+pub(crate) fn builtin_deref<'ty>(
+ table: &mut InferenceTable<'_>,
+ ty: &'ty Ty,
+ explicit: bool,
+) -> Option<&'ty Ty> {
match ty.kind(Interner) {
- TyKind::Ref(.., ty) | TyKind::Raw(.., ty) => Some(ty),
+ TyKind::Ref(.., ty) => Some(ty),
+ // FIXME: Maybe accept this but diagnose if its not explicit?
+ TyKind::Raw(.., ty) if explicit => Some(ty),
+ &TyKind::Adt(chalk_ir::AdtId(adt), ref substs) => {
+ if crate::lang_items::is_box(table.db, adt) {
+ substs.at(Interner, 0).ty(Interner)
+ } else {
+ None
+ }
+ }
_ => None,
}
}
-fn deref_by_trait(table: &mut InferenceTable<'_>, ty: Ty) -> Option<Ty> {
+pub(crate) fn deref_by_trait(
+ table @ &mut InferenceTable { db, .. }: &mut InferenceTable<'_>,
+ ty: Ty,
+) -> Option<Ty> {
let _p = profile::span("deref_by_trait");
if table.resolve_ty_shallow(&ty).inference_var(Interner).is_some() {
// don't try to deref unknown variables
return None;
}
- let db = table.db;
let deref_trait =
db.lang_item(table.trait_env.krate, LangItem::Deref).and_then(|l| l.as_trait())?;
let target = db.trait_data(deref_trait).associated_type_by_name(&name![Target])?;
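
The rewritten public `autoderef` now fully resolves each step (so callers outside `hir-ty` never see inference variables) and cuts the chain at the first repeated type instead of looping forever on cyclic `Deref` impls. A minimal sketch of that cut-off, assuming only that the chain is short (the recursion limit is 10), which is why a linear `Vec::contains` scan is preferred over a hash set:

```rust
/// Collect items from an iterator, stopping at the first repeat.
/// Mirrors the dedup-and-break loop in the new `autoderef` above.
fn take_until_repeat<T: PartialEq>(iter: impl Iterator<Item = T>) -> Vec<T> {
    let mut seen = Vec::new();
    for item in iter {
        if seen.contains(&item) {
            break; // the deref chain cycled back to an already-yielded type
        }
        seen.push(item);
    }
    seen
}

fn main() {
    // `A` derefs to `B` and `B` back to `A`: the chain stops after [A, B].
    let chain = ["A", "B", "A", "B"].into_iter();
    assert_eq!(take_until_repeat(chain), vec!["A", "B"]);
}
```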
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/builder.rs b/src/tools/rust-analyzer/crates/hir-ty/src/builder.rs
index 03e944359..eec57ba3f 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/builder.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/builder.rs
@@ -18,7 +18,6 @@ use crate::{
consteval::unknown_const_as_generic, db::HirDatabase, infer::unify::InferenceTable, primitive,
to_assoc_type_id, to_chalk_trait_id, utils::generics, Binders, BoundVar, CallableSig,
GenericArg, Interner, ProjectionTy, Substitution, TraitRef, Ty, TyDefId, TyExt, TyKind,
- ValueTyDefId,
};
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -195,6 +194,19 @@ impl TyBuilder<()> {
params.placeholder_subst(db)
}
+ pub fn unknown_subst(db: &dyn HirDatabase, def: impl Into<GenericDefId>) -> Substitution {
+ let params = generics(db.upcast(), def.into());
+ Substitution::from_iter(
+ Interner,
+ params.iter_id().map(|id| match id {
+ either::Either::Left(_) => TyKind::Error.intern(Interner).cast(Interner),
+ either::Either::Right(id) => {
+ unknown_const_as_generic(db.const_param_ty(id)).cast(Interner)
+ }
+ }),
+ )
+ }
+
pub fn subst_for_def(
db: &dyn HirDatabase,
def: impl Into<GenericDefId>,
@@ -233,6 +245,25 @@ impl TyBuilder<()> {
TyBuilder::new((), params, parent_subst)
}
+ pub fn subst_for_closure(
+ db: &dyn HirDatabase,
+ parent: DefWithBodyId,
+ sig_ty: Ty,
+ ) -> Substitution {
+ let sig_ty = sig_ty.cast(Interner);
+ let self_subst = iter::once(&sig_ty);
+ let Some(parent) = parent.as_generic_def_id() else {
+ return Substitution::from_iter(Interner, self_subst);
+ };
+ Substitution::from_iter(
+ Interner,
+ self_subst
+ .chain(generics(db.upcast(), parent).placeholder_subst(db).iter(Interner))
+ .cloned()
+ .collect::<Vec<_>>(),
+ )
+ }
+
pub fn build(self) -> Substitution {
let ((), subst) = self.build_internal();
subst
@@ -362,21 +393,4 @@ impl TyBuilder<Binders<Ty>> {
pub fn impl_self_ty(db: &dyn HirDatabase, def: hir_def::ImplId) -> TyBuilder<Binders<Ty>> {
TyBuilder::subst_for_def(db, def, None).with_data(db.impl_self_ty(def))
}
-
- pub fn value_ty(
- db: &dyn HirDatabase,
- def: ValueTyDefId,
- parent_subst: Option<Substitution>,
- ) -> TyBuilder<Binders<Ty>> {
- let poly_value_ty = db.value_ty(def);
- let id = match def.to_generic_def_id() {
- Some(id) => id,
- None => {
- // static items
- assert!(parent_subst.is_none());
- return TyBuilder::new_empty(poly_value_ty);
- }
- };
- TyBuilder::subst_for_def(db, id, parent_subst).with_data(poly_value_ty)
- }
}
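
The new `subst_for_closure` pins down the layout of closure substitutions: the closure's signature type occupies slot 0, followed by the enclosing item's generic arguments when the parent is generic at all. A toy model of that layout, with plain strings standing in for `Ty` and `Substitution`:

```rust
// Slot 0 is the closure's signature "type"; the parent's generics follow.
// All names here are illustrative stand-ins for the chalk types.
fn subst_for_closure(sig_ty: &str, parent_args: Option<Vec<&str>>) -> Vec<String> {
    let mut subst = vec![sig_ty.to_string()];
    // `let ... else` mirrors the early return in the real code: a parent
    // without generics contributes nothing beyond the signature.
    let Some(args) = parent_args else {
        return subst;
    };
    subst.extend(args.into_iter().map(str::to_string));
    subst
}

fn main() {
    assert_eq!(subst_for_closure("fn(i32) -> i32", None), ["fn(i32) -> i32"]);
    assert_eq!(subst_for_closure("fn(T) -> T", Some(vec!["T"])).len(), 2);
}
```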
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs
index 28ae4c349..5dd8e2719 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs
@@ -1,8 +1,8 @@
//! The implementation of `RustIrDatabase` for Chalk, which provides information
//! about the code that Chalk needs.
-use std::sync::Arc;
+use core::ops;
+use std::{iter, sync::Arc};
-use cov_mark::hit;
use tracing::debug;
use chalk_ir::{cast::Cast, fold::shift::Shift, CanonicalVarKinds};
@@ -10,9 +10,9 @@ use chalk_solve::rust_ir::{self, OpaqueTyDatumBound, WellKnownTrait};
use base_db::CrateId;
use hir_def::{
- expr::Movability,
+ hir::Movability,
lang_item::{lang_attr, LangItem, LangItemTarget},
- AssocItemId, GenericDefId, HasModule, ItemContainerId, Lookup, ModuleId, TypeAliasId,
+ AssocItemId, BlockId, GenericDefId, HasModule, ItemContainerId, Lookup, TypeAliasId,
};
use hir_expand::name::name;
@@ -25,7 +25,7 @@ use crate::{
method_resolution::{TraitImpls, TyFingerprint, ALL_FLOAT_FPS, ALL_INT_FPS},
to_assoc_type_id, to_chalk_trait_id,
traits::ChalkContext,
- utils::generics,
+ utils::{generics, ClosureSubst},
wrap_empty_binders, AliasEq, AliasTy, BoundVar, CallableDefId, DebruijnIndex, FnDefId,
Interner, ProjectionTy, ProjectionTyExt, QuantifiedWhereClause, Substitution, TraitRef,
TraitRefExt, Ty, TyBuilder, TyExt, TyKind, WhereClause,
@@ -108,17 +108,6 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
_ => self_ty_fp.as_ref().map(std::slice::from_ref).unwrap_or(&[]),
};
- fn local_impls(db: &dyn HirDatabase, module: ModuleId) -> Option<Arc<TraitImpls>> {
- let block = module.containing_block()?;
- hit!(block_local_impls);
- db.trait_impls_in_block(block)
- }
-
- // Note: Since we're using impls_for_trait, only impls where the trait
- // can be resolved should ever reach Chalk. impl_datum relies on that
- // and will panic if the trait can't be resolved.
- let in_deps = self.db.trait_impls_in_deps(self.krate);
- let in_self = self.db.trait_impls_in_crate(self.krate);
let trait_module = trait_.module(self.db.upcast());
let type_module = match self_ty_fp {
Some(TyFingerprint::Adt(adt_id)) => Some(adt_id.module(self.db.upcast())),
@@ -128,33 +117,62 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
Some(TyFingerprint::Dyn(trait_id)) => Some(trait_id.module(self.db.upcast())),
_ => None,
};
- let impl_maps = [
- Some(in_deps),
- Some(in_self),
- local_impls(self.db, trait_module),
- type_module.and_then(|m| local_impls(self.db, m)),
- ];
- let id_to_chalk = |id: hir_def::ImplId| id.to_chalk(self.db);
+ let mut def_blocks =
+ [trait_module.containing_block(), type_module.and_then(|it| it.containing_block())];
- let result: Vec<_> = if fps.is_empty() {
- debug!("Unrestricted search for {:?} impls...", trait_);
- impl_maps
- .iter()
- .filter_map(|o| o.as_ref())
- .flat_map(|impls| impls.for_trait(trait_).map(id_to_chalk))
- .collect()
- } else {
- impl_maps
- .iter()
- .filter_map(|o| o.as_ref())
- .flat_map(|impls| {
- fps.iter().flat_map(move |fp| {
- impls.for_trait_and_self_ty(trait_, *fp).map(id_to_chalk)
- })
- })
- .collect()
- };
+ // Note: Since we're using impls_for_trait, only impls where the trait
+ // can be resolved should ever reach Chalk. impl_datum relies on that
+ // and will panic if the trait can't be resolved.
+ let in_deps = self.db.trait_impls_in_deps(self.krate);
+ let in_self = self.db.trait_impls_in_crate(self.krate);
+
+ let block_impls = iter::successors(self.block, |&block_id| {
+ cov_mark::hit!(block_local_impls);
+ self.db.block_def_map(block_id).parent().and_then(|module| module.containing_block())
+ })
+ .inspect(|&block_id| {
+ // make sure we don't search the same block twice
+ def_blocks.iter_mut().for_each(|block| {
+ if *block == Some(block_id) {
+ *block = None;
+ }
+ });
+ })
+ .map(|block_id| self.db.trait_impls_in_block(block_id));
+
+ let id_to_chalk = |id: hir_def::ImplId| id.to_chalk(self.db);
+ let mut result = vec![];
+ match fps {
+ [] => {
+ debug!("Unrestricted search for {:?} impls...", trait_);
+ let mut f = |impls: &TraitImpls| {
+ result.extend(impls.for_trait(trait_).map(id_to_chalk));
+ };
+ f(&in_self);
+ in_deps.iter().map(ops::Deref::deref).for_each(&mut f);
+ block_impls.for_each(|it| f(&it));
+ def_blocks
+ .into_iter()
+ .flatten()
+ .for_each(|it| f(&self.db.trait_impls_in_block(it)));
+ }
+ fps => {
+ let mut f =
+ |impls: &TraitImpls| {
+ result.extend(fps.iter().flat_map(|fp| {
+ impls.for_trait_and_self_ty(trait_, *fp).map(id_to_chalk)
+ }));
+ };
+ f(&in_self);
+ in_deps.iter().map(ops::Deref::deref).for_each(&mut f);
+ block_impls.for_each(|it| f(&it));
+ def_blocks
+ .into_iter()
+ .flatten()
+ .for_each(|it| f(&self.db.trait_impls_in_block(it)));
+ }
+ }
debug!("impls_for_trait returned {} impls", result.len());
result
@@ -193,7 +211,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
&self,
environment: &chalk_ir::Environment<Interner>,
) -> chalk_ir::ProgramClauses<Interner> {
- self.db.program_clauses_for_chalk_env(self.krate, environment.clone())
+ self.db.program_clauses_for_chalk_env(self.krate, self.block, environment.clone())
}
fn opaque_ty_data(&self, id: chalk_ir::OpaqueTyId<Interner>) -> Arc<OpaqueTyDatum> {
@@ -321,7 +339,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
_closure_id: chalk_ir::ClosureId<Interner>,
substs: &chalk_ir::Substitution<Interner>,
) -> chalk_ir::Binders<rust_ir::FnDefInputsAndOutputDatum<Interner>> {
- let sig_ty = substs.at(Interner, 0).assert_ty_ref(Interner).clone();
+ let sig_ty = ClosureSubst(substs).sig_ty();
let sig = &sig_ty.callable_sig(self.db).expect("first closure param should be fn ptr");
let io = rust_ir::FnDefInputsAndOutputDatum {
argument_types: sig.params().to_vec(),
@@ -347,13 +365,19 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
fn trait_name(&self, trait_id: chalk_ir::TraitId<Interner>) -> String {
let id = from_chalk_trait_id(trait_id);
- self.db.trait_data(id).name.to_string()
+ self.db.trait_data(id).name.display(self.db.upcast()).to_string()
}
fn adt_name(&self, chalk_ir::AdtId(adt_id): AdtId) -> String {
match adt_id {
- hir_def::AdtId::StructId(id) => self.db.struct_data(id).name.to_string(),
- hir_def::AdtId::EnumId(id) => self.db.enum_data(id).name.to_string(),
- hir_def::AdtId::UnionId(id) => self.db.union_data(id).name.to_string(),
+ hir_def::AdtId::StructId(id) => {
+ self.db.struct_data(id).name.display(self.db.upcast()).to_string()
+ }
+ hir_def::AdtId::EnumId(id) => {
+ self.db.enum_data(id).name.display(self.db.upcast()).to_string()
+ }
+ hir_def::AdtId::UnionId(id) => {
+ self.db.union_data(id).name.display(self.db.upcast()).to_string()
+ }
}
}
fn adt_size_align(&self, _id: chalk_ir::AdtId<Interner>) -> Arc<rust_ir::AdtSizeAlign> {
@@ -362,7 +386,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
}
fn assoc_type_name(&self, assoc_ty_id: chalk_ir::AssocTypeId<Interner>) -> String {
let id = self.db.associated_ty_data(assoc_ty_id).name;
- self.db.type_alias_data(id).name.to_string()
+ self.db.type_alias_data(id).name.display(self.db.upcast()).to_string()
}
fn opaque_type_name(&self, opaque_ty_id: chalk_ir::OpaqueTyId<Interner>) -> String {
format!("Opaque_{}", opaque_ty_id.0)
@@ -373,7 +397,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
fn generator_datum(
&self,
id: chalk_ir::GeneratorId<Interner>,
- ) -> std::sync::Arc<chalk_solve::rust_ir::GeneratorDatum<Interner>> {
+ ) -> Arc<chalk_solve::rust_ir::GeneratorDatum<Interner>> {
let (parent, expr) = self.db.lookup_intern_generator(id.into());
// We fill substitution with unknown type, because we only need to know whether the generic
@@ -398,8 +422,8 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
let input_output = crate::make_type_and_const_binders(it, input_output);
let movability = match self.db.body(parent)[expr] {
- hir_def::expr::Expr::Closure {
- closure_kind: hir_def::expr::ClosureKind::Generator(movability),
+ hir_def::hir::Expr::Closure {
+ closure_kind: hir_def::hir::ClosureKind::Generator(movability),
..
} => movability,
_ => unreachable!("non generator expression interned as generator"),
@@ -414,7 +438,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
fn generator_witness_datum(
&self,
id: chalk_ir::GeneratorId<Interner>,
- ) -> std::sync::Arc<chalk_solve::rust_ir::GeneratorWitnessDatum<Interner>> {
+ ) -> Arc<chalk_solve::rust_ir::GeneratorWitnessDatum<Interner>> {
// FIXME: calculate inner types
let inner_types =
rust_ir::GeneratorWitnessExistential { types: wrap_empty_binders(vec![]) };
@@ -435,7 +459,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> {
}
}
-impl<'a> chalk_ir::UnificationDatabase<Interner> for &'a dyn HirDatabase {
+impl chalk_ir::UnificationDatabase<Interner> for &dyn HirDatabase {
fn fn_def_variance(
&self,
fn_def_id: chalk_ir::FnDefId<Interner>,
@@ -451,9 +475,10 @@ impl<'a> chalk_ir::UnificationDatabase<Interner> for &'a dyn HirDatabase {
pub(crate) fn program_clauses_for_chalk_env_query(
db: &dyn HirDatabase,
krate: CrateId,
+ block: Option<BlockId>,
environment: chalk_ir::Environment<Interner>,
) -> chalk_ir::ProgramClauses<Interner> {
- chalk_solve::program_clauses_for_env(&ChalkContext { db, krate }, &environment)
+ chalk_solve::program_clauses_for_env(&ChalkContext { db, krate, block }, &environment)
}
pub(crate) fn associated_ty_data_query(
@@ -472,7 +497,7 @@ pub(crate) fn associated_ty_data_query(
let generic_params = generics(db.upcast(), type_alias.into());
// let bound_vars = generic_params.bound_vars_subst(DebruijnIndex::INNERMOST);
let resolver = hir_def::resolver::HasResolver::resolver(type_alias, db.upcast());
- let ctx = crate::TyLoweringContext::new(db, &resolver)
+ let ctx = crate::TyLoweringContext::new(db, &resolver, type_alias.into())
.with_type_param_mode(crate::lower::ParamLoweringMode::Variable);
let trait_subst = TyBuilder::subst_for_def(db, trait_, None)
@@ -567,6 +592,7 @@ fn well_known_trait_from_lang_item(item: LangItem) -> Option<WellKnownTrait> {
LangItem::Unpin => WellKnownTrait::Unpin,
LangItem::Unsize => WellKnownTrait::Unsize,
LangItem::Tuple => WellKnownTrait::Tuple,
+ LangItem::PointeeTrait => WellKnownTrait::Pointee,
_ => return None,
})
}
@@ -587,6 +613,7 @@ fn lang_item_from_well_known_trait(trait_: WellKnownTrait) -> LangItem {
WellKnownTrait::Tuple => LangItem::Tuple,
WellKnownTrait::Unpin => LangItem::Unpin,
WellKnownTrait::Unsize => LangItem::Unsize,
+ WellKnownTrait::Pointee => LangItem::PointeeTrait,
}
}
@@ -786,17 +813,17 @@ pub(crate) fn adt_variance_query(
)
}
+/// Returns instantiated predicates.
pub(super) fn convert_where_clauses(
db: &dyn HirDatabase,
def: GenericDefId,
substs: &Substitution,
) -> Vec<chalk_ir::QuantifiedWhereClause<Interner>> {
- let generic_predicates = db.generic_predicates(def);
- let mut result = Vec::with_capacity(generic_predicates.len());
- for pred in generic_predicates.iter() {
- result.push(pred.clone().substitute(Interner, substs));
- }
- result
+ db.generic_predicates(def)
+ .iter()
+ .cloned()
+ .map(|pred| pred.substitute(Interner, substs))
+ .collect()
}
pub(super) fn generic_predicate_to_inline_bound(
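
The `impls_for_trait` rewrite above replaces the old fixed four-map lookup with a walk over every block enclosing the current one, so impls defined in nested blocks (e.g. inside function bodies) become visible to the trait solver. The walk itself is just `iter::successors` over the parent-block chain; a self-contained sketch of the same shape, with a stand-in `Block` type for hir-def's `BlockId`/`DefMap` machinery:

```rust
// Walk a chain of nested blocks from the innermost outwards, the same
// `iter::successors` pattern used in the new impls_for_trait.
struct Block {
    parent: Option<&'static Block>,
}

fn main() {
    static ROOT: Block = Block { parent: None };
    static MID: Block = Block { parent: Some(&ROOT) };
    static LEAF: Block = Block { parent: Some(&MID) };

    // Each step asks the current block for the block enclosing it;
    // the iterator ends when there is no enclosing block left.
    let visited = std::iter::successors(Some(&LEAF), |block| block.parent).count();
    assert_eq!(visited, 3); // LEAF, MID, ROOT
}
```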
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs
index 214189492..a8071591a 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs
@@ -1,24 +1,28 @@
//! Various extensions traits for Chalk types.
-use chalk_ir::{FloatTy, IntTy, Mutability, Scalar, TyVariableKind, UintTy};
+use chalk_ir::{cast::Cast, FloatTy, IntTy, Mutability, Scalar, TyVariableKind, UintTy};
use hir_def::{
builtin_type::{BuiltinFloat, BuiltinInt, BuiltinType, BuiltinUint},
generics::TypeOrConstParamData,
lang_item::LangItem,
type_ref::Rawness,
- FunctionId, GenericDefId, HasModule, ItemContainerId, Lookup, TraitId,
+ DefWithBodyId, FunctionId, GenericDefId, HasModule, ItemContainerId, Lookup, TraitId,
};
use crate::{
- db::HirDatabase, from_assoc_type_id, from_chalk_trait_id, from_foreign_def_id,
- from_placeholder_idx, to_chalk_trait_id, utils::generics, AdtId, AliasEq, AliasTy, Binders,
- CallableDefId, CallableSig, DynTy, FnPointer, ImplTraitId, Interner, Lifetime, ProjectionTy,
+ db::HirDatabase,
+ from_assoc_type_id, from_chalk_trait_id, from_foreign_def_id, from_placeholder_idx,
+ to_chalk_trait_id,
+ utils::{generics, ClosureSubst},
+ AdtId, AliasEq, AliasTy, Binders, CallableDefId, CallableSig, Canonical, CanonicalVarKinds,
+ ClosureId, DynTy, FnPointer, ImplTraitId, InEnvironment, Interner, Lifetime, ProjectionTy,
QuantifiedWhereClause, Substitution, TraitRef, Ty, TyBuilder, TyKind, TypeFlags, WhereClause,
};
pub trait TyExt {
fn is_unit(&self) -> bool;
fn is_integral(&self) -> bool;
+ fn is_scalar(&self) -> bool;
fn is_floating_point(&self) -> bool;
fn is_never(&self) -> bool;
fn is_unknown(&self) -> bool;
@@ -28,8 +32,10 @@ pub trait TyExt {
fn as_adt(&self) -> Option<(hir_def::AdtId, &Substitution)>;
fn as_builtin(&self) -> Option<BuiltinType>;
fn as_tuple(&self) -> Option<&Substitution>;
+ fn as_closure(&self) -> Option<ClosureId>;
fn as_fn_def(&self, db: &dyn HirDatabase) -> Option<FunctionId>;
fn as_reference(&self) -> Option<(&Ty, Lifetime, Mutability)>;
+ fn as_raw_ptr(&self) -> Option<(&Ty, Mutability)>;
fn as_reference_or_ptr(&self) -> Option<(&Ty, Rawness, Mutability)>;
fn as_generic_def(&self, db: &dyn HirDatabase) -> Option<GenericDefId>;
@@ -44,6 +50,7 @@ pub trait TyExt {
fn impl_trait_bounds(&self, db: &dyn HirDatabase) -> Option<Vec<QuantifiedWhereClause>>;
fn associated_type_parent_trait(&self, db: &dyn HirDatabase) -> Option<TraitId>;
+ fn is_copy(self, db: &dyn HirDatabase, owner: DefWithBodyId) -> bool;
/// FIXME: Get rid of this, it's not a good abstraction
fn equals_ctor(&self, other: &Ty) -> bool;
@@ -62,6 +69,10 @@ impl TyExt for Ty {
)
}
+ fn is_scalar(&self) -> bool {
+ matches!(self.kind(Interner), TyKind::Scalar(_))
+ }
+
fn is_floating_point(&self) -> bool {
matches!(
self.kind(Interner),
@@ -128,12 +139,20 @@ impl TyExt for Ty {
}
}
+ fn as_closure(&self) -> Option<ClosureId> {
+ match self.kind(Interner) {
+ TyKind::Closure(id, _) => Some(*id),
+ _ => None,
+ }
+ }
+
fn as_fn_def(&self, db: &dyn HirDatabase) -> Option<FunctionId> {
match self.callable_def(db) {
Some(CallableDefId::FunctionId(func)) => Some(func),
Some(CallableDefId::StructId(_) | CallableDefId::EnumVariantId(_)) | None => None,
}
}
+
fn as_reference(&self) -> Option<(&Ty, Lifetime, Mutability)> {
match self.kind(Interner) {
TyKind::Ref(mutability, lifetime, ty) => Some((ty, lifetime.clone(), *mutability)),
@@ -141,6 +160,13 @@ impl TyExt for Ty {
}
}
+ fn as_raw_ptr(&self) -> Option<(&Ty, Mutability)> {
+ match self.kind(Interner) {
+ TyKind::Raw(mutability, ty) => Some((ty, *mutability)),
+ _ => None,
+ }
+ }
+
fn as_reference_or_ptr(&self) -> Option<(&Ty, Rawness, Mutability)> {
match self.kind(Interner) {
TyKind::Ref(mutability, _, ty) => Some((ty, Rawness::Ref, *mutability)),
@@ -176,10 +202,7 @@ impl TyExt for Ty {
let sig = db.callable_item_signature(callable_def);
Some(sig.substitute(Interner, parameters))
}
- TyKind::Closure(.., substs) => {
- let sig_param = substs.at(Interner, 0).assert_ty_ref(Interner);
- sig_param.callable_sig(db)
- }
+ TyKind::Closure(.., substs) => ClosureSubst(substs).sig_ty().callable_sig(db),
_ => None,
}
}
@@ -318,6 +341,20 @@ impl TyExt for Ty {
}
}
+ fn is_copy(self, db: &dyn HirDatabase, owner: DefWithBodyId) -> bool {
+ let crate_id = owner.module(db.upcast()).krate();
+ let Some(copy_trait) = db.lang_item(crate_id, LangItem::Copy).and_then(|x| x.as_trait()) else {
+ return false;
+ };
+ let trait_ref = TyBuilder::trait_ref(db, copy_trait).push(self).build();
+ let env = db.trait_environment_for_body(owner);
+ let goal = Canonical {
+ value: InEnvironment::new(&env.env, trait_ref.cast(Interner)),
+ binders: CanonicalVarKinds::empty(Interner),
+ };
+ db.trait_solve(crate_id, None, goal).is_some()
+ }
+
fn equals_ctor(&self, other: &Ty) -> bool {
match (self.kind(Interner), other.kind(Interner)) {
(TyKind::Adt(adt, ..), TyKind::Adt(adt2, ..)) => adt == adt2,
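
The new `TyExt::is_copy` decides `Copy`-ness the same way the compiler does: it builds a `T: Copy` trait goal in the body's environment and hands it to the solver, rather than special-casing known types. The same question can be probed at compile time with an ordinary bound:

```rust
// `is_copy` answers, at analysis time, the question this bound asks the
// compiler at compile time.
fn assert_copy<T: Copy>() {}

fn main() {
    assert_copy::<i32>();       // scalars are Copy
    assert_copy::<&String>();   // shared references are Copy even if the pointee isn't
    // assert_copy::<String>(); // would not compile: String is not Copy
}
```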
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs b/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs
index 5830c4898..262341c6e 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs
@@ -3,19 +3,20 @@
use base_db::CrateId;
use chalk_ir::{BoundVar, DebruijnIndex, GenericArgData};
use hir_def::{
- expr::Expr,
- path::ModPath,
+ hir::Expr,
+ path::Path,
resolver::{Resolver, ValueNs},
- type_ref::ConstRef,
- ConstId, EnumVariantId,
+ type_ref::LiteralConstRef,
+ ConstBlockLoc, EnumVariantId, GeneralConstId, StaticId,
};
use la_arena::{Idx, RawIdx};
use stdx::never;
+use triomphe::Arc;
use crate::{
- db::HirDatabase, infer::InferenceContext, layout::layout_of_ty, lower::ParamLoweringMode,
- to_placeholder_idx, utils::Generics, Const, ConstData, ConstScalar, ConstValue, GenericArg,
- Interner, MemoryMap, Ty, TyBuilder,
+ db::HirDatabase, infer::InferenceContext, lower::ParamLoweringMode,
+ mir::monomorphize_mir_body_bad, to_placeholder_idx, utils::Generics, Const, ConstData,
+ ConstScalar, ConstValue, GenericArg, Interner, MemoryMap, Substitution, Ty, TyBuilder,
};
use super::mir::{interpret_mir, lower_to_mir, pad16, MirEvalError, MirLowerError};
@@ -57,7 +58,7 @@ pub enum ConstEvalError {
impl From<MirLowerError> for ConstEvalError {
fn from(value: MirLowerError) -> Self {
match value {
- MirLowerError::ConstEvalError(e) => *e,
+ MirLowerError::ConstEvalError(_, e) => *e,
_ => ConstEvalError::MirLowerError(value),
}
}
@@ -72,10 +73,11 @@ impl From<MirEvalError> for ConstEvalError {
pub(crate) fn path_to_const(
db: &dyn HirDatabase,
resolver: &Resolver,
- path: &ModPath,
+ path: &Path,
mode: ParamLoweringMode,
args_lazy: impl FnOnce() -> Generics,
debruijn: DebruijnIndex,
+ expected_ty: Ty,
) -> Option<Const> {
match resolver.resolve_path_in_value_ns_fully(db.upcast(), path) {
Some(ValueNs::GenericParam(p)) => {
@@ -89,7 +91,7 @@ pub(crate) fn path_to_const(
Some(x) => ConstValue::BoundVar(BoundVar::new(debruijn, x)),
None => {
never!(
- "Generic list doesn't contain this param: {:?}, {}, {:?}",
+ "Generic list doesn't contain this param: {:?}, {:?}, {:?}",
args,
path,
p
@@ -100,6 +102,10 @@ pub(crate) fn path_to_const(
};
Some(ConstData { ty, value }.intern(Interner))
}
+ Some(ValueNs::ConstId(c)) => Some(intern_const_scalar(
+ ConstScalar::UnevaluatedConst(c.into(), Substitution::empty(Interner)),
+ expected_ty,
+ )),
_ => None,
}
}
@@ -123,22 +129,28 @@ pub fn intern_const_scalar(value: ConstScalar, ty: Ty) -> Const {
}
/// Interns a constant scalar with the given type
-pub fn intern_const_ref(db: &dyn HirDatabase, value: &ConstRef, ty: Ty, krate: CrateId) -> Const {
+pub fn intern_const_ref(
+ db: &dyn HirDatabase,
+ value: &LiteralConstRef,
+ ty: Ty,
+ krate: CrateId,
+) -> Const {
+ let layout = db.layout_of_ty(ty.clone(), krate);
let bytes = match value {
- ConstRef::Int(i) => {
+ LiteralConstRef::Int(i) => {
// FIXME: We should handle failure of layout better.
- let size = layout_of_ty(db, &ty, krate).map(|x| x.size.bytes_usize()).unwrap_or(16);
+ let size = layout.map(|x| x.size.bytes_usize()).unwrap_or(16);
ConstScalar::Bytes(i.to_le_bytes()[0..size].to_vec(), MemoryMap::default())
}
- ConstRef::UInt(i) => {
- let size = layout_of_ty(db, &ty, krate).map(|x| x.size.bytes_usize()).unwrap_or(16);
+ LiteralConstRef::UInt(i) => {
+ let size = layout.map(|x| x.size.bytes_usize()).unwrap_or(16);
ConstScalar::Bytes(i.to_le_bytes()[0..size].to_vec(), MemoryMap::default())
}
- ConstRef::Bool(b) => ConstScalar::Bytes(vec![*b as u8], MemoryMap::default()),
- ConstRef::Char(c) => {
+ LiteralConstRef::Bool(b) => ConstScalar::Bytes(vec![*b as u8], MemoryMap::default()),
+ LiteralConstRef::Char(c) => {
ConstScalar::Bytes((*c as u32).to_le_bytes().to_vec(), MemoryMap::default())
}
- ConstRef::Unknown => ConstScalar::Unknown,
+ LiteralConstRef::Unknown => ConstScalar::Unknown,
};
intern_const_scalar(bytes, ty)
}
@@ -147,19 +159,23 @@ pub fn intern_const_ref(db: &dyn HirDatabase, value: &ConstRef, ty: Ty, krate: C
pub fn usize_const(db: &dyn HirDatabase, value: Option<u128>, krate: CrateId) -> Const {
intern_const_ref(
db,
- &value.map_or(ConstRef::Unknown, ConstRef::UInt),
+ &value.map_or(LiteralConstRef::Unknown, LiteralConstRef::UInt),
TyBuilder::usize(),
krate,
)
}
-pub fn try_const_usize(c: &Const) -> Option<u128> {
+pub fn try_const_usize(db: &dyn HirDatabase, c: &Const) -> Option<u128> {
match &c.data(Interner).value {
chalk_ir::ConstValue::BoundVar(_) => None,
chalk_ir::ConstValue::InferenceVar(_) => None,
chalk_ir::ConstValue::Placeholder(_) => None,
chalk_ir::ConstValue::Concrete(c) => match &c.interned {
ConstScalar::Bytes(x, _) => Some(u128::from_le_bytes(pad16(&x, false))),
+ ConstScalar::UnevaluatedConst(c, subst) => {
+ let ec = db.const_eval(*c, subst.clone()).ok()?;
+ try_const_usize(db, &ec)
+ }
_ => None,
},
}
@@ -168,7 +184,16 @@ pub fn try_const_usize(c: &Const) -> Option<u128> {
pub(crate) fn const_eval_recover(
_: &dyn HirDatabase,
_: &[String],
- _: &ConstId,
+ _: &GeneralConstId,
+ _: &Substitution,
+) -> Result<Const, ConstEvalError> {
+ Err(ConstEvalError::MirLowerError(MirLowerError::Loop))
+}
+
+pub(crate) fn const_eval_static_recover(
+ _: &dyn HirDatabase,
+ _: &[String],
+ _: &StaticId,
) -> Result<Const, ConstEvalError> {
Err(ConstEvalError::MirLowerError(MirLowerError::Loop))
}
@@ -183,11 +208,40 @@ pub(crate) fn const_eval_discriminant_recover(
pub(crate) fn const_eval_query(
db: &dyn HirDatabase,
- const_id: ConstId,
+ def: GeneralConstId,
+ subst: Substitution,
) -> Result<Const, ConstEvalError> {
- let def = const_id.into();
- let body = db.mir_body(def)?;
- let c = interpret_mir(db, &body, false)?;
+ let body = match def {
+ GeneralConstId::ConstId(c) => {
+ db.monomorphized_mir_body(c.into(), subst, db.trait_environment(c.into()))?
+ }
+ GeneralConstId::ConstBlockId(c) => {
+ let ConstBlockLoc { parent, root } = db.lookup_intern_anonymous_const(c);
+ let body = db.body(parent);
+ let infer = db.infer(parent);
+ Arc::new(monomorphize_mir_body_bad(
+ db,
+ lower_to_mir(db, parent, &body, &infer, root)?,
+ subst,
+ db.trait_environment_for_body(parent),
+ )?)
+ }
+ GeneralConstId::InTypeConstId(c) => db.mir_body(c.into())?,
+ };
+ let c = interpret_mir(db, &body, false).0?;
+ Ok(c)
+}
+
+pub(crate) fn const_eval_static_query(
+ db: &dyn HirDatabase,
+ def: StaticId,
+) -> Result<Const, ConstEvalError> {
+ let body = db.monomorphized_mir_body(
+ def.into(),
+ Substitution::empty(Interner),
+ db.trait_environment_for_body(def.into()),
+ )?;
+ let c = interpret_mir(db, &body, false).0?;
Ok(c)
}
@@ -209,9 +263,13 @@ pub(crate) fn const_eval_discriminant_variant(
};
return Ok(value);
}
- let mir_body = db.mir_body(def)?;
- let c = interpret_mir(db, &mir_body, false)?;
- let c = try_const_usize(&c).unwrap() as i128;
+ let mir_body = db.monomorphized_mir_body(
+ def,
+ Substitution::empty(Interner),
+ db.trait_environment_for_body(def),
+ )?;
+ let c = interpret_mir(db, &mir_body, false).0?;
+ let c = try_const_usize(db, &c).unwrap() as i128;
Ok(c)
}
@@ -226,15 +284,16 @@ pub(crate) fn eval_to_const(
debruijn: DebruijnIndex,
) -> Const {
let db = ctx.db;
+ let infer = ctx.clone().resolve_all();
if let Expr::Path(p) = &ctx.body.exprs[expr] {
let resolver = &ctx.resolver;
- if let Some(c) = path_to_const(db, resolver, p.mod_path(), mode, args, debruijn) {
+ if let Some(c) = path_to_const(db, resolver, p, mode, args, debruijn, infer[expr].clone()) {
return c;
}
}
let infer = ctx.clone().resolve_all();
if let Ok(mir_body) = lower_to_mir(ctx.db, ctx.owner, &ctx.body, &infer, expr) {
- if let Ok(result) = interpret_mir(db, &mir_body, true) {
+ if let Ok(result) = interpret_mir(db, &mir_body, true).0 {
return result;
}
}
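
`try_const_usize` now takes the database so it can force unevaluated constants before reading their bytes; the byte reading itself goes through `pad16`, which widens the little-endian representation to 16 bytes before `u128::from_le_bytes`. A hedged sketch of that widening step (this mirrors the helper's role, not its exact code):

```rust
// Widen a little-endian byte slice to 16 bytes so it can be read back as a
// 128-bit integer; `is_signed` controls sign extension of the fill byte.
fn pad16(b: &[u8], is_signed: bool) -> [u8; 16] {
    let fill = if is_signed && b.last().is_some_and(|&hi| hi & 0x80 != 0) {
        0xFF // negative value: extend with ones
    } else {
        0x00
    };
    let mut out = [fill; 16];
    out[..b.len()].copy_from_slice(b);
    out
}

fn main() {
    assert_eq!(u128::from_le_bytes(pad16(&5u8.to_le_bytes(), false)), 5);
    assert_eq!(i128::from_le_bytes(pad16(&(-2i8).to_le_bytes(), true)), -2);
}
```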
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs
index 6a29e8ce5..0db1fefbf 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs
@@ -1,8 +1,10 @@
-use base_db::fixture::WithFixture;
+use base_db::{fixture::WithFixture, FileId};
+use chalk_ir::Substitution;
use hir_def::db::DefDatabase;
use crate::{
- consteval::try_const_usize, db::HirDatabase, test_db::TestDB, Const, ConstScalar, Interner,
+ consteval::try_const_usize, db::HirDatabase, mir::pad16, test_db::TestDB, Const, ConstScalar,
+ Interner,
};
use super::{
@@ -10,9 +12,11 @@ use super::{
ConstEvalError,
};
+mod intrinsics;
+
fn simplify(e: ConstEvalError) -> ConstEvalError {
match e {
- ConstEvalError::MirEvalError(MirEvalError::InFunction(_, e)) => {
+ ConstEvalError::MirEvalError(MirEvalError::InFunction(_, e, _, _)) => {
simplify(ConstEvalError::MirEvalError(*e))
}
_ => e,
@@ -20,17 +24,35 @@ fn simplify(e: ConstEvalError) -> ConstEvalError {
}
#[track_caller]
-fn check_fail(ra_fixture: &str, error: ConstEvalError) {
- assert_eq!(eval_goal(ra_fixture).map_err(simplify), Err(error));
+fn check_fail(ra_fixture: &str, error: impl FnOnce(ConstEvalError) -> bool) {
+ let (db, file_id) = TestDB::with_single_file(ra_fixture);
+ match eval_goal(&db, file_id) {
+ Ok(_) => panic!("Expected fail, but it succeeded"),
+ Err(e) => {
+ assert!(error(simplify(e.clone())), "Actual error was: {}", pretty_print_err(e, db))
+ }
+ }
}
#[track_caller]
fn check_number(ra_fixture: &str, answer: i128) {
- let r = eval_goal(ra_fixture).unwrap();
+ let (db, file_id) = TestDB::with_single_file(ra_fixture);
+ let r = match eval_goal(&db, file_id) {
+ Ok(t) => t,
+ Err(e) => {
+ let err = pretty_print_err(e, db);
+ panic!("Error in evaluating goal: {}", err);
+ }
+ };
match &r.data(Interner).value {
chalk_ir::ConstValue::Concrete(c) => match &c.interned {
ConstScalar::Bytes(b, _) => {
- assert_eq!(b, &answer.to_le_bytes()[0..b.len()]);
+ assert_eq!(
+ b,
+ &answer.to_le_bytes()[0..b.len()],
+ "Bytes differ. In decimal form: actual = {}, expected = {answer}",
+ i128::from_le_bytes(pad16(b, true))
+ );
}
x => panic!("Expected number but found {:?}", x),
},
@@ -38,16 +60,26 @@ fn check_number(ra_fixture: &str, answer: i128) {
}
}
-fn eval_goal(ra_fixture: &str) -> Result<Const, ConstEvalError> {
- let (db, file_id) = TestDB::with_single_file(ra_fixture);
+fn pretty_print_err(e: ConstEvalError, db: TestDB) -> String {
+ let mut err = String::new();
+ let span_formatter = |file, range| format!("{:?} {:?}", file, range);
+ match e {
+ ConstEvalError::MirLowerError(e) => e.pretty_print(&mut err, &db, span_formatter),
+ ConstEvalError::MirEvalError(e) => e.pretty_print(&mut err, &db, span_formatter),
+ }
+ .unwrap();
+ err
+}
+
+fn eval_goal(db: &TestDB, file_id: FileId) -> Result<Const, ConstEvalError> {
let module_id = db.module_for_file(file_id);
- let def_map = module_id.def_map(&db);
+ let def_map = module_id.def_map(db);
let scope = &def_map[module_id.local_id].scope;
let const_id = scope
.declarations()
.find_map(|x| match x {
hir_def::ModuleDefId::ConstId(x) => {
- if db.const_data(x).name.as_ref()?.to_string() == "GOAL" {
+ if db.const_data(x).name.as_ref()?.display(db).to_string() == "GOAL" {
Some(x)
} else {
None
@@ -56,7 +88,7 @@ fn eval_goal(ra_fixture: &str) -> Result<Const, ConstEvalError> {
_ => None,
})
.unwrap();
- db.const_eval(const_id)
+ db.const_eval(const_id.into(), Substitution::empty(Interner))
}
#[test]
@@ -72,8 +104,98 @@ fn bit_op() {
check_number(r#"const GOAL: u8 = !0 & !(!0 >> 1)"#, 128);
check_number(r#"const GOAL: i8 = !0 & !(!0 >> 1)"#, 0);
check_number(r#"const GOAL: i8 = 1 << 7"#, (1i8 << 7) as i128);
- // FIXME: report panic here
- check_number(r#"const GOAL: i8 = 1 << 8"#, 0);
+ check_number(r#"const GOAL: i8 = -1 << 2"#, (-1i8 << 2) as i128);
+ check_fail(r#"const GOAL: i8 = 1 << 8"#, |e| {
+ e == ConstEvalError::MirEvalError(MirEvalError::Panic("Overflow in Shl".to_string()))
+ });
+}
+
+#[test]
+fn floating_point() {
+ check_number(
+ r#"const GOAL: f64 = 2.0 + 3.0 * 5.5 - 8.;"#,
+ i128::from_le_bytes(pad16(&f64::to_le_bytes(10.5), true)),
+ );
+ check_number(
+ r#"const GOAL: f32 = 2.0 + 3.0 * 5.5 - 8.;"#,
+ i128::from_le_bytes(pad16(&f32::to_le_bytes(10.5), true)),
+ );
+ check_number(
+ r#"const GOAL: f32 = -90.0 + 36.0;"#,
+ i128::from_le_bytes(pad16(&f32::to_le_bytes(-54.0), true)),
+ );
+}
+
+#[test]
+fn casts() {
+ check_number(r#"const GOAL: usize = 12 as *const i32 as usize"#, 12);
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const GOAL: i32 = {
+ let a = [10, 20, 3, 15];
+ let x: &[i32] = &a;
+ let y: *const [i32] = x;
+ let z = y as *const i32;
+ unsafe { *z }
+ };
+ "#,
+ 10,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const GOAL: i16 = {
+ let a = &mut 5;
+ let z = a as *mut _;
+ unsafe { *z }
+ };
+ "#,
+ 5,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const GOAL: usize = {
+ let a = &[10, 20, 30, 40] as &[i32];
+ a.len()
+ };
+ "#,
+ 4,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const GOAL: usize = {
+ let a = [10, 20, 3, 15];
+ let x: &[i32] = &a;
+ let y: *const [i32] = x;
+ let z = y as *const [u8]; // slice fat pointer cast don't touch metadata
+ let q = z as *const str;
+ let p = q as *const [u8];
+ let w = unsafe { &*z };
+ w.len()
+ };
+ "#,
+ 4,
+ );
+ check_number(r#"const GOAL: i32 = -12i8 as i32"#, -12);
+}
+
+#[test]
+fn raw_pointer_equality() {
+ check_number(
+ r#"
+ //- minicore: copy, eq
+ const GOAL: bool = {
+ let a = 2;
+ let p1 = a as *const i32;
+ let p2 = a as *const i32;
+ p1 == p2
+ };
+ "#,
+ 1,
+ );
}
#[test]
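
The shift test now expects a real overflow report rather than the old silent wrap to 0: shifting an `i8` by its full bit width is an arithmetic overflow that rustc itself panics on in debug builds. The checked API makes the boundary visible:

```rust
fn main() {
    // Shifting by 7 is the widest legal shift for an i8...
    assert_eq!(1i8.checked_shl(7), Some(i8::MIN)); // 1 << 7 == -128
    // ...while shifting by the full bit width (8) is an overflow, which the
    // const evaluator now reports as "Overflow in Shl" instead of returning 0.
    assert_eq!(1i8.checked_shl(8), None);
}
```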
@@ -166,8 +288,7 @@ fn reference_autoderef() {
#[test]
fn overloaded_deref() {
- // FIXME: We should support this.
- check_fail(
+ check_number(
r#"
//- minicore: deref_mut
struct Foo;
@@ -185,9 +306,7 @@ fn overloaded_deref() {
*y + *x
};
"#,
- ConstEvalError::MirLowerError(MirLowerError::NotSupported(
- "explicit overloaded deref".into(),
- )),
+ 10,
);
}
@@ -219,6 +338,117 @@ fn overloaded_deref_autoref() {
}
#[test]
+fn overloaded_index() {
+ check_number(
+ r#"
+ //- minicore: index
+ struct Foo;
+
+ impl core::ops::Index<usize> for Foo {
+ type Output = i32;
+ fn index(&self, index: usize) -> &i32 {
+ if index == 7 {
+ &700
+ } else {
+ &1000
+ }
+ }
+ }
+
+ impl core::ops::IndexMut<usize> for Foo {
+ fn index_mut(&mut self, index: usize) -> &mut i32 {
+ if index == 7 {
+ &mut 7
+ } else {
+ &mut 10
+ }
+ }
+ }
+
+ const GOAL: i32 = {
+ (Foo[2]) + (Foo[7]) + (*&Foo[2]) + (*&Foo[7]) + (*&mut Foo[2]) + (*&mut Foo[7])
+ };
+ "#,
+ 3417,
+ );
+}
+
+#[test]
+fn overloaded_binop() {
+ check_number(
+ r#"
+ //- minicore: add
+ enum Color {
+ Red,
+ Green,
+ Yellow,
+ }
+
+ use Color::*;
+
+ impl core::ops::Add for Color {
+ type Output = Color;
+ fn add(self, rhs: Color) -> Self::Output {
+ Yellow
+ }
+ }
+
+ impl core::ops::AddAssign for Color {
+ fn add_assign(&mut self, rhs: Color) {
+ *self = Red;
+ }
+ }
+
+ const GOAL: bool = {
+ let x = Red + Green;
+ let mut y = Green;
+ y += x;
+ x == Yellow && y == Red && Red + Green == Yellow && Red + Red == Yellow && Yellow + Green == Yellow
+ };
+ "#,
+ 1,
+ );
+ check_number(
+ r#"
+ //- minicore: add
+ impl core::ops::Add for usize {
+ type Output = usize;
+ fn add(self, rhs: usize) -> Self::Output {
+ self + rhs
+ }
+ }
+
+ impl core::ops::AddAssign for usize {
+ fn add_assign(&mut self, rhs: usize) {
+ *self += rhs;
+ }
+ }
+
+ #[lang = "shl"]
+ pub trait Shl<Rhs = Self> {
+ type Output;
+
+ fn shl(self, rhs: Rhs) -> Self::Output;
+ }
+
+ impl Shl<u8> for usize {
+ type Output = usize;
+
+ fn shl(self, rhs: u8) -> Self::Output {
+ self << rhs
+ }
+ }
+
+ const GOAL: usize = {
+ let mut x = 10;
+ x += 20;
+ 2 + 2 + (x << 1u8)
+ };"#,
+ 64,
+ );
+}
+
+#[test]
fn function_call() {
check_number(
r#"
@@ -241,20 +471,6 @@ fn function_call() {
}
#[test]
-fn intrinsics() {
- check_number(
- r#"
- extern "rust-intrinsic" {
- pub fn size_of<T>() -> usize;
- }
-
- const GOAL: usize = size_of::<i32>();
- "#,
- 4,
- );
-}
-
-#[test]
fn trait_basic() {
check_number(
r#"
@@ -301,6 +517,35 @@ fn trait_method() {
}
#[test]
+fn trait_method_inside_block() {
+ check_number(
+ r#"
+trait Twait {
+ fn a(&self) -> i32;
+}
+
+fn outer() -> impl Twait {
+ struct Stwuct;
+
+ impl Twait for Stwuct {
+ fn a(&self) -> i32 {
+ 5
+ }
+ }
+ fn f() -> impl Twait {
+ let s = Stwuct;
+ s
+ }
+ f()
+}
+
+const GOAL: i32 = outer().a();
+ "#,
+ 5,
+ );
+}
+
+#[test]
fn generic_fn() {
check_number(
r#"
@@ -357,6 +602,16 @@ fn generic_fn() {
);
check_number(
r#"
+ const fn y<T>(b: T) -> (T, ) {
+ let alloc = b;
+ (alloc, )
+ }
+ const GOAL: u8 = y(2).0;
+ "#,
+ 2,
+ );
+ check_number(
+ r#"
//- minicore: coerce_unsized, index, slice
fn bar<A, B>(a: A, b: B) -> B {
b
@@ -483,6 +738,66 @@ fn loops() {
"#,
4,
);
+ check_number(
+ r#"
+ const GOAL: u8 = {
+ let mut x = 0;
+ loop {
+ x = x + 1;
+ if x == 5 {
+ break x + 2;
+ }
+ }
+ };
+ "#,
+ 7,
+ );
+ check_number(
+ r#"
+ const GOAL: u8 = {
+ 'a: loop {
+ let x = 'b: loop {
+ let x = 'c: loop {
+ let x = 'd: loop {
+ let x = 'e: loop {
+ break 'd 1;
+ };
+ break 2 + x;
+ };
+ break 3 + x;
+ };
+ break 'a 4 + x;
+ };
+ break 5 + x;
+ }
+ };
+ "#,
+ 8,
+ );
+ check_number(
+ r#"
+ //- minicore: add
+ const GOAL: u8 = {
+ let mut x = 0;
+ 'a: loop {
+ 'b: loop {
+ 'c: while x < 20 {
+ 'd: while x < 5 {
+ 'e: loop {
+ x += 1;
+ continue 'c;
+ };
+ };
+ x += 1;
+ };
+ break 'a;
+ };
+ }
+ x
+ };
+ "#,
+ 20,
+ );
}
#[test]
@@ -523,6 +838,18 @@ fn for_loops() {
}
#[test]
+fn ranges() {
+ check_number(
+ r#"
+ //- minicore: range
+ const GOAL: i32 = (1..2).start + (20..10).end + (100..=200).start + (2000..=1000).end
+ + (10000..).start + (..100000).end + (..=1000000).end;
+ "#,
+ 1111111,
+ );
+}
+
+#[test]
fn recursion() {
check_number(
r#"
@@ -555,6 +882,38 @@ fn structs() {
"#,
17,
);
+ check_number(
+ r#"
+ struct Point {
+ x: i32,
+ y: i32,
+ }
+
+ const GOAL: i32 = {
+ let p = Point { x: 5, y: 2 };
+ let p2 = Point { x: 3, ..p };
+ p.x * 1000 + p.y * 100 + p2.x * 10 + p2.y
+ };
+ "#,
+ 5232,
+ );
+ check_number(
+ r#"
+ struct Point {
+ x: i32,
+ y: i32,
+ }
+
+ const GOAL: i32 = {
+ let p = Point { x: 5, y: 2 };
+ let Point { x, y } = p;
+ let Point { x: x2, .. } = p;
+ let Point { y: y2, .. } = p;
+ x * 1000 + y * 100 + x2 * 10 + y2
+ };
+ "#,
+ 5252,
+ );
}
#[test]
@@ -599,13 +958,14 @@ fn tuples() {
);
check_number(
r#"
- struct TupleLike(i32, u8, i64, u16);
- const GOAL: u8 = {
+ struct TupleLike(i32, i64, u8, u16);
+ const GOAL: i64 = {
let a = TupleLike(10, 20, 3, 15);
- a.1
+ let TupleLike(b, .., c) = a;
+ a.1 * 100 + b as i64 + c as i64
};
"#,
- 20,
+ 2025,
);
check_number(
r#"
@@ -638,11 +998,17 @@ fn path_pattern_matching() {
use Season::*;
+ const MY_SEASON: Season = Summer;
+
+ impl Season {
+ const FALL: Season = Fall;
+ }
+
const fn f(x: Season) -> i32 {
match x {
Spring => 1,
- Summer => 2,
- Fall => 3,
+ MY_SEASON => 2,
+ Season::FALL => 3,
Winter => 4,
}
}
@@ -653,6 +1019,91 @@ fn path_pattern_matching() {
}
#[test]
+fn pattern_matching_literal() {
+ check_number(
+ r#"
+ const fn f(x: i32) -> i32 {
+ match x {
+ -1 => 1,
+ 1 => 10,
+ _ => 100,
+ }
+ }
+ const GOAL: i32 = f(-1) + f(1) + f(0) + f(-5);
+ "#,
+ 211,
+ );
+ check_number(
+ r#"
+ const fn f(x: &str) -> i32 {
+ match x {
+ "f" => 1,
+ "foo" => 10,
+ "" => 100,
+ "bar" => 1000,
+ _ => 10000,
+ }
+ }
+ const GOAL: i32 = f("f") + f("foo") * 2 + f("") * 3 + f("bar") * 4;
+ "#,
+ 4321,
+ );
+}
+
+#[test]
+fn pattern_matching_range() {
+ check_number(
+ r#"
+ pub const L: i32 = 6;
+ mod x {
+ pub const R: i32 = 100;
+ }
+ const fn f(x: i32) -> i32 {
+ match x {
+ -1..=5 => x * 10,
+ L..=x::R => x * 100,
+ _ => x,
+ }
+ }
+ const GOAL: i32 = f(-1) + f(2) + f(100) + f(-2) + f(1000);
+ "#,
+ 11008,
+ );
+}
+
+#[test]
+fn pattern_matching_slice() {
+ check_number(
+ r#"
+ //- minicore: slice, index, coerce_unsized, copy
+ const fn f(x: &[usize]) -> usize {
+ match x {
+ [a, b @ .., c, d] => *a + b.len() + *c + *d,
+ }
+ }
+ const GOAL: usize = f(&[10, 20, 3, 15, 1000, 60, 16]);
+ "#,
+ 10 + 4 + 60 + 16,
+ );
+ check_number(
+ r#"
+ //- minicore: slice, index, coerce_unsized, copy
+ const fn f(x: &[usize]) -> usize {
+ match x {
+ [] => 0,
+ [a] => *a,
+ &[a, b] => a + b,
+ [a, b @ .., c, d] => *a + b.len() + *c + *d,
+ }
+ }
+ const GOAL: usize = f(&[]) + f(&[10]) + f(&[100, 100])
+ + f(&[1000, 1000, 1000]) + f(&[10000, 57, 34, 46, 10000, 10000]);
+ "#,
+ 33213,
+ );
+}
+
+#[test]
fn pattern_matching_ergonomics() {
check_number(
r#"
@@ -665,6 +1116,16 @@ fn pattern_matching_ergonomics() {
"#,
5,
);
+ check_number(
+ r#"
+ const GOAL: u8 = {
+ let a = &(2, 3);
+ let &(x, y) = a;
+ x + y
+ };
+ "#,
+ 5,
+ );
}
#[test]
@@ -749,6 +1210,77 @@ fn function_param_patterns() {
}
#[test]
+fn match_guards() {
+ check_number(
+ r#"
+ //- minicore: option
+ fn f(x: Option<i32>) -> i32 {
+ match x {
+ y if let Some(42) = y => 42000,
+ Some(y) => y,
+ None => 10
+ }
+ }
+ const GOAL: i32 = f(Some(42)) + f(Some(2)) + f(None);
+ "#,
+ 42012,
+ );
+}
+
+#[test]
+fn result_layout_niche_optimization() {
+ check_number(
+ r#"
+ //- minicore: option, result
+ const GOAL: i32 = match Some(2).ok_or(Some(2)) {
+ Ok(x) => x,
+ Err(_) => 1000,
+ };
+ "#,
+ 2,
+ );
+ check_number(
+ r#"
+ //- minicore: result
+ pub enum AlignmentEnum64 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ }
+ const GOAL: Result<AlignmentEnum64, ()> = {
+ let align = Err(());
+ align
+ };
+ "#,
+ 0, // It is 0 since result is niche encoded and 1 is valid for `AlignmentEnum64`
+ );
+ check_number(
+ r#"
+ //- minicore: result
+ pub enum AlignmentEnum64 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ }
+ const GOAL: i32 = {
+ let align = Ok::<_, ()>(AlignmentEnum64::_Align1Shl0);
+ match align {
+ Ok(_) => 2,
+ Err(_) => 1,
+ }
+ };
+ "#,
+ 2,
+ );
+}
+
+#[test]
fn options() {
check_number(
r#"
@@ -802,6 +1334,253 @@ fn options() {
}
#[test]
+fn from_trait() {
+ check_number(
+ r#"
+ //- minicore: from
+ struct E1(i32);
+ struct E2(i32);
+
+ impl From<E1> for E2 {
+ fn from(E1(x): E1) -> Self {
+ E2(1000 * x)
+ }
+ }
+ const GOAL: i32 = {
+ let x: E2 = E1(2).into();
+ x.0
+ };
+ "#,
+ 2000,
+ );
+}
+
+#[test]
+fn builtin_derive_macro() {
+ check_number(
+ r#"
+ //- minicore: clone, derive, builtin_impls
+ #[derive(Clone)]
+ enum Z {
+ Foo(Y),
+ Bar,
+ }
+ #[derive(Clone)]
+ struct X(i32, Z, i64)
+ #[derive(Clone)]
+ struct Y {
+ field1: i32,
+ field2: u8,
+ }
+
+ const GOAL: u8 = {
+ let x = X(2, Z::Foo(Y { field1: 4, field2: 5 }), 8);
+ let x = x.clone();
+ let Z::Foo(t) = x.1;
+ t.field2
+ };
+ "#,
+ 5,
+ );
+ check_number(
+ r#"
+ //- minicore: default, derive, builtin_impls
+ #[derive(Default)]
+ struct X(i32, Y, i64)
+ #[derive(Default)]
+ struct Y {
+ field1: i32,
+ field2: u8,
+ }
+
+ const GOAL: u8 = {
+ let x = X::default();
+ x.1.field2
+ };
+ "#,
+ 0,
+ );
+}
+
+#[test]
+fn try_operator() {
+ check_number(
+ r#"
+ //- minicore: option, try
+ const fn f(x: Option<i32>, y: Option<i32>) -> Option<i32> {
+ Some(x? * y?)
+ }
+ const fn g(x: Option<i32>, y: Option<i32>) -> i32 {
+ match f(x, y) {
+ Some(k) => k,
+ None => 5,
+ }
+ }
+ const GOAL: i32 = g(Some(10), Some(20)) + g(Some(30), None) + g(None, Some(40)) + g(None, None);
+ "#,
+ 215,
+ );
+ check_number(
+ r#"
+ //- minicore: result, try, from
+ struct E1(i32);
+ struct E2(i32);
+
+ impl From<E1> for E2 {
+ fn from(E1(x): E1) -> Self {
+ E2(1000 * x)
+ }
+ }
+
+ const fn f(x: Result<i32, E1>) -> Result<i32, E2> {
+ Ok(x? * 10)
+ }
+ const fn g(x: Result<i32, E1>) -> i32 {
+ match f(x) {
+ Ok(k) => 7 * k,
+ Err(E2(k)) => 5 * k,
+ }
+ }
+ const GOAL: i32 = g(Ok(2)) + g(Err(E1(3)));
+ "#,
+ 15140,
+ );
+}
+
+#[test]
+fn try_block() {
+ check_number(
+ r#"
+ //- minicore: option, try
+ const fn g(x: Option<i32>, y: Option<i32>) -> i32 {
+ let r = try { x? * y? };
+ match r {
+ Some(k) => k,
+ None => 5,
+ }
+ }
+ const GOAL: i32 = g(Some(10), Some(20)) + g(Some(30), None) + g(None, Some(40)) + g(None, None);
+ "#,
+ 215,
+ );
+}
+
+#[test]
+fn closures() {
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ const GOAL: i32 = {
+ let y = 5;
+ let c = |x| x + y;
+ c(2)
+ };
+ "#,
+ 7,
+ );
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ const GOAL: i32 = {
+ let y = 5;
+ let c = |(a, b): &(i32, i32)| *a + *b + y;
+ c(&(2, 3))
+ };
+ "#,
+ 10,
+ );
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ const GOAL: i32 = {
+ let mut y = 5;
+ let c = |x| {
+ y = y + x;
+ };
+ c(2);
+ c(3);
+ y
+ };
+ "#,
+ 10,
+ );
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ const GOAL: i32 = {
+ let c: fn(i32) -> i32 = |x| 2 * x;
+ c(2) + c(10)
+ };
+ "#,
+ 24,
+ );
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ struct X(i32);
+ impl X {
+ fn mult(&mut self, n: i32) {
+ self.0 = self.0 * n
+ }
+ }
+ const GOAL: i32 = {
+ let x = X(1);
+ let c = || {
+ x.mult(2);
+ || {
+ x.mult(3);
+ || {
+ || {
+ x.mult(4);
+ || {
+ x.mult(x.0);
+ || {
+ x.0
+ }
+ }
+ }
+ }
+ }
+ };
+ let r = c()()()()()();
+ r + x.0
+ };
+ "#,
+ 24 * 24 * 2,
+ );
+}
+
+#[test]
+fn closure_and_impl_fn() {
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ fn closure_wrapper<F: FnOnce() -> i32>(c: F) -> impl FnOnce() -> F {
+ || c
+ }
+
+ const GOAL: i32 = {
+ let y = 5;
+ let c = closure_wrapper(|| y);
+ c()()
+ };
+ "#,
+ 5,
+ );
+ check_number(
+ r#"
+ //- minicore: fn, copy
+ fn f<T, F: Fn() -> T>(t: F) -> impl Fn() -> T {
+ move || t()
+ }
+
+ const GOAL: i32 = f(|| 2)();
+ "#,
+ 2,
+ );
+}
+
+#[test]
fn or_pattern() {
check_number(
r#"
@@ -840,6 +1619,282 @@ fn or_pattern() {
}
#[test]
+fn function_pointer_in_constants() {
+ check_number(
+ r#"
+ struct Foo {
+ f: fn(u8) -> u8,
+ }
+ const FOO: Foo = Foo { f: add2 };
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ const GOAL: u8 = (FOO.f)(3);
+ "#,
+ 5,
+ );
+}
+
+#[test]
+fn function_pointer() {
+ check_number(
+ r#"
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ const GOAL: u8 = {
+ let plus2 = add2;
+ plus2(3)
+ };
+ "#,
+ 5,
+ );
+ check_number(
+ r#"
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ const GOAL: u8 = {
+ let plus2: fn(u8) -> u8 = add2;
+ plus2(3)
+ };
+ "#,
+ 5,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ fn mult3(x: u8) -> u8 {
+ x * 3
+ }
+ const GOAL: u8 = {
+ let x = [add2, mult3];
+ x[0](1) + x[1](5)
+ };
+ "#,
+ 18,
+ );
+}
+
+#[test]
+fn enum_variant_as_function() {
+ check_number(
+ r#"
+ //- minicore: option
+ const GOAL: u8 = {
+ let f = Some;
+ f(3).unwrap_or(2)
+ };
+ "#,
+ 3,
+ );
+ check_number(
+ r#"
+ //- minicore: option
+ const GOAL: u8 = {
+ let f: fn(u8) -> Option<u8> = Some;
+ f(3).unwrap_or(2)
+ };
+ "#,
+ 3,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ enum Foo {
+ Add2(u8),
+ Mult3(u8),
+ }
+ use Foo::*;
+ const fn f(x: Foo) -> u8 {
+ match x {
+ Add2(x) => x + 2,
+ Mult3(x) => x * 3,
+ }
+ }
+ const GOAL: u8 = {
+ let x = [Add2, Mult3];
+ f(x[0](1)) + f(x[1](5))
+ };
+ "#,
+ 18,
+ );
+}
+
+#[test]
+fn function_traits() {
+ check_number(
+ r#"
+ //- minicore: fn
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ fn call(f: impl Fn(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ fn call_mut(mut f: impl FnMut(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ fn call_once(f: impl FnOnce(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ const GOAL: u8 = call(add2, 3) + call_mut(add2, 3) + call_once(add2, 3);
+ "#,
+ 15,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, fn
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ fn call(f: &dyn Fn(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ fn call_mut(f: &mut dyn FnMut(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ const GOAL: u8 = call(&add2, 3) + call_mut(&mut add2, 3);
+ "#,
+ 10,
+ );
+ check_number(
+ r#"
+ //- minicore: fn
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ fn call(f: impl Fn(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ fn call_mut(mut f: impl FnMut(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ fn call_once(f: impl FnOnce(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ const GOAL: u8 = {
+ let add2: fn(u8) -> u8 = add2;
+ call(add2, 3) + call_mut(add2, 3) + call_once(add2, 3)
+ };
+ "#,
+ 15,
+ );
+ check_number(
+ r#"
+ //- minicore: fn
+ fn add2(x: u8) -> u8 {
+ x + 2
+ }
+ fn call(f: &&&&&impl Fn(u8) -> u8, x: u8) -> u8 {
+ f(x)
+ }
+ const GOAL: u8 = call(&&&&&add2, 3);
+ "#,
+ 5,
+ );
+}
+
+#[test]
+fn dyn_trait() {
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ trait Foo {
+ fn foo(&self) -> u8 { 10 }
+ }
+ struct S1;
+ struct S2;
+ struct S3;
+ impl Foo for S1 {
+ fn foo(&self) -> u8 { 1 }
+ }
+ impl Foo for S2 {
+ fn foo(&self) -> u8 { 2 }
+ }
+ impl Foo for S3 {}
+ const GOAL: u8 = {
+ let x: &[&dyn Foo] = &[&S1, &S2, &S3];
+ x[0].foo() + x[1].foo() + x[2].foo()
+ };
+ "#,
+ 13,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ trait Foo {
+ fn foo(&self) -> i32 { 10 }
+ }
+ trait Bar {
+ fn bar(&self) -> i32 { 20 }
+ }
+
+ struct S;
+ impl Foo for S {
+ fn foo(&self) -> i32 { 200 }
+ }
+ impl Bar for dyn Foo {
+ fn bar(&self) -> i32 { 700 }
+ }
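+ // x.bar() uses the impl on `dyn Foo` (700) while x.foo() dispatches through the vtable to S (200): 900.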
+ const GOAL: i32 = {
+ let x: &dyn Foo = &S;
+ x.bar() + x.foo()
+ };
+ "#,
+ 900,
+ );
+}
+
+#[test]
+fn boxes() {
+ check_number(
+ r#"
+//- minicore: coerce_unsized, deref_mut, slice
+use core::ops::{Deref, DerefMut};
+use core::{marker::Unsize, ops::CoerceUnsized};
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized> {
+ inner: *mut T,
+}
+impl<T> Box<T> {
+ fn new(t: T) -> Self {
+ #[rustc_box]
+ Box::new(t)
+ }
+}
+
+impl<T: ?Sized> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &**self
+ }
+}
+
+impl<T: ?Sized> DerefMut for Box<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+const GOAL: usize = {
+ let x = Box::new(5);
+ let y: Box<[i32]> = Box::new([1, 2, 3]);
+ *x + y.len()
+};
+"#,
+ 8,
+ );
+}
+
+#[test]
fn array_and_index() {
check_number(
r#"
@@ -867,9 +1922,42 @@ fn array_and_index() {
check_number(
r#"
//- minicore: coerce_unsized, index, slice
+ const GOAL: usize = {
+ let a = [1, 2, 3];
+ let x: &[i32] = &a;
+ let y = &*x;
+ y.len()
+ };"#,
+ 3,
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
const GOAL: usize = [1, 2, 3, 4, 5].len();"#,
5,
);
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const GOAL: [u16; 5] = [1, 2, 3, 4, 5];"#,
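+ // check_number compares the little-endian byte image of the [u16; 5]: element i lands at bit 16 * i.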
+ 1 + (2 << 16) + (3 << 32) + (4 << 48) + (5 << 64),
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const GOAL: [u16; 5] = [12; 5];"#,
+ 12 + (12 << 16) + (12 << 32) + (12 << 48) + (12 << 64),
+ );
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const LEN: usize = 4;
+ const GOAL: u16 = {
+ let x = [7; LEN];
+ x[2]
+ }"#,
+ 7,
+ );
}
#[test]
@@ -888,6 +1976,38 @@ fn byte_string() {
}
#[test]
+fn c_string() {
+ check_number(
+ r#"
+//- minicore: index, slice
+#[lang = "CStr"]
+pub struct CStr {
+ inner: [u8]
+}
+const GOAL: u8 = {
+ let a = c"hello";
+ a.inner[0]
+};
+ "#,
+ 104,
+ );
+ check_number(
+ r#"
+//- minicore: index, slice
+#[lang = "CStr"]
+pub struct CStr {
+ inner: [u8]
+}
+const GOAL: u8 = {
+ let a = c"hello";
+ a.inner[6]
+};
+ "#,
+ 0,
+ );
+}
+
+#[test]
fn consts() {
check_number(
r#"
@@ -901,6 +2021,48 @@ fn consts() {
}
#[test]
+fn statics() {
+ check_number(
+ r#"
+ //- minicore: cell
+ use core::cell::Cell;
+ fn f() -> i32 {
+ static S: Cell<i32> = Cell::new(10);
+ S.set(S.get() + 1);
+ S.get()
+ }
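+ // S is initialized once and persists across calls: 11 + 12 + 13 = 36.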
+ const GOAL: i32 = f() + f() + f();
+ "#,
+ 36,
+ );
+}
+
+#[test]
+fn extern_weak_statics() {
+ check_number(
+ r#"
+ extern "C" {
+ #[linkage = "extern_weak"]
+ static __dso_handle: *mut u8;
+ }
+ const GOAL: usize = __dso_handle as usize;
+ "#,
+ 0,
+ );
+}
+
+#[test]
+fn from_ne_bytes() {
+ check_number(
+ r#"
+//- minicore: int_impl
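+// The expected value assumes a little-endian byte order: 44 + 1 * 256 = 300.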
+const GOAL: u32 = u32::from_ne_bytes([44, 1, 0, 0]);
+ "#,
+ 300,
+ );
+}
+
+#[test]
fn enums() {
check_number(
r#"
@@ -927,14 +2089,14 @@ fn enums() {
"#,
0,
);
- let r = eval_goal(
+ let (db, file_id) = TestDB::with_single_file(
r#"
enum E { A = 1, B }
const GOAL: E = E::A;
"#,
- )
- .unwrap();
- assert_eq!(try_const_usize(&r), Some(1));
+ );
+ let r = eval_goal(&db, file_id).unwrap();
+ assert_eq!(try_const_usize(&db, &r), Some(1));
}
#[test]
@@ -946,7 +2108,7 @@ fn const_loop() {
const F2: i32 = 2 * F1;
const GOAL: i32 = F3;
"#,
- ConstEvalError::MirLowerError(MirLowerError::Loop),
+ |e| e == ConstEvalError::MirLowerError(MirLowerError::Loop),
);
}
@@ -963,6 +2125,29 @@ fn const_transfer_memory() {
}
#[test]
+fn anonymous_const_block() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn size_of<T>() -> usize;
+ }
+
+ const fn f<T>() -> usize {
+ let r = const { size_of::<T>() };
+ r
+ }
+
+ const GOAL: usize = {
+ let x = const { 2 + const { 3 } };
+ let y = f::<i32>();
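+ // x = 2 + 3 = 5 and y = size_of::<i32>() = 4, so GOAL = 9.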
+ x + y
+ };
+ "#,
+ 9,
+ );
+}
+
+#[test]
fn const_impl_assoc() {
check_number(
r#"
@@ -970,9 +2155,9 @@ fn const_impl_assoc() {
impl U5 {
const VAL: usize = 5;
}
- const GOAL: usize = U5::VAL;
+ const GOAL: usize = U5::VAL + <U5>::VAL;
"#,
- 5,
+ 10,
);
}
@@ -987,12 +2172,61 @@ fn const_generic_subst_fn() {
"#,
11,
);
+ check_number(
+ r#"
+ fn f<const N: usize>(x: [i32; N]) -> usize {
+ N
+ }
+
+ trait ArrayExt {
+ fn f(self) -> usize;
+ }
+
+ impl<T, const N: usize> ArrayExt for [T; N] {
+ fn g(self) -> usize {
+ f(self)
+ }
+ }
+
+ const GOAL: usize = f([1, 2, 5]);
+ "#,
+ 3,
+ );
+}
+
+#[test]
+fn layout_of_type_with_associated_type_field_defined_inside_body() {
+ check_number(
+ r#"
+trait Tr {
+ type Ty;
+}
+
+struct St<T: Tr>(T::Ty);
+
+const GOAL: i64 = {
+    // If we move `St2` out of the body, the test will fail, as we no longer see the impl. That
+    // case will probably be rejected by rustc in some later edition, but we should support it
+    // here.
+ struct St2;
+
+ impl Tr for St2 {
+ type Ty = i64;
+ }
+
+ struct Goal(St<St2>);
+
+ let x = Goal(St(5));
+ x.0.0
+};
+"#,
+ 5,
+ );
}
#[test]
fn const_generic_subst_assoc_const_impl() {
- // FIXME: this should evaluate to 5
- check_fail(
+ check_number(
r#"
struct Adder<const N: usize, const M: usize>;
impl<const N: usize, const M: usize> Adder<N, M> {
@@ -1000,14 +2234,42 @@ fn const_generic_subst_assoc_const_impl() {
}
const GOAL: usize = Adder::<2, 3>::VAL;
"#,
- ConstEvalError::MirEvalError(MirEvalError::TypeError("missing generic arg")),
+ 5,
+ );
+}
+
+#[test]
+fn associated_types() {
+ check_number(
+ r#"
+ trait Tr {
+ type Item;
+ fn get_item(&self) -> Self::Item;
+ }
+
+ struct X(i32);
+ struct Y(i32);
+
+ impl Tr for X {
+ type Item = Y;
+ fn get_item(&self) -> Self::Item {
+ Y(self.0 + 2)
+ }
+ }
+
+ fn my_get_item<T: Tr>(x: T) -> <T as Tr>::Item {
+ x.get_item()
+ }
+
+ const GOAL: i32 = my_get_item(X(3)).0;
+ "#,
+ 5,
);
}
#[test]
fn const_trait_assoc() {
- // FIXME: this should evaluate to 0
- check_fail(
+ check_number(
r#"
struct U0;
trait ToConst {
@@ -1016,9 +2278,49 @@ fn const_trait_assoc() {
impl ToConst for U0 {
const VAL: usize = 0;
}
- const GOAL: usize = U0::VAL;
+ impl ToConst for i32 {
+ const VAL: usize = 32;
+ }
+ const GOAL: usize = U0::VAL + i32::VAL;
"#,
- ConstEvalError::MirLowerError(MirLowerError::IncompleteExpr),
+ 32,
+ );
+ check_number(
+ r#"
+ struct S<T>(*mut T);
+
+ trait MySized: Sized {
+ const SIZE: S<Self> = S(1 as *mut Self);
+ }
+
+ impl MySized for i32 {
+ const SIZE: S<i32> = S(10 as *mut i32);
+ }
+
+ impl MySized for i64 {
+ }
+
+ const fn f<T: MySized>() -> usize {
+ T::SIZE.0 as usize
+ }
+
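+ // i32 overrides SIZE to S(10); i64 falls back to the default S(1): 10 + 1 * 2 = 12.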
+ const GOAL: usize = f::<i32>() + f::<i64>() * 2;
+ "#,
+ 12,
+ );
+}
+
+#[test]
+fn panic_messages() {
+ check_fail(
+ r#"
+ //- minicore: panic
+ const GOAL: u8 = {
+ let x: u16 = 2;
+ panic!("hello");
+ };
+ "#,
+ |e| e == ConstEvalError::MirEvalError(MirEvalError::Panic("hello".to_string())),
);
}
@@ -1028,7 +2330,7 @@ fn exec_limits() {
r#"
const GOAL: usize = loop {};
"#,
- ConstEvalError::MirEvalError(MirEvalError::ExecutionLimitExceeded),
+ |e| e == ConstEvalError::MirEvalError(MirEvalError::ExecutionLimitExceeded),
);
check_fail(
r#"
@@ -1037,7 +2339,7 @@ fn exec_limits() {
}
const GOAL: i32 = f(0);
"#,
- ConstEvalError::MirEvalError(MirEvalError::StackOverflow),
+ |e| e == ConstEvalError::MirEvalError(MirEvalError::StackOverflow),
);
// Reasonable code should still work
check_number(
@@ -1062,7 +2364,7 @@ fn exec_limits() {
#[test]
fn type_error() {
- let e = eval_goal(
+ check_fail(
r#"
const GOAL: u8 = {
let x: u16 = 2;
@@ -1070,6 +2372,25 @@ fn type_error() {
y.0
};
"#,
+ |e| matches!(e, ConstEvalError::MirLowerError(MirLowerError::TypeMismatch(_))),
+ );
+}
+
+#[test]
+fn unsized_local() {
+ check_fail(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ const fn x() -> SomeUnknownTypeThatDereferenceToSlice {
+ SomeUnknownTypeThatDereferenceToSlice
+ }
+
+ const GOAL: u16 = {
+ let y = x();
+ let z: &[u16] = &y;
+ z[1]
+ };
+ "#,
+ |e| matches!(e, ConstEvalError::MirLowerError(MirLowerError::UnsizedTemporary(_))),
);
- assert!(matches!(e, Err(ConstEvalError::MirLowerError(MirLowerError::TypeMismatch(_)))));
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs
new file mode 100644
index 000000000..e05d824db
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs
@@ -0,0 +1,377 @@
+use super::*;
+
+#[test]
+fn size_of() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn size_of<T>() -> usize;
+ }
+
+ const GOAL: usize = size_of::<i32>();
+ "#,
+ 4,
+ );
+}
+
+#[test]
+fn transmute() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn transmute<T, U>(e: T) -> U;
+ }
+
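+ // (1i16, 1i16) is [0x01, 0x00, 0x01, 0x00] in little-endian memory, i.e. 0x00010001 as i32.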
+ const GOAL: i32 = transmute((1i16, 1i16));
+ "#,
+ 0x00010001,
+ );
+}
+
+#[test]
+fn const_eval_select() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
+ where
+ G: FnOnce<ARG, Output = RET>,
+ F: FnOnce<ARG, Output = RET>;
+ }
+
+ const fn in_const(x: i32, y: i32) -> i32 {
+ x + y
+ }
+
+ fn in_rt(x: i32, y: i32) -> i32 {
+ x + y
+ }
+
+ const GOAL: i32 = const_eval_select((2, 3), in_const, in_rt);
+ "#,
+ 5,
+ );
+}
+
+#[test]
+fn wrapping_add() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn wrapping_add<T>(a: T, b: T) -> T;
+ }
+
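+ // 10 + 250 = 260 wraps modulo 256 to 4.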
+ const GOAL: u8 = wrapping_add(10, 250);
+ "#,
+ 4,
+ );
+}
+
+#[test]
+fn saturating_add() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn saturating_add<T>(a: T, b: T) -> T;
+ }
+
+ const GOAL: u8 = saturating_add(10, 250);
+ "#,
+ 255,
+ );
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn saturating_add<T>(a: T, b: T) -> T;
+ }
+
+ const GOAL: i8 = saturating_add(5, 8);
+ "#,
+ 13,
+ );
+}
+
+#[test]
+fn allocator() {
+ check_number(
+ r#"
+ extern "Rust" {
+ #[rustc_allocator]
+ fn __rust_alloc(size: usize, align: usize) -> *mut u8;
+ #[rustc_deallocator]
+ fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+ #[rustc_reallocator]
+ fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
+ #[rustc_allocator_zeroed]
+ fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+ }
+
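+ // realloc preserves the old contents, so the bytes 23 and 32 survive the grow: 23 + 32 = 55.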
+ const GOAL: u8 = unsafe {
+ let ptr = __rust_alloc(4, 1);
+ let ptr2 = ((ptr as usize) + 1) as *mut u8;
+ *ptr = 23;
+ *ptr2 = 32;
+ let ptr = __rust_realloc(ptr, 4, 1, 8);
+ let ptr2 = ((ptr as usize) + 1) as *mut u8;
+ *ptr + *ptr2
+ };
+ "#,
+ 55,
+ );
+}
+
+#[test]
+fn overflowing_add() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+ }
+
+ const GOAL: u8 = add_with_overflow(1, 2).0;
+ "#,
+ 3,
+ );
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+ }
+
+ const GOAL: u8 = add_with_overflow(1, 2).1 as u8;
+ "#,
+ 0,
+ );
+}
+
+#[test]
+fn needs_drop() {
+ check_number(
+ r#"
+ //- minicore: copy, sized
+ extern "rust-intrinsic" {
+ pub fn needs_drop<T: ?Sized>() -> bool;
+ }
+ struct X;
+ const GOAL: bool = !needs_drop::<i32>() && needs_drop::<X>();
+ "#,
+ 1,
+ );
+}
+
+#[test]
+fn likely() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn likely(b: bool) -> bool;
+ pub fn unlikely(b: bool) -> bool;
+ }
+
+ const GOAL: bool = likely(true) && unlikely(true) && !likely(false) && !unlikely(false);
+ "#,
+ 1,
+ );
+}
+
+#[test]
+fn floating_point() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn sqrtf32(x: f32) -> f32;
+ pub fn powf32(a: f32, x: f32) -> f32;
+ pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
+ }
+
+ const GOAL: f32 = sqrtf32(1.2) + powf32(3.4, 5.6) + fmaf32(-7.8, 1.3, 2.4);
+ "#,
+ i128::from_le_bytes(pad16(
+ &f32::to_le_bytes(1.2f32.sqrt() + 3.4f32.powf(5.6) + (-7.8f32).mul_add(1.3, 2.4)),
+ true,
+ )),
+ );
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn powif64(a: f64, x: i32) -> f64;
+ pub fn sinf64(x: f64) -> f64;
+ pub fn minnumf64(x: f64, y: f64) -> f64;
+ }
+
+ const GOAL: f64 = powif64(1.2, 5) + sinf64(3.4) + minnumf64(-7.8, 1.3);
+ "#,
+ i128::from_le_bytes(pad16(
+ &f64::to_le_bytes(1.2f64.powi(5) + 3.4f64.sin() + (-7.8f64).min(1.3)),
+ true,
+ )),
+ );
+}
+
+#[test]
+fn atomic() {
+ check_number(
+ r#"
+ //- minicore: copy
+ extern "rust-intrinsic" {
+ pub fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
+ pub fn atomic_xchg_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_cxchg_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_store_release<T: Copy>(dst: *mut T, val: T);
+ pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xsub_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_and_acquire<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T;
+ pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+ }
+
+ fn should_not_reach() {
+ _ // fails the test if executed
+ }
+
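+ // trace: x: 5 -> 10 -> 100 -> 300 -> 300 ^ 1024 = 1324; y: 10 -> 30 -> 40 -> 30; z = 100
+ // so the result is 1324 + 100 * 3 + 30 * 2 = 660 + 1024.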
+ const GOAL: i32 = {
+ let mut x = 5;
+ atomic_store_release(&mut x, 10);
+ let mut y = atomic_xchg_acquire(&mut x, 100);
+ atomic_xadd_acqrel(&mut y, 20);
+ if (30, true) != atomic_cxchg_release_seqcst(&mut y, 30, 40) {
+ should_not_reach();
+ }
+ if (40, false) != atomic_cxchg_release_seqcst(&mut y, 30, 50) {
+ should_not_reach();
+ }
+ if (40, true) != atomic_cxchgweak_acquire_acquire(&mut y, 40, 30) {
+ should_not_reach();
+ }
+ let mut z = atomic_xsub_seqcst(&mut x, -200);
+ atomic_xor_seqcst(&mut x, 1024);
+ atomic_load_seqcst(&x) + z * 3 + atomic_load_seqcst(&y) * 2
+ };
+ "#,
+ 660 + 1024,
+ );
+}
+
+#[test]
+fn offset() {
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ extern "rust-intrinsic" {
+ pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
+ }
+
+ const GOAL: u8 = unsafe {
+ let ar: &[(u8, u8, u8)] = &[
+ (10, 11, 12),
+ (20, 21, 22),
+ (30, 31, 32),
+ (40, 41, 42),
+ (50, 51, 52),
+ ];
+ let ar: *const [(u8, u8, u8)] = ar;
+ let ar = ar as *const (u8, u8, u8);
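+ // offset(ar, 2) points at the third tuple, (30, 31, 32)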
+ let element = *offset(ar, 2);
+ element.1
+ };
+ "#,
+ 31,
+ );
+}
+
+#[test]
+fn arith_offset() {
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ extern "rust-intrinsic" {
+ pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
+ }
+
+ const GOAL: u8 = unsafe {
+ let ar: &[(u8, u8, u8)] = &[
+ (10, 11, 12),
+ (20, 21, 22),
+ (30, 31, 32),
+ (40, 41, 42),
+ (50, 51, 52),
+ ];
+ let ar: *const [(u8, u8, u8)] = ar;
+ let ar = ar as *const (u8, u8, u8);
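+ // +102 then -100 nets to +2 elements, landing on (30, 31, 32)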
+ let element = *arith_offset(arith_offset(ar, 102), -100);
+ element.1
+ };
+ "#,
+ 31,
+ );
+}
+
+#[test]
+fn copy_nonoverlapping() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ const GOAL: u8 = unsafe {
+ let mut x = 2;
+ let y = 5;
+ copy_nonoverlapping(&y, &mut x, 1);
+ x
+ };
+ "#,
+ 5,
+ );
+}
+
+#[test]
+fn copy() {
+ check_number(
+ r#"
+ //- minicore: coerce_unsized, index, slice
+ extern "rust-intrinsic" {
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ const GOAL: i32 = unsafe {
+ let mut x = [1i32, 2, 3, 4, 5];
+ let y = (&mut x as *mut _) as *mut i32;
+ let z = (y as usize + 4) as *const i32;
+ copy(z, y, 4);
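+ // z starts at x[1], so the overlapping copy turns x into [2, 3, 4, 5, 5]: sum 19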
+ x[0] + x[1] + x[2] + x[3] + x[4]
+ };
+ "#,
+ 19,
+ );
+}
+
+#[test]
+fn ctpop() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn ctpop<T: Copy>(x: T) -> T;
+ }
+
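+ // -29 as a 64-bit two's-complement value is 0xFFFF_FFFF_FFFF_FFE3, which has 61 one-bits.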
+ const GOAL: i64 = ctpop(-29);
+ "#,
+ 61,
+ );
+}
+
+#[test]
+fn cttz() {
+ check_number(
+ r#"
+ extern "rust-intrinsic" {
+ pub fn cttz<T: Copy>(x: T) -> T;
+ }
+
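+ // -24 in two's complement ends in ...1110_1000: 3 trailing zeros.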
+ const GOAL: i64 = cttz(-24);
+ "#,
+ 3,
+ );
+}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/db.rs b/src/tools/rust-analyzer/crates/hir-ty/src/db.rs
index 304c78767..9dd810f84 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/db.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/db.rs
@@ -1,27 +1,27 @@
//! The home of `HirDatabase`, which is the Salsa database containing all the
//! type inference-related queries.
-use std::sync::Arc;
+use std::sync;
use base_db::{impl_intern_key, salsa, CrateId, Upcast};
use hir_def::{
- db::DefDatabase,
- expr::ExprId,
- layout::{Layout, LayoutError, TargetDataLayout},
- AdtId, BlockId, ConstId, ConstParamId, DefWithBodyId, EnumVariantId, FunctionId, GenericDefId,
- ImplId, LifetimeParamId, LocalFieldId, TypeOrConstParamId, VariantId,
+ db::DefDatabase, hir::ExprId, layout::TargetDataLayout, AdtId, BlockId, ConstParamId,
+ DefWithBodyId, EnumVariantId, FunctionId, GeneralConstId, GenericDefId, ImplId,
+ LifetimeParamId, LocalFieldId, StaticId, TypeOrConstParamId, VariantId,
};
use la_arena::ArenaMap;
use smallvec::SmallVec;
+use triomphe::Arc;
use crate::{
chalk_db,
consteval::ConstEvalError,
+ layout::{Layout, LayoutError},
method_resolution::{InherentImpls, TraitImpls, TyFingerprint},
mir::{BorrowckResult, MirBody, MirLowerError},
- Binders, CallableDefId, Const, FnDefId, GenericArg, ImplTraitId, InferenceResult, Interner,
- PolyFnSig, QuantifiedWhereClause, ReturnTypeImplTraits, Substitution, TraitRef, Ty, TyDefId,
- ValueTyDefId,
+ Binders, CallableDefId, ClosureId, Const, FnDefId, GenericArg, ImplTraitId, InferenceResult,
+ Interner, PolyFnSig, QuantifiedWhereClause, ReturnTypeImplTraits, Substitution, TraitRef, Ty,
+ TyDefId, ValueTyDefId,
};
use hir_expand::name::Name;
@@ -38,8 +38,28 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
#[salsa::cycle(crate::mir::mir_body_recover)]
fn mir_body(&self, def: DefWithBodyId) -> Result<Arc<MirBody>, MirLowerError>;
+ #[salsa::invoke(crate::mir::mir_body_for_closure_query)]
+ fn mir_body_for_closure(&self, def: ClosureId) -> Result<Arc<MirBody>, MirLowerError>;
+
+ #[salsa::invoke(crate::mir::monomorphized_mir_body_query)]
+ #[salsa::cycle(crate::mir::monomorphized_mir_body_recover)]
+ fn monomorphized_mir_body(
+ &self,
+ def: DefWithBodyId,
+ subst: Substitution,
+ env: Arc<crate::TraitEnvironment>,
+ ) -> Result<Arc<MirBody>, MirLowerError>;
+
+ #[salsa::invoke(crate::mir::monomorphized_mir_body_for_closure_query)]
+ fn monomorphized_mir_body_for_closure(
+ &self,
+ def: ClosureId,
+ subst: Substitution,
+ env: Arc<crate::TraitEnvironment>,
+ ) -> Result<Arc<MirBody>, MirLowerError>;
+
#[salsa::invoke(crate::mir::borrowck_query)]
- fn borrowck(&self, def: DefWithBodyId) -> Result<Arc<BorrowckResult>, MirLowerError>;
+ fn borrowck(&self, def: DefWithBodyId) -> Result<Arc<[BorrowckResult]>, MirLowerError>;
#[salsa::invoke(crate::lower::ty_query)]
#[salsa::cycle(crate::lower::ty_recover)]
@@ -57,7 +77,12 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
#[salsa::invoke(crate::consteval::const_eval_query)]
#[salsa::cycle(crate::consteval::const_eval_recover)]
- fn const_eval(&self, def: ConstId) -> Result<Const, ConstEvalError>;
+ fn const_eval(&self, def: GeneralConstId, subst: Substitution)
+ -> Result<Const, ConstEvalError>;
+
+ #[salsa::invoke(crate::consteval::const_eval_static_query)]
+ #[salsa::cycle(crate::consteval::const_eval_static_recover)]
+ fn const_eval_static(&self, def: StaticId) -> Result<Const, ConstEvalError>;
#[salsa::invoke(crate::consteval::const_eval_discriminant_variant)]
#[salsa::cycle(crate::consteval::const_eval_discriminant_recover)]
@@ -71,7 +96,16 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
#[salsa::invoke(crate::layout::layout_of_adt_query)]
#[salsa::cycle(crate::layout::layout_of_adt_recover)]
- fn layout_of_adt(&self, def: AdtId, subst: Substitution) -> Result<Layout, LayoutError>;
+ fn layout_of_adt(
+ &self,
+ def: AdtId,
+ subst: Substitution,
+ krate: CrateId,
+ ) -> Result<Arc<Layout>, LayoutError>;
+
+ #[salsa::invoke(crate::layout::layout_of_ty_query)]
+ #[salsa::cycle(crate::layout::layout_of_ty_recover)]
+ fn layout_of_ty(&self, ty: Ty, krate: CrateId) -> Result<Arc<Layout>, LayoutError>;
#[salsa::invoke(crate::layout::target_data_layout_query)]
fn target_data_layout(&self, krate: CrateId) -> Option<Arc<TargetDataLayout>>;
@@ -97,6 +131,10 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
#[salsa::invoke(crate::lower::generic_predicates_query)]
fn generic_predicates(&self, def: GenericDefId) -> Arc<[Binders<QuantifiedWhereClause>]>;
+ #[salsa::invoke(crate::lower::trait_environment_for_body_query)]
+ #[salsa::transparent]
+ fn trait_environment_for_body(&self, def: DefWithBodyId) -> Arc<crate::TraitEnvironment>;
+
#[salsa::invoke(crate::lower::trait_environment_query)]
fn trait_environment(&self, def: GenericDefId) -> Arc<crate::TraitEnvironment>;
@@ -108,7 +146,7 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
fn inherent_impls_in_crate(&self, krate: CrateId) -> Arc<InherentImpls>;
#[salsa::invoke(InherentImpls::inherent_impls_in_block_query)]
- fn inherent_impls_in_block(&self, block: BlockId) -> Option<Arc<InherentImpls>>;
+ fn inherent_impls_in_block(&self, block: BlockId) -> Arc<InherentImpls>;
/// Collects all crates in the dependency graph that have impls for the
/// given fingerprint. This is only used for primitive types and types
@@ -125,10 +163,10 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
fn trait_impls_in_crate(&self, krate: CrateId) -> Arc<TraitImpls>;
#[salsa::invoke(TraitImpls::trait_impls_in_block_query)]
- fn trait_impls_in_block(&self, krate: BlockId) -> Option<Arc<TraitImpls>>;
+ fn trait_impls_in_block(&self, block: BlockId) -> Arc<TraitImpls>;
#[salsa::invoke(TraitImpls::trait_impls_in_deps_query)]
- fn trait_impls_in_deps(&self, krate: CrateId) -> Arc<TraitImpls>;
+ fn trait_impls_in_deps(&self, krate: CrateId) -> Arc<[Arc<TraitImpls>]>;
// Interned IDs for Chalk integration
#[salsa::interned]
@@ -148,24 +186,34 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
fn intern_generator(&self, id: (DefWithBodyId, ExprId)) -> InternedGeneratorId;
#[salsa::invoke(chalk_db::associated_ty_data_query)]
- fn associated_ty_data(&self, id: chalk_db::AssocTypeId) -> Arc<chalk_db::AssociatedTyDatum>;
+ fn associated_ty_data(
+ &self,
+ id: chalk_db::AssocTypeId,
+ ) -> sync::Arc<chalk_db::AssociatedTyDatum>;
#[salsa::invoke(chalk_db::trait_datum_query)]
- fn trait_datum(&self, krate: CrateId, trait_id: chalk_db::TraitId)
- -> Arc<chalk_db::TraitDatum>;
+ fn trait_datum(
+ &self,
+ krate: CrateId,
+ trait_id: chalk_db::TraitId,
+ ) -> sync::Arc<chalk_db::TraitDatum>;
#[salsa::invoke(chalk_db::struct_datum_query)]
fn struct_datum(
&self,
krate: CrateId,
struct_id: chalk_db::AdtId,
- ) -> Arc<chalk_db::StructDatum>;
+ ) -> sync::Arc<chalk_db::StructDatum>;
#[salsa::invoke(chalk_db::impl_datum_query)]
- fn impl_datum(&self, krate: CrateId, impl_id: chalk_db::ImplId) -> Arc<chalk_db::ImplDatum>;
+ fn impl_datum(
+ &self,
+ krate: CrateId,
+ impl_id: chalk_db::ImplId,
+ ) -> sync::Arc<chalk_db::ImplDatum>;
#[salsa::invoke(chalk_db::fn_def_datum_query)]
- fn fn_def_datum(&self, krate: CrateId, fn_def_id: FnDefId) -> Arc<chalk_db::FnDefDatum>;
+ fn fn_def_datum(&self, krate: CrateId, fn_def_id: FnDefId) -> sync::Arc<chalk_db::FnDefDatum>;
#[salsa::invoke(chalk_db::fn_def_variance_query)]
fn fn_def_variance(&self, fn_def_id: FnDefId) -> chalk_db::Variances;
@@ -178,7 +226,7 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
&self,
krate: CrateId,
id: chalk_db::AssociatedTyValueId,
- ) -> Arc<chalk_db::AssociatedTyValue>;
+ ) -> sync::Arc<chalk_db::AssociatedTyValue>;
#[salsa::invoke(crate::traits::normalize_projection_query)]
#[salsa::transparent]
@@ -193,6 +241,7 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
fn trait_solve(
&self,
krate: CrateId,
+ block: Option<BlockId>,
goal: crate::Canonical<crate::InEnvironment<crate::Goal>>,
) -> Option<crate::Solution>;
@@ -200,6 +249,7 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
fn trait_solve_query(
&self,
krate: CrateId,
+ block: Option<BlockId>,
goal: crate::Canonical<crate::InEnvironment<crate::Goal>>,
) -> Option<crate::Solution>;
@@ -207,20 +257,28 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
fn program_clauses_for_chalk_env(
&self,
krate: CrateId,
+ block: Option<BlockId>,
env: chalk_ir::Environment<Interner>,
) -> chalk_ir::ProgramClauses<Interner>;
}
fn infer_wait(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<InferenceResult> {
let _p = profile::span("infer:wait").detail(|| match def {
- DefWithBodyId::FunctionId(it) => db.function_data(it).name.to_string(),
- DefWithBodyId::StaticId(it) => db.static_data(it).name.clone().to_string(),
- DefWithBodyId::ConstId(it) => {
- db.const_data(it).name.clone().unwrap_or_else(Name::missing).to_string()
+ DefWithBodyId::FunctionId(it) => db.function_data(it).name.display(db.upcast()).to_string(),
+ DefWithBodyId::StaticId(it) => {
+ db.static_data(it).name.clone().display(db.upcast()).to_string()
}
+ DefWithBodyId::ConstId(it) => db
+ .const_data(it)
+ .name
+ .clone()
+ .unwrap_or_else(Name::missing)
+ .display(db.upcast())
+ .to_string(),
DefWithBodyId::VariantId(it) => {
- db.enum_data(it.parent).variants[it.local_id].name.to_string()
+ db.enum_data(it.parent).variants[it.local_id].name.display(db.upcast()).to_string()
}
+ DefWithBodyId::InTypeConstId(it) => format!("in type const {it:?}"),
});
db.infer_query(def)
}
@@ -228,10 +286,11 @@ fn infer_wait(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<InferenceResult>
fn trait_solve_wait(
db: &dyn HirDatabase,
krate: CrateId,
+ block: Option<BlockId>,
goal: crate::Canonical<crate::InEnvironment<crate::Goal>>,
) -> Option<crate::Solution> {
let _p = profile::span("trait_solve::wait");
- db.trait_solve_query(krate, goal)
+ db.trait_solve_query(krate, block, goal)
}
#[test]
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs
index d36b93e3b..1233469b9 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs
@@ -16,8 +16,8 @@ use std::fmt;
use base_db::CrateId;
use hir_def::{
- adt::VariantData,
- expr::{Pat, PatId},
+ data::adt::VariantData,
+ hir::{Pat, PatId},
src::HasSource,
AdtId, AttrDefId, ConstId, EnumId, FunctionId, ItemContainerId, Lookup, ModuleDefId, StaticId,
StructId,
@@ -223,7 +223,7 @@ impl<'a> DeclValidator<'a> {
}
// Check the function name.
- let function_name = data.name.to_string();
+ let function_name = data.name.display(self.db.upcast()).to_string();
let fn_name_replacement = to_lower_snake_case(&function_name).map(|new_name| Replacement {
current_name: data.name.clone(),
suggested_text: new_name,
@@ -244,7 +244,9 @@ impl<'a> DeclValidator<'a> {
id,
Replacement {
current_name: bind_name.clone(),
- suggested_text: to_lower_snake_case(&bind_name.to_string())?,
+ suggested_text: to_lower_snake_case(
+ &bind_name.display(self.db.upcast()).to_string(),
+ )?,
expected_case: CaseType::LowerSnakeCase,
},
))
@@ -287,7 +289,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::Function,
ident: AstPtr::new(&ast_ptr),
expected_case: fn_name_replacement.expected_case,
- ident_text: fn_name_replacement.current_name.to_string(),
+ ident_text: fn_name_replacement.current_name.display(self.db.upcast()).to_string(),
suggested_text: fn_name_replacement.suggested_text,
};
@@ -343,7 +345,10 @@ impl<'a> DeclValidator<'a> {
ident_type,
ident: AstPtr::new(&name_ast),
expected_case: replacement.expected_case,
- ident_text: replacement.current_name.to_string(),
+ ident_text: replacement
+ .current_name
+ .display(self.db.upcast())
+ .to_string(),
suggested_text: replacement.suggested_text,
};
@@ -362,7 +367,7 @@ impl<'a> DeclValidator<'a> {
let non_snake_case_allowed = self.allowed(struct_id.into(), allow::NON_SNAKE_CASE, false);
// Check the structure name.
- let struct_name = data.name.to_string();
+ let struct_name = data.name.display(self.db.upcast()).to_string();
let struct_name_replacement = if !non_camel_case_allowed {
to_camel_case(&struct_name).map(|new_name| Replacement {
current_name: data.name.clone(),
@@ -379,7 +384,7 @@ impl<'a> DeclValidator<'a> {
if !non_snake_case_allowed {
if let VariantData::Record(fields) = data.variant_data.as_ref() {
for (_, field) in fields.iter() {
- let field_name = field.name.to_string();
+ let field_name = field.name.display(self.db.upcast()).to_string();
if let Some(new_name) = to_lower_snake_case(&field_name) {
let replacement = Replacement {
current_name: field.name.clone(),
@@ -434,7 +439,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::Structure,
ident: AstPtr::new(&ast_ptr),
expected_case: replacement.expected_case,
- ident_text: replacement.current_name.to_string(),
+ ident_text: replacement.current_name.display(self.db.upcast()).to_string(),
suggested_text: replacement.suggested_text,
};
@@ -479,7 +484,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::Field,
ident: AstPtr::new(&ast_ptr),
expected_case: field_to_rename.expected_case,
- ident_text: field_to_rename.current_name.to_string(),
+ ident_text: field_to_rename.current_name.display(self.db.upcast()).to_string(),
suggested_text: field_to_rename.suggested_text,
};
@@ -496,7 +501,7 @@ impl<'a> DeclValidator<'a> {
}
// Check the enum name.
- let enum_name = data.name.to_string();
+ let enum_name = data.name.display(self.db.upcast()).to_string();
let enum_name_replacement = to_camel_case(&enum_name).map(|new_name| Replacement {
current_name: data.name.clone(),
suggested_text: new_name,
@@ -510,7 +515,9 @@ impl<'a> DeclValidator<'a> {
.filter_map(|(_, variant)| {
Some(Replacement {
current_name: variant.name.clone(),
- suggested_text: to_camel_case(&variant.name.to_string())?,
+ suggested_text: to_camel_case(
+ &variant.name.display(self.db.upcast()).to_string(),
+ )?,
expected_case: CaseType::UpperCamelCase,
})
})
@@ -558,7 +565,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::Enum,
ident: AstPtr::new(&ast_ptr),
expected_case: replacement.expected_case,
- ident_text: replacement.current_name.to_string(),
+ ident_text: replacement.current_name.display(self.db.upcast()).to_string(),
suggested_text: replacement.suggested_text,
};
@@ -603,7 +610,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::Variant,
ident: AstPtr::new(&ast_ptr),
expected_case: variant_to_rename.expected_case,
- ident_text: variant_to_rename.current_name.to_string(),
+ ident_text: variant_to_rename.current_name.display(self.db.upcast()).to_string(),
suggested_text: variant_to_rename.suggested_text,
};
@@ -623,7 +630,7 @@ impl<'a> DeclValidator<'a> {
None => return,
};
- let const_name = name.to_string();
+ let const_name = name.display(self.db.upcast()).to_string();
let replacement = if let Some(new_name) = to_upper_snake_case(&const_name) {
Replacement {
current_name: name.clone(),
@@ -648,7 +655,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::Constant,
ident: AstPtr::new(&ast_ptr),
expected_case: replacement.expected_case,
- ident_text: replacement.current_name.to_string(),
+ ident_text: replacement.current_name.display(self.db.upcast()).to_string(),
suggested_text: replacement.suggested_text,
};
@@ -668,7 +675,7 @@ impl<'a> DeclValidator<'a> {
let name = &data.name;
- let static_name = name.to_string();
+ let static_name = name.display(self.db.upcast()).to_string();
let replacement = if let Some(new_name) = to_upper_snake_case(&static_name) {
Replacement {
current_name: name.clone(),
@@ -693,7 +700,7 @@ impl<'a> DeclValidator<'a> {
ident_type: IdentType::StaticVariable,
ident: AstPtr::new(&ast_ptr),
expected_case: replacement.expected_case,
- ident_text: replacement.current_name.to_string(),
+ ident_text: replacement.current_name.display(self.db.upcast()).to_string(),
suggested_text: replacement.suggested_text,
};
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/expr.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/expr.rs
index 2e9066788..ab34dc88d 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/expr.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/expr.rs
@@ -3,7 +3,6 @@
//! fields, etc.
use std::fmt;
-use std::sync::Arc;
use either::Either;
use hir_def::lang_item::LangItem;
@@ -12,6 +11,7 @@ use hir_def::{ItemContainerId, Lookup};
use hir_expand::name;
use itertools::Itertools;
use rustc_hash::FxHashSet;
+use triomphe::Arc;
use typed_arena::Arena;
use crate::{
@@ -27,7 +27,7 @@ use crate::{
pub(crate) use hir_def::{
body::Body,
- expr::{Expr, ExprId, MatchArm, Pat, PatId},
+ hir::{Expr, ExprId, MatchArm, Pat, PatId},
LocalFieldId, VariantId,
};
@@ -207,7 +207,7 @@ impl ExprValidator {
let report = compute_match_usefulness(&cx, &m_arms, scrut_ty);
- // FIXME Report unreacheble arms
+ // FIXME Report unreachable arms
// https://github.com/rust-lang/rust/blob/f31622a50/compiler/rustc_mir_build/src/thir/pattern/check_match.rs#L200
let witnesses = report.non_exhaustiveness_witnesses;
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check.rs
index 859a37804..f8cdeaa5e 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check.rs
@@ -1,6 +1,6 @@
//! Validation of matches.
//!
-//! This module provides lowering from [hir_def::expr::Pat] to [self::Pat] and match
+//! This module provides lowering from [hir_def::hir::Pat] to [self::Pat] and match
//! checking algorithm.
//!
//! It is modeled on the rustc module `rustc_mir_build::thir::pattern`.
@@ -12,7 +12,7 @@ pub(crate) mod usefulness;
use chalk_ir::Mutability;
use hir_def::{
- adt::VariantData, body::Body, expr::PatId, AdtId, EnumVariantId, LocalFieldId, VariantId,
+ body::Body, data::adt::VariantData, hir::PatId, AdtId, EnumVariantId, LocalFieldId, VariantId,
};
use hir_expand::name::Name;
use stdx::{always, never};
@@ -125,15 +125,15 @@ impl<'a> PatCtxt<'a> {
let variant = self.infer.variant_resolution_for_pat(pat);
let kind = match self.body[pat] {
- hir_def::expr::Pat::Wild => PatKind::Wild,
+ hir_def::hir::Pat::Wild => PatKind::Wild,
- hir_def::expr::Pat::Lit(expr) => self.lower_lit(expr),
+ hir_def::hir::Pat::Lit(expr) => self.lower_lit(expr),
- hir_def::expr::Pat::Path(ref path) => {
+ hir_def::hir::Pat::Path(ref path) => {
return self.lower_path(pat, path);
}
- hir_def::expr::Pat::Tuple { ref args, ellipsis } => {
+ hir_def::hir::Pat::Tuple { ref args, ellipsis } => {
let arity = match *ty.kind(Interner) {
TyKind::Tuple(arity, _) => arity,
_ => {
@@ -146,13 +146,14 @@ impl<'a> PatCtxt<'a> {
PatKind::Leaf { subpatterns }
}
- hir_def::expr::Pat::Bind { id, subpat, .. } => {
- let bm = self.infer.pat_binding_modes[&pat];
+ hir_def::hir::Pat::Bind { id, subpat, .. } => {
+ let bm = self.infer.binding_modes[id];
+ ty = &self.infer[id];
let name = &self.body.bindings[id].name;
match (bm, ty.kind(Interner)) {
(BindingMode::Ref(_), TyKind::Ref(.., rty)) => ty = rty,
(BindingMode::Ref(_), _) => {
- never!("`ref {}` has wrong type {:?}", name, ty);
+ never!("`ref {}` has wrong type {:?}", name.display(self.db.upcast()), ty);
self.errors.push(PatternError::UnexpectedType);
return Pat { ty: ty.clone(), kind: PatKind::Wild.into() };
}
@@ -161,13 +162,13 @@ impl<'a> PatCtxt<'a> {
PatKind::Binding { name: name.clone(), subpattern: self.lower_opt_pattern(subpat) }
}
- hir_def::expr::Pat::TupleStruct { ref args, ellipsis, .. } if variant.is_some() => {
+ hir_def::hir::Pat::TupleStruct { ref args, ellipsis, .. } if variant.is_some() => {
let expected_len = variant.unwrap().variant_data(self.db.upcast()).fields().len();
let subpatterns = self.lower_tuple_subpats(args, expected_len, ellipsis);
self.lower_variant_or_leaf(pat, ty, subpatterns)
}
- hir_def::expr::Pat::Record { ref args, .. } if variant.is_some() => {
+ hir_def::hir::Pat::Record { ref args, .. } if variant.is_some() => {
let variant_data = variant.unwrap().variant_data(self.db.upcast());
let subpatterns = args
.iter()
@@ -187,12 +188,12 @@ impl<'a> PatCtxt<'a> {
}
}
}
- hir_def::expr::Pat::TupleStruct { .. } | hir_def::expr::Pat::Record { .. } => {
+ hir_def::hir::Pat::TupleStruct { .. } | hir_def::hir::Pat::Record { .. } => {
self.errors.push(PatternError::UnresolvedVariant);
PatKind::Wild
}
- hir_def::expr::Pat::Or(ref pats) => PatKind::Or { pats: self.lower_patterns(pats) },
+ hir_def::hir::Pat::Or(ref pats) => PatKind::Or { pats: self.lower_patterns(pats) },
_ => {
self.errors.push(PatternError::Unimplemented);
@@ -279,8 +280,8 @@ impl<'a> PatCtxt<'a> {
}
}
- fn lower_lit(&mut self, expr: hir_def::expr::ExprId) -> PatKind {
- use hir_def::expr::{Expr, Literal::Bool};
+ fn lower_lit(&mut self, expr: hir_def::hir::ExprId) -> PatKind {
+ use hir_def::hir::{Expr, Literal::Bool};
match self.body[expr] {
Expr::Literal(Bool(value)) => PatKind::LiteralBool { value },
@@ -297,7 +298,7 @@ impl HirDisplay for Pat {
match &*self.kind {
PatKind::Wild => write!(f, "_"),
PatKind::Binding { name, subpattern } => {
- write!(f, "{name}")?;
+ write!(f, "{}", name.display(f.db.upcast()))?;
if let Some(subpattern) = subpattern {
write!(f, " @ ")?;
subpattern.hir_fmt(f)?;
@@ -318,10 +319,14 @@ impl HirDisplay for Pat {
match variant {
VariantId::EnumVariantId(v) => {
let data = f.db.enum_data(v.parent);
- write!(f, "{}", data.variants[v.local_id].name)?;
+ write!(f, "{}", data.variants[v.local_id].name.display(f.db.upcast()))?;
+ }
+ VariantId::StructId(s) => {
+ write!(f, "{}", f.db.struct_data(s).name.display(f.db.upcast()))?
+ }
+ VariantId::UnionId(u) => {
+ write!(f, "{}", f.db.union_data(u).name.display(f.db.upcast()))?
}
- VariantId::StructId(s) => write!(f, "{}", f.db.struct_data(s).name)?,
- VariantId::UnionId(u) => write!(f, "{}", f.db.union_data(u).name)?,
};
let variant_data = variant.variant_data(f.db.upcast());
@@ -335,7 +340,11 @@ impl HirDisplay for Pat {
.map(|p| {
printed += 1;
WriteWith(move |f| {
- write!(f, "{}: ", rec_fields[p.field].name)?;
+ write!(
+ f,
+ "{}: ",
+ rec_fields[p.field].name.display(f.db.upcast())
+ )?;
p.pattern.hir_fmt(f)
})
});
@@ -379,7 +388,7 @@ impl HirDisplay for Pat {
}
PatKind::Deref { subpattern } => {
match self.ty.kind(Interner) {
- TyKind::Adt(adt, _) if is_box(adt.0, f.db) => write!(f, "box ")?,
+ TyKind::Adt(adt, _) if is_box(f.db, adt.0) => write!(f, "box ")?,
&TyKind::Ref(mutbl, ..) => {
write!(f, "&{}", if mutbl == Mutability::Mut { "mut " } else { "" })?
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/deconstruct_pat.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/deconstruct_pat.rs
index d130827a7..a0f6b9368 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/deconstruct_pat.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/deconstruct_pat.rs
@@ -82,7 +82,7 @@ fn expand_or_pat(pat: &Pat) -> Vec<&Pat> {
pats
}
-/// [Constructor] uses this in umimplemented variants.
+/// [Constructor] uses this in unimplemented variants.
/// It allows porting match expressions from the upstream algorithm without losing semantics.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(super) enum Void {}
@@ -384,7 +384,7 @@ impl Constructor {
TyKind::Tuple(arity, ..) => arity,
TyKind::Ref(..) => 1,
TyKind::Adt(adt, ..) => {
- if is_box(adt.0, pcx.cx.db) {
+ if is_box(pcx.cx.db, adt.0) {
// The only legal patterns of type `Box` (outside `std`) are `_` and box
// patterns. If we're here we can assume this is a box pattern.
1
@@ -772,7 +772,7 @@ impl<'p> Fields<'p> {
(0..fields_len).map(|idx| LocalFieldId::from_raw(idx.into())).filter_map(move |fid| {
let ty = field_ty[fid].clone().substitute(Interner, substs);
- let ty = normalize(cx.db, cx.body, ty);
+ let ty = normalize(cx.db, cx.db.trait_environment_for_body(cx.body), ty);
let is_visible = matches!(adt, hir_def::AdtId::EnumId(..))
|| visibility[fid].is_visible_from(cx.db.upcast(), cx.module);
let is_uninhabited = cx.is_uninhabited(&ty);
@@ -800,7 +800,7 @@ impl<'p> Fields<'p> {
}
TyKind::Ref(.., rty) => Fields::wildcards_from_tys(cx, once(rty.clone())),
&TyKind::Adt(AdtId(adt), ref substs) => {
- if is_box(adt, cx.db) {
+ if is_box(cx.db, adt) {
// The only legal patterns of type `Box` (outside `std`) are `_` and box
// patterns. If we're here we can assume this is a box pattern.
let subst_ty = substs.at(Interner, 0).assert_ty_ref(Interner).clone();
@@ -905,7 +905,7 @@ impl<'p> DeconstructedPat<'p> {
}
fields = Fields::from_iter(cx, wilds)
}
- TyKind::Adt(adt, substs) if is_box(adt.0, cx.db) => {
+ TyKind::Adt(adt, substs) if is_box(cx.db, adt.0) => {
// The only legal patterns of type `Box` (outside `std`) are `_` and box
// patterns. If we're here we can assume this is a box pattern.
// FIXME(Nadrieril): A `Box` can in theory be matched either with `Box(_,
@@ -992,7 +992,7 @@ impl<'p> DeconstructedPat<'p> {
})
.collect(),
},
- TyKind::Adt(adt, _) if is_box(adt.0, cx.db) => {
+ TyKind::Adt(adt, _) if is_box(cx.db, adt.0) => {
// Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside
// of `std`). So this branch is only reachable when the feature is enabled and
// the pattern is a box pattern.
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/pat_util.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/pat_util.rs
index b89b4f2bf..217454499 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/pat_util.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/pat_util.rs
@@ -1,4 +1,4 @@
-//! Pattern untilities.
+//! Pattern utilities.
//!
//! Originates from `rustc_hir::pat_util`
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/usefulness.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/usefulness.rs
index c4d709a97..d737b24ad 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/usefulness.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/match_check/usefulness.rs
@@ -755,7 +755,7 @@ pub(crate) enum Reachability {
/// The arm is reachable. This additionally carries a set of or-pattern branches that have been
/// found to be unreachable despite the overall arm being reachable. Used only in the presence
/// of or-patterns, otherwise it stays empty.
- // FIXME: store ureachable subpattern IDs
+ // FIXME: store unreachable subpattern IDs
Reachable,
/// The arm is unreachable.
Unreachable,
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/unsafe_check.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/unsafe_check.rs
index d25c0ccf0..9f9a56ffa 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/unsafe_check.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/unsafe_check.rs
@@ -3,7 +3,7 @@
use hir_def::{
body::Body,
- expr::{Expr, ExprId, UnaryOp},
+ hir::{Expr, ExprId, UnaryOp},
resolver::{resolver_for_expr, ResolveValueResult, ValueNs},
DefWithBodyId,
};
@@ -18,9 +18,10 @@ pub fn missing_unsafe(db: &dyn HirDatabase, def: DefWithBodyId) -> Vec<ExprId> {
let is_unsafe = match def {
DefWithBodyId::FunctionId(it) => db.function_data(it).has_unsafe_kw(),
- DefWithBodyId::StaticId(_) | DefWithBodyId::ConstId(_) | DefWithBodyId::VariantId(_) => {
- false
- }
+ DefWithBodyId::StaticId(_)
+ | DefWithBodyId::ConstId(_)
+ | DefWithBodyId::VariantId(_)
+ | DefWithBodyId::InTypeConstId(_) => false,
};
if is_unsafe {
return res;
@@ -73,7 +74,7 @@ fn walk_unsafe(
}
Expr::Path(path) => {
let resolver = resolver_for_expr(db.upcast(), def, current);
- let value_or_partial = resolver.resolve_path_in_value_ns(db.upcast(), path.mod_path());
+ let value_or_partial = resolver.resolve_path_in_value_ns(db.upcast(), path);
if let Some(ResolveValueResult::ValueNs(ValueNs::StaticId(id))) = value_or_partial {
if db.static_data(id).mutable {
unsafe_expr_cb(UnsafeExpr { expr: current, inside_unsafe_block });
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/display.rs b/src/tools/rust-analyzer/crates/hir-ty/src/display.rs
index bd3eccfe4..c1df24d17 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/display.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/display.rs
@@ -2,37 +2,44 @@
//! HIR back into source code, and just displaying them for debugging/testing
//! purposes.
-use std::fmt::{self, Debug};
+use std::{
+ fmt::{self, Debug},
+ mem::size_of,
+};
use base_db::CrateId;
use chalk_ir::{BoundVar, TyKind};
use hir_def::{
- adt::VariantData,
- body,
+ data::adt::VariantData,
db::DefDatabase,
find_path,
generics::{TypeOrConstParamData, TypeParamProvenance},
item_scope::ItemInNs,
lang_item::{LangItem, LangItemTarget},
+ nameres::DefMap,
path::{Path, PathKind},
type_ref::{TraitBoundModifier, TypeBound, TypeRef},
visibility::Visibility,
- HasModule, ItemContainerId, LocalFieldId, Lookup, ModuleDefId, ModuleId, TraitId,
+ EnumVariantId, HasModule, ItemContainerId, LocalFieldId, Lookup, ModuleDefId, ModuleId,
+ TraitId,
};
use hir_expand::{hygiene::Hygiene, name::Name};
use intern::{Internable, Interned};
use itertools::Itertools;
+use la_arena::ArenaMap;
use smallvec::SmallVec;
+use stdx::never;
use crate::{
+ consteval::try_const_usize,
db::HirDatabase,
from_assoc_type_id, from_foreign_def_id, from_placeholder_idx,
- layout::layout_of_ty,
+ layout::Layout,
lt_from_placeholder_idx,
mapping::from_chalk,
mir::pad16,
primitive, to_assoc_type_id,
- utils::{self, generics},
+ utils::{self, detect_variant_from_bytes, generics, ClosureSubst},
AdtId, AliasEq, AliasTy, Binders, CallableDefId, CallableSig, Const, ConstScalar, ConstValue,
DomainGoal, GenericArg, ImplTraitId, Interner, Lifetime, LifetimeData, LifetimeOutlives,
MemoryMap, Mutability, OpaqueTy, ProjectionTy, ProjectionTyExt, QuantifiedWhereClause, Scalar,
@@ -64,6 +71,7 @@ pub struct HirFormatter<'a> {
curr_size: usize,
pub(crate) max_size: Option<usize>,
omit_verbose_types: bool,
+ closure_style: ClosureStyle,
display_target: DisplayTarget,
}
@@ -87,6 +95,7 @@ pub trait HirDisplay {
max_size: Option<usize>,
omit_verbose_types: bool,
display_target: DisplayTarget,
+ closure_style: ClosureStyle,
) -> HirDisplayWrapper<'a, Self>
where
Self: Sized,
@@ -95,7 +104,14 @@ pub trait HirDisplay {
!matches!(display_target, DisplayTarget::SourceCode { .. }),
"HirDisplayWrapper cannot fail with DisplaySourceCodeError, use HirDisplay::hir_fmt directly instead"
);
- HirDisplayWrapper { db, t: self, max_size, omit_verbose_types, display_target }
+ HirDisplayWrapper {
+ db,
+ t: self,
+ max_size,
+ omit_verbose_types,
+ display_target,
+ closure_style,
+ }
}
/// Returns a `Display`able type that is human-readable.
@@ -109,6 +125,7 @@ pub trait HirDisplay {
t: self,
max_size: None,
omit_verbose_types: false,
+ closure_style: ClosureStyle::ImplFn,
display_target: DisplayTarget::Diagnostics,
}
}
@@ -128,6 +145,7 @@ pub trait HirDisplay {
t: self,
max_size,
omit_verbose_types: true,
+ closure_style: ClosureStyle::ImplFn,
display_target: DisplayTarget::Diagnostics,
}
}
@@ -138,6 +156,7 @@ pub trait HirDisplay {
&'a self,
db: &'a dyn HirDatabase,
module_id: ModuleId,
+ allow_opaque: bool,
) -> Result<String, DisplaySourceCodeError> {
let mut result = String::new();
match self.hir_fmt(&mut HirFormatter {
@@ -147,7 +166,8 @@ pub trait HirDisplay {
curr_size: 0,
max_size: None,
omit_verbose_types: false,
- display_target: DisplayTarget::SourceCode { module_id },
+ closure_style: ClosureStyle::ImplFn,
+ display_target: DisplayTarget::SourceCode { module_id, allow_opaque },
}) {
Ok(()) => {}
Err(HirDisplayError::FmtError) => panic!("Writing to String can't fail!"),
@@ -166,6 +186,7 @@ pub trait HirDisplay {
t: self,
max_size: None,
omit_verbose_types: false,
+ closure_style: ClosureStyle::ImplFn,
display_target: DisplayTarget::Test,
}
}
@@ -235,26 +256,34 @@ pub enum DisplayTarget {
Diagnostics,
/// Display types for inserting them in source files.
/// The generated code should compile, so paths need to be qualified.
- SourceCode { module_id: ModuleId },
+ SourceCode { module_id: ModuleId, allow_opaque: bool },
/// Only for test purpose to keep real types
Test,
}
impl DisplayTarget {
- fn is_source_code(&self) -> bool {
+ fn is_source_code(self) -> bool {
matches!(self, Self::SourceCode { .. })
}
- fn is_test(&self) -> bool {
+
+ fn is_test(self) -> bool {
matches!(self, Self::Test)
}
+
+ fn allows_opaque(self) -> bool {
+ match self {
+ Self::SourceCode { allow_opaque, .. } => allow_opaque,
+ _ => true,
+ }
+ }
}
#[derive(Debug)]
pub enum DisplaySourceCodeError {
PathNotFound,
UnknownType,
- Closure,
Generator,
+ OpaqueType,
}
pub enum HirDisplayError {
@@ -274,9 +303,25 @@ pub struct HirDisplayWrapper<'a, T> {
t: &'a T,
max_size: Option<usize>,
omit_verbose_types: bool,
+ closure_style: ClosureStyle,
display_target: DisplayTarget,
}
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum ClosureStyle {
+/// `impl FnX(i32, i32) -> i32`, where `FnX` is the most specific of `Fn`, `FnMut`, and `FnOnce` that the
+/// closure implements. This is the default.
+ ImplFn,
+ /// `|i32, i32| -> i32`
+ RANotation,
+ /// `{closure#14825}`, useful for some diagnostics (like type mismatch) and internal usage.
+ ClosureWithId,
+ /// `{closure#14825}<i32, ()>`, useful for internal usage.
+ ClosureWithSubst,
+ /// `…`, which is the `TYPE_HINT_TRUNCATION`
+ Hide,
+}
+
impl<T: HirDisplay> HirDisplayWrapper<'_, T> {
pub fn write_to<F: HirWrite>(&self, f: &mut F) -> Result<(), HirDisplayError> {
self.t.hir_fmt(&mut HirFormatter {
@@ -287,8 +332,14 @@ impl<T: HirDisplay> HirDisplayWrapper<'_, T> {
max_size: self.max_size,
omit_verbose_types: self.omit_verbose_types,
display_target: self.display_target,
+ closure_style: self.closure_style,
})
}
+
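+ /// Overrides how closure types are rendered, e.g. (illustrative call) `ty.display(db).with_closure_style(ClosureStyle::RANotation)`.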
+ pub fn with_closure_style(mut self, c: ClosureStyle) -> Self {
+ self.closure_style = c;
+ self
+ }
}
impl<'a, T> fmt::Display for HirDisplayWrapper<'a, T>
@@ -330,7 +381,13 @@ impl HirDisplay for ProjectionTy {
let trait_ref = self.trait_ref(f.db);
write!(f, "<")?;
fmt_trait_ref(f, &trait_ref, true)?;
- write!(f, ">::{}", f.db.type_alias_data(from_assoc_type_id(self.associated_ty_id)).name)?;
+ write!(
+ f,
+ ">::{}",
+ f.db.type_alias_data(from_assoc_type_id(self.associated_ty_id))
+ .name
+ .display(f.db.upcast())
+ )?;
let proj_params_count =
self.substitution.len(Interner) - trait_ref.substitution.len(Interner);
let proj_params = &self.substitution.as_slice(Interner)[..proj_params_count];
@@ -373,10 +430,16 @@ impl HirDisplay for Const {
let id = from_placeholder_idx(f.db, *idx);
let generics = generics(f.db.upcast(), id.parent);
let param_data = &generics.params.type_or_consts[id.local_id];
- write!(f, "{}", param_data.name().unwrap())
+ write!(f, "{}", param_data.name().unwrap().display(f.db.upcast()))?;
+ Ok(())
}
ConstValue::Concrete(c) => match &c.interned {
ConstScalar::Bytes(b, m) => render_const_scalar(f, &b, m, &data.ty),
+ ConstScalar::UnevaluatedConst(c, parameters) => {
+ write!(f, "{}", c.name(f.db.upcast()))?;
+ hir_fmt_generics(f, parameters, c.generic_def(f.db.upcast()))?;
+ Ok(())
+ }
ConstScalar::Unknown => f.write_char('_'),
},
}
@@ -411,8 +474,11 @@ fn render_const_scalar(
memory_map: &MemoryMap,
ty: &Ty,
) -> Result<(), HirDisplayError> {
+ // FIXME: We need to get krate from the final callers of the hir display
+ // infrastructure and have it here as a field on `f`.
+ let krate = *f.db.crate_graph().crates_in_topological_order().last().unwrap();
match ty.kind(Interner) {
- chalk_ir::TyKind::Scalar(s) => match s {
+ TyKind::Scalar(s) => match s {
Scalar::Bool => write!(f, "{}", if b[0] == 0 { false } else { true }),
Scalar::Char => {
let x = u128::from_le_bytes(pad16(b, false)) as u32;
@@ -440,22 +506,90 @@ fn render_const_scalar(
}
},
},
- chalk_ir::TyKind::Ref(_, _, t) => match t.kind(Interner) {
- chalk_ir::TyKind::Str => {
+ TyKind::Ref(_, _, t) => match t.kind(Interner) {
+ TyKind::Str => {
let addr = usize::from_le_bytes(b[0..b.len() / 2].try_into().unwrap());
- let bytes = memory_map.0.get(&addr).map(|x| &**x).unwrap_or(&[]);
- let s = std::str::from_utf8(bytes).unwrap_or("<utf8-error>");
+ let size = usize::from_le_bytes(b[b.len() / 2..].try_into().unwrap());
+ let Some(bytes) = memory_map.get(addr, size) else {
+ return f.write_str("<ref-data-not-available>");
+ };
+ let s = std::str::from_utf8(&bytes).unwrap_or("<utf8-error>");
write!(f, "{s:?}")
}
- _ => f.write_str("<ref-not-supported>"),
+ TyKind::Slice(ty) => {
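+ // a `&[T]` here is a fat pointer: the data address sits in the first half of `b`, the element count in the second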
+ let addr = usize::from_le_bytes(b[0..b.len() / 2].try_into().unwrap());
+ let count = usize::from_le_bytes(b[b.len() / 2..].try_into().unwrap());
+ let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else {
+ return f.write_str("<layout-error>");
+ };
+ let size_one = layout.size.bytes_usize();
+ let Some(bytes) = memory_map.get(addr, size_one * count) else {
+ return f.write_str("<ref-data-not-available>");
+ };
+ f.write_str("&[")?;
+ let mut first = true;
+ for i in 0..count {
+ if first {
+ first = false;
+ } else {
+ f.write_str(", ")?;
+ }
+ let offset = size_one * i;
+ render_const_scalar(f, &bytes[offset..offset + size_one], memory_map, &ty)?;
+ }
+ f.write_str("]")
+ }
+ TyKind::Dyn(_) => {
+ let addr = usize::from_le_bytes(b[0..b.len() / 2].try_into().unwrap());
+ let ty_id = usize::from_le_bytes(b[b.len() / 2..].try_into().unwrap());
+ let Ok(t) = memory_map.vtable.ty(ty_id) else {
+ return f.write_str("<ty-missing-in-vtable-map>");
+ };
+ let Ok(layout) = f.db.layout_of_ty(t.clone(), krate) else {
+ return f.write_str("<layout-error>");
+ };
+ let size = layout.size.bytes_usize();
+ let Some(bytes) = memory_map.get(addr, size) else {
+ return f.write_str("<ref-data-not-available>");
+ };
+ f.write_str("&")?;
+ render_const_scalar(f, bytes, memory_map, t)
+ }
+ TyKind::Adt(adt, _) if b.len() == 2 * size_of::<usize>() => match adt.0 {
+ hir_def::AdtId::StructId(s) => {
+ let data = f.db.struct_data(s);
+ write!(f, "&{}", data.name.display(f.db.upcast()))?;
+ Ok(())
+ }
+ _ => {
+ return f.write_str("<unsized-enum-or-union>");
+ }
+ },
+ _ => {
+ let addr = usize::from_le_bytes(match b.try_into() {
+ Ok(b) => b,
+ Err(_) => {
+ never!(
+ "tried rendering ty {:?} in const ref with incorrect byte count {}",
+ t,
+ b.len()
+ );
+ return f.write_str("<layout-error>");
+ }
+ });
+ let Ok(layout) = f.db.layout_of_ty(t.clone(), krate) else {
+ return f.write_str("<layout-error>");
+ };
+ let size = layout.size.bytes_usize();
+ let Some(bytes) = memory_map.get(addr, size) else {
+ return f.write_str("<ref-data-not-available>");
+ };
+ f.write_str("&")?;
+ render_const_scalar(f, bytes, memory_map, t)
+ }
},
- chalk_ir::TyKind::Tuple(_, subst) => {
- // FIXME: Remove this line. If the target data layout is independent
- // of the krate, the `db.target_data_layout` and its callers like `layout_of_ty` don't need
- // to get krate. Otherwise, we need to get krate from the final callers of the hir display
- // infrastructure and have it here as a field on `f`.
- let krate = *f.db.crate_graph().crates_in_topological_order().last().unwrap();
- let Ok(layout) = layout_of_ty(f.db, ty, krate) else {
+ TyKind::Tuple(_, subst) => {
+ let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else {
return f.write_str("<layout-error>");
};
f.write_str("(")?;
@@ -468,7 +602,7 @@ fn render_const_scalar(
}
let ty = ty.assert_ty_ref(Interner); // Tuple only has type argument
let offset = layout.fields.offset(id).bytes_usize();
- let Ok(layout) = layout_of_ty(f.db, &ty, krate) else {
+ let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else {
f.write_str("<layout-error>")?;
continue;
};
@@ -477,62 +611,144 @@ fn render_const_scalar(
}
f.write_str(")")
}
- chalk_ir::TyKind::Adt(adt, subst) => match adt.0 {
- hir_def::AdtId::StructId(s) => {
- let data = f.db.struct_data(s);
- let Ok(layout) = f.db.layout_of_adt(adt.0, subst.clone()) else {
+ TyKind::Adt(adt, subst) => {
+ let Ok(layout) = f.db.layout_of_adt(adt.0, subst.clone(), krate) else {
+ return f.write_str("<layout-error>");
+ };
+ match adt.0 {
+ hir_def::AdtId::StructId(s) => {
+ let data = f.db.struct_data(s);
+ write!(f, "{}", data.name.display(f.db.upcast()))?;
+ let field_types = f.db.field_types(s.into());
+ render_variant_after_name(
+ &data.variant_data,
+ f,
+ &field_types,
+ adt.0.module(f.db.upcast()).krate(),
+ &layout,
+ subst,
+ b,
+ memory_map,
+ )
+ }
+ hir_def::AdtId::UnionId(u) => {
+ write!(f, "{}", f.db.union_data(u).name.display(f.db.upcast()))
+ }
+ hir_def::AdtId::EnumId(e) => {
+ let Some((var_id, var_layout)) =
+ detect_variant_from_bytes(&layout, f.db, krate, b, e) else {
+ return f.write_str("<failed-to-detect-variant>");
+ };
+ let data = &f.db.enum_data(e).variants[var_id];
+ write!(f, "{}", data.name.display(f.db.upcast()))?;
+ let field_types =
+ f.db.field_types(EnumVariantId { parent: e, local_id: var_id }.into());
+ render_variant_after_name(
+ &data.variant_data,
+ f,
+ &field_types,
+ adt.0.module(f.db.upcast()).krate(),
+ &var_layout,
+ subst,
+ b,
+ memory_map,
+ )
+ }
+ }
+ }
+ TyKind::FnDef(..) => ty.hir_fmt(f),
+ TyKind::Function(_) | TyKind::Raw(_, _) => {
+ let x = u128::from_le_bytes(pad16(b, false));
+ write!(f, "{:#X} as ", x)?;
+ ty.hir_fmt(f)
+ }
+ TyKind::Array(ty, len) => {
+ let Some(len) = try_const_usize(f.db, len) else {
+ return f.write_str("<unknown-array-len>");
+ };
+ let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else {
+ return f.write_str("<layout-error>");
+ };
+ let size_one = layout.size.bytes_usize();
+ f.write_str("[")?;
+ let mut first = true;
+ for i in 0..len as usize {
+ if first {
+ first = false;
+ } else {
+ f.write_str(", ")?;
+ }
+ let offset = size_one * i;
+ render_const_scalar(f, &b[offset..offset + size_one], memory_map, &ty)?;
+ }
+ f.write_str("]")
+ }
+ TyKind::Never => f.write_str("!"),
+ TyKind::Closure(_, _) => f.write_str("<closure>"),
+ TyKind::Generator(_, _) => f.write_str("<generator>"),
+ TyKind::GeneratorWitness(_, _) => f.write_str("<generator-witness>"),
+            // The arms below are unreachable, since const eval bails out before reaching them.
+ TyKind::Foreign(_) => f.write_str("<extern-type>"),
+ TyKind::Error
+ | TyKind::Placeholder(_)
+ | TyKind::Alias(_)
+ | TyKind::AssociatedType(_, _)
+ | TyKind::OpaqueType(_, _)
+ | TyKind::BoundVar(_)
+ | TyKind::InferenceVar(_, _) => f.write_str("<placeholder-or-unknown-type>"),
+            // The arms below are unreachable, since they were already handled in the `Ref` case.
+ TyKind::Slice(_) | TyKind::Str | TyKind::Dyn(_) => f.write_str("<unsized-value>"),
+ }
+}
+
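+/// Renders the fields of a struct or enum variant whose name has already been
+/// written to `f`: ` { field: value, .. }` for record variants, `(value, ..)` for
+/// tuple variants, and nothing for unit variants.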
+fn render_variant_after_name(
+ data: &VariantData,
+ f: &mut HirFormatter<'_>,
+ field_types: &ArenaMap<LocalFieldId, Binders<Ty>>,
+ krate: CrateId,
+ layout: &Layout,
+ subst: &Substitution,
+ b: &[u8],
+ memory_map: &MemoryMap,
+) -> Result<(), HirDisplayError> {
+ match data {
+ VariantData::Record(fields) | VariantData::Tuple(fields) => {
+ let render_field = |f: &mut HirFormatter<'_>, id: LocalFieldId| {
+ let offset = layout.fields.offset(u32::from(id.into_raw()) as usize).bytes_usize();
+ let ty = field_types[id].clone().substitute(Interner, subst);
+ let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else {
return f.write_str("<layout-error>");
};
- match data.variant_data.as_ref() {
- VariantData::Record(fields) | VariantData::Tuple(fields) => {
- let field_types = f.db.field_types(s.into());
- let krate = adt.0.module(f.db.upcast()).krate();
- let render_field = |f: &mut HirFormatter<'_>, id: LocalFieldId| {
- let offset = layout
- .fields
- .offset(u32::from(id.into_raw()) as usize)
- .bytes_usize();
- let ty = field_types[id].clone().substitute(Interner, subst);
- let Ok(layout) = layout_of_ty(f.db, &ty, krate) else {
- return f.write_str("<layout-error>");
- };
- let size = layout.size.bytes_usize();
- render_const_scalar(f, &b[offset..offset + size], memory_map, &ty)
- };
- let mut it = fields.iter();
- if matches!(data.variant_data.as_ref(), VariantData::Record(_)) {
- write!(f, "{} {{", data.name)?;
- if let Some((id, data)) = it.next() {
- write!(f, " {}: ", data.name)?;
- render_field(f, id)?;
- }
- for (id, data) in it {
- write!(f, ", {}: ", data.name)?;
- render_field(f, id)?;
- }
- write!(f, " }}")?;
- } else {
- let mut it = it.map(|x| x.0);
- write!(f, "{}(", data.name)?;
- if let Some(id) = it.next() {
- render_field(f, id)?;
- }
- for id in it {
- write!(f, ", ")?;
- render_field(f, id)?;
- }
- write!(f, ")")?;
- }
- return Ok(());
- }
- VariantData::Unit => write!(f, "{}", data.name),
+ let size = layout.size.bytes_usize();
+ render_const_scalar(f, &b[offset..offset + size], memory_map, &ty)
+ };
+ let mut it = fields.iter();
+ if matches!(data, VariantData::Record(_)) {
+ write!(f, " {{")?;
+ if let Some((id, data)) = it.next() {
+ write!(f, " {}: ", data.name.display(f.db.upcast()))?;
+ render_field(f, id)?;
+ }
+ for (id, data) in it {
+ write!(f, ", {}: ", data.name.display(f.db.upcast()))?;
+ render_field(f, id)?;
}
+ write!(f, " }}")?;
+ } else {
+ let mut it = it.map(|x| x.0);
+ write!(f, "(")?;
+ if let Some(id) = it.next() {
+ render_field(f, id)?;
+ }
+ for id in it {
+ write!(f, ", ")?;
+ render_field(f, id)?;
+ }
+ write!(f, ")")?;
}
- hir_def::AdtId::UnionId(u) => write!(f, "{}", f.db.union_data(u).name),
- hir_def::AdtId::EnumId(_) => f.write_str("<enum-not-supported>"),
- },
- chalk_ir::TyKind::FnDef(..) => ty.hir_fmt(f),
- _ => f.write_str("<not-supported>"),
+ return Ok(());
+ }
+ VariantData::Unit => Ok(()),
}
}
@@ -689,11 +905,17 @@ impl HirDisplay for Ty {
let sig = db.callable_item_signature(def).substitute(Interner, parameters);
f.start_location_link(def.into());
match def {
- CallableDefId::FunctionId(ff) => write!(f, "fn {}", db.function_data(ff).name)?,
- CallableDefId::StructId(s) => write!(f, "{}", db.struct_data(s).name)?,
- CallableDefId::EnumVariantId(e) => {
- write!(f, "{}", db.enum_data(e.parent).variants[e.local_id].name)?
+ CallableDefId::FunctionId(ff) => {
+ write!(f, "fn {}", db.function_data(ff).name.display(f.db.upcast()))?
+ }
+ CallableDefId::StructId(s) => {
+ write!(f, "{}", db.struct_data(s).name.display(f.db.upcast()))?
}
+ CallableDefId::EnumVariantId(e) => write!(
+ f,
+ "{}",
+ db.enum_data(e.parent).variants[e.local_id].name.display(f.db.upcast())
+ )?,
};
f.end_location_link();
if parameters.len(Interner) > 0 {
@@ -733,16 +955,16 @@ impl HirDisplay for Ty {
hir_def::AdtId::UnionId(it) => db.union_data(it).name.clone(),
hir_def::AdtId::EnumId(it) => db.enum_data(it).name.clone(),
};
- write!(f, "{name}")?;
+ write!(f, "{}", name.display(f.db.upcast()))?;
}
- DisplayTarget::SourceCode { module_id } => {
+ DisplayTarget::SourceCode { module_id, allow_opaque: _ } => {
if let Some(path) = find_path::find_path(
db.upcast(),
ItemInNs::Types((*def_id).into()),
module_id,
false,
) {
- write!(f, "{path}")?;
+ write!(f, "{}", path.display(f.db.upcast()))?;
} else {
return Err(HirDisplayError::DisplaySourceCodeError(
DisplaySourceCodeError::PathNotFound,
@@ -752,82 +974,9 @@ impl HirDisplay for Ty {
}
f.end_location_link();
- if parameters.len(Interner) > 0 {
- let parameters_to_write = if f.display_target.is_source_code()
- || f.omit_verbose_types()
- {
- match self
- .as_generic_def(db)
- .map(|generic_def_id| db.generic_defaults(generic_def_id))
- .filter(|defaults| !defaults.is_empty())
- {
- None => parameters.as_slice(Interner),
- Some(default_parameters) => {
- fn should_show(
- parameter: &GenericArg,
- default_parameters: &[Binders<GenericArg>],
- i: usize,
- parameters: &Substitution,
- ) -> bool {
- if parameter.ty(Interner).map(|x| x.kind(Interner))
- == Some(&TyKind::Error)
- {
- return true;
- }
- if let Some(ConstValue::Concrete(c)) = parameter
- .constant(Interner)
- .map(|x| &x.data(Interner).value)
- {
- if c.interned == ConstScalar::Unknown {
- return true;
- }
- }
- let default_parameter = match default_parameters.get(i) {
- Some(x) => x,
- None => return true,
- };
- let actual_default =
- default_parameter.clone().substitute(Interner, &parameters);
- parameter != &actual_default
- }
- let mut default_from = 0;
- for (i, parameter) in parameters.iter(Interner).enumerate() {
- if should_show(parameter, &default_parameters, i, parameters) {
- default_from = i + 1;
- }
- }
- &parameters.as_slice(Interner)[0..default_from]
- }
- }
- } else {
- parameters.as_slice(Interner)
- };
- if !parameters_to_write.is_empty() {
- write!(f, "<")?;
-
- if f.display_target.is_source_code() {
- let mut first = true;
- for generic_arg in parameters_to_write {
- if !first {
- write!(f, ", ")?;
- }
- first = false;
-
- if generic_arg.ty(Interner).map(|ty| ty.kind(Interner))
- == Some(&TyKind::Error)
- {
- write!(f, "_")?;
- } else {
- generic_arg.hir_fmt(f)?;
- }
- }
- } else {
- f.write_joined(parameters_to_write, ", ")?;
- }
+ let generic_def = self.as_generic_def(db);
- write!(f, ">")?;
- }
- }
+ hir_fmt_generics(f, parameters, generic_def)?;
}
TyKind::AssociatedType(assoc_type_id, parameters) => {
let type_alias = from_assoc_type_id(*assoc_type_id);
@@ -841,12 +990,12 @@ impl HirDisplay for Ty {
// Use placeholder associated types when the target is test (https://rust-lang.github.io/chalk/book/clauses/type_equality.html#placeholder-associated-types)
if f.display_target.is_test() {
f.start_location_link(trait_.into());
- write!(f, "{}", trait_data.name)?;
+ write!(f, "{}", trait_data.name.display(f.db.upcast()))?;
f.end_location_link();
write!(f, "::")?;
f.start_location_link(type_alias.into());
- write!(f, "{}", type_alias_data.name)?;
+ write!(f, "{}", type_alias_data.name.display(f.db.upcast()))?;
f.end_location_link();
// Note that the generic args for the associated type come before those for the
// trait (including the self type).
@@ -869,10 +1018,15 @@ impl HirDisplay for Ty {
let alias = from_foreign_def_id(*type_alias);
let type_alias = db.type_alias_data(alias);
f.start_location_link(alias.into());
- write!(f, "{}", type_alias.name)?;
+ write!(f, "{}", type_alias.name.display(f.db.upcast()))?;
f.end_location_link();
}
TyKind::OpaqueType(opaque_ty_id, parameters) => {
+ if !f.display_target.allows_opaque() {
+ return Err(HirDisplayError::DisplaySourceCodeError(
+ DisplaySourceCodeError::OpaqueType,
+ ));
+ }
let impl_trait_id = db.lookup_intern_impl_trait_id((*opaque_ty_id).into());
match impl_trait_id {
ImplTraitId::ReturnTypeImplTrait(func, idx) => {
@@ -919,26 +1073,52 @@ impl HirDisplay for Ty {
}
}
}
- TyKind::Closure(.., substs) => {
+ TyKind::Closure(id, substs) => {
if f.display_target.is_source_code() {
- return Err(HirDisplayError::DisplaySourceCodeError(
- DisplaySourceCodeError::Closure,
- ));
+ if !f.display_target.allows_opaque() {
+ return Err(HirDisplayError::DisplaySourceCodeError(
+ DisplaySourceCodeError::OpaqueType,
+ ));
+ } else if f.closure_style != ClosureStyle::ImplFn {
+ never!("Only `impl Fn` is valid for displaying closures in source code");
+ }
}
- let sig = substs.at(Interner, 0).assert_ty_ref(Interner).callable_sig(db);
+ match f.closure_style {
+ ClosureStyle::Hide => return write!(f, "{TYPE_HINT_TRUNCATION}"),
+ ClosureStyle::ClosureWithId => {
+ return write!(f, "{{closure#{:?}}}", id.0.as_u32())
+ }
+ ClosureStyle::ClosureWithSubst => {
+ write!(f, "{{closure#{:?}}}", id.0.as_u32())?;
+ return hir_fmt_generics(f, substs, None);
+ }
+ _ => (),
+ }
+ let sig = ClosureSubst(substs).sig_ty().callable_sig(db);
if let Some(sig) = sig {
+ let (def, _) = db.lookup_intern_closure((*id).into());
+ let infer = db.infer(def);
+ let (_, kind) = infer.closure_info(id);
+ match f.closure_style {
+ ClosureStyle::ImplFn => write!(f, "impl {kind:?}(")?,
+ ClosureStyle::RANotation => write!(f, "|")?,
+ _ => unreachable!(),
+ }
if sig.params().is_empty() {
- write!(f, "||")?;
} else if f.should_truncate() {
- write!(f, "|{TYPE_HINT_TRUNCATION}|")?;
+ write!(f, "{TYPE_HINT_TRUNCATION}")?;
} else {
- write!(f, "|")?;
f.write_joined(sig.params(), ", ")?;
- write!(f, "|")?;
};
-
- write!(f, " -> ")?;
- sig.ret().hir_fmt(f)?;
+ match f.closure_style {
+ ClosureStyle::ImplFn => write!(f, ")")?,
+ ClosureStyle::RANotation => write!(f, "|")?,
+ _ => unreachable!(),
+ }
+ if f.closure_style == ClosureStyle::RANotation || !sig.ret().is_unit() {
+ write!(f, " -> ")?;
+ sig.ret().hir_fmt(f)?;
+ }
} else {
write!(f, "{{closure}}")?;
}
@@ -950,7 +1130,11 @@ impl HirDisplay for Ty {
match param_data {
TypeOrConstParamData::TypeParamData(p) => match p.provenance {
TypeParamProvenance::TypeParamList | TypeParamProvenance::TraitSelf => {
- write!(f, "{}", p.name.clone().unwrap_or_else(Name::missing))?
+ write!(
+ f,
+ "{}",
+ p.name.clone().unwrap_or_else(Name::missing).display(f.db.upcast())
+ )?
}
TypeParamProvenance::ArgumentImplTrait => {
let substs = generics.placeholder_subst(db);
@@ -979,7 +1163,7 @@ impl HirDisplay for Ty {
}
},
TypeOrConstParamData::ConstParamData(p) => {
- write!(f, "{}", p.name)?;
+ write!(f, "{}", p.name.display(f.db.upcast()))?;
}
}
}
@@ -1004,6 +1188,11 @@ impl HirDisplay for Ty {
}
TyKind::Alias(AliasTy::Projection(p_ty)) => p_ty.hir_fmt(f)?,
TyKind::Alias(AliasTy::Opaque(opaque_ty)) => {
+ if !f.display_target.allows_opaque() {
+ return Err(HirDisplayError::DisplaySourceCodeError(
+ DisplaySourceCodeError::OpaqueType,
+ ));
+ }
let impl_trait_id = db.lookup_intern_impl_trait_id(opaque_ty.opaque_ty_id.into());
match impl_trait_id {
ImplTraitId::ReturnTypeImplTrait(func, idx) => {
@@ -1067,6 +1256,88 @@ impl HirDisplay for Ty {
}
}
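+/// Writes the generic argument list (e.g. `<'_, i32, 4>`) for `parameters`.
+/// Lifetime arguments are rendered as `'_`, and when the target is source code or
+/// verbose types are omitted, trailing arguments equal to their declared defaults
+/// are elided.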
+fn hir_fmt_generics(
+ f: &mut HirFormatter<'_>,
+ parameters: &Substitution,
+ generic_def: Option<hir_def::GenericDefId>,
+) -> Result<(), HirDisplayError> {
+ let db = f.db;
+ let lifetime_args_count = generic_def.map_or(0, |g| db.generic_params(g).lifetimes.len());
+ if parameters.len(Interner) + lifetime_args_count > 0 {
+ let parameters_to_write = if f.display_target.is_source_code() || f.omit_verbose_types() {
+ match generic_def
+ .map(|generic_def_id| db.generic_defaults(generic_def_id))
+ .filter(|defaults| !defaults.is_empty())
+ {
+ None => parameters.as_slice(Interner),
+ Some(default_parameters) => {
+ fn should_show(
+ parameter: &GenericArg,
+ default_parameters: &[Binders<GenericArg>],
+ i: usize,
+ parameters: &Substitution,
+ ) -> bool {
+ if parameter.ty(Interner).map(|x| x.kind(Interner)) == Some(&TyKind::Error)
+ {
+ return true;
+ }
+ if let Some(ConstValue::Concrete(c)) =
+ parameter.constant(Interner).map(|x| &x.data(Interner).value)
+ {
+ if c.interned == ConstScalar::Unknown {
+ return true;
+ }
+ }
+ let default_parameter = match default_parameters.get(i) {
+ Some(x) => x,
+ None => return true,
+ };
+ let actual_default =
+ default_parameter.clone().substitute(Interner, &parameters);
+ parameter != &actual_default
+ }
+ let mut default_from = 0;
+ for (i, parameter) in parameters.iter(Interner).enumerate() {
+ if should_show(parameter, &default_parameters, i, parameters) {
+ default_from = i + 1;
+ }
+ }
+ &parameters.as_slice(Interner)[0..default_from]
+ }
+ }
+ } else {
+ parameters.as_slice(Interner)
+ };
+ if !parameters_to_write.is_empty() || lifetime_args_count != 0 {
+ write!(f, "<")?;
+ let mut first = true;
+ for _ in 0..lifetime_args_count {
+ if !first {
+ write!(f, ", ")?;
+ }
+ first = false;
+ write!(f, "'_")?;
+ }
+ for generic_arg in parameters_to_write {
+ if !first {
+ write!(f, ", ")?;
+ }
+ first = false;
+ if f.display_target.is_source_code()
+ && generic_arg.ty(Interner).map(|ty| ty.kind(Interner)) == Some(&TyKind::Error)
+ {
+ write!(f, "_")?;
+ } else {
+ generic_arg.hir_fmt(f)?;
+ }
+ }
+
+ write!(f, ">")?;
+ }
+ }
+ Ok(())
+}
+
impl HirDisplay for CallableSig {
fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> {
write!(f, "fn(")?;
@@ -1170,7 +1441,7 @@ fn write_bounds_like_dyn_trait(
// existential) here, which is the only thing that's
// possible in actual Rust, and hence don't print it
f.start_location_link(trait_.into());
- write!(f, "{}", f.db.trait_data(trait_).name)?;
+ write!(f, "{}", f.db.trait_data(trait_).name.display(f.db.upcast()))?;
f.end_location_link();
if let [_, params @ ..] = &*trait_ref.substitution.as_slice(Interner) {
if is_fn_trait {
@@ -1209,7 +1480,7 @@ fn write_bounds_like_dyn_trait(
let assoc_ty_id = from_assoc_type_id(proj.associated_ty_id);
let type_alias = f.db.type_alias_data(assoc_ty_id);
f.start_location_link(assoc_ty_id.into());
- write!(f, "{}", type_alias.name)?;
+ write!(f, "{}", type_alias.name.display(f.db.upcast()))?;
f.end_location_link();
let proj_arg_count = generics(f.db.upcast(), assoc_ty_id.into()).len_self();
@@ -1276,7 +1547,7 @@ fn fmt_trait_ref(
}
let trait_ = tr.hir_trait_id();
f.start_location_link(trait_.into());
- write!(f, "{}", f.db.trait_data(trait_).name)?;
+ write!(f, "{}", f.db.trait_data(trait_).name.display(f.db.upcast()))?;
f.end_location_link();
if tr.substitution.len(Interner) > 1 {
write!(f, "<")?;
@@ -1306,7 +1577,7 @@ impl HirDisplay for WhereClause {
write!(f, ">::",)?;
let type_alias = from_assoc_type_id(projection_ty.associated_ty_id);
f.start_location_link(type_alias.into());
- write!(f, "{}", f.db.type_alias_data(type_alias).name,)?;
+ write!(f, "{}", f.db.type_alias_data(type_alias).name.display(f.db.upcast()),)?;
f.end_location_link();
write!(f, " = ")?;
ty.hir_fmt(f)?;
@@ -1344,7 +1615,8 @@ impl HirDisplay for LifetimeData {
let id = lt_from_placeholder_idx(f.db, *idx);
let generics = generics(f.db.upcast(), id.parent);
let param_data = &generics.params.lifetimes[id.local_id];
- write!(f, "{}", param_data.name)
+ write!(f, "{}", param_data.name.display(f.db.upcast()))?;
+ Ok(())
}
LifetimeData::Static => write!(f, "'static"),
LifetimeData::Erased => Ok(()),
@@ -1376,7 +1648,7 @@ pub fn write_visibility(
Visibility::Public => write!(f, "pub "),
Visibility::Module(vis_id) => {
let def_map = module_id.def_map(f.db.upcast());
- let root_module_id = def_map.module_id(def_map.root());
+ let root_module_id = def_map.module_id(DefMap::ROOT);
if vis_id == module_id {
// pub(self) or omitted
Ok(())
@@ -1420,7 +1692,7 @@ impl HirDisplay for TypeRef {
};
write!(f, "&")?;
if let Some(lifetime) = lifetime {
- write!(f, "{} ", lifetime.name)?;
+ write!(f, "{} ", lifetime.name.display(f.db.upcast()))?;
}
write!(f, "{mutability}")?;
inner.hir_fmt(f)?;
@@ -1428,7 +1700,7 @@ impl HirDisplay for TypeRef {
TypeRef::Array(inner, len) => {
write!(f, "[")?;
inner.hir_fmt(f)?;
- write!(f, "; {len}]")?;
+ write!(f, "; {}]", len.display(f.db.upcast()))?;
}
TypeRef::Slice(inner) => {
write!(f, "[")?;
@@ -1445,7 +1717,7 @@ impl HirDisplay for TypeRef {
for index in 0..function_parameters.len() {
let (param_name, param_type) = &function_parameters[index];
if let Some(name) = param_name {
- write!(f, "{name}: ")?;
+ write!(f, "{}: ", name.display(f.db.upcast()))?;
}
param_type.hir_fmt(f)?;
@@ -1477,7 +1749,10 @@ impl HirDisplay for TypeRef {
}
TypeRef::Macro(macro_call) => {
let macro_call = macro_call.to_node(f.db.upcast());
- let ctx = body::LowerCtx::with_hygiene(f.db.upcast(), &Hygiene::new_unhygienic());
+ let ctx = hir_def::lower::LowerCtx::with_hygiene(
+ f.db.upcast(),
+ &Hygiene::new_unhygienic(),
+ );
match macro_call.path() {
Some(path) => match Path::from_src(path, &ctx) {
Some(path) => path.hir_fmt(f)?,
@@ -1503,9 +1778,13 @@ impl HirDisplay for TypeBound {
}
path.hir_fmt(f)
}
- TypeBound::Lifetime(lifetime) => write!(f, "{}", lifetime.name),
+ TypeBound::Lifetime(lifetime) => write!(f, "{}", lifetime.name.display(f.db.upcast())),
TypeBound::ForLifetime(lifetimes, path) => {
- write!(f, "for<{}> ", lifetimes.iter().format(", "))?;
+ write!(
+ f,
+ "for<{}> ",
+ lifetimes.iter().map(|it| it.display(f.db.upcast())).format(", ")
+ )?;
path.hir_fmt(f)
}
TypeBound::Error => write!(f, "{{error}}"),
@@ -1551,7 +1830,7 @@ impl HirDisplay for Path {
if !matches!(self.kind(), PathKind::Plain) || seg_idx > 0 {
write!(f, "::")?;
}
- write!(f, "{}", segment.name)?;
+ write!(f, "{}", segment.name.display(f.db.upcast()))?;
if let Some(generic_args) = segment.args_and_bindings {
// We should be in type context, so format as `Foo<Bar>` instead of `Foo::<Bar>`.
// Do we actually format expressions?
@@ -1598,7 +1877,7 @@ impl HirDisplay for Path {
} else {
write!(f, ", ")?;
}
- write!(f, "{}", binding.name)?;
+ write!(f, "{}", binding.name.display(f.db.upcast()))?;
match &binding.type_ref {
Some(ty) => {
write!(f, " = ")?;
@@ -1621,8 +1900,10 @@ impl HirDisplay for hir_def::path::GenericArg {
fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> {
match self {
hir_def::path::GenericArg::Type(ty) => ty.hir_fmt(f),
- hir_def::path::GenericArg::Const(c) => write!(f, "{c}"),
- hir_def::path::GenericArg::Lifetime(lifetime) => write!(f, "{}", lifetime.name),
+ hir_def::path::GenericArg::Const(c) => write!(f, "{}", c.display(f.db.upcast())),
+ hir_def::path::GenericArg::Lifetime(lifetime) => {
+ write!(f, "{}", lifetime.name.display(f.db.upcast()))
+ }
}
}
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs
index 7de5b4295..1ac0837b5 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs
@@ -13,34 +13,43 @@
//! to certain types. To record this, we use the union-find implementation from
//! the `ena` crate, which is extracted from rustc.
-use std::ops::Index;
-use std::sync::Arc;
+use std::{convert::identity, ops::Index};
-use chalk_ir::{cast::Cast, ConstValue, DebruijnIndex, Mutability, Safety, Scalar, TypeFlags};
+use chalk_ir::{
+ cast::Cast, fold::TypeFoldable, interner::HasInterner, DebruijnIndex, Mutability, Safety,
+ Scalar, TyKind, TypeFlags,
+};
use either::Either;
use hir_def::{
body::Body,
builtin_type::{BuiltinInt, BuiltinType, BuiltinUint},
data::{ConstData, StaticData},
- expr::{BindingAnnotation, BindingId, ExprId, ExprOrPatId, PatId},
+ hir::LabelId,
+ hir::{BindingAnnotation, BindingId, ExprId, ExprOrPatId, PatId},
lang_item::{LangItem, LangItemTarget},
layout::Integer,
- path::Path,
+ path::{ModPath, Path},
resolver::{HasResolver, ResolveValueResult, Resolver, TypeNs, ValueNs},
type_ref::TypeRef,
- AdtId, AssocItemId, DefWithBodyId, EnumVariantId, FieldId, FunctionId, HasModule,
- ItemContainerId, Lookup, TraitId, TypeAliasId, VariantId,
+ AdtId, AssocItemId, DefWithBodyId, EnumVariantId, FieldId, FunctionId, ItemContainerId, Lookup,
+ TraitId, TypeAliasId, VariantId,
};
use hir_expand::name::{name, Name};
-use la_arena::ArenaMap;
+use la_arena::{ArenaMap, Entry};
use rustc_hash::{FxHashMap, FxHashSet};
-use stdx::always;
+use stdx::{always, never};
+use triomphe::Arc;
use crate::{
- db::HirDatabase, fold_tys, fold_tys_and_consts, infer::coerce::CoerceMany,
- lower::ImplTraitLoweringMode, to_assoc_type_id, AliasEq, AliasTy, Const, DomainGoal,
- GenericArg, Goal, ImplTraitId, InEnvironment, Interner, ProjectionTy, RpitId, Substitution,
- TraitEnvironment, TraitRef, Ty, TyBuilder, TyExt, TyKind,
+ db::HirDatabase,
+ fold_tys,
+ infer::coerce::CoerceMany,
+ lower::ImplTraitLoweringMode,
+ static_lifetime, to_assoc_type_id,
+ traits::FnTrait,
+ utils::{InTypeConstIdMetadata, UnevaluatedConstEvaluatorFolder},
+ AliasEq, AliasTy, ClosureId, DomainGoal, GenericArg, Goal, ImplTraitId, InEnvironment,
+ Interner, ProjectionTy, RpitId, Substitution, TraitEnvironment, TraitRef, Ty, TyBuilder, TyExt,
};
// This lint has a false positive here. See the link below for details.
@@ -51,12 +60,15 @@ pub use coerce::could_coerce;
#[allow(unreachable_pub)]
pub use unify::could_unify;
+pub(crate) use self::closure::{CaptureKind, CapturedItem, CapturedItemWithoutTy};
+
pub(crate) mod unify;
mod path;
mod expr;
mod pat;
mod coerce;
-mod closure;
+pub(crate) mod closure;
+mod mutability;
/// The entry point of type inference.
pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<InferenceResult> {
@@ -95,10 +107,24 @@ pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<Infer
},
});
}
+ DefWithBodyId::InTypeConstId(c) => {
+ // FIXME(const-generic-body): We should not get the return type in this way.
+ ctx.return_ty = c
+ .lookup(db.upcast())
+ .thing
+ .box_any()
+ .downcast::<InTypeConstIdMetadata>()
+ .unwrap()
+ .0;
+ }
}
ctx.infer_body();
+ ctx.infer_mut_body();
+
+ ctx.infer_closures();
+
Arc::new(ctx.resolve_all())
}
@@ -106,14 +132,15 @@ pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<Infer
///
/// This is appropriate to use only after type-check: it assumes
/// that normalization will succeed, for example.
-pub(crate) fn normalize(db: &dyn HirDatabase, owner: DefWithBodyId, ty: Ty) -> Ty {
- if !ty.data(Interner).flags.intersects(TypeFlags::HAS_PROJECTION) {
+pub(crate) fn normalize(db: &dyn HirDatabase, trait_env: Arc<TraitEnvironment>, ty: Ty) -> Ty {
+    // FIXME: `TypeFlags::HAS_CT_PROJECTION` is not implemented in chalk, so `TypeFlags::HAS_PROJECTION`
+    // only covers the type case; we therefore check for arrays unconditionally. Remove the
+    // array check once the bug in chalk is fixed.
+ if !ty.data(Interner).flags.intersects(TypeFlags::HAS_PROJECTION)
+ && !matches!(ty.kind(Interner), TyKind::Array(..))
+ {
return ty;
}
- let krate = owner.module(db.upcast()).krate();
- let trait_env = owner
- .as_generic_def_id()
- .map_or_else(|| Arc::new(TraitEnvironment::empty(krate)), |d| db.trait_environment(d));
let mut table = unify::InferenceTable::new(db, trait_env);
let ty_with_vars = table.normalize_associated_types_in(ty);
@@ -188,7 +215,7 @@ pub enum InferenceDiagnostic {
/// Contains the type the field resolves to
field_with_same_name: Option<Ty>,
},
- // FIXME: Make this proper
+ // FIXME: This should be emitted in body lowering
BreakOutsideOfLoop {
expr: ExprId,
is_break: bool,
@@ -203,6 +230,10 @@ pub enum InferenceDiagnostic {
call_expr: ExprId,
found: Ty,
},
+ TypedHole {
+ expr: ExprId,
+ expected: Ty,
+ },
}
/// A mismatch between an expected and an inferred type.
@@ -276,6 +307,13 @@ pub struct Adjustment {
pub target: Ty,
}
+impl Adjustment {
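+    /// Creates an adjustment that borrows `ty` with mutability `m`, i.e. whose
+    /// target is `&ty` or `&mut ty` (with a static, effectively erased, lifetime).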
+ pub fn borrow(m: Mutability, ty: Ty) -> Self {
+ let ty = TyKind::Ref(m, static_lifetime(), ty).intern(Interner);
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(m)), target: ty }
+ }
+}
+
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Adjust {
/// Go from ! to any type.
@@ -304,6 +342,13 @@ pub enum AutoBorrow {
RawPtr(Mutability),
}
+impl AutoBorrow {
+ fn mutability(self) -> Mutability {
+ let (AutoBorrow::Ref(m) | AutoBorrow::RawPtr(m)) = self;
+ m
+ }
+}
+
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum PointerCast {
/// Go from a fn-item type to a fn-pointer type.
@@ -337,6 +382,10 @@ pub enum PointerCast {
}
/// The result of type inference: A mapping from expressions and patterns to types.
+///
+/// When you add a field that stores types (including `Substitution` and the like), don't forget
+/// to `resolve_completely()` them in `InferenceContext::resolve_all()`. Inference variables must
+/// not appear in the final inference result.
#[derive(Clone, PartialEq, Eq, Debug, Default)]
pub struct InferenceResult {
/// For each method call expr, records the function it resolves to.
@@ -363,8 +412,11 @@ pub struct InferenceResult {
standard_types: InternedStandardTypes,
/// Stores the types which were implicitly dereferenced in pattern binding modes.
pub pat_adjustments: FxHashMap<PatId, Vec<Ty>>,
- pub pat_binding_modes: FxHashMap<PatId, BindingMode>,
+ pub binding_modes: ArenaMap<BindingId, BindingMode>,
pub expr_adjustments: FxHashMap<ExprId, Vec<Adjustment>>,
+ pub(crate) closure_info: FxHashMap<ClosureId, (Vec<CapturedItem>, FnTrait)>,
+ // FIXME: remove this field
+ pub mutated_bindings_in_closure: FxHashSet<BindingId>,
}
impl InferenceResult {
@@ -401,6 +453,9 @@ impl InferenceResult {
_ => None,
})
}
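+    /// Returns the captured items and the implemented `Fn*` trait of `closure`.
+    ///
+    /// Panics if the closure does not belong to this inference result.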
+ pub fn closure_info(&self, closure: &ClosureId) -> &(Vec<CapturedItem>, FnTrait) {
+ self.closure_info.get(closure).unwrap()
+ }
}
impl Index<ExprId> for InferenceResult {
@@ -435,7 +490,6 @@ pub(crate) struct InferenceContext<'a> {
pub(crate) body: &'a Body,
pub(crate) resolver: Resolver,
table: unify::InferenceTable<'a>,
- trait_env: Arc<TraitEnvironment>,
/// The traits in scope, disregarding block modules. This is used for caching purposes.
traits_in_scope: FxHashSet<TraitId>,
pub(crate) result: InferenceResult,
@@ -453,6 +507,14 @@ pub(crate) struct InferenceContext<'a> {
resume_yield_tys: Option<(Ty, Ty)>,
diverges: Diverges,
breakables: Vec<BreakableContext>,
+
+ // fields related to closure capture
+ current_captures: Vec<CapturedItemWithoutTy>,
+ current_closure: Option<ClosureId>,
+ /// Stores the list of closure ids that need to be analyzed before this closure. See the
+ /// comment on `InferenceContext::sort_closures`
+ closure_dependencies: FxHashMap<ClosureId, Vec<ClosureId>>,
+ deferred_closures: FxHashMap<ClosureId, Vec<(Ty, Ty, Vec<Ty>, ExprId)>>,
}
#[derive(Clone, Debug)]
@@ -462,7 +524,7 @@ struct BreakableContext {
/// The coercion target of the context.
coerce: Option<CoerceMany>,
/// The optional label of the context.
- label: Option<name::Name>,
+ label: Option<LabelId>,
kind: BreakableKind,
}
@@ -477,21 +539,21 @@ enum BreakableKind {
fn find_breakable<'c>(
ctxs: &'c mut [BreakableContext],
- label: Option<&name::Name>,
+ label: Option<LabelId>,
) -> Option<&'c mut BreakableContext> {
let mut ctxs = ctxs
.iter_mut()
.rev()
.take_while(|it| matches!(it.kind, BreakableKind::Block | BreakableKind::Loop));
match label {
- Some(_) => ctxs.find(|ctx| ctx.label.as_ref() == label),
+ Some(_) => ctxs.find(|ctx| ctx.label == label),
None => ctxs.find(|ctx| matches!(ctx.kind, BreakableKind::Loop)),
}
}
fn find_continuable<'c>(
ctxs: &'c mut [BreakableContext],
- label: Option<&name::Name>,
+ label: Option<LabelId>,
) -> Option<&'c mut BreakableContext> {
match label {
Some(_) => find_breakable(ctxs, label).filter(|it| matches!(it.kind, BreakableKind::Loop)),
@@ -506,14 +568,10 @@ impl<'a> InferenceContext<'a> {
body: &'a Body,
resolver: Resolver,
) -> Self {
- let krate = owner.module(db.upcast()).krate();
- let trait_env = owner
- .as_generic_def_id()
- .map_or_else(|| Arc::new(TraitEnvironment::empty(krate)), |d| db.trait_environment(d));
+ let trait_env = db.trait_environment_for_body(owner);
InferenceContext {
result: InferenceResult::default(),
- table: unify::InferenceTable::new(db, trait_env.clone()),
- trait_env,
+ table: unify::InferenceTable::new(db, trait_env),
return_ty: TyKind::Error.intern(Interner), // set in collect_* calls
resume_yield_tys: None,
return_coercion: None,
@@ -524,6 +582,10 @@ impl<'a> InferenceContext<'a> {
resolver,
diverges: Diverges::Maybe,
breakables: Vec::new(),
+ current_captures: vec![],
+ current_closure: None,
+ deferred_closures: FxHashMap::default(),
+ closure_dependencies: FxHashMap::default(),
}
}
@@ -533,6 +595,30 @@ impl<'a> InferenceContext<'a> {
// there is no problem in it being `pub(crate)`, remove this comment.
pub(crate) fn resolve_all(self) -> InferenceResult {
let InferenceContext { mut table, mut result, .. } = self;
+ // Destructure every single field so whenever new fields are added to `InferenceResult` we
+ // don't forget to handle them here.
+ let InferenceResult {
+ method_resolutions,
+ field_resolutions: _,
+ variant_resolutions: _,
+ assoc_resolutions,
+ diagnostics,
+ type_of_expr,
+ type_of_pat,
+ type_of_binding,
+ type_of_rpit,
+ type_of_for_iterator,
+ type_mismatches,
+ standard_types: _,
+ pat_adjustments,
+ binding_modes: _,
+ expr_adjustments,
+ // Types in `closure_info` have already been `resolve_completely()`'d during
+ // `InferenceContext::infer_closures()` (in `HirPlace::ty()` specifically), so no need
+ // to resolve them here.
+ closure_info: _,
+ mutated_bindings_in_closure: _,
+ } = &mut result;
table.fallback_if_possible();
@@ -541,62 +627,63 @@ impl<'a> InferenceContext<'a> {
// make sure diverging type variables are marked as such
table.propagate_diverging_flag();
- for ty in result.type_of_expr.values_mut() {
+ for ty in type_of_expr.values_mut() {
*ty = table.resolve_completely(ty.clone());
}
- for ty in result.type_of_pat.values_mut() {
+ for ty in type_of_pat.values_mut() {
*ty = table.resolve_completely(ty.clone());
}
- for ty in result.type_of_binding.values_mut() {
+ for ty in type_of_binding.values_mut() {
*ty = table.resolve_completely(ty.clone());
}
- for ty in result.type_of_rpit.values_mut() {
+ for ty in type_of_rpit.values_mut() {
*ty = table.resolve_completely(ty.clone());
}
- for ty in result.type_of_for_iterator.values_mut() {
+ for ty in type_of_for_iterator.values_mut() {
*ty = table.resolve_completely(ty.clone());
}
- for mismatch in result.type_mismatches.values_mut() {
+ for mismatch in type_mismatches.values_mut() {
mismatch.expected = table.resolve_completely(mismatch.expected.clone());
mismatch.actual = table.resolve_completely(mismatch.actual.clone());
}
- result.diagnostics.retain_mut(|diagnostic| {
- if let InferenceDiagnostic::ExpectedFunction { found: ty, .. }
- | InferenceDiagnostic::UnresolvedField { receiver: ty, .. }
- | InferenceDiagnostic::UnresolvedMethodCall { receiver: ty, .. } = diagnostic
- {
- *ty = table.resolve_completely(ty.clone());
- // FIXME: Remove this when we are on par with rustc in terms of inference
- if ty.contains_unknown() {
- return false;
- }
+ diagnostics.retain_mut(|diagnostic| {
+ use InferenceDiagnostic::*;
+ match diagnostic {
+ ExpectedFunction { found: ty, .. }
+ | UnresolvedField { receiver: ty, .. }
+ | UnresolvedMethodCall { receiver: ty, .. } => {
+ *ty = table.resolve_completely(ty.clone());
+ // FIXME: Remove this when we are on par with rustc in terms of inference
+ if ty.contains_unknown() {
+ return false;
+ }
- if let InferenceDiagnostic::UnresolvedMethodCall { field_with_same_name, .. } =
- diagnostic
- {
- let clear = if let Some(ty) = field_with_same_name {
- *ty = table.resolve_completely(ty.clone());
- ty.contains_unknown()
- } else {
- false
- };
- if clear {
- *field_with_same_name = None;
+ if let UnresolvedMethodCall { field_with_same_name, .. } = diagnostic {
+ if let Some(ty) = field_with_same_name {
+ *ty = table.resolve_completely(ty.clone());
+ if ty.contains_unknown() {
+ *field_with_same_name = None;
+ }
+ }
}
}
+ TypedHole { expected: ty, .. } => {
+ *ty = table.resolve_completely(ty.clone());
+ }
+ _ => (),
}
true
});
- for (_, subst) in result.method_resolutions.values_mut() {
+ for (_, subst) in method_resolutions.values_mut() {
*subst = table.resolve_completely(subst.clone());
}
- for (_, subst) in result.assoc_resolutions.values_mut() {
+ for (_, subst) in assoc_resolutions.values_mut() {
*subst = table.resolve_completely(subst.clone());
}
- for adjustment in result.expr_adjustments.values_mut().flatten() {
+ for adjustment in expr_adjustments.values_mut().flatten() {
adjustment.target = table.resolve_completely(adjustment.target.clone());
}
- for adjustment in result.pat_adjustments.values_mut().flatten() {
+ for adjustment in pat_adjustments.values_mut().flatten() {
*adjustment = table.resolve_completely(adjustment.clone());
}
result
@@ -612,10 +699,10 @@ impl<'a> InferenceContext<'a> {
fn collect_fn(&mut self, func: FunctionId) {
let data = self.db.function_data(func);
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver)
+ let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver, func.into())
.with_impl_trait_mode(ImplTraitLoweringMode::Param);
let mut param_tys =
- data.params.iter().map(|(_, type_ref)| ctx.lower_ty(type_ref)).collect::<Vec<_>>();
+ data.params.iter().map(|type_ref| ctx.lower_ty(type_ref)).collect::<Vec<_>>();
        // Check if the function contains a va_list; if it does, we append it to the
        // parameter types collected from the function data.
if data.is_varargs() {
@@ -634,14 +721,9 @@ impl<'a> InferenceContext<'a> {
self.infer_top_pat(*pat, &ty);
}
- let error_ty = &TypeRef::Error;
- let return_ty = if data.has_async_kw() {
- data.async_ret_type.as_deref().unwrap_or(error_ty)
- } else {
- &*data.ret_type
- };
+ let return_ty = &*data.ret_type;
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver)
+ let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver, self.owner.into())
.with_impl_trait_mode(ImplTraitLoweringMode::Opaque);
let return_ty = ctx.lower_ty(return_ty);
let return_ty = self.insert_type_vars(return_ty);
@@ -649,36 +731,16 @@ impl<'a> InferenceContext<'a> {
let return_ty = if let Some(rpits) = self.db.return_type_impl_traits(func) {
// RPIT opaque types use substitution of their parent function.
let fn_placeholders = TyBuilder::placeholder_subst(self.db, func);
- fold_tys(
- return_ty,
- |ty, _| {
- let opaque_ty_id = match ty.kind(Interner) {
- TyKind::OpaqueType(opaque_ty_id, _) => *opaque_ty_id,
- _ => return ty,
- };
- let idx = match self.db.lookup_intern_impl_trait_id(opaque_ty_id.into()) {
- ImplTraitId::ReturnTypeImplTrait(_, idx) => idx,
- _ => unreachable!(),
- };
- let bounds = (*rpits).map_ref(|rpits| {
- rpits.impl_traits[idx].bounds.map_ref(|it| it.into_iter())
- });
- let var = self.table.new_type_var();
- let var_subst = Substitution::from1(Interner, var.clone());
- for bound in bounds {
- let predicate =
- bound.map(|it| it.cloned()).substitute(Interner, &fn_placeholders);
- let (var_predicate, binders) = predicate
- .substitute(Interner, &var_subst)
- .into_value_and_skipped_binders();
- always!(binders.is_empty(Interner)); // quantified where clauses not yet handled
- self.push_obligation(var_predicate.cast(Interner));
- }
- self.result.type_of_rpit.insert(idx, var.clone());
- var
- },
- DebruijnIndex::INNERMOST,
- )
+ let result =
+ self.insert_inference_vars_for_rpit(return_ty, rpits.clone(), fn_placeholders);
+ let rpits = rpits.skip_binders();
+ for (id, _) in rpits.impl_traits.iter() {
+ if let Entry::Vacant(e) = self.result.type_of_rpit.entry(id) {
+ never!("Missed RPIT in `insert_inference_vars_for_rpit`");
+ e.insert(TyKind::Error.intern(Interner));
+ }
+ }
+ result
} else {
return_ty
};
@@ -687,6 +749,50 @@ impl<'a> InferenceContext<'a> {
self.return_coercion = Some(CoerceMany::new(self.return_ty.clone()));
}
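+    /// Replaces every RPIT opaque type in `t` with a fresh inference variable and
+    /// registers the opaque type's bounds as obligations on that variable. This
+    /// recurses into the bounds themselves, as they may mention further RPITs.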
+ fn insert_inference_vars_for_rpit<T>(
+ &mut self,
+ t: T,
+ rpits: Arc<chalk_ir::Binders<crate::ReturnTypeImplTraits>>,
+ fn_placeholders: Substitution,
+ ) -> T
+ where
+ T: crate::HasInterner<Interner = Interner> + crate::TypeFoldable<Interner>,
+ {
+ fold_tys(
+ t,
+ |ty, _| {
+ let opaque_ty_id = match ty.kind(Interner) {
+ TyKind::OpaqueType(opaque_ty_id, _) => *opaque_ty_id,
+ _ => return ty,
+ };
+ let idx = match self.db.lookup_intern_impl_trait_id(opaque_ty_id.into()) {
+ ImplTraitId::ReturnTypeImplTrait(_, idx) => idx,
+ _ => unreachable!(),
+ };
+ let bounds = (*rpits)
+ .map_ref(|rpits| rpits.impl_traits[idx].bounds.map_ref(|it| it.into_iter()));
+ let var = self.table.new_type_var();
+ let var_subst = Substitution::from1(Interner, var.clone());
+ for bound in bounds {
+ let predicate =
+ bound.map(|it| it.cloned()).substitute(Interner, &fn_placeholders);
+ let (var_predicate, binders) =
+ predicate.substitute(Interner, &var_subst).into_value_and_skipped_binders();
+ always!(binders.is_empty(Interner)); // quantified where clauses not yet handled
+ let var_predicate = self.insert_inference_vars_for_rpit(
+ var_predicate,
+ rpits.clone(),
+ fn_placeholders.clone(),
+ );
+ self.push_obligation(var_predicate.cast(Interner));
+ }
+ self.result.type_of_rpit.insert(idx, var.clone());
+ var
+ },
+ DebruijnIndex::INNERMOST,
+ )
+ }
+
fn infer_body(&mut self) {
match self.return_coercion {
Some(_) => self.infer_return(self.body.body_expr),
@@ -732,7 +838,7 @@ impl<'a> InferenceContext<'a> {
}
fn make_ty(&mut self, type_ref: &TypeRef) -> Ty {
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver);
+ let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver, self.owner.into());
let ty = ctx.lower_ty(type_ref);
let ty = self.insert_type_vars(ty);
self.normalize_associated_types_in(ty)
@@ -742,43 +848,16 @@ impl<'a> InferenceContext<'a> {
self.result.standard_types.unknown.clone()
}
- /// Replaces ConstScalar::Unknown by a new type var, so we can maybe still infer it.
- fn insert_const_vars_shallow(&mut self, c: Const) -> Const {
- let data = c.data(Interner);
- match &data.value {
- ConstValue::Concrete(cc) => match cc.interned {
- crate::ConstScalar::Unknown => self.table.new_const_var(data.ty.clone()),
- _ => c,
- },
- _ => c,
- }
- }
-
/// Replaces `Ty::Error` by a new type var, so we can maybe still infer it.
fn insert_type_vars_shallow(&mut self, ty: Ty) -> Ty {
- match ty.kind(Interner) {
- TyKind::Error => self.table.new_type_var(),
- TyKind::InferenceVar(..) => {
- let ty_resolved = self.resolve_ty_shallow(&ty);
- if ty_resolved.is_unknown() {
- self.table.new_type_var()
- } else {
- ty
- }
- }
- _ => ty,
- }
+ self.table.insert_type_vars_shallow(ty)
}
- fn insert_type_vars(&mut self, ty: Ty) -> Ty {
- fold_tys_and_consts(
- ty,
- |x, _| match x {
- Either::Left(ty) => Either::Left(self.insert_type_vars_shallow(ty)),
- Either::Right(c) => Either::Right(self.insert_const_vars_shallow(c)),
- },
- DebruijnIndex::INNERMOST,
- )
+ fn insert_type_vars<T>(&mut self, ty: T) -> T
+ where
+ T: HasInterner<Interner = Interner> + TypeFoldable<Interner>,
+ {
+ self.table.insert_type_vars(ty)
}
fn push_obligation(&mut self, o: DomainGoal) {
@@ -786,7 +865,80 @@ impl<'a> InferenceContext<'a> {
}
fn unify(&mut self, ty1: &Ty, ty2: &Ty) -> bool {
- self.table.unify(ty1, ty2)
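+        // Fold unevaluated consts in both types down to their evaluated form first,
+        // so that equal consts written in different syntactic forms can still unify.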
+ let ty1 = ty1
+ .clone()
+ .try_fold_with(
+ &mut UnevaluatedConstEvaluatorFolder { db: self.db },
+ DebruijnIndex::INNERMOST,
+ )
+ .unwrap();
+ let ty2 = ty2
+ .clone()
+ .try_fold_with(
+ &mut UnevaluatedConstEvaluatorFolder { db: self.db },
+ DebruijnIndex::INNERMOST,
+ )
+ .unwrap();
+ self.table.unify(&ty1, &ty2)
+ }
+
+    /// Attempts to return the deeply last field of nested structures, but
+    /// does not apply any normalization in its search. Returns the same type
+    /// if the input `ty` is not a structure at all.
+ fn struct_tail_without_normalization(&mut self, ty: Ty) -> Ty {
+ self.struct_tail_with_normalize(ty, identity)
+ }
+
+ /// Returns the deeply last field of nested structures, or the same type if
+ /// not a structure at all. Corresponds to the only possible unsized field,
+ /// and its type can be used to determine unsizing strategy.
+ ///
+ /// This is parameterized over the normalization strategy (i.e. how to
+ /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
+ /// function to indicate no normalization should take place.
+ fn struct_tail_with_normalize(
+ &mut self,
+ mut ty: Ty,
+ mut normalize: impl FnMut(Ty) -> Ty,
+ ) -> Ty {
+ // FIXME: fetch the limit properly
+ let recursion_limit = 10;
+ for iteration in 0.. {
+ if iteration > recursion_limit {
+ return self.err_ty();
+ }
+ match ty.kind(Interner) {
+ TyKind::Adt(chalk_ir::AdtId(hir_def::AdtId::StructId(struct_id)), substs) => {
+ match self.db.field_types((*struct_id).into()).values().next_back().cloned() {
+ Some(field) => {
+ ty = field.substitute(Interner, substs);
+ }
+ None => break,
+ }
+ }
+ TyKind::Adt(..) => break,
+ TyKind::Tuple(_, substs) => {
+ match substs
+ .as_slice(Interner)
+ .split_last()
+ .and_then(|(last_ty, _)| last_ty.ty(Interner))
+ {
+ Some(last_ty) => ty = last_ty.clone(),
+ None => break,
+ }
+ }
+ TyKind::Alias(..) => {
+ let normalized = normalize(ty.clone());
+ if ty == normalized {
+ return ty;
+ } else {
+ ty = normalized;
+ }
+ }
+ _ => break,
+ }
+ }
+ ty
}
/// Recurses through the given type, normalizing associated types mentioned
@@ -795,7 +947,10 @@ impl<'a> InferenceContext<'a> {
/// type annotation (e.g. from a let type annotation, field type or function
/// call). `make_ty` handles this already, but e.g. for field types we need
/// to do it as well.
- fn normalize_associated_types_in(&mut self, ty: Ty) -> Ty {
+ fn normalize_associated_types_in<T>(&mut self, ty: T) -> T
+ where
+ T: HasInterner<Interner = Interner> + TypeFoldable<Interner>,
+ {
self.table.normalize_associated_types_in(ty)
}
@@ -847,11 +1002,9 @@ impl<'a> InferenceContext<'a> {
Some(path) => path,
None => return (self.err_ty(), None),
};
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver);
- // FIXME: this should resolve assoc items as well, see this example:
- // https://play.rust-lang.org/?gist=087992e9e22495446c01c0d4e2d69521
+ let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver, self.owner.into());
let (resolution, unresolved) = if value_ns {
- match self.resolver.resolve_path_in_value_ns(self.db.upcast(), path.mod_path()) {
+ match self.resolver.resolve_path_in_value_ns(self.db.upcast(), path) {
Some(ResolveValueResult::ValueNs(value)) => match value {
ValueNs::EnumVariantId(var) => {
let substs = ctx.substs_from_path(path, var.into(), true);
@@ -872,11 +1025,15 @@ impl<'a> InferenceContext<'a> {
None => return (self.err_ty(), None),
}
} else {
- match self.resolver.resolve_path_in_type_ns(self.db.upcast(), path.mod_path()) {
+ match self.resolver.resolve_path_in_type_ns(self.db.upcast(), path) {
Some(it) => it,
None => return (self.err_ty(), None),
}
};
+ let Some(mod_path) = path.mod_path() else {
+ never!("resolver should always resolve lang item paths");
+ return (self.err_ty(), None);
+ };
return match resolution {
TypeNs::AdtId(AdtId::StructId(strukt)) => {
let substs = ctx.substs_from_path(path, strukt.into(), true);
@@ -899,8 +1056,68 @@ impl<'a> InferenceContext<'a> {
TypeNs::SelfType(impl_id) => {
let generics = crate::utils::generics(self.db.upcast(), impl_id.into());
let substs = generics.placeholder_subst(self.db);
- let ty = self.db.impl_self_ty(impl_id).substitute(Interner, &substs);
- self.resolve_variant_on_alias(ty, unresolved, path)
+ let mut ty = self.db.impl_self_ty(impl_id).substitute(Interner, &substs);
+
+ let Some(mut remaining_idx) = unresolved else {
+ return self.resolve_variant_on_alias(ty, None, mod_path);
+ };
+
+ let mut remaining_segments = path.segments().skip(remaining_idx);
+
+ // We need to try resolving unresolved segments one by one because each may resolve
+ // to a projection, which `TyLoweringContext` cannot handle on its own.
+ while !remaining_segments.is_empty() {
+ let resolved_segment = path.segments().get(remaining_idx - 1).unwrap();
+ let current_segment = remaining_segments.take(1);
+
+ // If we can resolve to an enum variant, it takes priority over associated type
+ // of the same name.
+ if let Some((AdtId::EnumId(id), _)) = ty.as_adt() {
+ let enum_data = self.db.enum_data(id);
+ let name = current_segment.first().unwrap().name;
+ if let Some(local_id) = enum_data.variant(name) {
+ let variant = EnumVariantId { parent: id, local_id };
+ return if remaining_segments.len() == 1 {
+ (ty, Some(variant.into()))
+ } else {
+ // We still have unresolved paths, but enum variants never have
+ // associated types!
+ (self.err_ty(), None)
+ };
+ }
+ }
+
+                    // `lower_partly_resolved_path()` returns `None` for the type namespace unless
+                    // `remaining_segments` is empty, which is never the case here. We don't know
+                    // which namespace the new `ty` is in until it has been normalized anyway.
+ (ty, _) = ctx.lower_partly_resolved_path(
+ resolution,
+ resolved_segment,
+ current_segment,
+ false,
+ );
+
+ ty = self.table.insert_type_vars(ty);
+ ty = self.table.normalize_associated_types_in(ty);
+ ty = self.table.resolve_ty_shallow(&ty);
+ if ty.is_unknown() {
+ return (self.err_ty(), None);
+ }
+
+ // FIXME(inherent_associated_types): update `resolution` based on `ty` here.
+ remaining_idx += 1;
+ remaining_segments = remaining_segments.skip(1);
+ }
+
+ let variant = ty.as_adt().and_then(|(id, _)| match id {
+ AdtId::StructId(s) => Some(VariantId::StructId(s)),
+ AdtId::UnionId(u) => Some(VariantId::UnionId(u)),
+ AdtId::EnumId(_) => {
+ // FIXME Error E0071, expected struct, variant or union type, found enum `Foo`
+ None
+ }
+ });
+ (ty, variant)
}
TypeNs::TypeAliasId(it) => {
let container = it.lookup(self.db.upcast()).container;
@@ -917,7 +1134,7 @@ impl<'a> InferenceContext<'a> {
let ty = TyBuilder::def_ty(self.db, it.into(), parent_subst)
.fill_with_inference_vars(&mut self.table)
.build();
- self.resolve_variant_on_alias(ty, unresolved, path)
+ self.resolve_variant_on_alias(ty, unresolved, mod_path)
}
TypeNs::AdtSelfType(_) => {
// FIXME this could happen in array size expressions, once we're checking them
@@ -953,9 +1170,9 @@ impl<'a> InferenceContext<'a> {
&mut self,
ty: Ty,
unresolved: Option<usize>,
- path: &Path,
+ path: &ModPath,
) -> (Ty, Option<VariantId>) {
- let remaining = unresolved.map(|x| path.segments().skip(x).len()).filter(|x| x > &0);
+ let remaining = unresolved.map(|x| path.segments()[x..].len()).filter(|x| x > &0);
match remaining {
None => {
let variant = ty.as_adt().and_then(|(adt_id, _)| match adt_id {
@@ -969,7 +1186,7 @@ impl<'a> InferenceContext<'a> {
(ty, variant)
}
Some(1) => {
- let segment = path.mod_path().segments().last().unwrap();
+ let segment = path.segments().last().unwrap();
// this could be an enum variant or associated type
if let Some((AdtId::EnumId(enum_id), _)) = ty.as_adt() {
let enum_data = self.db.enum_data(enum_id);
@@ -993,22 +1210,6 @@ impl<'a> InferenceContext<'a> {
self.db.lang_item(krate, item)
}
- fn resolve_into_iter_item(&self) -> Option<TypeAliasId> {
- let ItemContainerId::TraitId(trait_) = self.resolve_lang_item(LangItem::IntoIterIntoIter)?
- .as_function()?
- .lookup(self.db.upcast()).container
- else { return None };
- self.db.trait_data(trait_).associated_type_by_name(&name![IntoIter])
- }
-
- fn resolve_iterator_item(&self) -> Option<TypeAliasId> {
- let ItemContainerId::TraitId(trait_) = self.resolve_lang_item(LangItem::IteratorNext)?
- .as_function()?
- .lookup(self.db.upcast()).container
- else { return None };
- self.db.trait_data(trait_).associated_type_by_name(&name![Item])
- }
-
fn resolve_output_on(&self, trait_: TraitId) -> Option<TypeAliasId> {
self.db.trait_data(trait_).associated_type_by_name(&name![Output])
}
@@ -1017,10 +1218,6 @@ impl<'a> InferenceContext<'a> {
self.resolve_lang_item(lang)?.as_trait()
}
- fn resolve_ops_try_output(&self) -> Option<TypeAliasId> {
- self.resolve_output_on(self.resolve_lang_trait(LangItem::Try)?)
- }
-
fn resolve_ops_neg_output(&self) -> Option<TypeAliasId> {
self.resolve_output_on(self.resolve_lang_trait(LangItem::Neg)?)
}
@@ -1136,9 +1333,8 @@ impl Expectation {
    /// which is still useful, because it informs integer literals and the like.
    /// See the test case `test/ui/coerce-expect-unsized.rs` and #20169
    /// for examples of where this comes up.
- fn rvalue_hint(table: &mut unify::InferenceTable<'_>, ty: Ty) -> Self {
- // FIXME: do struct_tail_without_normalization
- match table.resolve_ty_shallow(&ty).kind(Interner) {
+ fn rvalue_hint(ctx: &mut InferenceContext<'_>, ty: Ty) -> Self {
+ match ctx.struct_tail_without_normalization(ty.clone()).kind(Interner) {
TyKind::Slice(_) | TyKind::Str | TyKind::Dyn(_) => Expectation::RValueLikeUnsized(ty),
_ => Expectation::has_type(ty),
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs
index a6449d019..ff64ae252 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs
@@ -1,12 +1,33 @@
//! Inference of closure parameter types based on the closure's expected type.
-use chalk_ir::{cast::Cast, AliasEq, AliasTy, FnSubst, WhereClause};
-use hir_def::{expr::ExprId, HasModule};
+use std::{cmp, collections::HashMap, convert::Infallible, mem};
+
+use chalk_ir::{
+ cast::Cast,
+ fold::{FallibleTypeFolder, TypeFoldable},
+ AliasEq, AliasTy, BoundVar, DebruijnIndex, FnSubst, Mutability, TyKind, WhereClause,
+};
+use hir_def::{
+ data::adt::VariantData,
+ hir::{Array, BinaryOp, BindingId, CaptureBy, Expr, ExprId, Pat, PatId, Statement, UnaryOp},
+ lang_item::LangItem,
+ resolver::{resolver_for_expr, ResolveValueResult, ValueNs},
+ DefWithBodyId, FieldId, HasModule, VariantId,
+};
+use hir_expand::name;
+use rustc_hash::FxHashMap;
use smallvec::SmallVec;
+use stdx::never;
use crate::{
- to_chalk_trait_id, utils, ChalkTraitId, DynTy, FnPointer, FnSig, Interner, Substitution, Ty,
- TyExt, TyKind,
+ db::HirDatabase,
+ from_placeholder_idx, make_binders,
+ mir::{BorrowKind, MirSpan, ProjectionElem},
+ static_lifetime, to_chalk_trait_id,
+ traits::FnTrait,
+ utils::{self, generics, Generics},
+ Adjust, Adjustment, Binders, BindingMode, ChalkTraitId, ClosureId, DynTy, FnPointer, FnSig,
+ Interner, Substitution, Ty, TyExt,
};
use super::{Expectation, InferenceContext};
@@ -86,3 +107,905 @@ impl InferenceContext<'_> {
None
}
}
+
+// The functions below handle closure capture and closure kind (Fn, FnMut, ..) inference.
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct HirPlace {
+ pub(crate) local: BindingId,
+ pub(crate) projections: Vec<ProjectionElem<Infallible, Ty>>,
+}
+
+impl HirPlace {
+ fn ty(&self, ctx: &mut InferenceContext<'_>) -> Ty {
+ let mut ty = ctx.table.resolve_completely(ctx.result[self.local].clone());
+ for p in &self.projections {
+ ty = p.projected_ty(
+ ty,
+ ctx.db,
+ |_, _, _| {
+ unreachable!("Closure field only happens in MIR");
+ },
+ ctx.owner.module(ctx.db.upcast()).krate(),
+ );
+ }
+ ty.clone()
+ }
+
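+    /// Adjusts the capture kind after the place has been truncated to its first
+    /// `len` projections: a mutable borrow taken through a now-truncated `Deref`
+    /// is weakened to a unique borrow of the reference itself.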
+ fn capture_kind_of_truncated_place(
+ &self,
+ mut current_capture: CaptureKind,
+ len: usize,
+ ) -> CaptureKind {
+ match current_capture {
+ CaptureKind::ByRef(BorrowKind::Mut { .. }) => {
+ if self.projections[len..].iter().any(|x| *x == ProjectionElem::Deref) {
+ current_capture = CaptureKind::ByRef(BorrowKind::Unique);
+ }
+ }
+ _ => (),
+ }
+ current_capture
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub enum CaptureKind {
+ ByRef(BorrowKind),
+ ByValue,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct CapturedItem {
+ pub(crate) place: HirPlace,
+ pub(crate) kind: CaptureKind,
+ pub(crate) span: MirSpan,
+ pub(crate) ty: Binders<Ty>,
+}
+
+impl CapturedItem {
+ pub fn local(&self) -> BindingId {
+ self.place.local
+ }
+
+ pub fn ty(&self, subst: &Substitution) -> Ty {
+ self.ty.clone().substitute(Interner, utils::ClosureSubst(subst).parent_subst())
+ }
+
+ pub fn kind(&self) -> CaptureKind {
+ self.kind
+ }
+
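+    /// Renders this captured place as user-facing text, e.g. `(*a.b).0`.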
+ pub fn display_place(&self, owner: DefWithBodyId, db: &dyn HirDatabase) -> String {
+ let body = db.body(owner);
+ let mut result = body[self.place.local].name.display(db.upcast()).to_string();
+ let mut field_need_paren = false;
+ for proj in &self.place.projections {
+ match proj {
+ ProjectionElem::Deref => {
+ result = format!("*{result}");
+ field_need_paren = true;
+ }
+ ProjectionElem::Field(f) => {
+ if field_need_paren {
+ result = format!("({result})");
+ }
+ let variant_data = f.parent.variant_data(db.upcast());
+ let field = match &*variant_data {
+ VariantData::Record(fields) => fields[f.local_id]
+ .name
+ .as_str()
+ .unwrap_or("[missing field]")
+ .to_string(),
+ VariantData::Tuple(fields) => fields
+ .iter()
+ .position(|x| x.0 == f.local_id)
+ .unwrap_or_default()
+ .to_string(),
+ VariantData::Unit => "[missing field]".to_string(),
+ };
+ result = format!("{result}.{field}");
+ field_need_paren = false;
+ }
+ &ProjectionElem::TupleOrClosureField(field) => {
+ if field_need_paren {
+ result = format!("({result})");
+ }
+ result = format!("{result}.{field}");
+ field_need_paren = false;
+ }
+ ProjectionElem::Index(_)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::OpaqueCast(_) => {
+ never!("Not happen in closure capture");
+ continue;
+ }
+ }
+ }
+ result
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct CapturedItemWithoutTy {
+ pub(crate) place: HirPlace,
+ pub(crate) kind: CaptureKind,
+ pub(crate) span: MirSpan,
+}
+
+impl CapturedItemWithoutTy {
+ fn with_ty(self, ctx: &mut InferenceContext<'_>) -> CapturedItem {
+ let ty = self.place.ty(ctx).clone();
+ let ty = match &self.kind {
+ CaptureKind::ByValue => ty,
+ CaptureKind::ByRef(bk) => {
+ let m = match bk {
+ BorrowKind::Mut { .. } => Mutability::Mut,
+ _ => Mutability::Not,
+ };
+ TyKind::Ref(m, static_lifetime(), ty).intern(Interner)
+ }
+ };
+ return CapturedItem {
+ place: self.place,
+ kind: self.kind,
+ span: self.span,
+ ty: replace_placeholder_with_binder(ctx.db, ctx.owner, ty),
+ };
+
+ fn replace_placeholder_with_binder(
+ db: &dyn HirDatabase,
+ owner: DefWithBodyId,
+ ty: Ty,
+ ) -> Binders<Ty> {
+ struct Filler<'a> {
+ db: &'a dyn HirDatabase,
+ generics: Generics,
+ }
+ impl FallibleTypeFolder<Interner> for Filler<'_> {
+ type Error = ();
+
+ fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder<Interner, Error = Self::Error> {
+ self
+ }
+
+ fn interner(&self) -> Interner {
+ Interner
+ }
+
+ fn try_fold_free_placeholder_const(
+ &mut self,
+ ty: chalk_ir::Ty<Interner>,
+ idx: chalk_ir::PlaceholderIndex,
+ outer_binder: DebruijnIndex,
+ ) -> Result<chalk_ir::Const<Interner>, Self::Error> {
+ let x = from_placeholder_idx(self.db, idx);
+ let Some(idx) = self.generics.param_idx(x) else {
+ return Err(());
+ };
+ Ok(BoundVar::new(outer_binder, idx).to_const(Interner, ty))
+ }
+
+ fn try_fold_free_placeholder_ty(
+ &mut self,
+ idx: chalk_ir::PlaceholderIndex,
+ outer_binder: DebruijnIndex,
+ ) -> std::result::Result<Ty, Self::Error> {
+ let x = from_placeholder_idx(self.db, idx);
+ let Some(idx) = self.generics.param_idx(x) else {
+ return Err(());
+ };
+ Ok(BoundVar::new(outer_binder, idx).to_ty(Interner))
+ }
+ }
+ let Some(generic_def) = owner.as_generic_def_id() else {
+ return Binders::empty(Interner, ty);
+ };
+ let filler = &mut Filler { db, generics: generics(db.upcast(), generic_def) };
+ let result = ty.clone().try_fold_with(filler, DebruijnIndex::INNERMOST).unwrap_or(ty);
+ make_binders(db, &filler.generics, result)
+ }
+ }
+}
+
+impl InferenceContext<'_> {
+ fn place_of_expr(&mut self, tgt_expr: ExprId) -> Option<HirPlace> {
+ let r = self.place_of_expr_without_adjust(tgt_expr)?;
+ let default = vec![];
+ let adjustments = self.result.expr_adjustments.get(&tgt_expr).unwrap_or(&default);
+ apply_adjusts_to_place(r, adjustments)
+ }
+
+ fn place_of_expr_without_adjust(&mut self, tgt_expr: ExprId) -> Option<HirPlace> {
+ match &self.body[tgt_expr] {
+ Expr::Path(p) => {
+ let resolver = resolver_for_expr(self.db.upcast(), self.owner, tgt_expr);
+ if let Some(r) = resolver.resolve_path_in_value_ns(self.db.upcast(), p) {
+ if let ResolveValueResult::ValueNs(v) = r {
+ if let ValueNs::LocalBinding(b) = v {
+ return Some(HirPlace { local: b, projections: vec![] });
+ }
+ }
+ }
+ }
+ Expr::Field { expr, name } => {
+ let mut place = self.place_of_expr(*expr)?;
+ if let TyKind::Tuple(..) = self.expr_ty(*expr).kind(Interner) {
+ let index = name.as_tuple_index()?;
+ place.projections.push(ProjectionElem::TupleOrClosureField(index))
+ } else {
+ let field = self.result.field_resolution(tgt_expr)?;
+ place.projections.push(ProjectionElem::Field(field));
+ }
+ return Some(place);
+ }
+ Expr::UnaryOp { expr, op: UnaryOp::Deref } => {
+ if matches!(
+ self.expr_ty_after_adjustments(*expr).kind(Interner),
+ TyKind::Ref(..) | TyKind::Raw(..)
+ ) {
+ let mut place = self.place_of_expr(*expr)?;
+ place.projections.push(ProjectionElem::Deref);
+ return Some(place);
+ }
+ }
+ _ => (),
+ }
+ None
+ }
+
+ fn push_capture(&mut self, capture: CapturedItemWithoutTy) {
+ self.current_captures.push(capture);
+ }
+
+ fn ref_expr(&mut self, expr: ExprId) {
+ if let Some(place) = self.place_of_expr(expr) {
+ self.add_capture(place, CaptureKind::ByRef(BorrowKind::Shared), expr.into());
+ }
+ self.walk_expr(expr);
+ }
+
+ fn add_capture(&mut self, place: HirPlace, kind: CaptureKind, span: MirSpan) {
+ if self.is_upvar(&place) {
+ self.push_capture(CapturedItemWithoutTy { place, kind, span });
+ }
+ }
+
+ fn mutate_expr(&mut self, expr: ExprId) {
+ if let Some(place) = self.place_of_expr(expr) {
+ self.add_capture(
+ place,
+ CaptureKind::ByRef(BorrowKind::Mut { allow_two_phase_borrow: false }),
+ expr.into(),
+ );
+ }
+ self.walk_expr(expr);
+ }
+
+ fn consume_expr(&mut self, expr: ExprId) {
+ if let Some(place) = self.place_of_expr(expr) {
+ self.consume_place(place, expr.into());
+ }
+ self.walk_expr(expr);
+ }
+
+ fn consume_place(&mut self, place: HirPlace, span: MirSpan) {
+ if self.is_upvar(&place) {
+ let ty = place.ty(self).clone();
+ let kind = if self.is_ty_copy(ty) {
+ CaptureKind::ByRef(BorrowKind::Shared)
+ } else {
+ CaptureKind::ByValue
+ };
+ self.push_capture(CapturedItemWithoutTy { place, kind, span });
+ }
+ }
+
+ fn walk_expr_with_adjust(&mut self, tgt_expr: ExprId, adjustment: &[Adjustment]) {
+ if let Some((last, rest)) = adjustment.split_last() {
+ match last.kind {
+ Adjust::NeverToAny | Adjust::Deref(None) | Adjust::Pointer(_) => {
+ self.walk_expr_with_adjust(tgt_expr, rest)
+ }
+ Adjust::Deref(Some(m)) => match m.0 {
+ Some(m) => {
+ self.ref_capture_with_adjusts(m, tgt_expr, rest);
+ }
+ None => unreachable!(),
+ },
+ Adjust::Borrow(b) => {
+ self.ref_capture_with_adjusts(b.mutability(), tgt_expr, rest);
+ }
+ }
+ } else {
+ self.walk_expr_without_adjust(tgt_expr);
+ }
+ }
+
+ fn ref_capture_with_adjusts(&mut self, m: Mutability, tgt_expr: ExprId, rest: &[Adjustment]) {
+ let capture_kind = match m {
+ Mutability::Mut => {
+ CaptureKind::ByRef(BorrowKind::Mut { allow_two_phase_borrow: false })
+ }
+ Mutability::Not => CaptureKind::ByRef(BorrowKind::Shared),
+ };
+ if let Some(place) = self.place_of_expr_without_adjust(tgt_expr) {
+ if let Some(place) = apply_adjusts_to_place(place, rest) {
+ self.add_capture(place, capture_kind, tgt_expr.into());
+ }
+ }
+ self.walk_expr_with_adjust(tgt_expr, rest);
+ }
+
+ fn walk_expr(&mut self, tgt_expr: ExprId) {
+ if let Some(x) = self.result.expr_adjustments.get_mut(&tgt_expr) {
+ // FIXME: this take is completely unneeded, and is just here to make the borrow
+ // checker happy. Remove it if you can.
+ let x_taken = mem::take(x);
+ self.walk_expr_with_adjust(tgt_expr, &x_taken);
+ *self.result.expr_adjustments.get_mut(&tgt_expr).unwrap() = x_taken;
+ } else {
+ self.walk_expr_without_adjust(tgt_expr);
+ }
+ }
+
+ fn walk_expr_without_adjust(&mut self, tgt_expr: ExprId) {
+ match &self.body[tgt_expr] {
+ Expr::If { condition, then_branch, else_branch } => {
+ self.consume_expr(*condition);
+ self.consume_expr(*then_branch);
+ if let &Some(expr) = else_branch {
+ self.consume_expr(expr);
+ }
+ }
+ Expr::Async { statements, tail, .. }
+ | Expr::Unsafe { statements, tail, .. }
+ | Expr::Block { statements, tail, .. } => {
+ for s in statements.iter() {
+ match s {
+ Statement::Let { pat, type_ref: _, initializer, else_branch } => {
+ if let Some(else_branch) = else_branch {
+ self.consume_expr(*else_branch);
+ if let Some(initializer) = initializer {
+ self.consume_expr(*initializer);
+ }
+ return;
+ }
+ if let Some(initializer) = initializer {
+ self.walk_expr(*initializer);
+ if let Some(place) = self.place_of_expr(*initializer) {
+ self.consume_with_pat(place, *pat);
+ }
+ }
+ }
+ Statement::Expr { expr, has_semi: _ } => {
+ self.consume_expr(*expr);
+ }
+ }
+ }
+ if let Some(tail) = tail {
+ self.consume_expr(*tail);
+ }
+ }
+ Expr::While { condition, body, label: _ } => {
+ self.consume_expr(*condition);
+ self.consume_expr(*body);
+ }
+ Expr::Call { callee, args, is_assignee_expr: _ } => {
+ self.consume_expr(*callee);
+ self.consume_exprs(args.iter().copied());
+ }
+ Expr::MethodCall { receiver, args, .. } => {
+ self.consume_expr(*receiver);
+ self.consume_exprs(args.iter().copied());
+ }
+ Expr::Match { expr, arms } => {
+ for arm in arms.iter() {
+ self.consume_expr(arm.expr);
+ if let Some(guard) = arm.guard {
+ self.consume_expr(guard);
+ }
+ }
+ self.walk_expr(*expr);
+ if let Some(discr_place) = self.place_of_expr(*expr) {
+ if self.is_upvar(&discr_place) {
+ let mut capture_mode = None;
+ for arm in arms.iter() {
+ self.walk_pat(&mut capture_mode, arm.pat);
+ }
+ if let Some(c) = capture_mode {
+ self.push_capture(CapturedItemWithoutTy {
+ place: discr_place,
+ kind: c,
+ span: (*expr).into(),
+ })
+ }
+ }
+ }
+ }
+ Expr::Break { expr, label: _ }
+ | Expr::Return { expr }
+ | Expr::Yield { expr }
+ | Expr::Yeet { expr } => {
+ if let &Some(expr) = expr {
+ self.consume_expr(expr);
+ }
+ }
+ Expr::RecordLit { fields, spread, .. } => {
+ if let &Some(expr) = spread {
+ self.consume_expr(expr);
+ }
+ self.consume_exprs(fields.iter().map(|x| x.expr));
+ }
+ Expr::Field { expr, name: _ } => self.select_from_expr(*expr),
+ Expr::UnaryOp { expr, op: UnaryOp::Deref } => {
+ if matches!(
+ self.expr_ty_after_adjustments(*expr).kind(Interner),
+ TyKind::Ref(..) | TyKind::Raw(..)
+ ) {
+ self.select_from_expr(*expr);
+ } else if let Some((f, _)) = self.result.method_resolution(tgt_expr) {
+ let mutability = 'b: {
+ if let Some(deref_trait) =
+ self.resolve_lang_item(LangItem::DerefMut).and_then(|x| x.as_trait())
+ {
+ if let Some(deref_fn) =
+ self.db.trait_data(deref_trait).method_by_name(&name![deref_mut])
+ {
+ break 'b deref_fn == f;
+ }
+ }
+ false
+ };
+ if mutability {
+ self.mutate_expr(*expr);
+ } else {
+ self.ref_expr(*expr);
+ }
+ } else {
+ self.select_from_expr(*expr);
+ }
+ }
+ Expr::UnaryOp { expr, op: _ }
+ | Expr::Array(Array::Repeat { initializer: expr, repeat: _ })
+ | Expr::Await { expr }
+ | Expr::Loop { body: expr, label: _ }
+ | Expr::Let { pat: _, expr }
+ | Expr::Box { expr }
+ | Expr::Cast { expr, type_ref: _ } => {
+ self.consume_expr(*expr);
+ }
+ Expr::Ref { expr, rawness: _, mutability } => match mutability {
+ hir_def::type_ref::Mutability::Shared => self.ref_expr(*expr),
+ hir_def::type_ref::Mutability::Mut => self.mutate_expr(*expr),
+ },
+ Expr::BinaryOp { lhs, rhs, op } => {
+ let Some(op) = op else {
+ return;
+ };
+ if matches!(op, BinaryOp::Assignment { .. }) {
+ self.mutate_expr(*lhs);
+ self.consume_expr(*rhs);
+ return;
+ }
+ self.consume_expr(*lhs);
+ self.consume_expr(*rhs);
+ }
+ Expr::Range { lhs, rhs, range_type: _ } => {
+ if let &Some(expr) = lhs {
+ self.consume_expr(expr);
+ }
+ if let &Some(expr) = rhs {
+ self.consume_expr(expr);
+ }
+ }
+ Expr::Index { base, index } => {
+ self.select_from_expr(*base);
+ self.consume_expr(*index);
+ }
+ Expr::Closure { .. } => {
+ let ty = self.expr_ty(tgt_expr);
+ let TyKind::Closure(id, _) = ty.kind(Interner) else {
+ never!("closure type is always closure");
+ return;
+ };
+ let (captures, _) =
+ self.result.closure_info.get(id).expect(
+ "We sort closures, so we should always have data for inner closures",
+ );
+ let mut cc = mem::take(&mut self.current_captures);
+ cc.extend(captures.iter().filter(|x| self.is_upvar(&x.place)).map(|x| {
+ CapturedItemWithoutTy { place: x.place.clone(), kind: x.kind, span: x.span }
+ }));
+ self.current_captures = cc;
+ }
+ Expr::Array(Array::ElementList { elements: exprs, is_assignee_expr: _ })
+ | Expr::Tuple { exprs, is_assignee_expr: _ } => {
+ self.consume_exprs(exprs.iter().copied())
+ }
+ Expr::Missing
+ | Expr::Continue { .. }
+ | Expr::Path(_)
+ | Expr::Literal(_)
+ | Expr::Const(_)
+ | Expr::Underscore => (),
+ }
+ }
+
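+ /// Computes the capture kind that matching on a place requires, as the maximum over
+ /// the bindings and tests in the pattern: a moving binding of a non-`Copy` type forces
+ /// `ByValue`, a `ref mut` binding forces a mutable (or unique) borrow, and anything
+ /// that merely inspects the value (literals, ranges, multi-variant enum discriminants)
+ /// forces a shared borrow.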
+ fn walk_pat(&mut self, result: &mut Option<CaptureKind>, pat: PatId) {
+ let mut update_result = |ck: CaptureKind| match result {
+ Some(r) => {
+ *r = cmp::max(*r, ck);
+ }
+ None => *result = Some(ck),
+ };
+
+ self.walk_pat_inner(
+ pat,
+ &mut update_result,
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ );
+ }
+
+ fn walk_pat_inner(
+ &mut self,
+ p: PatId,
+ update_result: &mut impl FnMut(CaptureKind),
+ mut for_mut: BorrowKind,
+ ) {
+ match &self.body[p] {
+ Pat::Ref { .. }
+ | Pat::Box { .. }
+ | Pat::Missing
+ | Pat::Wild
+ | Pat::Tuple { .. }
+ | Pat::Or(_) => (),
+ Pat::TupleStruct { .. } | Pat::Record { .. } => {
+ if let Some(variant) = self.result.variant_resolution_for_pat(p) {
+ let adt = variant.adt_id();
+ let is_multivariant = match adt {
+ hir_def::AdtId::EnumId(e) => self.db.enum_data(e).variants.len() != 1,
+ _ => false,
+ };
+ if is_multivariant {
+ update_result(CaptureKind::ByRef(BorrowKind::Shared));
+ }
+ }
+ }
+ Pat::Slice { .. }
+ | Pat::ConstBlock(_)
+ | Pat::Path(_)
+ | Pat::Lit(_)
+ | Pat::Range { .. } => {
+ update_result(CaptureKind::ByRef(BorrowKind::Shared));
+ }
+ Pat::Bind { id, .. } => match self.result.binding_modes[*id] {
+ crate::BindingMode::Move => {
+ if self.is_ty_copy(self.result.type_of_binding[*id].clone()) {
+ update_result(CaptureKind::ByRef(BorrowKind::Shared));
+ } else {
+ update_result(CaptureKind::ByValue);
+ }
+ }
+ crate::BindingMode::Ref(r) => match r {
+ Mutability::Mut => update_result(CaptureKind::ByRef(for_mut)),
+ Mutability::Not => update_result(CaptureKind::ByRef(BorrowKind::Shared)),
+ },
+ },
+ }
+ if self.result.pat_adjustments.get(&p).map_or(false, |x| !x.is_empty()) {
+ for_mut = BorrowKind::Unique;
+ }
+ self.body.walk_pats_shallow(p, |p| self.walk_pat_inner(p, update_result, for_mut));
+ }
+
+ fn expr_ty(&self, expr: ExprId) -> Ty {
+ self.result[expr].clone()
+ }
+
+ fn expr_ty_after_adjustments(&self, e: ExprId) -> Ty {
+ let mut ty = None;
+ if let Some(x) = self.result.expr_adjustments.get(&e) {
+ if let Some(x) = x.last() {
+ ty = Some(x.target.clone());
+ }
+ }
+ ty.unwrap_or_else(|| self.expr_ty(e))
+ }
+
+ fn is_upvar(&self, place: &HirPlace) -> bool {
+ if let Some(c) = self.current_closure {
+ let (_, root) = self.db.lookup_intern_closure(c.into());
+ return self.body.is_binding_upvar(place.local, root);
+ }
+ false
+ }
+
+ fn is_ty_copy(&mut self, ty: Ty) -> bool {
+ if let TyKind::Closure(id, _) = ty.kind(Interner) {
+ // FIXME: We handle closures as a special case, since chalk considers every closure
+ // to be `Copy`. We should probably let chalk know which closures are `Copy`, but I
+ // don't know how to do that without creating query cycles.
+ return self.result.closure_info.get(id).map(|x| x.1 == FnTrait::Fn).unwrap_or(true);
+ }
+ self.table.resolve_completely(ty).is_copy(self.db, self.owner)
+ }
+
+ fn select_from_expr(&mut self, expr: ExprId) {
+ self.walk_expr(expr);
+ }
+
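+ /// For `move` closures every capture becomes by-value and the captured place is
+ /// truncated at its first `Deref`: informally, a `move` closure reading `*r` captures
+ /// `r` itself by value, since it cannot move the pointee out through the reference.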
+ fn adjust_for_move_closure(&mut self) {
+ for capture in &mut self.current_captures {
+ if let Some(first_deref) =
+ capture.place.projections.iter().position(|proj| *proj == ProjectionElem::Deref)
+ {
+ capture.place.projections.truncate(first_deref);
+ }
+ capture.kind = CaptureKind::ByValue;
+ }
+ }
+
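+ /// Merges overlapping captures so that only the shortest prefix of each place is kept,
+ /// with the strongest capture kind of everything it subsumes: informally, capturing
+ /// `a.b` by shared ref and `a.b.c` by value collapses into capturing `a.b` by value
+ /// (modulo the `Deref` weakening in `capture_kind_of_truncated_place`).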
+ fn minimize_captures(&mut self) {
+ self.current_captures.sort_by_key(|x| x.place.projections.len());
+ let mut hash_map = HashMap::<HirPlace, usize>::new();
+ let result = mem::take(&mut self.current_captures);
+ for item in result {
+ let mut lookup_place = HirPlace { local: item.place.local, projections: vec![] };
+ let mut it = item.place.projections.iter();
+ let prev_index = loop {
+ if let Some(k) = hash_map.get(&lookup_place) {
+ break Some(*k);
+ }
+ match it.next() {
+ Some(x) => lookup_place.projections.push(x.clone()),
+ None => break None,
+ }
+ };
+ match prev_index {
+ Some(p) => {
+ let len = self.current_captures[p].place.projections.len();
+ let kind_after_truncate =
+ item.place.capture_kind_of_truncated_place(item.kind, len);
+ self.current_captures[p].kind =
+ cmp::max(kind_after_truncate, self.current_captures[p].kind);
+ }
+ None => {
+ hash_map.insert(item.place.clone(), self.current_captures.len());
+ self.current_captures.push(item);
+ }
+ }
+ }
+ }
+
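+ /// Walks a pattern that consumes `place` (e.g. the pattern of a `let`), projecting the
+ /// place into the sub-places bound by the pattern and capturing each one according to
+ /// its binding mode.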
+ fn consume_with_pat(&mut self, mut place: HirPlace, pat: PatId) {
+ let cnt = self.result.pat_adjustments.get(&pat).map(|x| x.len()).unwrap_or_default();
+ place.projections = place
+ .projections
+ .iter()
+ .cloned()
+ .chain((0..cnt).map(|_| ProjectionElem::Deref))
+ .collect::<Vec<_>>()
+ .into();
+ match &self.body[pat] {
+ Pat::Missing | Pat::Wild => (),
+ Pat::Tuple { args, ellipsis } => {
+ let (al, ar) = args.split_at(ellipsis.unwrap_or(args.len()));
+ let field_count = match self.result[pat].kind(Interner) {
+ TyKind::Tuple(_, s) => s.len(Interner),
+ _ => return,
+ };
+ let fields = 0..field_count;
+ let it = al.iter().zip(fields.clone()).chain(ar.iter().rev().zip(fields.rev()));
+ for (arg, i) in it {
+ let mut p = place.clone();
+ p.projections.push(ProjectionElem::TupleOrClosureField(i));
+ self.consume_with_pat(p, *arg);
+ }
+ }
+ Pat::Or(pats) => {
+ for pat in pats.iter() {
+ self.consume_with_pat(place.clone(), *pat);
+ }
+ }
+ Pat::Record { args, .. } => {
+ let Some(variant) = self.result.variant_resolution_for_pat(pat) else {
+ return;
+ };
+ match variant {
+ VariantId::EnumVariantId(_) | VariantId::UnionId(_) => {
+ self.consume_place(place, pat.into())
+ }
+ VariantId::StructId(s) => {
+ let vd = &*self.db.struct_data(s).variant_data;
+ for field_pat in args.iter() {
+ let arg = field_pat.pat;
+ let Some(local_id) = vd.field(&field_pat.name) else {
+ continue;
+ };
+ let mut p = place.clone();
+ p.projections.push(ProjectionElem::Field(FieldId {
+ parent: variant.into(),
+ local_id,
+ }));
+ self.consume_with_pat(p, arg);
+ }
+ }
+ }
+ }
+ Pat::Range { .. }
+ | Pat::Slice { .. }
+ | Pat::ConstBlock(_)
+ | Pat::Path(_)
+ | Pat::Lit(_) => self.consume_place(place, pat.into()),
+ Pat::Bind { id, subpat: _ } => {
+ let mode = self.result.binding_modes[*id];
+ let capture_kind = match mode {
+ BindingMode::Move => {
+ self.consume_place(place, pat.into());
+ return;
+ }
+ BindingMode::Ref(Mutability::Not) => BorrowKind::Shared,
+ BindingMode::Ref(Mutability::Mut) => {
+ BorrowKind::Mut { allow_two_phase_borrow: false }
+ }
+ };
+ self.add_capture(place, CaptureKind::ByRef(capture_kind), pat.into());
+ }
+ Pat::TupleStruct { path: _, args, ellipsis } => {
+ let Some(variant) = self.result.variant_resolution_for_pat(pat) else {
+ return;
+ };
+ match variant {
+ VariantId::EnumVariantId(_) | VariantId::UnionId(_) => {
+ self.consume_place(place, pat.into())
+ }
+ VariantId::StructId(s) => {
+ let vd = &*self.db.struct_data(s).variant_data;
+ let (al, ar) = args.split_at(ellipsis.unwrap_or(args.len()));
+ let fields = vd.fields().iter();
+ let it =
+ al.iter().zip(fields.clone()).chain(ar.iter().rev().zip(fields.rev()));
+ for (arg, (i, _)) in it {
+ let mut p = place.clone();
+ p.projections.push(ProjectionElem::Field(FieldId {
+ parent: variant.into(),
+ local_id: i,
+ }));
+ self.consume_with_pat(p, *arg);
+ }
+ }
+ }
+ }
+ Pat::Ref { pat, mutability: _ } => {
+ place.projections.push(ProjectionElem::Deref);
+ self.consume_with_pat(place, *pat)
+ }
+ Pat::Box { .. } => (), // not supported
+ }
+ }
+
+ fn consume_exprs(&mut self, exprs: impl Iterator<Item = ExprId>) {
+ for expr in exprs {
+ self.consume_expr(expr);
+ }
+ }
+
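+ /// The resulting trait is the minimum over all captures: any by-value capture makes
+ /// the closure at most `FnOnce`, any unique or mutable borrow makes it at most
+ /// `FnMut`, and shared borrows alone keep it `Fn`.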
+ fn closure_kind(&self) -> FnTrait {
+ let mut r = FnTrait::Fn;
+ for x in &self.current_captures {
+ r = cmp::min(
+ r,
+ match &x.kind {
+ CaptureKind::ByRef(BorrowKind::Unique | BorrowKind::Mut { .. }) => {
+ FnTrait::FnMut
+ }
+ CaptureKind::ByRef(BorrowKind::Shallow | BorrowKind::Shared) => FnTrait::Fn,
+ CaptureKind::ByValue => FnTrait::FnOnce,
+ },
+ )
+ }
+ r
+ }
+
+ fn analyze_closure(&mut self, closure: ClosureId) -> FnTrait {
+ let (_, root) = self.db.lookup_intern_closure(closure.into());
+ self.current_closure = Some(closure);
+ let Expr::Closure { body, capture_by, .. } = &self.body[root] else {
+ unreachable!("Closure expression id is always closure");
+ };
+ self.consume_expr(*body);
+ for item in &self.current_captures {
+ if matches!(item.kind, CaptureKind::ByRef(BorrowKind::Mut { .. }))
+ && !item.place.projections.contains(&ProjectionElem::Deref)
+ {
+ // FIXME: remove the `mutated_bindings_in_closure` field completely and add proper
+ // fake reads in MIR. I didn't do that due to duplicate diagnostics.
+ self.result.mutated_bindings_in_closure.insert(item.place.local);
+ }
+ }
+ // `closure_kind` must be computed before `adjust_for_move_closure`, which turns captures into by-value ones
+ let closure_kind = self.closure_kind();
+ match capture_by {
+ CaptureBy::Value => self.adjust_for_move_closure(),
+ CaptureBy::Ref => (),
+ }
+ self.minimize_captures();
+ let result = mem::take(&mut self.current_captures);
+ let captures = result.into_iter().map(|x| x.with_ty(self)).collect::<Vec<_>>();
+ self.result.closure_info.insert(closure, (captures, closure_kind));
+ closure_kind
+ }
+
+ pub(crate) fn infer_closures(&mut self) {
+ let deferred_closures = self.sort_closures();
+ for (closure, exprs) in deferred_closures.into_iter().rev() {
+ self.current_captures = vec![];
+ let kind = self.analyze_closure(closure);
+
+ for (derefed_callee, callee_ty, params, expr) in exprs {
+ if let &Expr::Call { callee, .. } = &self.body[expr] {
+ let mut adjustments =
+ self.result.expr_adjustments.remove(&callee).unwrap_or_default();
+ self.write_fn_trait_method_resolution(
+ kind,
+ &derefed_callee,
+ &mut adjustments,
+ &callee_ty,
+ &params,
+ expr,
+ );
+ self.result.expr_adjustments.insert(callee, adjustments);
+ }
+ }
+ }
+ }
+
+ /// We want to analyze some closures before others, to have a correct analysis:
+ /// * We should analyze nested closures before the parent, since the parent should capture some of
+ /// the things that its children capture.
+ /// * If a closure calls another closure, we need to analyze the callee first, to find out how we
+ /// should capture it (e.g. by move for `FnOnce`).
+ ///
+ /// These dependencies are collected in the main inference. We do a topological sort in this function. It
+ /// will consume the `deferred_closures` field and return its content in a sorted vector.
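+ ///
+ /// For example (informal), in `|| { let f = |x: i32| x; f(1) }` the inner closure `f`
+ /// is analyzed first: the outer closure both contains and calls it, so `f`'s captures
+ /// and its `Fn`-ness must already be known when the outer closure is analyzed.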
+ fn sort_closures(&mut self) -> Vec<(ClosureId, Vec<(Ty, Ty, Vec<Ty>, ExprId)>)> {
+ let mut deferred_closures = mem::take(&mut self.deferred_closures);
+ let mut dependents_count: FxHashMap<ClosureId, usize> =
+ deferred_closures.keys().map(|x| (*x, 0)).collect();
+ for (_, deps) in &self.closure_dependencies {
+ for dep in deps {
+ *dependents_count.entry(*dep).or_default() += 1;
+ }
+ }
+ let mut queue: Vec<_> =
+ deferred_closures.keys().copied().filter(|x| dependents_count[x] == 0).collect();
+ let mut result = vec![];
+ while let Some(x) = queue.pop() {
+ if let Some(d) = deferred_closures.remove(&x) {
+ result.push((x, d));
+ }
+ for dep in self.closure_dependencies.get(&x).into_iter().flat_map(|x| x.iter()) {
+ let cnt = dependents_count.get_mut(dep).unwrap();
+ *cnt -= 1;
+ if *cnt == 0 {
+ queue.push(*dep);
+ }
+ }
+ }
+ result
+ }
+}
+
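+/// Applies adjustments to a place while keeping it a place: only builtin derefs
+/// (`Adjust::Deref(None)`) preserve place-ness; any other adjustment produces a new
+/// value, so `None` is returned for it.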
+fn apply_adjusts_to_place(mut r: HirPlace, adjustments: &[Adjustment]) -> Option<HirPlace> {
+ for adj in adjustments {
+ match &adj.kind {
+ Adjust::Deref(None) => {
+ r.projections.push(ProjectionElem::Deref);
+ }
+ _ => return None,
+ }
+ }
+ Some(r)
+}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs
index 48c915302..05a476f63 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs
@@ -5,14 +5,15 @@
//! See <https://doc.rust-lang.org/nomicon/coercions.html> and
//! `rustc_hir_analysis/check/coercion.rs`.
-use std::{iter, sync::Arc};
+use std::iter;
-use chalk_ir::{cast::Cast, BoundVar, Goal, Mutability, TyVariableKind};
+use chalk_ir::{cast::Cast, BoundVar, Goal, Mutability, TyKind, TyVariableKind};
use hir_def::{
- expr::ExprId,
+ hir::ExprId,
lang_item::{LangItem, LangItemTarget},
};
use stdx::always;
+use triomphe::Arc;
use crate::{
autoderef::{Autoderef, AutoderefKind},
@@ -21,8 +22,10 @@ use crate::{
Adjust, Adjustment, AutoBorrow, InferOk, InferenceContext, OverloadedDeref, PointerCast,
TypeError, TypeMismatch,
},
- static_lifetime, Canonical, DomainGoal, FnPointer, FnSig, Guidance, InEnvironment, Interner,
- Solution, Substitution, TraitEnvironment, Ty, TyBuilder, TyExt, TyKind,
+ static_lifetime,
+ utils::ClosureSubst,
+ Canonical, DomainGoal, FnPointer, FnSig, Guidance, InEnvironment, Interner, Solution,
+ Substitution, TraitEnvironment, Ty, TyBuilder, TyExt,
};
use super::unify::InferenceTable;
@@ -47,15 +50,23 @@ fn success(
Ok(InferOk { goals, value: (adj, target) })
}
+pub(super) enum CoercionCause {
+ // FIXME: Make better use of this. Right now things like return and break without a value
+ // use it to point to themselves, causing us to report a mismatch on those expressions even
+ // though technically they themselves are `!`
+ Expr(ExprId),
+}
+
#[derive(Clone, Debug)]
pub(super) struct CoerceMany {
expected_ty: Ty,
final_ty: Option<Ty>,
+ expressions: Vec<ExprId>,
}
impl CoerceMany {
pub(super) fn new(expected: Ty) -> Self {
- CoerceMany { expected_ty: expected, final_ty: None }
+ CoerceMany { expected_ty: expected, final_ty: None, expressions: vec![] }
}
/// Returns the "expected type" with which this coercion was
@@ -86,8 +97,12 @@ impl CoerceMany {
}
}
- pub(super) fn coerce_forced_unit(&mut self, ctx: &mut InferenceContext<'_>) {
- self.coerce(ctx, None, &ctx.result.standard_types.unit.clone())
+ pub(super) fn coerce_forced_unit(
+ &mut self,
+ ctx: &mut InferenceContext<'_>,
+ cause: CoercionCause,
+ ) {
+ self.coerce(ctx, None, &ctx.result.standard_types.unit.clone(), cause)
}
/// Merge two types from different branches, with possible coercion.
@@ -102,6 +117,7 @@ impl CoerceMany {
ctx: &mut InferenceContext<'_>,
expr: Option<ExprId>,
expr_ty: &Ty,
+ cause: CoercionCause,
) {
let expr_ty = ctx.resolve_ty_shallow(expr_ty);
self.expected_ty = ctx.resolve_ty_shallow(&self.expected_ty);
@@ -110,6 +126,8 @@ impl CoerceMany {
// pointers to have a chance at getting a match. See
// https://github.com/rust-lang/rust/blob/7b805396bf46dce972692a6846ce2ad8481c5f85/src/librustc_typeck/check/coercion.rs#L877-L916
let sig = match (self.merged_ty().kind(Interner), expr_ty.kind(Interner)) {
+ (TyKind::FnDef(x, _), TyKind::FnDef(y, _)) if x == y => None,
+ (TyKind::Closure(x, _), TyKind::Closure(y, _)) if x == y => None,
(TyKind::FnDef(..) | TyKind::Closure(..), TyKind::FnDef(..) | TyKind::Closure(..)) => {
// FIXME: we're ignoring safety here. To be more correct, if we have one FnDef and one Closure,
// we should be coercing the closure to a fn pointer of the safety of the FnDef
@@ -125,8 +143,15 @@ impl CoerceMany {
let result1 = ctx.table.coerce_inner(self.merged_ty(), &target_ty);
let result2 = ctx.table.coerce_inner(expr_ty.clone(), &target_ty);
if let (Ok(result1), Ok(result2)) = (result1, result2) {
- ctx.table.register_infer_ok(result1);
- ctx.table.register_infer_ok(result2);
+ ctx.table.register_infer_ok(InferOk { value: (), goals: result1.goals });
+ for &e in &self.expressions {
+ ctx.write_expr_adj(e, result1.value.0.clone());
+ }
+ ctx.table.register_infer_ok(InferOk { value: (), goals: result2.goals });
+ if let Some(expr) = expr {
+ ctx.write_expr_adj(expr, result2.value.0);
+ self.expressions.push(expr);
+ }
return self.final_ty = Some(target_ty);
}
}
@@ -140,14 +165,19 @@ impl CoerceMany {
} else if let Ok(res) = ctx.coerce(expr, &self.merged_ty(), &expr_ty) {
self.final_ty = Some(res);
} else {
- if let Some(id) = expr {
- ctx.result.type_mismatches.insert(
- id.into(),
- TypeMismatch { expected: self.merged_ty().clone(), actual: expr_ty.clone() },
- );
+ match cause {
+ CoercionCause::Expr(id) => {
+ ctx.result.type_mismatches.insert(
+ id.into(),
+ TypeMismatch { expected: self.merged_ty(), actual: expr_ty.clone() },
+ );
+ }
}
cov_mark::hit!(coerce_merge_fail_fallback);
}
+ if let Some(expr) = expr {
+ self.expressions.push(expr);
+ }
}
}
@@ -625,7 +655,7 @@ impl<'a> InferenceTable<'a> {
// Need to find out in what cases this is necessary
let solution = self
.db
- .trait_solve(krate, canonicalized.value.clone().cast(Interner))
+ .trait_solve(krate, self.trait_env.block, canonicalized.value.clone().cast(Interner))
.ok_or(TypeError)?;
match solution {
@@ -657,7 +687,7 @@ impl<'a> InferenceTable<'a> {
}
fn coerce_closure_fn_ty(closure_substs: &Substitution, safety: chalk_ir::Safety) -> Ty {
- let closure_sig = closure_substs.at(Interner, 0).assert_ty_ref(Interner).clone();
+ let closure_sig = ClosureSubst(closure_substs).sig_ty().clone();
match closure_sig.kind(Interner) {
TyKind::Function(fn_ty) => TyKind::Function(FnPointer {
num_binders: fn_ty.num_binders,
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs
index ee186673e..194471f00 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs
@@ -6,37 +6,43 @@ use std::{
};
use chalk_ir::{
- cast::Cast, fold::Shift, DebruijnIndex, GenericArgData, Mutability, TyKind, TyVariableKind,
+ cast::Cast, fold::Shift, DebruijnIndex, GenericArgData, Mutability, TyVariableKind,
};
use hir_def::{
- expr::{
+ generics::TypeOrConstParamData,
+ hir::{
ArithOp, Array, BinaryOp, ClosureKind, Expr, ExprId, LabelId, Literal, Statement, UnaryOp,
},
- generics::TypeOrConstParamData,
- lang_item::LangItem,
+ lang_item::{LangItem, LangItemTarget},
path::{GenericArg, GenericArgs},
- ConstParamId, FieldId, ItemContainerId, Lookup,
+ BlockId, ConstParamId, FieldId, ItemContainerId, Lookup,
};
use hir_expand::name::{name, Name};
use stdx::always;
use syntax::ast::RangeOp;
+use triomphe::Arc;
use crate::{
- autoderef::{self, Autoderef},
+ autoderef::{builtin_deref, deref_by_trait, Autoderef},
consteval,
infer::{
- coerce::CoerceMany, find_continuable, pat::contains_explicit_ref_binding, BreakableKind,
+ coerce::{CoerceMany, CoercionCause},
+ find_continuable,
+ pat::contains_explicit_ref_binding,
+ BreakableKind,
},
+ lang_items::lang_items_for_bin_op,
lower::{
const_or_path_to_chalk, generic_arg_to_chalk, lower_to_chalk_mutability, ParamLoweringMode,
},
mapping::{from_chalk, ToChalk},
- method_resolution::{self, lang_items_for_bin_op, VisibleFromModule},
+ method_resolution::{self, VisibleFromModule},
primitive::{self, UintTy},
static_lifetime, to_chalk_trait_id,
+ traits::FnTrait,
utils::{generics, Generics},
Adjust, Adjustment, AdtId, AutoBorrow, Binders, CallableDefId, FnPointer, FnSig, FnSubst,
- Interner, Rawness, Scalar, Substitution, TraitRef, Ty, TyBuilder, TyExt,
+ Interner, Rawness, Scalar, Substitution, TraitRef, Ty, TyBuilder, TyExt, TyKind,
};
use super::{
@@ -83,10 +89,10 @@ impl<'a> InferenceContext<'a> {
}
}
- pub(super) fn infer_expr_coerce_never(&mut self, expr: ExprId, expected: &Expectation) -> Ty {
+ fn infer_expr_coerce_never(&mut self, expr: ExprId, expected: &Expectation) -> Ty {
let ty = self.infer_expr_inner(expr, expected);
// While we don't allow *arbitrary* coercions here, we *do* allow
- // coercions from ! to `expected`.
+ // coercions from `!` to `expected`.
if ty.is_never() {
if let Some(adjustments) = self.result.expr_adjustments.get(&expr) {
return if let [Adjustment { kind: Adjust::NeverToAny, target }] = &**adjustments {
@@ -96,13 +102,22 @@ impl<'a> InferenceContext<'a> {
};
}
- let adj_ty = self.table.new_type_var();
- self.write_expr_adj(
- expr,
- vec![Adjustment { kind: Adjust::NeverToAny, target: adj_ty.clone() }],
- );
- adj_ty
+ if let Some(target) = expected.only_has_type(&mut self.table) {
+ self.coerce(Some(expr), &ty, &target)
+ .expect("never-to-any coercion should always succeed")
+ } else {
+ ty
+ }
} else {
+ if let Some(expected_ty) = expected.only_has_type(&mut self.table) {
+ let could_unify = self.unify(&ty, &expected_ty);
+ if !could_unify {
+ self.result.type_mismatches.insert(
+ expr.into(),
+ TypeMismatch { expected: expected_ty, actual: ty.clone() },
+ );
+ }
+ }
ty
}
}
@@ -120,24 +135,28 @@ impl<'a> InferenceContext<'a> {
);
let condition_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
- let mut both_arms_diverge = Diverges::Always;
let then_ty = self.infer_expr_inner(then_branch, expected);
- both_arms_diverge &= mem::replace(&mut self.diverges, Diverges::Maybe);
+ let then_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
let mut coerce = CoerceMany::new(expected.coercion_target_type(&mut self.table));
- coerce.coerce(self, Some(then_branch), &then_ty);
+ coerce.coerce(self, Some(then_branch), &then_ty, CoercionCause::Expr(then_branch));
match else_branch {
Some(else_branch) => {
let else_ty = self.infer_expr_inner(else_branch, expected);
- coerce.coerce(self, Some(else_branch), &else_ty);
+ let else_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
+ coerce.coerce(
+ self,
+ Some(else_branch),
+ &else_ty,
+ CoercionCause::Expr(else_branch),
+ );
+ self.diverges = condition_diverges | then_diverges & else_diverges;
}
None => {
- coerce.coerce_forced_unit(self);
+ coerce.coerce_forced_unit(self, CoercionCause::Expr(tgt_expr));
+ self.diverges = condition_diverges;
}
}
- both_arms_diverge &= self.diverges;
-
- self.diverges = condition_diverges | both_arms_diverge;
coerce.complete(self)
}
@@ -146,67 +165,21 @@ impl<'a> InferenceContext<'a> {
self.infer_top_pat(pat, &input_ty);
self.result.standard_types.bool_.clone()
}
- Expr::Block { statements, tail, label, id: _ } => {
- self.infer_block(tgt_expr, statements, *tail, *label, expected)
+ Expr::Block { statements, tail, label, id } => {
+ self.infer_block(tgt_expr, *id, statements, *tail, *label, expected)
}
- Expr::Unsafe { id: _, statements, tail } => {
- self.infer_block(tgt_expr, statements, *tail, None, expected)
+ Expr::Unsafe { id, statements, tail } => {
+ self.infer_block(tgt_expr, *id, statements, *tail, None, expected)
}
- Expr::Const { id: _, statements, tail } => {
+ Expr::Const(id) => {
self.with_breakable_ctx(BreakableKind::Border, None, None, |this| {
- this.infer_block(tgt_expr, statements, *tail, None, expected)
+ let loc = this.db.lookup_intern_anonymous_const(*id);
+ this.infer_expr(loc.root, expected)
})
.1
}
- Expr::TryBlock { id: _, statements, tail } => {
- // The type that is returned from the try block
- let try_ty = self.table.new_type_var();
- if let Some(ty) = expected.only_has_type(&mut self.table) {
- self.unify(&try_ty, &ty);
- }
-
- // The ok-ish type that is expected from the last expression
- let ok_ty =
- self.resolve_associated_type(try_ty.clone(), self.resolve_ops_try_output());
-
- self.infer_block(
- tgt_expr,
- statements,
- *tail,
- None,
- &Expectation::has_type(ok_ty.clone()),
- );
- try_ty
- }
- Expr::Async { id: _, statements, tail } => {
- let ret_ty = self.table.new_type_var();
- let prev_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
- let prev_ret_ty = mem::replace(&mut self.return_ty, ret_ty.clone());
- let prev_ret_coercion =
- mem::replace(&mut self.return_coercion, Some(CoerceMany::new(ret_ty.clone())));
-
- let (_, inner_ty) =
- self.with_breakable_ctx(BreakableKind::Border, None, None, |this| {
- this.infer_block(
- tgt_expr,
- statements,
- *tail,
- None,
- &Expectation::has_type(ret_ty),
- )
- });
-
- self.diverges = prev_diverges;
- self.return_ty = prev_ret_ty;
- self.return_coercion = prev_ret_coercion;
-
- // Use the first type parameter as the output type of future.
- // existential type AsyncBlockImplTrait<InnerType>: Future<Output = InnerType>
- let impl_trait_id =
- crate::ImplTraitId::AsyncBlockTypeImplTrait(self.owner, tgt_expr);
- let opaque_ty_id = self.db.intern_impl_trait_id(impl_trait_id).into();
- TyKind::OpaqueType(opaque_ty_id, Substitution::from1(Interner, inner_ty))
- .intern(Interner)
+ Expr::Async { id, statements, tail } => {
+ self.infer_async_block(tgt_expr, id, statements, tail)
}
&Expr::Loop { body, label } => {
// FIXME: should be:
@@ -238,25 +211,7 @@ impl<'a> InferenceContext<'a> {
self.diverges = Diverges::Maybe;
TyBuilder::unit()
}
- &Expr::For { iterable, body, pat, label } => {
- let iterable_ty = self.infer_expr(iterable, &Expectation::none());
- let into_iter_ty =
- self.resolve_associated_type(iterable_ty, self.resolve_into_iter_item());
- let pat_ty = self
- .resolve_associated_type(into_iter_ty.clone(), self.resolve_iterator_item());
-
- self.result.type_of_for_iterator.insert(tgt_expr, into_iter_ty);
-
- self.infer_top_pat(pat, &pat_ty);
- self.with_breakable_ctx(BreakableKind::Loop, None, label, |this| {
- this.infer_expr(body, &Expectation::HasType(TyBuilder::unit()));
- });
-
- // the body may not run, so it diverging doesn't mean we diverge
- self.diverges = Diverges::Maybe;
- TyBuilder::unit()
- }
- Expr::Closure { body, args, ret_type, arg_types, closure_kind } => {
+ Expr::Closure { body, args, ret_type, arg_types, closure_kind, capture_by: _ } => {
assert_eq!(args.len(), arg_types.len());
let mut sig_tys = Vec::with_capacity(arg_types.len() + 1);
@@ -276,18 +231,7 @@ impl<'a> InferenceContext<'a> {
None => self.table.new_type_var(),
};
if let ClosureKind::Async = closure_kind {
- // Use the first type parameter as the output type of future.
- // existential type AsyncBlockImplTrait<InnerType>: Future<Output = InnerType>
- let impl_trait_id =
- crate::ImplTraitId::AsyncBlockTypeImplTrait(self.owner, *body);
- let opaque_ty_id = self.db.intern_impl_trait_id(impl_trait_id).into();
- sig_tys.push(
- TyKind::OpaqueType(
- opaque_ty_id,
- Substitution::from1(Interner, ret_ty.clone()),
- )
- .intern(Interner),
- );
+ sig_tys.push(self.lower_async_block_type_impl_trait(ret_ty.clone(), *body));
} else {
sig_tys.push(ret_ty.clone());
}
@@ -302,7 +246,7 @@ impl<'a> InferenceContext<'a> {
})
.intern(Interner);
- let (ty, resume_yield_tys) = match closure_kind {
+ let (id, ty, resume_yield_tys) = match closure_kind {
ClosureKind::Generator(_) => {
// FIXME: report error when there are more than 1 parameter.
let resume_ty = match sig_tys.first() {
@@ -322,17 +266,20 @@ impl<'a> InferenceContext<'a> {
let generator_id = self.db.intern_generator((self.owner, tgt_expr)).into();
let generator_ty = TyKind::Generator(generator_id, subst).intern(Interner);
- (generator_ty, Some((resume_ty, yield_ty)))
+ (None, generator_ty, Some((resume_ty, yield_ty)))
}
ClosureKind::Closure | ClosureKind::Async => {
let closure_id = self.db.intern_closure((self.owner, tgt_expr)).into();
let closure_ty = TyKind::Closure(
closure_id,
- Substitution::from1(Interner, sig_ty.clone()),
+ TyBuilder::subst_for_closure(self.db, self.owner, sig_ty.clone()),
)
.intern(Interner);
-
- (closure_ty, None)
+ self.deferred_closures.entry(closure_id).or_default();
+ if let Some(c) = self.current_closure {
+ self.closure_dependencies.entry(c).or_default().push(closure_id);
+ }
+ (Some(closure_id), closure_ty, None)
}
};
@@ -348,9 +295,10 @@ impl<'a> InferenceContext<'a> {
// FIXME: lift these out into a struct
let prev_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
+ let prev_closure = mem::replace(&mut self.current_closure, id);
let prev_ret_ty = mem::replace(&mut self.return_ty, ret_ty.clone());
let prev_ret_coercion =
- mem::replace(&mut self.return_coercion, Some(CoerceMany::new(ret_ty.clone())));
+ mem::replace(&mut self.return_coercion, Some(CoerceMany::new(ret_ty)));
let prev_resume_yield_tys =
mem::replace(&mut self.resume_yield_tys, resume_yield_tys);
@@ -361,6 +309,7 @@ impl<'a> InferenceContext<'a> {
self.diverges = prev_diverges;
self.return_ty = prev_ret_ty;
self.return_coercion = prev_ret_coercion;
+ self.current_closure = prev_closure;
self.resume_yield_tys = prev_resume_yield_tys;
ty
@@ -385,16 +334,31 @@ impl<'a> InferenceContext<'a> {
|| res.is_none();
let (param_tys, ret_ty) = match res {
Some((func, params, ret_ty)) => {
- let adjustments = auto_deref_adjust_steps(&derefs);
- // FIXME: Handle call adjustments for Fn/FnMut
- self.write_expr_adj(*callee, adjustments);
- if let Some((trait_, func)) = func {
- let subst = TyBuilder::subst_for_def(self.db, trait_, None)
- .push(callee_ty.clone())
- .push(TyBuilder::tuple_with(params.iter().cloned()))
- .build();
- self.write_method_resolution(tgt_expr, func, subst.clone());
+ let mut adjustments = auto_deref_adjust_steps(&derefs);
+ if let TyKind::Closure(c, _) =
+ self.table.resolve_completely(callee_ty.clone()).kind(Interner)
+ {
+ if let Some(par) = self.current_closure {
+ self.closure_dependencies.entry(par).or_default().push(*c);
+ }
+ self.deferred_closures.entry(*c).or_default().push((
+ derefed_callee.clone(),
+ callee_ty.clone(),
+ params.clone(),
+ tgt_expr,
+ ));
}
+ if let Some(fn_x) = func {
+ self.write_fn_trait_method_resolution(
+ fn_x,
+ &derefed_callee,
+ &mut adjustments,
+ &callee_ty,
+ &params,
+ tgt_expr,
+ );
+ }
+ self.write_expr_adj(*callee, adjustments);
(params, ret_ty)
}
None => {
@@ -470,7 +434,7 @@ impl<'a> InferenceContext<'a> {
let arm_ty = self.infer_expr_inner(arm.expr, &expected);
all_arms_diverge &= self.diverges;
- coerce.coerce(self, Some(arm.expr), &arm_ty);
+ coerce.coerce(self, Some(arm.expr), &arm_ty, CoercionCause::Expr(arm.expr));
}
self.diverges = matchee_diverges | all_arms_diverge;
@@ -484,8 +448,8 @@ impl<'a> InferenceContext<'a> {
self.resolver.reset_to_guard(g);
ty
}
- Expr::Continue { label } => {
- if let None = find_continuable(&mut self.breakables, label.as_ref()) {
+ &Expr::Continue { label } => {
+ if let None = find_continuable(&mut self.breakables, label) {
self.push_diagnostic(InferenceDiagnostic::BreakOutsideOfLoop {
expr: tgt_expr,
is_break: false,
@@ -494,9 +458,9 @@ impl<'a> InferenceContext<'a> {
};
self.result.standard_types.never.clone()
}
- Expr::Break { expr, label } => {
- let val_ty = if let Some(expr) = *expr {
- let opt_coerce_to = match find_breakable(&mut self.breakables, label.as_ref()) {
+ &Expr::Break { expr, label } => {
+ let val_ty = if let Some(expr) = expr {
+ let opt_coerce_to = match find_breakable(&mut self.breakables, label) {
Some(ctxt) => match &ctxt.coerce {
Some(coerce) => coerce.expected_ty(),
None => {
@@ -515,13 +479,17 @@ impl<'a> InferenceContext<'a> {
TyBuilder::unit()
};
- match find_breakable(&mut self.breakables, label.as_ref()) {
+ match find_breakable(&mut self.breakables, label) {
Some(ctxt) => match ctxt.coerce.take() {
Some(mut coerce) => {
- coerce.coerce(self, *expr, &val_ty);
+ let cause = match expr {
+ Some(expr) => CoercionCause::Expr(expr),
+ None => CoercionCause::Expr(tgt_expr),
+ };
+ coerce.coerce(self, expr, &val_ty, cause);
// Avoiding borrowck
- let ctxt = find_breakable(&mut self.breakables, label.as_ref())
+ let ctxt = find_breakable(&mut self.breakables, label)
.expect("breakable stack changed during coercion");
ctxt.may_break = true;
ctxt.coerce = Some(coerce);
@@ -538,7 +506,7 @@ impl<'a> InferenceContext<'a> {
}
self.result.standard_types.never.clone()
}
- &Expr::Return { expr } => self.infer_expr_return(expr),
+ &Expr::Return { expr } => self.infer_expr_return(tgt_expr, expr),
Expr::Yield { expr } => {
if let Some((resume_ty, yield_ty)) = self.resume_yield_tys.clone() {
if let Some(expr) = expr {
@@ -589,6 +557,9 @@ impl<'a> InferenceContext<'a> {
let field_ty = field_def.map_or(self.err_ty(), |it| {
field_types[it.local_id].clone().substitute(Interner, &substs)
});
+ // The field type might contain some unknown types.
+ // FIXME: we may want to emit a single type variable for all instances of these field types?
+ let field_ty = self.insert_type_vars(field_ty);
self.infer_expr_coerce(field.expr, &Expectation::has_type(field_ty));
}
if let Some(expr) = spread {
@@ -601,26 +572,18 @@ impl<'a> InferenceContext<'a> {
let inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
self.resolve_associated_type(inner_ty, self.resolve_future_future_output())
}
- Expr::Try { expr } => {
- let inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
- if let Some(trait_) = self.resolve_lang_trait(LangItem::Try) {
- if let Some(func) = self.db.trait_data(trait_).method_by_name(&name!(branch)) {
- let subst = TyBuilder::subst_for_def(self.db, trait_, None)
- .push(inner_ty.clone())
- .build();
- self.write_method_resolution(tgt_expr, func, subst.clone());
- }
- let try_output = self.resolve_output_on(trait_);
- self.resolve_associated_type(inner_ty, try_output)
- } else {
- self.err_ty()
- }
- }
Expr::Cast { expr, type_ref } => {
let cast_ty = self.make_ty(type_ref);
// FIXME: propagate the "castable to" expectation
- let _inner_ty = self.infer_expr_no_expect(*expr);
- // FIXME check the cast...
+ let inner_ty = self.infer_expr_no_expect(*expr);
+ match (inner_ty.kind(Interner), cast_ty.kind(Interner)) {
+ (TyKind::Ref(_, _, inner), TyKind::Raw(_, cast)) => {
+ // FIXME: record invalid cast diagnostic in case of mismatch
+ self.unify(inner, cast);
+ }
+ // FIXME check the other kinds of cast...
+ _ => (),
+ }
cast_ty
}
Expr::Ref { expr, rawness, mutability } => {
@@ -638,7 +601,7 @@ impl<'a> InferenceContext<'a> {
// FIXME: record type error - expected reference but found ptr,
// which cannot be coerced
}
- Expectation::rvalue_hint(&mut self.table, Ty::clone(exp_inner))
+ Expectation::rvalue_hint(self, Ty::clone(exp_inner))
} else {
Expectation::none()
};
@@ -656,7 +619,25 @@ impl<'a> InferenceContext<'a> {
// FIXME: Note down method resolution here
match op {
UnaryOp::Deref => {
- autoderef::deref(&mut self.table, inner_ty).unwrap_or_else(|| self.err_ty())
+ if let Some(deref_trait) = self.resolve_lang_trait(LangItem::Deref) {
+ if let Some(deref_fn) =
+ self.db.trait_data(deref_trait).method_by_name(&name![deref])
+ {
+ // FIXME: this is wrong in multiple ways: subst is empty, and we emit it even for builtin
+ // derefs (note that the mutability is not wrong; it will be fixed in `self.infer_mut`).
+ self.write_method_resolution(
+ tgt_expr,
+ deref_fn,
+ Substitution::empty(Interner),
+ );
+ }
+ }
+ if let Some(derefed) = builtin_deref(&mut self.table, &inner_ty, true) {
+ self.resolve_ty_shallow(derefed)
+ } else {
+ deref_by_trait(&mut self.table, inner_ty)
+ .unwrap_or_else(|| self.err_ty())
+ }
}
UnaryOp::Neg => {
match inner_ty.kind(Interner) {
@@ -767,14 +748,16 @@ impl<'a> InferenceContext<'a> {
let canonicalized = self.canonicalize(base_ty.clone());
let receiver_adjustments = method_resolution::resolve_indexing_op(
self.db,
- self.trait_env.clone(),
+ self.table.trait_env.clone(),
canonicalized.value,
index_trait,
);
- let (self_ty, adj) = receiver_adjustments
+ let (self_ty, mut adj) = receiver_adjustments
.map_or((self.err_ty(), Vec::new()), |adj| {
adj.apply(&mut self.table, base_ty)
});
+ // The mutability will be fixed up in `InferenceContext::infer_mut`.
+ adj.push(Adjustment::borrow(Mutability::Not, self_ty.clone()));
self.write_expr_adj(*base, adj);
if let Some(func) =
self.db.trait_data(index_trait).method_by_name(&name!(index))
@@ -783,7 +766,7 @@ impl<'a> InferenceContext<'a> {
.push(self_ty.clone())
.push(index_ty.clone())
.build();
- self.write_method_resolution(tgt_expr, func, substs.clone());
+ self.write_method_resolution(tgt_expr, func, substs);
}
self.resolve_associated_type_with_params(
self_ty,
@@ -834,6 +817,20 @@ impl<'a> InferenceContext<'a> {
let array_type = TyKind::Array(byte_type, len).intern(Interner);
TyKind::Ref(Mutability::Not, static_lifetime(), array_type).intern(Interner)
}
+ Literal::CString(..) => TyKind::Ref(
+ Mutability::Not,
+ static_lifetime(),
+ self.resolve_lang_item(LangItem::CStr)
+ .and_then(LangItemTarget::as_struct)
+ .map_or_else(
+ || self.err_ty(),
+ |strukt| {
+ TyKind::Adt(AdtId(strukt.into()), Substitution::empty(Interner))
+ .intern(Interner)
+ },
+ ),
+ )
+ .intern(Interner),
Literal::Char(..) => TyKind::Scalar(Scalar::Char).intern(Interner),
Literal::Int(_v, ty) => match ty {
Some(int_ty) => {
@@ -859,9 +856,15 @@ impl<'a> InferenceContext<'a> {
},
Expr::Underscore => {
// Underscore expressions may only appear in assignee expressions,
- // which are handled by `infer_assignee_expr()`, so any underscore
- // expression reaching this branch is an error.
- self.err_ty()
+ // which are handled by `infer_assignee_expr()`.
+ // Any other underscore expression is an error, but we render a specialized diagnostic
+ // to let the user know what type is expected.
+ let expected = expected.to_option(&mut self.table).unwrap_or_else(|| self.err_ty());
+ self.push_diagnostic(InferenceDiagnostic::TypedHole {
+ expr: tgt_expr,
+ expected: expected.clone(),
+ });
+ expected
}
};
// use a new type variable if we got unknown here
@@ -874,6 +877,88 @@ impl<'a> InferenceContext<'a> {
ty
}
+ fn infer_async_block(
+ &mut self,
+ tgt_expr: ExprId,
+ id: &Option<BlockId>,
+ statements: &[Statement],
+ tail: &Option<ExprId>,
+ ) -> Ty {
+ let ret_ty = self.table.new_type_var();
+ let prev_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
+ let prev_ret_ty = mem::replace(&mut self.return_ty, ret_ty.clone());
+ let prev_ret_coercion =
+ mem::replace(&mut self.return_coercion, Some(CoerceMany::new(ret_ty.clone())));
+
+ let (_, inner_ty) = self.with_breakable_ctx(BreakableKind::Border, None, None, |this| {
+ this.infer_block(tgt_expr, *id, statements, *tail, None, &Expectation::has_type(ret_ty))
+ });
+
+ self.diverges = prev_diverges;
+ self.return_ty = prev_ret_ty;
+ self.return_coercion = prev_ret_coercion;
+
+ self.lower_async_block_type_impl_trait(inner_ty, tgt_expr)
+ }
+
+ pub(crate) fn lower_async_block_type_impl_trait(
+ &mut self,
+ inner_ty: Ty,
+ tgt_expr: ExprId,
+ ) -> Ty {
+ // Use the first type parameter as the output type of the future.
+ // existential type AsyncBlockImplTrait<InnerType>: Future<Output = InnerType>
+ let impl_trait_id = crate::ImplTraitId::AsyncBlockTypeImplTrait(self.owner, tgt_expr);
+ let opaque_ty_id = self.db.intern_impl_trait_id(impl_trait_id).into();
+ TyKind::OpaqueType(opaque_ty_id, Substitution::from1(Interner, inner_ty)).intern(Interner)
+ }
+
+ pub(crate) fn write_fn_trait_method_resolution(
+ &mut self,
+ fn_x: FnTrait,
+ derefed_callee: &Ty,
+ adjustments: &mut Vec<Adjustment>,
+ callee_ty: &Ty,
+ params: &Vec<Ty>,
+ tgt_expr: ExprId,
+ ) {
+ match fn_x {
+ FnTrait::FnOnce => (),
+ FnTrait::FnMut => {
+ if let TyKind::Ref(Mutability::Mut, _, inner) = derefed_callee.kind(Interner) {
+ if adjustments
+ .last()
+ .map(|x| matches!(x.kind, Adjust::Borrow(_)))
+ .unwrap_or(true)
+ {
+ // prefer reborrow to move
+ adjustments
+ .push(Adjustment { kind: Adjust::Deref(None), target: inner.clone() });
+ adjustments.push(Adjustment::borrow(Mutability::Mut, inner.clone()))
+ }
+ } else {
+ adjustments.push(Adjustment::borrow(Mutability::Mut, derefed_callee.clone()));
+ }
+ }
+ FnTrait::Fn => {
+ if !matches!(derefed_callee.kind(Interner), TyKind::Ref(Mutability::Not, _, _)) {
+ adjustments.push(Adjustment::borrow(Mutability::Not, derefed_callee.clone()));
+ }
+ }
+ }
+ let Some(trait_) = fn_x.get_id(self.db, self.table.trait_env.krate) else {
+ return;
+ };
+ let trait_data = self.db.trait_data(trait_);
+ if let Some(func) = trait_data.method_by_name(&fn_x.method_name()) {
+ let subst = TyBuilder::subst_for_def(self.db, trait_, None)
+ .push(callee_ty.clone())
+ .push(TyBuilder::tuple_with(params.iter().cloned()))
+ .build();
+ self.write_method_resolution(tgt_expr, func, subst.clone());
+ }
+ }
+
fn infer_expr_array(
&mut self,
array: &Array,
@@ -892,10 +977,10 @@ impl<'a> InferenceContext<'a> {
(elem_ty, consteval::usize_const(self.db, Some(0), krate))
}
Array::ElementList { elements, .. } => {
- let mut coerce = CoerceMany::new(elem_ty.clone());
+ let mut coerce = CoerceMany::new(elem_ty);
for &expr in elements.iter() {
let cur_elem_ty = self.infer_expr_inner(expr, &expected);
- coerce.coerce(self, Some(expr), &cur_elem_ty);
+ coerce.coerce(self, Some(expr), &cur_elem_ty, CoercionCause::Expr(expr));
}
(
coerce.complete(self),
@@ -904,12 +989,13 @@ impl<'a> InferenceContext<'a> {
}
&Array::Repeat { initializer, repeat } => {
self.infer_expr_coerce(initializer, &Expectation::has_type(elem_ty.clone()));
- self.infer_expr(
- repeat,
- &Expectation::HasType(
- TyKind::Scalar(Scalar::Uint(UintTy::Usize)).intern(Interner),
- ),
- );
+ let usize = TyKind::Scalar(Scalar::Uint(UintTy::Usize)).intern(Interner);
+ match self.body[repeat] {
+ Expr::Underscore => {
+ self.write_expr_ty(repeat, usize);
+ }
+ _ => _ = self.infer_expr(repeat, &Expectation::HasType(usize)),
+ }
(
elem_ty,
@@ -928,7 +1014,8 @@ impl<'a> InferenceContext<'a> {
)
}
};
-
+ // Try to evaluate the unevaluated constant, and insert a variable if that is not possible.
+ let len = self.table.insert_const_vars_shallow(len);
TyKind::Array(elem_ty, len).intern(Interner)
}
@@ -940,18 +1027,18 @@ impl<'a> InferenceContext<'a> {
.expected_ty();
let return_expr_ty = self.infer_expr_inner(expr, &Expectation::HasType(ret_ty));
let mut coerce_many = self.return_coercion.take().unwrap();
- coerce_many.coerce(self, Some(expr), &return_expr_ty);
+ coerce_many.coerce(self, Some(expr), &return_expr_ty, CoercionCause::Expr(expr));
self.return_coercion = Some(coerce_many);
}
- fn infer_expr_return(&mut self, expr: Option<ExprId>) -> Ty {
+ fn infer_expr_return(&mut self, ret: ExprId, expr: Option<ExprId>) -> Ty {
match self.return_coercion {
Some(_) => {
if let Some(expr) = expr {
self.infer_return(expr);
} else {
let mut coerce = self.return_coercion.take().unwrap();
- coerce.coerce_forced_unit(self);
+ coerce.coerce_forced_unit(self, CoercionCause::Expr(ret));
self.return_coercion = Some(coerce);
}
}
@@ -976,7 +1063,7 @@ impl<'a> InferenceContext<'a> {
.filter(|(e_adt, _)| e_adt == &box_id)
.map(|(_, subts)| {
let g = subts.at(Interner, 0);
- Expectation::rvalue_hint(table, Ty::clone(g.assert_ty_ref(Interner)))
+ Expectation::rvalue_hint(self, Ty::clone(g.assert_ty_ref(Interner)))
})
.unwrap_or_else(Expectation::none);
@@ -1185,6 +1272,7 @@ impl<'a> InferenceContext<'a> {
fn infer_block(
&mut self,
expr: ExprId,
+ block_id: Option<BlockId>,
statements: &[Statement],
tail: Option<ExprId>,
label: Option<LabelId>,
@@ -1192,9 +1280,14 @@ impl<'a> InferenceContext<'a> {
) -> Ty {
let coerce_ty = expected.coercion_target_type(&mut self.table);
let g = self.resolver.update_to_inner_scope(self.db.upcast(), self.owner, expr);
+ let prev_env = block_id.map(|block_id| {
+ let prev_env = self.table.trait_env.clone();
+ Arc::make_mut(&mut self.table.trait_env).block = Some(block_id);
+ prev_env
+ });
let (break_ty, ty) =
- self.with_breakable_ctx(BreakableKind::Block, Some(coerce_ty.clone()), label, |this| {
+ self.with_breakable_ctx(BreakableKind::Block, Some(coerce_ty), label, |this| {
for stmt in statements {
match stmt {
Statement::Let { pat, type_ref, initializer, else_branch } => {
@@ -1280,6 +1373,9 @@ impl<'a> InferenceContext<'a> {
}
});
self.resolver.reset_to_guard(g);
+ if let Some(prev_env) = prev_env {
+ self.table.trait_env = prev_env;
+ }
break_ty.unwrap_or(ty)
}
@@ -1378,7 +1474,7 @@ impl<'a> InferenceContext<'a> {
method_resolution::lookup_method(
self.db,
&canonicalized_receiver.value,
- self.trait_env.clone(),
+ self.table.trait_env.clone(),
self.get_traits_in_scope().as_ref().left_or_else(|&it| it),
VisibleFromModule::Filter(self.resolver.module()),
name,
@@ -1411,7 +1507,7 @@ impl<'a> InferenceContext<'a> {
let resolved = method_resolution::lookup_method(
self.db,
&canonicalized_receiver.value,
- self.trait_env.clone(),
+ self.table.trait_env.clone(),
self.get_traits_in_scope().as_ref().left_or_else(|&it| it),
VisibleFromModule::Filter(self.resolver.module()),
method_name,
@@ -1562,7 +1658,7 @@ impl<'a> InferenceContext<'a> {
// the parameter to coerce to the expected type (for example in
// `coerce_unsize_expected_type_4`).
let param_ty = self.normalize_associated_types_in(param_ty);
- let expected = Expectation::rvalue_hint(&mut self.table, expected_ty);
+ let expected = Expectation::rvalue_hint(self, expected_ty);
// infer with the expected type we have...
let ty = self.infer_expr_inner(arg, &expected);
@@ -1575,9 +1671,10 @@ impl<'a> InferenceContext<'a> {
} else {
param_ty
};
- if !coercion_target.is_unknown()
- && self.coerce(Some(arg), &ty, &coercion_target).is_err()
- {
+ // The function signature may contain some unknown types, so we need to insert
+ // type vars here to avoid false-positive type mismatches.
+ let coercion_target = self.insert_type_vars(coercion_target);
+ if self.coerce(Some(arg), &ty, &coercion_target).is_err() {
self.result.type_mismatches.insert(
arg.into(),
TypeMismatch { expected: coercion_target, actual: ty.clone() },
@@ -1618,6 +1715,7 @@ impl<'a> InferenceContext<'a> {
const_or_path_to_chalk(
this.db,
&this.resolver,
+ this.owner.into(),
ty,
c,
ParamLoweringMode::Placeholder,
@@ -1868,7 +1966,6 @@ impl<'a> InferenceContext<'a> {
cb: impl FnOnce(&mut Self) -> T,
) -> (Option<Ty>, T) {
self.breakables.push({
- let label = label.map(|label| self.body[label].name.clone());
BreakableContext { kind, may_break: false, coerce: ty.map(CoerceMany::new), label }
});
let res = cb(self);
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs
new file mode 100644
index 000000000..46f2e1d7d
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs
@@ -0,0 +1,218 @@
+//! Determines whether an expression occurs in an immutable or a mutable context, which is used
+//! when selecting between `Deref` and `DerefMut`, `Index` and `IndexMut`, and similar pairs.
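+//!
+//! For example (informal): in `m[0] = 1;` the indexing occurs in a mutable context, so
+//! `IndexMut::index_mut` is selected, whereas `let x = m[0];` uses `Index::index`.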
+
+use chalk_ir::Mutability;
+use hir_def::{
+ hir::{Array, BinaryOp, BindingAnnotation, Expr, ExprId, PatId, Statement, UnaryOp},
+ lang_item::LangItem,
+};
+use hir_expand::name;
+
+use crate::{lower::lower_to_chalk_mutability, Adjust, Adjustment, AutoBorrow, OverloadedDeref};
+
+use super::InferenceContext;
+
+impl<'a> InferenceContext<'a> {
+ pub(crate) fn infer_mut_body(&mut self) {
+ self.infer_mut_expr(self.body.body_expr, Mutability::Not);
+ }
+
+ fn infer_mut_expr(&mut self, tgt_expr: ExprId, mut mutability: Mutability) {
+ if let Some(adjustments) = self.result.expr_adjustments.get_mut(&tgt_expr) {
+ for adj in adjustments.iter_mut().rev() {
+ match &mut adj.kind {
+ Adjust::NeverToAny | Adjust::Deref(None) | Adjust::Pointer(_) => (),
+ Adjust::Deref(Some(d)) => *d = OverloadedDeref(Some(mutability)),
+ Adjust::Borrow(b) => match b {
+ AutoBorrow::Ref(m) | AutoBorrow::RawPtr(m) => mutability = *m,
+ },
+ }
+ }
+ }
+ self.infer_mut_expr_without_adjust(tgt_expr, mutability);
+ }
+
+ fn infer_mut_expr_without_adjust(&mut self, tgt_expr: ExprId, mutability: Mutability) {
+ match &self.body[tgt_expr] {
+ Expr::Missing => (),
+ &Expr::If { condition, then_branch, else_branch } => {
+ self.infer_mut_expr(condition, Mutability::Not);
+ self.infer_mut_expr(then_branch, Mutability::Not);
+ if let Some(else_branch) = else_branch {
+ self.infer_mut_expr(else_branch, Mutability::Not);
+ }
+ }
+ Expr::Const(id) => {
+ let loc = self.db.lookup_intern_anonymous_const(*id);
+ self.infer_mut_expr(loc.root, Mutability::Not);
+ }
+ Expr::Let { pat, expr } => self.infer_mut_expr(*expr, self.pat_bound_mutability(*pat)),
+ Expr::Block { id: _, statements, tail, label: _ }
+ | Expr::Async { id: _, statements, tail }
+ | Expr::Unsafe { id: _, statements, tail } => {
+ for st in statements.iter() {
+ match st {
+ Statement::Let { pat, type_ref: _, initializer, else_branch } => {
+ if let Some(i) = initializer {
+ self.infer_mut_expr(*i, self.pat_bound_mutability(*pat));
+ }
+ if let Some(e) = else_branch {
+ self.infer_mut_expr(*e, Mutability::Not);
+ }
+ }
+ Statement::Expr { expr, has_semi: _ } => {
+ self.infer_mut_expr(*expr, Mutability::Not);
+ }
+ }
+ }
+ if let Some(tail) = tail {
+ self.infer_mut_expr(*tail, Mutability::Not);
+ }
+ }
+ &Expr::While { condition: c, body, label: _ } => {
+ self.infer_mut_expr(c, Mutability::Not);
+ self.infer_mut_expr(body, Mutability::Not);
+ }
+ Expr::MethodCall { receiver: x, method_name: _, args, generic_args: _ }
+ | Expr::Call { callee: x, args, is_assignee_expr: _ } => {
+ self.infer_mut_not_expr_iter(args.iter().copied().chain(Some(*x)));
+ }
+ Expr::Match { expr, arms } => {
+ let m = self.pat_iter_bound_mutability(arms.iter().map(|x| x.pat));
+ self.infer_mut_expr(*expr, m);
+ for arm in arms.iter() {
+ self.infer_mut_expr(arm.expr, Mutability::Not);
+ if let Some(g) = arm.guard {
+ self.infer_mut_expr(g, Mutability::Not);
+ }
+ }
+ }
+ Expr::Yield { expr }
+ | Expr::Yeet { expr }
+ | Expr::Return { expr }
+ | Expr::Break { expr, label: _ } => {
+ if let &Some(expr) = expr {
+ self.infer_mut_expr(expr, Mutability::Not);
+ }
+ }
+ Expr::RecordLit { path: _, fields, spread, ellipsis: _, is_assignee_expr: _ } => {
+ self.infer_mut_not_expr_iter(fields.iter().map(|x| x.expr).chain(*spread))
+ }
+ &Expr::Index { base, index } => {
+ if mutability == Mutability::Mut {
+ if let Some((f, _)) = self.result.method_resolutions.get_mut(&tgt_expr) {
+ if let Some(index_trait) = self
+ .db
+ .lang_item(self.table.trait_env.krate, LangItem::IndexMut)
+ .and_then(|l| l.as_trait())
+ {
+ if let Some(index_fn) =
+ self.db.trait_data(index_trait).method_by_name(&name![index_mut])
+ {
+ *f = index_fn;
+ let base_adjustments = self
+ .result
+ .expr_adjustments
+ .get_mut(&base)
+ .and_then(|it| it.last_mut());
+ if let Some(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(mutability)),
+ ..
+ }) = base_adjustments
+ {
+ *mutability = Mutability::Mut;
+ }
+ }
+ }
+ }
+ }
+ self.infer_mut_expr(base, mutability);
+ self.infer_mut_expr(index, Mutability::Not);
+ }
+ Expr::UnaryOp { expr, op: UnaryOp::Deref } => {
+ if let Some((f, _)) = self.result.method_resolutions.get_mut(&tgt_expr) {
+ if mutability == Mutability::Mut {
+ if let Some(deref_trait) = self
+ .db
+ .lang_item(self.table.trait_env.krate, LangItem::DerefMut)
+ .and_then(|l| l.as_trait())
+ {
+ if let Some(deref_fn) =
+ self.db.trait_data(deref_trait).method_by_name(&name![deref_mut])
+ {
+ *f = deref_fn;
+ }
+ }
+ }
+ }
+ self.infer_mut_expr(*expr, mutability);
+ }
+ Expr::Field { expr, name: _ } => {
+ self.infer_mut_expr(*expr, mutability);
+ }
+ Expr::UnaryOp { expr, op: _ }
+ | Expr::Range { lhs: Some(expr), rhs: None, range_type: _ }
+ | Expr::Range { rhs: Some(expr), lhs: None, range_type: _ }
+ | Expr::Await { expr }
+ | Expr::Box { expr }
+ | Expr::Loop { body: expr, label: _ }
+ | Expr::Cast { expr, type_ref: _ } => {
+ self.infer_mut_expr(*expr, Mutability::Not);
+ }
+ Expr::Ref { expr, rawness: _, mutability } => {
+ let mutability = lower_to_chalk_mutability(*mutability);
+ self.infer_mut_expr(*expr, mutability);
+ }
+ Expr::BinaryOp { lhs, rhs, op: Some(BinaryOp::Assignment { .. }) } => {
+ self.infer_mut_expr(*lhs, Mutability::Mut);
+ self.infer_mut_expr(*rhs, Mutability::Not);
+ }
+ Expr::Array(Array::Repeat { initializer: lhs, repeat: rhs })
+ | Expr::BinaryOp { lhs, rhs, op: _ }
+ | Expr::Range { lhs: Some(lhs), rhs: Some(rhs), range_type: _ } => {
+ self.infer_mut_expr(*lhs, Mutability::Not);
+ self.infer_mut_expr(*rhs, Mutability::Not);
+ }
+ Expr::Closure { body, .. } => {
+ self.infer_mut_expr(*body, Mutability::Not);
+ }
+ Expr::Tuple { exprs, is_assignee_expr: _ }
+ | Expr::Array(Array::ElementList { elements: exprs, is_assignee_expr: _ }) => {
+ self.infer_mut_not_expr_iter(exprs.iter().copied());
+ }
+            // These don't need any action, as they don't have sub-expressions
+ Expr::Range { lhs: None, rhs: None, range_type: _ }
+ | Expr::Literal(_)
+ | Expr::Path(_)
+ | Expr::Continue { .. }
+ | Expr::Underscore => (),
+ }
+ }
+
+ fn infer_mut_not_expr_iter(&mut self, exprs: impl Iterator<Item = ExprId>) {
+ for expr in exprs {
+ self.infer_mut_expr(expr, Mutability::Not);
+ }
+ }
+
+ fn pat_iter_bound_mutability(&self, mut pat: impl Iterator<Item = PatId>) -> Mutability {
+ if pat.any(|p| self.pat_bound_mutability(p) == Mutability::Mut) {
+ Mutability::Mut
+ } else {
+ Mutability::Not
+ }
+ }
+
+    /// Checks if the pat contains a `ref mut` binding. Such a binding makes the context of the bound
+    /// expression mutable. For example, in `let (ref mut x0, ref x1) = *x;` we need to use `DerefMut` for `*x`, but in
+ /// `let (ref x0, ref x1) = *x;` we should use `Deref`.
+ fn pat_bound_mutability(&self, pat: PatId) -> Mutability {
+ let mut r = Mutability::Not;
+ self.body.walk_bindings_in_pat(pat, |b| {
+ if self.body.bindings[b].mode == BindingAnnotation::RefMut {
+ r = Mutability::Mut;
+ }
+ });
+ r
+ }
+}
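
For orientation, the behavior this new pass models is observable in surface Rust: the same `*` or
`[]` syntax resolves to a different trait method depending on whether the surrounding context
needs a mutable place. A minimal sketch using only std types (the function name is illustrative,
not part of the patch):

    fn mut_contexts(mut b: Box<i32>) {
        let _r: &i32 = &*b; // immutable context: resolves via `Deref::deref`
        *b = 5; // mutable context: resolves via `DerefMut::deref_mut`
        let mut v = vec![1, 2, 3];
        let _x = v[0]; // `Index::index`
        v[0] = 9; // `IndexMut::index_mut`, the rewrite `infer_mut_expr` performs above
    }
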
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs
index 5f839fc30..2480f8bab 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs
@@ -5,7 +5,7 @@ use std::iter::repeat_with;
use chalk_ir::Mutability;
use hir_def::{
body::Body,
- expr::{Binding, BindingAnnotation, BindingId, Expr, ExprId, ExprOrPatId, Literal, Pat, PatId},
+ hir::{Binding, BindingAnnotation, BindingId, Expr, ExprId, ExprOrPatId, Literal, Pat, PatId},
path::Path,
};
use hir_expand::name::Name;
@@ -255,15 +255,15 @@ impl<'a> InferenceContext<'a> {
self.infer_slice_pat(&expected, prefix, slice, suffix, default_bm)
}
Pat::Wild => expected.clone(),
- Pat::Range { start, end } => {
- let start_ty = self.infer_expr(*start, &Expectation::has_type(expected.clone()));
- self.infer_expr(*end, &Expectation::has_type(start_ty))
+ Pat::Range { .. } => {
+ // FIXME: do some checks here.
+ expected.clone()
}
&Pat::Lit(expr) => {
// Don't emit type mismatches again, the expression lowering already did that.
let ty = self.infer_lit_pat(expr, &expected);
self.write_pat_ty(pat, ty.clone());
- return ty;
+ return self.pat_ty_after_adjustment(pat);
}
Pat::Box { inner } => match self.resolve_boxed_box() {
Some(box_adt) => {
@@ -298,22 +298,38 @@ impl<'a> InferenceContext<'a> {
.type_mismatches
.insert(pat.into(), TypeMismatch { expected, actual: ty.clone() });
}
- self.write_pat_ty(pat, ty.clone());
- ty
+ self.write_pat_ty(pat, ty);
+ self.pat_ty_after_adjustment(pat)
+ }
+
+ fn pat_ty_after_adjustment(&self, pat: PatId) -> Ty {
+ self.result
+ .pat_adjustments
+ .get(&pat)
+ .and_then(|x| x.first())
+ .unwrap_or(&self.result.type_of_pat[pat])
+ .clone()
}
fn infer_ref_pat(
&mut self,
- pat: PatId,
+ inner_pat: PatId,
mutability: Mutability,
expected: &Ty,
default_bm: BindingMode,
) -> Ty {
let expectation = match expected.as_reference() {
Some((inner_ty, _lifetime, _exp_mut)) => inner_ty.clone(),
- _ => self.result.standard_types.unknown.clone(),
+ None => {
+ let inner_ty = self.table.new_type_var();
+ let ref_ty =
+ TyKind::Ref(mutability, static_lifetime(), inner_ty.clone()).intern(Interner);
+ // Unification failure will be reported by the caller.
+ self.unify(&ref_ty, expected);
+ inner_ty
+ }
};
- let subty = self.infer_pat(pat, &expectation, default_bm);
+ let subty = self.infer_pat(inner_pat, &expectation, default_bm);
TyKind::Ref(mutability, static_lifetime(), subty).intern(Interner)
}
@@ -331,7 +347,7 @@ impl<'a> InferenceContext<'a> {
} else {
BindingMode::convert(mode)
};
- self.result.pat_binding_modes.insert(pat, mode);
+ self.result.binding_modes.insert(binding, mode);
let inner_ty = match subpat {
Some(subpat) => self.infer_pat(subpat, &expected, default_bm),
@@ -345,7 +361,7 @@ impl<'a> InferenceContext<'a> {
}
BindingMode::Move => inner_ty.clone(),
};
- self.write_pat_ty(pat, bound_ty.clone());
+ self.write_pat_ty(pat, inner_ty.clone());
self.write_binding_ty(binding, bound_ty);
return inner_ty;
}
@@ -370,7 +386,7 @@ impl<'a> InferenceContext<'a> {
if let &Some(slice_pat_id) = slice {
let rest_pat_ty = match expected.kind(Interner) {
TyKind::Array(_, length) => {
- let len = try_const_usize(length);
+ let len = try_const_usize(self.db, length);
let len =
len.and_then(|len| len.checked_sub((prefix.len() + suffix.len()) as u128));
TyKind::Array(elem_ty.clone(), usize_const(self.db, len, self.resolver.krate()))
@@ -419,17 +435,10 @@ fn is_non_ref_pat(body: &hir_def::body::Body, pat: PatId) -> bool {
// FIXME: ConstBlock/Path/Lit might actually evaluate to ref, but inference is unimplemented.
Pat::Path(..) => true,
Pat::ConstBlock(..) => true,
- Pat::Lit(expr) => {
- !matches!(body[*expr], Expr::Literal(Literal::String(..) | Literal::ByteString(..)))
- }
- Pat::Bind { id, subpat: Some(subpat), .. }
- if matches!(
- body.bindings[*id].mode,
- BindingAnnotation::Mutable | BindingAnnotation::Unannotated
- ) =>
- {
- is_non_ref_pat(body, *subpat)
- }
+ Pat::Lit(expr) => !matches!(
+ body[*expr],
+ Expr::Literal(Literal::String(..) | Literal::CString(..) | Literal::ByteString(..))
+ ),
Pat::Wild | Pat::Bind { .. } | Pat::Ref { .. } | Pat::Box { .. } | Pat::Missing => false,
}
}
@@ -437,7 +446,7 @@ fn is_non_ref_pat(body: &hir_def::body::Body, pat: PatId) -> bool {
pub(super) fn contains_explicit_ref_binding(body: &Body, pat_id: PatId) -> bool {
let mut res = false;
body.walk_pats(pat_id, &mut |pat| {
- res |= matches!(pat, Pat::Bind { id, .. } if body.bindings[*id].mode == BindingAnnotation::Ref);
+ res |= matches!(body[pat], Pat::Bind { id, .. } if body.bindings[id].mode == BindingAnnotation::Ref);
});
res
}
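
The slice-pattern hunk above now computes the rest pattern's length with `try_const_usize`; the
effect is inference you can observe directly. A small sketch for illustration:

    fn slice_rest() {
        let arr = [1u8, 2, 3, 4];
        // `rest` is inferred as `[u8; 2]`: the const-evaluated array length 4,
        // minus one prefix element and one suffix element.
        let [first, rest @ .., last] = arr;
        let _: [u8; 2] = rest;
        let _ = (first, last);
    }
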
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/path.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/path.rs
index 2267fedaa..79d9e21e7 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/path.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/path.rs
@@ -4,7 +4,7 @@ use chalk_ir::cast::Cast;
use hir_def::{
path::{Path, PathSegment},
resolver::{ResolveValueResult, TypeNs, ValueNs},
- AdtId, AssocItemId, EnumVariantId, ItemContainerId, Lookup,
+ AdtId, AssocItemId, EnumVariantId, GenericDefId, ItemContainerId, Lookup,
};
use hir_expand::name::Name;
use stdx::never;
@@ -13,6 +13,7 @@ use crate::{
builder::ParamKind,
consteval,
method_resolution::{self, VisibleFromModule},
+ to_chalk_trait_id,
utils::generics,
InferenceDiagnostic, Interner, Substitution, TraitRefExt, Ty, TyBuilder, TyExt, TyKind,
ValueTyDefId,
@@ -20,26 +21,44 @@ use crate::{
use super::{ExprOrPatId, InferenceContext, TraitRef};
-impl<'a> InferenceContext<'a> {
+impl InferenceContext<'_> {
pub(super) fn infer_path(&mut self, path: &Path, id: ExprOrPatId) -> Option<Ty> {
- let ty = self.resolve_value_path(path, id)?;
- let ty = self.insert_type_vars(ty);
+ let (value_def, generic_def, substs) = match self.resolve_value_path(path, id)? {
+ ValuePathResolution::GenericDef(value_def, generic_def, substs) => {
+ (value_def, generic_def, substs)
+ }
+ ValuePathResolution::NonGeneric(ty) => return Some(ty),
+ };
+ let substs = self.insert_type_vars(substs);
+ let substs = self.normalize_associated_types_in(substs);
+
+ self.add_required_obligations_for_value_path(generic_def, &substs);
+
+ let ty = self.db.value_ty(value_def).substitute(Interner, &substs);
let ty = self.normalize_associated_types_in(ty);
Some(ty)
}
- fn resolve_value_path(&mut self, path: &Path, id: ExprOrPatId) -> Option<Ty> {
+ fn resolve_value_path(&mut self, path: &Path, id: ExprOrPatId) -> Option<ValuePathResolution> {
let (value, self_subst) = if let Some(type_ref) = path.type_anchor() {
- let Some(last) = path.segments().last() else { return None };
- let ty = self.make_ty(type_ref);
+ let last = path.segments().last()?;
+
+ // Don't use `self.make_ty()` here as we need `orig_ns`.
+ let ctx =
+ crate::lower::TyLoweringContext::new(self.db, &self.resolver, self.owner.into());
+ let (ty, orig_ns) = ctx.lower_ty_ext(type_ref);
+ let ty = self.table.insert_type_vars(ty);
+ let ty = self.table.normalize_associated_types_in(ty);
+
let remaining_segments_for_ty = path.segments().take(path.segments().len() - 1);
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver);
- let (ty, _) = ctx.lower_ty_relative_path(ty, None, remaining_segments_for_ty);
+ let (ty, _) = ctx.lower_ty_relative_path(ty, orig_ns, remaining_segments_for_ty);
+ let ty = self.table.insert_type_vars(ty);
+ let ty = self.table.normalize_associated_types_in(ty);
self.resolve_ty_assoc_item(ty, last.name, id).map(|(it, substs)| (it, Some(substs)))?
} else {
// FIXME: report error, unresolved first path segment
let value_or_partial =
- self.resolver.resolve_path_in_value_ns(self.db.upcast(), path.mod_path())?;
+ self.resolver.resolve_path_in_value_ns(self.db.upcast(), path)?;
match value_or_partial {
ResolveValueResult::ValueNs(it) => (it, None),
@@ -49,9 +68,9 @@ impl<'a> InferenceContext<'a> {
}
};
- let typable: ValueTyDefId = match value {
+ let value_def = match value {
ValueNs::LocalBinding(pat) => match self.result.type_of_binding.get(pat) {
- Some(ty) => return Some(ty.clone()),
+ Some(ty) => return Some(ValuePathResolution::NonGeneric(ty.clone())),
None => {
never!("uninferred pattern?");
return None;
@@ -75,28 +94,45 @@ impl<'a> InferenceContext<'a> {
let substs = generics.placeholder_subst(self.db);
let ty = self.db.impl_self_ty(impl_id).substitute(Interner, &substs);
if let Some((AdtId::StructId(struct_id), substs)) = ty.as_adt() {
- let ty = self.db.value_ty(struct_id.into()).substitute(Interner, &substs);
- return Some(ty);
+ return Some(ValuePathResolution::GenericDef(
+ struct_id.into(),
+ struct_id.into(),
+ substs.clone(),
+ ));
} else {
// FIXME: report error, invalid Self reference
return None;
}
}
- ValueNs::GenericParam(it) => return Some(self.db.const_param_ty(it)),
+ ValueNs::GenericParam(it) => {
+ return Some(ValuePathResolution::NonGeneric(self.db.const_param_ty(it)))
+ }
};
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver);
- let substs = ctx.substs_from_path(path, typable, true);
+ let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver, self.owner.into());
+ let substs = ctx.substs_from_path(path, value_def, true);
let substs = substs.as_slice(Interner);
let parent_substs = self_subst.or_else(|| {
- let generics = generics(self.db.upcast(), typable.to_generic_def_id()?);
+ let generics = generics(self.db.upcast(), value_def.to_generic_def_id()?);
let parent_params_len = generics.parent_generics()?.len();
let parent_args = &substs[substs.len() - parent_params_len..];
Some(Substitution::from_iter(Interner, parent_args))
});
let parent_substs_len = parent_substs.as_ref().map_or(0, |s| s.len(Interner));
let mut it = substs.iter().take(substs.len() - parent_substs_len).cloned();
- let ty = TyBuilder::value_ty(self.db, typable, parent_substs)
+
+ let Some(generic_def) = value_def.to_generic_def_id() else {
+ // `value_def` is the kind of item that can never be generic (i.e. statics, at least
+ // currently). We can just skip the binders to get its type.
+ let (ty, binders) = self.db.value_ty(value_def).into_value_and_skipped_binders();
+ stdx::always!(
+ parent_substs.is_none() && binders.is_empty(Interner),
+ "non-empty binders for non-generic def",
+ );
+ return Some(ValuePathResolution::NonGeneric(ty));
+ };
+ let builder = TyBuilder::subst_for_def(self.db, generic_def, parent_substs);
+ let substs = builder
.fill(|x| {
it.next().unwrap_or_else(|| match x {
ParamKind::Type => self.result.standard_types.unknown.clone().cast(Interner),
@@ -104,7 +140,35 @@ impl<'a> InferenceContext<'a> {
})
})
.build();
- Some(ty)
+
+ Some(ValuePathResolution::GenericDef(value_def, generic_def, substs))
+ }
+
+ fn add_required_obligations_for_value_path(&mut self, def: GenericDefId, subst: &Substitution) {
+ let predicates = self.db.generic_predicates(def);
+ for predicate in predicates.iter() {
+ let (predicate, binders) =
+ predicate.clone().substitute(Interner, &subst).into_value_and_skipped_binders();
+ // Quantified where clauses are not yet handled.
+ stdx::always!(binders.is_empty(Interner));
+ self.push_obligation(predicate.cast(Interner));
+ }
+
+ // We need to add `Self: Trait` obligation when `def` is a trait assoc item.
+ let container = match def {
+ GenericDefId::FunctionId(id) => id.lookup(self.db.upcast()).container,
+ GenericDefId::ConstId(id) => id.lookup(self.db.upcast()).container,
+ _ => return,
+ };
+
+ if let ItemContainerId::TraitId(trait_) = container {
+ let param_len = generics(self.db.upcast(), def).len_self();
+ let parent_subst =
+ Substitution::from_iter(Interner, subst.iter(Interner).skip(param_len));
+ let trait_ref =
+ TraitRef { trait_id: to_chalk_trait_id(trait_), substitution: parent_subst };
+ self.push_obligation(trait_ref.cast(Interner));
+ }
}
fn resolve_assoc_item(
@@ -127,7 +191,11 @@ impl<'a> InferenceContext<'a> {
(TypeNs::TraitId(trait_), true) => {
let segment =
remaining_segments.last().expect("there should be at least one segment here");
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver);
+ let ctx = crate::lower::TyLoweringContext::new(
+ self.db,
+ &self.resolver,
+ self.owner.into(),
+ );
let trait_ref =
ctx.lower_trait_ref_from_resolved_path(trait_, resolved_segment, None);
self.resolve_trait_assoc_item(trait_ref, segment, id)
@@ -139,7 +207,11 @@ impl<'a> InferenceContext<'a> {
// as Iterator>::Item::default`)
let remaining_segments_for_ty =
remaining_segments.take(remaining_segments.len() - 1);
- let ctx = crate::lower::TyLoweringContext::new(self.db, &self.resolver);
+ let ctx = crate::lower::TyLoweringContext::new(
+ self.db,
+ &self.resolver,
+ self.owner.into(),
+ );
let (ty, _) = ctx.lower_partly_resolved_path(
def,
resolved_segment,
@@ -169,7 +241,7 @@ impl<'a> InferenceContext<'a> {
) -> Option<(ValueNs, Substitution)> {
let trait_ = trait_ref.hir_trait_id();
let item =
- self.db.trait_data(trait_).items.iter().map(|(_name, id)| (*id)).find_map(|item| {
+ self.db.trait_data(trait_).items.iter().map(|(_name, id)| *id).find_map(|item| {
match item {
AssocItemId::FunctionId(func) => {
if segment.name == &self.db.function_data(func).name {
@@ -288,7 +360,7 @@ impl<'a> InferenceContext<'a> {
name: &Name,
id: ExprOrPatId,
) -> Option<(ValueNs, Substitution)> {
- let ty = self.resolve_ty_shallow(ty);
+ let ty = self.resolve_ty_shallow(&ty);
let (enum_id, subst) = match ty.as_adt() {
Some((AdtId::EnumId(e), subst)) => (e, subst),
_ => return None,
@@ -300,3 +372,10 @@ impl<'a> InferenceContext<'a> {
Some((ValueNs::EnumVariantId(variant), subst.clone()))
}
}
+
+enum ValuePathResolution {
+    // It's awkward to wrap a single ID in two enums, but we need both, and this saves a fallible
+    // conversion between them plus an `unwrap()`.
+ GenericDef(ValueTyDefId, GenericDefId, Substitution),
+ NonGeneric(Ty),
+}
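
As a rough illustration of the `Self: Trait` obligation that
`add_required_obligations_for_value_path` now pushes for trait assoc items (the trait and names
below are invented for the example):

    trait Make {
        fn make() -> Self;
    }

    fn build<T: Make>() -> T {
        // Resolving the value path `T::make` registers the obligation `T: Make`,
        // so trait solving can use the bound while inferring the call.
        T::make()
    }
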
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs
index 504f0743a..e33d8f179 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs
@@ -1,23 +1,25 @@
//! Unification and canonicalization logic.
-use std::{fmt, iter, mem, sync::Arc};
+use std::{fmt, iter, mem};
use chalk_ir::{
cast::Cast, fold::TypeFoldable, interner::HasInterner, zip::Zip, CanonicalVarKind, FloatTy,
IntTy, TyVariableKind, UniverseIndex,
};
use chalk_solve::infer::ParameterEnaVariableExt;
+use either::Either;
use ena::unify::UnifyKey;
-use hir_def::{FunctionId, TraitId};
use hir_expand::name;
use stdx::never;
+use triomphe::Arc;
use super::{InferOk, InferResult, InferenceContext, TypeError};
use crate::{
- db::HirDatabase, fold_tys, static_lifetime, traits::FnTrait, AliasEq, AliasTy, BoundVar,
- Canonical, Const, DebruijnIndex, GenericArg, GenericArgData, Goal, Guidance, InEnvironment,
- InferenceVar, Interner, Lifetime, ParamKind, ProjectionTy, ProjectionTyExt, Scalar, Solution,
- Substitution, TraitEnvironment, Ty, TyBuilder, TyExt, TyKind, VariableKind,
+ consteval::unknown_const, db::HirDatabase, fold_tys_and_consts, static_lifetime,
+ to_chalk_trait_id, traits::FnTrait, AliasEq, AliasTy, BoundVar, Canonical, Const, ConstValue,
+ DebruijnIndex, GenericArg, GenericArgData, Goal, Guidance, InEnvironment, InferenceVar,
+ Interner, Lifetime, ParamKind, ProjectionTy, ProjectionTyExt, Scalar, Solution, Substitution,
+ TraitEnvironment, Ty, TyBuilder, TyExt, TyKind, VariableKind,
};
impl<'a> InferenceContext<'a> {
@@ -130,7 +132,7 @@ pub(crate) fn unify(
}
bitflags::bitflags! {
- #[derive(Default)]
+ #[derive(Default, Clone, Copy)]
pub(crate) struct TypeVariableFlags: u8 {
const DIVERGING = 1 << 0;
const INTEGER = 1 << 1;
@@ -230,14 +232,40 @@ impl<'a> InferenceTable<'a> {
/// type annotation (e.g. from a let type annotation, field type or function
/// call). `make_ty` handles this already, but e.g. for field types we need
/// to do it as well.
- pub(crate) fn normalize_associated_types_in(&mut self, ty: Ty) -> Ty {
- fold_tys(
+ pub(crate) fn normalize_associated_types_in<T>(&mut self, ty: T) -> T
+ where
+ T: HasInterner<Interner = Interner> + TypeFoldable<Interner>,
+ {
+ fold_tys_and_consts(
ty,
- |ty, _| match ty.kind(Interner) {
- TyKind::Alias(AliasTy::Projection(proj_ty)) => {
- self.normalize_projection_ty(proj_ty.clone())
- }
- _ => ty,
+ |e, _| match e {
+ Either::Left(ty) => Either::Left(match ty.kind(Interner) {
+ TyKind::Alias(AliasTy::Projection(proj_ty)) => {
+ self.normalize_projection_ty(proj_ty.clone())
+ }
+ _ => ty,
+ }),
+ Either::Right(c) => Either::Right(match &c.data(Interner).value {
+ chalk_ir::ConstValue::Concrete(cc) => match &cc.interned {
+ crate::ConstScalar::UnevaluatedConst(c_id, subst) => {
+                            // FIXME: Ideally we should do everything here that we do with type aliases,
+                            // i.e. add a variable and register an obligation. But that needs chalk
+                            // support, so we handle the most basic case (a non-associated const
+                            // without generic parameters) manually.
+ if subst.len(Interner) == 0 {
+ if let Ok(eval) = self.db.const_eval((*c_id).into(), subst.clone())
+ {
+ eval
+ } else {
+ unknown_const(c.data(Interner).ty.clone())
+ }
+ } else {
+ unknown_const(c.data(Interner).ty.clone())
+ }
+ }
+ _ => c,
+ },
+ _ => c,
+ }),
},
DebruijnIndex::INNERMOST,
)
@@ -463,7 +491,8 @@ impl<'a> InferenceTable<'a> {
pub(crate) fn try_obligation(&mut self, goal: Goal) -> Option<Solution> {
let in_env = InEnvironment::new(&self.trait_env.env, goal);
let canonicalized = self.canonicalize(in_env);
- let solution = self.db.trait_solve(self.trait_env.krate, canonicalized.value);
+ let solution =
+ self.db.trait_solve(self.trait_env.krate, self.trait_env.block, canonicalized.value);
solution
}
@@ -598,7 +627,11 @@ impl<'a> InferenceTable<'a> {
&mut self,
canonicalized: &Canonicalized<InEnvironment<Goal>>,
) -> bool {
- let solution = self.db.trait_solve(self.trait_env.krate, canonicalized.value.clone());
+ let solution = self.db.trait_solve(
+ self.trait_env.krate,
+ self.trait_env.block,
+ canonicalized.value.clone(),
+ );
match solution {
Some(Solution::Unique(canonical_subst)) => {
@@ -631,10 +664,13 @@ impl<'a> InferenceTable<'a> {
&mut self,
ty: &Ty,
num_args: usize,
- ) -> Option<(Option<(TraitId, FunctionId)>, Vec<Ty>, Ty)> {
+ ) -> Option<(Option<FnTrait>, Vec<Ty>, Ty)> {
match ty.callable_sig(self.db) {
Some(sig) => Some((None, sig.params().to_vec(), sig.ret().clone())),
- None => self.callable_sig_from_fn_trait(ty, num_args),
+ None => {
+ let (f, args_ty, return_ty) = self.callable_sig_from_fn_trait(ty, num_args)?;
+ Some((Some(f), args_ty, return_ty))
+ }
}
}
@@ -642,7 +678,7 @@ impl<'a> InferenceTable<'a> {
&mut self,
ty: &Ty,
num_args: usize,
- ) -> Option<(Option<(TraitId, FunctionId)>, Vec<Ty>, Ty)> {
+ ) -> Option<(FnTrait, Vec<Ty>, Ty)> {
let krate = self.trait_env.krate;
let fn_once_trait = FnTrait::FnOnce.get_id(self.db, krate)?;
let trait_data = self.db.trait_data(fn_once_trait);
@@ -676,23 +712,90 @@ impl<'a> InferenceTable<'a> {
};
let trait_env = self.trait_env.env.clone();
+ let mut trait_ref = projection.trait_ref(self.db);
let obligation = InEnvironment {
- goal: projection.trait_ref(self.db).cast(Interner),
- environment: trait_env,
+ goal: trait_ref.clone().cast(Interner),
+ environment: trait_env.clone(),
};
let canonical = self.canonicalize(obligation.clone());
- if self.db.trait_solve(krate, canonical.value.cast(Interner)).is_some() {
+ if self
+ .db
+ .trait_solve(krate, self.trait_env.block, canonical.value.cast(Interner))
+ .is_some()
+ {
self.register_obligation(obligation.goal);
let return_ty = self.normalize_projection_ty(projection);
- Some((
- Some(fn_once_trait).zip(trait_data.method_by_name(&name!(call_once))),
- arg_tys,
- return_ty,
- ))
+ for fn_x in [FnTrait::Fn, FnTrait::FnMut, FnTrait::FnOnce] {
+ let fn_x_trait = fn_x.get_id(self.db, krate)?;
+ trait_ref.trait_id = to_chalk_trait_id(fn_x_trait);
+ let obligation: chalk_ir::InEnvironment<chalk_ir::Goal<Interner>> = InEnvironment {
+ goal: trait_ref.clone().cast(Interner),
+ environment: trait_env.clone(),
+ };
+ let canonical = self.canonicalize(obligation.clone());
+ if self
+ .db
+ .trait_solve(krate, self.trait_env.block, canonical.value.cast(Interner))
+ .is_some()
+ {
+ return Some((fn_x, arg_tys, return_ty));
+ }
+ }
+ unreachable!("It should at least implement FnOnce at this point");
} else {
None
}
}
+
+ pub(super) fn insert_type_vars<T>(&mut self, ty: T) -> T
+ where
+ T: HasInterner<Interner = Interner> + TypeFoldable<Interner>,
+ {
+ fold_tys_and_consts(
+ ty,
+ |x, _| match x {
+ Either::Left(ty) => Either::Left(self.insert_type_vars_shallow(ty)),
+ Either::Right(c) => Either::Right(self.insert_const_vars_shallow(c)),
+ },
+ DebruijnIndex::INNERMOST,
+ )
+ }
+
+ /// Replaces `Ty::Error` by a new type var, so we can maybe still infer it.
+ pub(super) fn insert_type_vars_shallow(&mut self, ty: Ty) -> Ty {
+ match ty.kind(Interner) {
+ TyKind::Error => self.new_type_var(),
+ TyKind::InferenceVar(..) => {
+ let ty_resolved = self.resolve_ty_shallow(&ty);
+ if ty_resolved.is_unknown() {
+ self.new_type_var()
+ } else {
+ ty
+ }
+ }
+ _ => ty,
+ }
+ }
+
+    /// Replaces `ConstScalar::Unknown` by a new const var, so we can maybe still infer it.
+ pub(super) fn insert_const_vars_shallow(&mut self, c: Const) -> Const {
+ let data = c.data(Interner);
+ match &data.value {
+ ConstValue::Concrete(cc) => match &cc.interned {
+ crate::ConstScalar::Unknown => self.new_const_var(data.ty.clone()),
+            // Try to evaluate the unevaluated const; replace it with a new const var if const eval fails.
+ crate::ConstScalar::UnevaluatedConst(id, subst) => {
+ if let Ok(eval) = self.db.const_eval(*id, subst.clone()) {
+ eval
+ } else {
+ self.new_const_var(data.ty.clone())
+ }
+ }
+ _ => c,
+ },
+ _ => c,
+ }
+ }
}
impl<'a> fmt::Debug for InferenceTable<'a> {
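
The new loop over `[FnTrait::Fn, FnTrait::FnMut, FnTrait::FnOnce]` reports the most specific fn
trait the callee implements. A sketch of the distinction it draws, in surface Rust:

    fn call_twice<F: FnMut() -> i32>(mut f: F) -> i32 {
        f() + f()
    }

    fn fn_traits() {
        let mut count = 0;
        // This closure mutates a capture, so it implements `FnMut` (and `FnOnce`) but
        // not `Fn`; probing `Fn` first and then `FnMut` lands on `FnMut` here.
        let inc = || {
            count += 1;
            count
        };
        let _ = call_twice(inc);
    }
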
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/inhabitedness.rs b/src/tools/rust-analyzer/crates/hir-ty/src/inhabitedness.rs
index 36af78153..e5038543b 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/inhabitedness.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/inhabitedness.rs
@@ -6,9 +6,10 @@ use chalk_ir::{
DebruijnIndex,
};
use hir_def::{
- adt::VariantData, attr::Attrs, visibility::Visibility, AdtId, EnumVariantId, HasModule, Lookup,
- ModuleId, VariantId,
+ attr::Attrs, data::adt::VariantData, visibility::Visibility, AdtId, EnumVariantId, HasModule,
+ Lookup, ModuleId, VariantId,
};
+use rustc_hash::FxHashSet;
use crate::{
consteval::try_const_usize, db::HirDatabase, Binders, Interner, Substitution, Ty, TyKind,
@@ -16,7 +17,8 @@ use crate::{
/// Checks whether a type is visibly uninhabited from a particular module.
pub(crate) fn is_ty_uninhabited_from(ty: &Ty, target_mod: ModuleId, db: &dyn HirDatabase) -> bool {
- let mut uninhabited_from = UninhabitedFrom { target_mod, db };
+ let mut uninhabited_from =
+ UninhabitedFrom { target_mod, db, max_depth: 500, recursive_ty: FxHashSet::default() };
let inhabitedness = ty.visit_with(&mut uninhabited_from, DebruijnIndex::INNERMOST);
inhabitedness == BREAK_VISIBLY_UNINHABITED
}
@@ -32,7 +34,8 @@ pub(crate) fn is_enum_variant_uninhabited_from(
let vars_attrs = db.variants_attrs(variant.parent);
let is_local = variant.parent.lookup(db.upcast()).container.krate() == target_mod.krate();
- let mut uninhabited_from = UninhabitedFrom { target_mod, db };
+ let mut uninhabited_from =
+ UninhabitedFrom { target_mod, db, max_depth: 500, recursive_ty: FxHashSet::default() };
let inhabitedness = uninhabited_from.visit_variant(
variant.into(),
&enum_data.variants[variant.local_id].variant_data,
@@ -45,6 +48,9 @@ pub(crate) fn is_enum_variant_uninhabited_from(
struct UninhabitedFrom<'a> {
target_mod: ModuleId,
+ recursive_ty: FxHashSet<Ty>,
+    // Guard to prevent stack overflow in non-trivial, non-terminating types.
+ max_depth: usize,
db: &'a dyn HirDatabase,
}
@@ -65,17 +71,27 @@ impl TypeVisitor<Interner> for UninhabitedFrom<'_> {
ty: &Ty,
outer_binder: DebruijnIndex,
) -> ControlFlow<VisiblyUninhabited> {
- match ty.kind(Interner) {
+ if self.recursive_ty.contains(ty) || self.max_depth == 0 {
+            // rustc considers recursive types to be always inhabited. Treating them as always
+            // uninhabited would arguably also be valid, but we should match what rustc does.
+ return CONTINUE_OPAQUELY_INHABITED;
+ }
+ self.recursive_ty.insert(ty.clone());
+ self.max_depth -= 1;
+ let r = match ty.kind(Interner) {
TyKind::Adt(adt, subst) => self.visit_adt(adt.0, subst),
TyKind::Never => BREAK_VISIBLY_UNINHABITED,
TyKind::Tuple(..) => ty.super_visit_with(self, outer_binder),
- TyKind::Array(item_ty, len) => match try_const_usize(len) {
+ TyKind::Array(item_ty, len) => match try_const_usize(self.db, len) {
Some(0) | None => CONTINUE_OPAQUELY_INHABITED,
Some(1..) => item_ty.super_visit_with(self, outer_binder),
},
TyKind::Ref(..) | _ => CONTINUE_OPAQUELY_INHABITED,
- }
+ };
+ self.recursive_ty.remove(ty);
+ self.max_depth += 1;
+ r
}
fn interner(&self) -> Interner {
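
The recursion guard matters for types whose inhabitedness walk would otherwise never terminate;
a minimal illustration (type names invented for the example):

    enum Void {} // uninhabited: an enum with no variants has no values

    struct Rec {
        // Recursive: without the guard, visiting `Rec` would revisit `Rec` forever.
        // With it, the walk bails out and treats the type as inhabited, as rustc does.
        next: Box<Rec>,
    }
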
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/interner.rs b/src/tools/rust-analyzer/crates/hir-ty/src/interner.rs
index aea7e9762..e4dd4b86c 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/interner.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/interner.rs
@@ -7,7 +7,8 @@ use chalk_ir::{Goal, GoalData};
use hir_def::TypeAliasId;
use intern::{impl_internable, Interned};
use smallvec::SmallVec;
-use std::{fmt, sync::Arc};
+use std::fmt;
+use triomphe::Arc;
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Interner;
@@ -43,7 +44,7 @@ impl_internable!(
);
impl chalk_ir::interner::Interner for Interner {
- type InternedType = Interned<InternedWrapper<chalk_ir::TyData<Interner>>>;
+ type InternedType = Interned<InternedWrapper<chalk_ir::TyData<Self>>>;
type InternedLifetime = Interned<InternedWrapper<chalk_ir::LifetimeData<Self>>>;
type InternedConst = Interned<InternedWrapper<chalk_ir::ConstData<Self>>>;
type InternedConcreteConst = ConstScalar;
@@ -51,8 +52,8 @@ impl chalk_ir::interner::Interner for Interner {
type InternedGoal = Arc<GoalData<Self>>;
type InternedGoals = Vec<Goal<Self>>;
type InternedSubstitution = Interned<InternedWrapper<SmallVec<[GenericArg; 2]>>>;
- type InternedProgramClause = chalk_ir::ProgramClauseData<Self>;
type InternedProgramClauses = Interned<InternedWrapper<Vec<chalk_ir::ProgramClause<Self>>>>;
+ type InternedProgramClause = chalk_ir::ProgramClauseData<Self>;
type InternedQuantifiedWhereClauses =
Interned<InternedWrapper<Vec<chalk_ir::QuantifiedWhereClause<Self>>>>;
type InternedVariableKinds = Interned<InternedWrapper<Vec<chalk_ir::VariableKind<Interner>>>>;
@@ -86,6 +87,27 @@ impl chalk_ir::interner::Interner for Interner {
tls::with_current_program(|prog| Some(prog?.debug_assoc_type_id(id, fmt)))
}
+ fn debug_opaque_ty_id(
+ opaque_ty_id: chalk_ir::OpaqueTyId<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "OpaqueTy#{}", opaque_ty_id.0))
+ }
+
+ fn debug_fn_def_id(
+ fn_def_id: chalk_ir::FnDefId<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ tls::with_current_program(|prog| Some(prog?.debug_fn_def_id(fn_def_id, fmt)))
+ }
+
+ fn debug_closure_id(
+ _fn_def_id: chalk_ir::ClosureId<Self>,
+ _fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ None
+ }
+
fn debug_alias(
alias: &chalk_ir::AliasTy<Interner>,
fmt: &mut fmt::Formatter<'_>,
@@ -113,13 +135,6 @@ impl chalk_ir::interner::Interner for Interner {
Some(write!(fmt, "{:?}", opaque_ty.opaque_ty_id))
}
- fn debug_opaque_ty_id(
- opaque_ty_id: chalk_ir::OpaqueTyId<Self>,
- fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- Some(write!(fmt, "OpaqueTy#{}", opaque_ty_id.0))
- }
-
fn debug_ty(ty: &chalk_ir::Ty<Interner>, fmt: &mut fmt::Formatter<'_>) -> Option<fmt::Result> {
Some(write!(fmt, "{:?}", ty.data(Interner)))
}
@@ -131,76 +146,56 @@ impl chalk_ir::interner::Interner for Interner {
Some(write!(fmt, "{:?}", lifetime.data(Interner)))
}
- fn debug_generic_arg(
- parameter: &GenericArg,
- fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- Some(write!(fmt, "{:?}", parameter.data(Interner).inner_debug()))
- }
-
- fn debug_goal(goal: &Goal<Interner>, fmt: &mut fmt::Formatter<'_>) -> Option<fmt::Result> {
- let goal_data = goal.data(Interner);
- Some(write!(fmt, "{goal_data:?}"))
- }
-
- fn debug_goals(
- goals: &chalk_ir::Goals<Interner>,
- fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- Some(write!(fmt, "{:?}", goals.debug(Interner)))
- }
-
- fn debug_program_clause_implication(
- pci: &chalk_ir::ProgramClauseImplication<Interner>,
- fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- Some(write!(fmt, "{:?}", pci.debug(Interner)))
- }
-
- fn debug_substitution(
- substitution: &chalk_ir::Substitution<Interner>,
+ fn debug_const(
+ constant: &chalk_ir::Const<Self>,
fmt: &mut fmt::Formatter<'_>,
) -> Option<fmt::Result> {
- Some(write!(fmt, "{:?}", substitution.debug(Interner)))
+ Some(write!(fmt, "{:?}", constant.data(Interner)))
}
- fn debug_separator_trait_ref(
- separator_trait_ref: &chalk_ir::SeparatorTraitRef<'_, Interner>,
+ fn debug_generic_arg(
+ parameter: &GenericArg,
fmt: &mut fmt::Formatter<'_>,
) -> Option<fmt::Result> {
- Some(write!(fmt, "{:?}", separator_trait_ref.debug(Interner)))
+ Some(write!(fmt, "{:?}", parameter.data(Interner).inner_debug()))
}
- fn debug_fn_def_id(
- fn_def_id: chalk_ir::FnDefId<Self>,
- fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- tls::with_current_program(|prog| Some(prog?.debug_fn_def_id(fn_def_id, fmt)))
- }
- fn debug_const(
- constant: &chalk_ir::Const<Self>,
- fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- Some(write!(fmt, "{:?}", constant.data(Interner)))
- }
fn debug_variable_kinds(
variable_kinds: &chalk_ir::VariableKinds<Self>,
fmt: &mut fmt::Formatter<'_>,
) -> Option<fmt::Result> {
Some(write!(fmt, "{:?}", variable_kinds.as_slice(Interner)))
}
+
fn debug_variable_kinds_with_angles(
variable_kinds: &chalk_ir::VariableKinds<Self>,
fmt: &mut fmt::Formatter<'_>,
) -> Option<fmt::Result> {
Some(write!(fmt, "{:?}", variable_kinds.inner_debug(Interner)))
}
+
fn debug_canonical_var_kinds(
canonical_var_kinds: &chalk_ir::CanonicalVarKinds<Self>,
fmt: &mut fmt::Formatter<'_>,
) -> Option<fmt::Result> {
Some(write!(fmt, "{:?}", canonical_var_kinds.as_slice(Interner)))
}
+ fn debug_goal(goal: &Goal<Interner>, fmt: &mut fmt::Formatter<'_>) -> Option<fmt::Result> {
+ let goal_data = goal.data(Interner);
+ Some(write!(fmt, "{goal_data:?}"))
+ }
+ fn debug_goals(
+ goals: &chalk_ir::Goals<Interner>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", goals.debug(Interner)))
+ }
+ fn debug_program_clause_implication(
+ pci: &chalk_ir::ProgramClauseImplication<Interner>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", pci.debug(Interner)))
+ }
fn debug_program_clause(
clause: &chalk_ir::ProgramClause<Self>,
fmt: &mut fmt::Formatter<'_>,
@@ -213,6 +208,19 @@ impl chalk_ir::interner::Interner for Interner {
) -> Option<fmt::Result> {
Some(write!(fmt, "{:?}", clauses.as_slice(Interner)))
}
+ fn debug_substitution(
+ substitution: &chalk_ir::Substitution<Interner>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", substitution.debug(Interner)))
+ }
+ fn debug_separator_trait_ref(
+ separator_trait_ref: &chalk_ir::SeparatorTraitRef<'_, Interner>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", separator_trait_ref.debug(Interner)))
+ }
+
fn debug_quantified_where_clauses(
clauses: &chalk_ir::QuantifiedWhereClauses<Self>,
fmt: &mut fmt::Formatter<'_>,
@@ -220,6 +228,13 @@ impl chalk_ir::interner::Interner for Interner {
Some(write!(fmt, "{:?}", clauses.as_slice(Interner)))
}
+ fn debug_constraints(
+ _clauses: &chalk_ir::Constraints<Self>,
+ _fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ None
+ }
+
fn intern_ty(self, kind: chalk_ir::TyKind<Self>) -> Self::InternedType {
let flags = kind.compute_flags(self);
Interned::new(InternedWrapper(chalk_ir::TyData { kind, flags }))
@@ -251,7 +266,7 @@ impl chalk_ir::interner::Interner for Interner {
c1: &Self::InternedConcreteConst,
c2: &Self::InternedConcreteConst,
) -> bool {
- (c1 == &ConstScalar::Unknown) || (c2 == &ConstScalar::Unknown) || (c1 == c2)
+ !matches!(c1, ConstScalar::Bytes(..)) || !matches!(c2, ConstScalar::Bytes(..)) || (c1 == c2)
}
fn intern_generic_arg(
@@ -272,6 +287,10 @@ impl chalk_ir::interner::Interner for Interner {
Arc::new(goal)
}
+ fn goal_data(self, goal: &Self::InternedGoal) -> &GoalData<Self> {
+ goal
+ }
+
fn intern_goals<E>(
self,
data: impl IntoIterator<Item = Result<Goal<Self>, E>>,
@@ -279,10 +298,6 @@ impl chalk_ir::interner::Interner for Interner {
data.into_iter().collect()
}
- fn goal_data(self, goal: &Self::InternedGoal) -> &GoalData<Self> {
- goal
- }
-
fn goals_data(self, goals: &Self::InternedGoals) -> &[Goal<Interner>] {
goals
}
@@ -367,32 +382,18 @@ impl chalk_ir::interner::Interner for Interner {
) -> &[chalk_ir::CanonicalVarKind<Self>] {
canonical_var_kinds
}
-
fn intern_constraints<E>(
self,
data: impl IntoIterator<Item = Result<chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>, E>>,
) -> Result<Self::InternedConstraints, E> {
data.into_iter().collect()
}
-
fn constraints_data(
self,
constraints: &Self::InternedConstraints,
) -> &[chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>] {
constraints
}
- fn debug_closure_id(
- _fn_def_id: chalk_ir::ClosureId<Self>,
- _fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- None
- }
- fn debug_constraints(
- _clauses: &chalk_ir::Constraints<Self>,
- _fmt: &mut fmt::Formatter<'_>,
- ) -> Option<fmt::Result> {
- None
- }
fn intern_variances<E>(
self,
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/lang_items.rs b/src/tools/rust-analyzer/crates/hir-ty/src/lang_items.rs
index 5308c7216..85ed46b96 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/lang_items.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/lang_items.rs
@@ -1,19 +1,65 @@
//! Functions to detect special lang items
-use hir_def::{lang_item::LangItem, AdtId, HasModule};
+use hir_def::{data::adt::StructFlags, lang_item::LangItem, AdtId};
+use hir_expand::name::Name;
use crate::db::HirDatabase;
-pub fn is_box(adt: AdtId, db: &dyn HirDatabase) -> bool {
- let krate = adt.module(db.upcast()).krate();
- let box_adt =
- db.lang_item(krate, LangItem::OwnedBox).and_then(|it| it.as_struct()).map(AdtId::from);
- Some(adt) == box_adt
+pub fn is_box(db: &dyn HirDatabase, adt: AdtId) -> bool {
+ let AdtId::StructId(id) = adt else { return false };
+ db.struct_data(id).flags.contains(StructFlags::IS_BOX)
}
-pub fn is_unsafe_cell(adt: AdtId, db: &dyn HirDatabase) -> bool {
- let krate = adt.module(db.upcast()).krate();
- let box_adt =
- db.lang_item(krate, LangItem::UnsafeCell).and_then(|it| it.as_struct()).map(AdtId::from);
- Some(adt) == box_adt
+pub fn is_unsafe_cell(db: &dyn HirDatabase, adt: AdtId) -> bool {
+ let AdtId::StructId(id) = adt else { return false };
+ db.struct_data(id).flags.contains(StructFlags::IS_UNSAFE_CELL)
+}
+
+pub fn lang_items_for_bin_op(op: syntax::ast::BinaryOp) -> Option<(Name, LangItem)> {
+ use hir_expand::name;
+ use syntax::ast::{ArithOp, BinaryOp, CmpOp, Ordering};
+ Some(match op {
+ BinaryOp::LogicOp(_) => return None,
+ BinaryOp::ArithOp(aop) => match aop {
+ ArithOp::Add => (name![add], LangItem::Add),
+ ArithOp::Mul => (name![mul], LangItem::Mul),
+ ArithOp::Sub => (name![sub], LangItem::Sub),
+ ArithOp::Div => (name![div], LangItem::Div),
+ ArithOp::Rem => (name![rem], LangItem::Rem),
+ ArithOp::Shl => (name![shl], LangItem::Shl),
+ ArithOp::Shr => (name![shr], LangItem::Shr),
+ ArithOp::BitXor => (name![bitxor], LangItem::BitXor),
+ ArithOp::BitOr => (name![bitor], LangItem::BitOr),
+ ArithOp::BitAnd => (name![bitand], LangItem::BitAnd),
+ },
+ BinaryOp::Assignment { op: Some(aop) } => match aop {
+ ArithOp::Add => (name![add_assign], LangItem::AddAssign),
+ ArithOp::Mul => (name![mul_assign], LangItem::MulAssign),
+ ArithOp::Sub => (name![sub_assign], LangItem::SubAssign),
+ ArithOp::Div => (name![div_assign], LangItem::DivAssign),
+ ArithOp::Rem => (name![rem_assign], LangItem::RemAssign),
+ ArithOp::Shl => (name![shl_assign], LangItem::ShlAssign),
+ ArithOp::Shr => (name![shr_assign], LangItem::ShrAssign),
+ ArithOp::BitXor => (name![bitxor_assign], LangItem::BitXorAssign),
+ ArithOp::BitOr => (name![bitor_assign], LangItem::BitOrAssign),
+ ArithOp::BitAnd => (name![bitand_assign], LangItem::BitAndAssign),
+ },
+ BinaryOp::CmpOp(cop) => match cop {
+ CmpOp::Eq { negated: false } => (name![eq], LangItem::PartialEq),
+ CmpOp::Eq { negated: true } => (name![ne], LangItem::PartialEq),
+ CmpOp::Ord { ordering: Ordering::Less, strict: false } => {
+ (name![le], LangItem::PartialOrd)
+ }
+ CmpOp::Ord { ordering: Ordering::Less, strict: true } => {
+ (name![lt], LangItem::PartialOrd)
+ }
+ CmpOp::Ord { ordering: Ordering::Greater, strict: false } => {
+ (name![ge], LangItem::PartialOrd)
+ }
+ CmpOp::Ord { ordering: Ordering::Greater, strict: true } => {
+ (name![gt], LangItem::PartialOrd)
+ }
+ },
+ BinaryOp::Assignment { op: None } => return None,
+ })
}
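
The table in `lang_items_for_bin_op` mirrors how binary operators desugar to trait method calls;
a sketch using std's operator traits:

    use std::ops::Add;

    fn desugared(a: i32, b: i32) -> (i32, bool) {
        // `a + b` goes through the `Add` lang item's `add` method, matching the
        // `(name![add], LangItem::Add)` entry above.
        let sum = a.add(b);
        // `a < b` goes through `PartialOrd`'s `lt`, matching `(name![lt], LangItem::PartialOrd)`.
        let less = a.lt(&b);
        (sum, less)
    }
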
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
index b95bb01fc..35d3407c1 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
@@ -1,19 +1,23 @@
//! Compute the binary representation of a type
use base_db::CrateId;
-use chalk_ir::{AdtId, TyKind};
+use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
use hir_def::{
layout::{
- Abi, FieldsShape, Integer, Layout, LayoutCalculator, LayoutError, Primitive, ReprOptions,
- RustcEnumVariantIdx, Scalar, Size, StructKind, TargetDataLayout, Variants, WrappingRange,
+ Abi, FieldsShape, Integer, LayoutCalculator, LayoutS, Primitive, ReprOptions, Scalar, Size,
+ StructKind, TargetDataLayout, WrappingRange,
},
- LocalFieldId,
+ LocalEnumVariantId, LocalFieldId,
};
+use la_arena::{Idx, RawIdx};
use stdx::never;
+use triomphe::Arc;
-use crate::{consteval::try_const_usize, db::HirDatabase, Interner, Substitution, Ty};
+use crate::{
+ consteval::try_const_usize, db::HirDatabase, infer::normalize, layout::adt::struct_variant_idx,
+ utils::ClosureSubst, Interner, Substitution, TraitEnvironment, Ty,
+};
-use self::adt::struct_variant_idx;
pub use self::{
adt::{layout_of_adt_query, layout_of_adt_recover},
target::target_data_layout_query,
@@ -28,6 +32,34 @@ macro_rules! user_error {
mod adt;
mod target;
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct RustcEnumVariantIdx(pub LocalEnumVariantId);
+
+impl rustc_index::vec::Idx for RustcEnumVariantIdx {
+ fn new(idx: usize) -> Self {
+ RustcEnumVariantIdx(Idx::from_raw(RawIdx::from(idx as u32)))
+ }
+
+ fn index(self) -> usize {
+ u32::from(self.0.into_raw()) as usize
+ }
+}
+
+pub type Layout = LayoutS<RustcEnumVariantIdx>;
+pub type TagEncoding = hir_def::layout::TagEncoding<RustcEnumVariantIdx>;
+pub type Variants = hir_def::layout::Variants<RustcEnumVariantIdx>;
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum LayoutError {
+ UserError(String),
+ SizeOverflow,
+ TargetLayoutNotAvailable,
+ HasPlaceholder,
+ HasErrorType,
+ NotImplemented,
+ Unknown,
+}
+
struct LayoutCx<'a> {
krate: CrateId,
target: &'a TargetDataLayout,
@@ -45,20 +77,18 @@ impl<'a> LayoutCalculator for LayoutCx<'a> {
}
}
-fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
- Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
-}
-
-fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
- Layout::scalar(dl, scalar_unit(dl, value))
-}
-
-pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Layout, LayoutError> {
+pub fn layout_of_ty_query(
+ db: &dyn HirDatabase,
+ ty: Ty,
+ krate: CrateId,
+) -> Result<Arc<Layout>, LayoutError> {
let Some(target) = db.target_data_layout(krate) else { return Err(LayoutError::TargetLayoutNotAvailable) };
let cx = LayoutCx { krate, target: &target };
let dl = &*cx.current_data_layout();
- Ok(match ty.kind(Interner) {
- TyKind::Adt(AdtId(def), subst) => db.layout_of_adt(*def, subst.clone())?,
+ let trait_env = Arc::new(TraitEnvironment::empty(krate));
+ let ty = normalize(db, trait_env, ty.clone());
+ let result = match ty.kind(Interner) {
+ TyKind::Adt(AdtId(def), subst) => return db.layout_of_adt(*def, subst.clone(), krate),
TyKind::Scalar(s) => match s {
chalk_ir::Scalar::Bool => Layout::scalar(
dl,
@@ -78,12 +108,12 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
dl,
Primitive::Int(
match i {
- chalk_ir::IntTy::Isize => dl.ptr_sized_integer(),
- chalk_ir::IntTy::I8 => Integer::I8,
- chalk_ir::IntTy::I16 => Integer::I16,
- chalk_ir::IntTy::I32 => Integer::I32,
- chalk_ir::IntTy::I64 => Integer::I64,
- chalk_ir::IntTy::I128 => Integer::I128,
+ IntTy::Isize => dl.ptr_sized_integer(),
+ IntTy::I8 => Integer::I8,
+ IntTy::I16 => Integer::I16,
+ IntTy::I32 => Integer::I32,
+ IntTy::I64 => Integer::I64,
+ IntTy::I128 => Integer::I128,
},
true,
),
@@ -92,12 +122,12 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
dl,
Primitive::Int(
match i {
- chalk_ir::UintTy::Usize => dl.ptr_sized_integer(),
- chalk_ir::UintTy::U8 => Integer::I8,
- chalk_ir::UintTy::U16 => Integer::I16,
- chalk_ir::UintTy::U32 => Integer::I32,
- chalk_ir::UintTy::U64 => Integer::I64,
- chalk_ir::UintTy::U128 => Integer::I128,
+ UintTy::Usize => dl.ptr_sized_integer(),
+ UintTy::U8 => Integer::I8,
+ UintTy::U16 => Integer::I16,
+ UintTy::U32 => Integer::I32,
+ UintTy::U64 => Integer::I64,
+ UintTy::U128 => Integer::I128,
},
false,
),
@@ -105,8 +135,8 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
chalk_ir::Scalar::Float(f) => scalar(
dl,
match f {
- chalk_ir::FloatTy::F32 => Primitive::F32,
- chalk_ir::FloatTy::F64 => Primitive::F64,
+ FloatTy::F32 => Primitive::F32,
+ FloatTy::F64 => Primitive::F64,
},
),
},
@@ -115,17 +145,17 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
let fields = tys
.iter(Interner)
- .map(|k| layout_of_ty(db, k.assert_ty_ref(Interner), krate))
+ .map(|k| db.layout_of_ty(k.assert_ty_ref(Interner).clone(), krate))
.collect::<Result<Vec<_>, _>>()?;
- let fields = fields.iter().collect::<Vec<_>>();
+ let fields = fields.iter().map(|x| &**x).collect::<Vec<_>>();
let fields = fields.iter().collect::<Vec<_>>();
cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)?
}
TyKind::Array(element, count) => {
- let count = try_const_usize(&count).ok_or(LayoutError::UserError(
- "mismatched type of const generic parameter".to_string(),
+ let count = try_const_usize(db, &count).ok_or(LayoutError::UserError(
+ "unevaluated or mistyped const generic parameter".to_string(),
))? as u64;
- let element = layout_of_ty(db, element, krate)?;
+ let element = db.layout_of_ty(element.clone(), krate)?;
let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow)?;
let abi = if count != 0 && matches!(element.abi, Abi::Uninhabited) {
@@ -146,7 +176,7 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
}
}
TyKind::Slice(element) => {
- let element = layout_of_ty(db, element, krate)?;
+ let element = db.layout_of_ty(element.clone(), krate)?;
Layout {
variants: Variants::Single { index: struct_variant_idx() },
fields: FieldsShape::Array { stride: element.size, count: 0 },
@@ -180,7 +210,7 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
}
_ => {
// pointee is sized
- return Ok(Layout::scalar(dl, data_ptr));
+ return Ok(Arc::new(Layout::scalar(dl, data_ptr)));
}
};
@@ -222,23 +252,51 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay
match impl_trait_id {
crate::ImplTraitId::ReturnTypeImplTrait(func, idx) => {
let infer = db.infer(func.into());
- layout_of_ty(db, &infer.type_of_rpit[idx], krate)?
+ return db.layout_of_ty(infer.type_of_rpit[idx].clone(), krate);
}
crate::ImplTraitId::AsyncBlockTypeImplTrait(_, _) => {
return Err(LayoutError::NotImplemented)
}
}
}
- TyKind::Closure(_, _) | TyKind::Generator(_, _) | TyKind::GeneratorWitness(_, _) => {
+ TyKind::Closure(c, subst) => {
+ let (def, _) = db.lookup_intern_closure((*c).into());
+ let infer = db.infer(def);
+ let (captures, _) = infer.closure_info(c);
+ let fields = captures
+ .iter()
+ .map(|x| {
+ db.layout_of_ty(
+ x.ty.clone().substitute(Interner, ClosureSubst(subst).parent_subst()),
+ krate,
+ )
+ })
+ .collect::<Result<Vec<_>, _>>()?;
+ let fields = fields.iter().map(|x| &**x).collect::<Vec<_>>();
+ let fields = fields.iter().collect::<Vec<_>>();
+ cx.univariant(dl, &fields, &ReprOptions::default(), StructKind::AlwaysSized)
+ .ok_or(LayoutError::Unknown)?
+ }
+ TyKind::Generator(_, _) | TyKind::GeneratorWitness(_, _) => {
return Err(LayoutError::NotImplemented)
}
+ TyKind::Error => return Err(LayoutError::HasErrorType),
TyKind::AssociatedType(_, _)
- | TyKind::Error
| TyKind::Alias(_)
| TyKind::Placeholder(_)
| TyKind::BoundVar(_)
| TyKind::InferenceVar(_, _) => return Err(LayoutError::HasPlaceholder),
- })
+ };
+ Ok(Arc::new(result))
+}
+
+pub fn layout_of_ty_recover(
+ _: &dyn HirDatabase,
+ _: &[String],
+ _: &Ty,
+ _: &CrateId,
+) -> Result<Arc<Layout>, LayoutError> {
+ user_error!("infinite sized recursive type");
}
fn layout_of_unit(cx: &LayoutCx<'_>, dl: &TargetDataLayout) -> Result<Layout, LayoutError> {
@@ -274,5 +332,13 @@ fn field_ty(
db.field_types(def)[fd].clone().substitute(Interner, subst)
}
+fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
+ Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
+}
+
+fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
+ Layout::scalar(dl, scalar_unit(dl, value))
+}
+
#[cfg(test)]
mod tests;
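
Two of the branches above have directly observable counterparts: the `TyKind::Array` branch
multiplies the element size by the const-evaluated length, and the new `TyKind::Closure` branch
lays a closure out like a struct of its captures. A small sketch (illustrative, std only):

    fn layouts() {
        // Array layout: element size times the const-evaluated length.
        assert_eq!(std::mem::size_of::<[u16; 8]>(), 16);

        let x = 5u64;
        let c = move || x;
        // The closure's only field is its `u64` capture, so it occupies 8 bytes.
        assert_eq!(std::mem::size_of_val(&c), 8);
    }
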
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
index b22d0fe8d..bd2752a71 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
@@ -1,18 +1,25 @@
//! Compute the binary representation of structs, unions and enums
-use std::ops::Bound;
+use std::{cmp, ops::Bound};
+use base_db::CrateId;
use hir_def::{
- adt::VariantData,
- layout::{Integer, IntegerExt, Layout, LayoutCalculator, LayoutError, RustcEnumVariantIdx},
- AdtId, EnumVariantId, HasModule, LocalEnumVariantId, VariantId,
+ data::adt::VariantData,
+ layout::{Integer, LayoutCalculator, ReprOptions, TargetDataLayout},
+ AdtId, EnumVariantId, LocalEnumVariantId, VariantId,
};
use la_arena::RawIdx;
use smallvec::SmallVec;
+use triomphe::Arc;
-use crate::{db::HirDatabase, lang_items::is_unsafe_cell, layout::field_ty, Substitution};
+use crate::{
+ db::HirDatabase,
+ lang_items::is_unsafe_cell,
+ layout::{field_ty, Layout, LayoutError, RustcEnumVariantIdx},
+ Substitution,
+};
-use super::{layout_of_ty, LayoutCx};
+use super::LayoutCx;
pub(crate) fn struct_variant_idx() -> RustcEnumVariantIdx {
RustcEnumVariantIdx(LocalEnumVariantId::from_raw(RawIdx::from(0)))
@@ -22,29 +29,29 @@ pub fn layout_of_adt_query(
db: &dyn HirDatabase,
def: AdtId,
subst: Substitution,
-) -> Result<Layout, LayoutError> {
- let krate = def.module(db.upcast()).krate();
+ krate: CrateId,
+) -> Result<Arc<Layout>, LayoutError> {
let Some(target) = db.target_data_layout(krate) else { return Err(LayoutError::TargetLayoutNotAvailable) };
let cx = LayoutCx { krate, target: &target };
let dl = cx.current_data_layout();
let handle_variant = |def: VariantId, var: &VariantData| {
var.fields()
.iter()
- .map(|(fd, _)| layout_of_ty(db, &field_ty(db, def, fd, &subst), cx.krate))
+ .map(|(fd, _)| db.layout_of_ty(field_ty(db, def, fd, &subst), cx.krate))
.collect::<Result<Vec<_>, _>>()
};
- let (variants, is_enum, is_union, repr) = match def {
+ let (variants, repr) = match def {
AdtId::StructId(s) => {
let data = db.struct_data(s);
let mut r = SmallVec::<[_; 1]>::new();
r.push(handle_variant(s.into(), &data.variant_data)?);
- (r, false, false, data.repr.unwrap_or_default())
+ (r, data.repr.unwrap_or_default())
}
AdtId::UnionId(id) => {
let data = db.union_data(id);
let mut r = SmallVec::new();
r.push(handle_variant(id.into(), &data.variant_data)?);
- (r, false, true, data.repr.unwrap_or_default())
+ (r, data.repr.unwrap_or_default())
}
AdtId::EnumId(e) => {
let data = db.enum_data(e);
@@ -58,22 +65,24 @@ pub fn layout_of_adt_query(
)
})
.collect::<Result<SmallVec<_>, _>>()?;
- (r, true, false, data.repr.unwrap_or_default())
+ (r, data.repr.unwrap_or_default())
}
};
- let variants =
- variants.iter().map(|x| x.iter().collect::<Vec<_>>()).collect::<SmallVec<[_; 1]>>();
+ let variants = variants
+ .iter()
+ .map(|x| x.iter().map(|x| &**x).collect::<Vec<_>>())
+ .collect::<SmallVec<[_; 1]>>();
let variants = variants.iter().map(|x| x.iter().collect()).collect();
- if is_union {
- cx.layout_of_union(&repr, &variants).ok_or(LayoutError::Unknown)
+ let result = if matches!(def, AdtId::UnionId(..)) {
+ cx.layout_of_union(&repr, &variants).ok_or(LayoutError::Unknown)?
} else {
cx.layout_of_struct_or_enum(
&repr,
&variants,
- is_enum,
- is_unsafe_cell(def, db),
+ matches!(def, AdtId::EnumId(..)),
+ is_unsafe_cell(db, def),
layout_scalar_valid_range(db, def),
- |min, max| Integer::repr_discr(&dl, &repr, min, max).unwrap_or((Integer::I8, false)),
+ |min, max| repr_discr(&dl, &repr, min, max).unwrap_or((Integer::I8, false)),
variants.iter_enumerated().filter_map(|(id, _)| {
let AdtId::EnumId(e) = def else { return None };
let d =
@@ -90,15 +99,16 @@ pub fn layout_of_adt_query(
// .iter_enumerated()
// .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
repr.inhibit_enum_layout_opt(),
- !is_enum
+ !matches!(def, AdtId::EnumId(..))
&& variants
.iter()
.next()
.and_then(|x| x.last().map(|x| x.is_unsized()))
.unwrap_or(true),
)
- .ok_or(LayoutError::SizeOverflow)
- }
+ .ok_or(LayoutError::SizeOverflow)?
+ };
+ Ok(Arc::new(result))
}
fn layout_scalar_valid_range(db: &dyn HirDatabase, def: AdtId) -> (Bound<u128>, Bound<u128>) {
@@ -122,6 +132,54 @@ pub fn layout_of_adt_recover(
_: &[String],
_: &AdtId,
_: &Substitution,
-) -> Result<Layout, LayoutError> {
+ _: &CrateId,
+) -> Result<Arc<Layout>, LayoutError> {
user_error!("infinite sized recursive type");
}
+
+/// Finds the appropriate Integer type and signedness for the given
+/// signed discriminant range and `#[repr]` attribute.
+/// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
+/// that shouldn't affect anything, other than maybe debuginfo.
+fn repr_discr(
+ dl: &TargetDataLayout,
+ repr: &ReprOptions,
+ min: i128,
+ max: i128,
+) -> Result<(Integer, bool), LayoutError> {
+ // Theoretically, negative values could be larger in unsigned representation
+ // than the unsigned representation of the signed minimum. However, if there
+ // are any negative values, the only valid unsigned representation is u128
+ // which can fit all i128 values, so the result remains unaffected.
+ let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
+ let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
+
+ if let Some(ity) = repr.int {
+ let discr = Integer::from_attr(dl, ity);
+ let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
+ if discr < fit {
+ return Err(LayoutError::UserError(
+ "Integer::repr_discr: `#[repr]` hint too small for \
+ discriminant range of enum "
+ .to_string(),
+ ));
+ }
+ return Ok((discr, ity.is_signed()));
+ }
+
+ let at_least = if repr.c() {
+ // This is usually I32, however it can be different on some platforms,
+ // notably hexagon and arm-none/thumb-none
+ dl.c_enum_min_size
+ } else {
+ // repr(Rust) enums try to be as small as possible
+ Integer::I8
+ };
+
+ // If there are no negative values, we can use the unsigned fit.
+ Ok(if min >= 0 {
+ (cmp::max(unsigned_fit, at_least), false)
+ } else {
+ (cmp::max(signed_fit, at_least), true)
+ })
+}
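
For intuition, here is a self-contained sketch of the same fitting decision on plain bit widths; `Width` and the helpers below are illustrative stand-ins, not rust-analyzer's `Integer` API:

    use std::cmp;

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum Width { I8, I16, I32, I64, I128 }

    fn fit_signed(x: i128) -> Width {
        match x {
            -0x80..=0x7f => Width::I8,
            -0x8000..=0x7fff => Width::I16,
            -0x8000_0000..=0x7fff_ffff => Width::I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => Width::I64,
            _ => Width::I128,
        }
    }

    fn fit_unsigned(x: u128) -> Width {
        match x {
            0..=0xff => Width::I8,
            0..=0xffff => Width::I16,
            0..=0xffff_ffff => Width::I32,
            0..=0xffff_ffff_ffff_ffff => Width::I64,
            _ => Width::I128,
        }
    }

    fn pick(min: i128, max: i128) -> (Width, bool) {
        // Mirrors the body above: unsigned fit for non-negative ranges,
        // signed fit (with a sign bit) as soon as `min` goes negative.
        let unsigned_fit = fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(fit_signed(min), fit_signed(max));
        if min >= 0 { (unsigned_fit, false) } else { (signed_fit, true) }
    }

    fn main() {
        assert_eq!(pick(0, 255), (Width::I8, false));
        assert_eq!(pick(-1, 127), (Width::I8, true));
        assert_eq!(pick(0, 70_000), (Width::I32, false));
    }
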
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs
index adfae0a1a..04b940afb 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs
@@ -1,9 +1,8 @@
//! Target dependent parameters needed for layouts
-use std::sync::Arc;
-
use base_db::CrateId;
use hir_def::layout::TargetDataLayout;
+use triomphe::Arc;
use crate::db::HirDatabase;
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs
index a8971fde3..0ff8c532d 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs
@@ -2,49 +2,67 @@ use std::collections::HashMap;
use base_db::fixture::WithFixture;
use chalk_ir::{AdtId, TyKind};
-use hir_def::{
- db::DefDatabase,
+use either::Either;
+use hir_def::db::DefDatabase;
+use triomphe::Arc;
+
+use crate::{
+ db::HirDatabase,
layout::{Layout, LayoutError},
+ test_db::TestDB,
+ Interner, Substitution,
};
-use crate::{db::HirDatabase, test_db::TestDB, Interner, Substitution};
-
-use super::layout_of_ty;
+mod closure;
fn current_machine_data_layout() -> String {
project_model::target_data_layout::get(None, None, &HashMap::default()).unwrap()
}
-fn eval_goal(ra_fixture: &str, minicore: &str) -> Result<Layout, LayoutError> {
+fn eval_goal(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutError> {
let target_data_layout = current_machine_data_layout();
let ra_fixture = format!(
"{minicore}//- /main.rs crate:test target_data_layout:{target_data_layout}\n{ra_fixture}",
);
- let (db, file_id) = TestDB::with_single_file(&ra_fixture);
- let module_id = db.module_for_file(file_id);
- let def_map = module_id.def_map(&db);
- let scope = &def_map[module_id.local_id].scope;
- let adt_id = scope
- .declarations()
- .find_map(|x| match x {
- hir_def::ModuleDefId::AdtId(x) => {
- let name = match x {
- hir_def::AdtId::StructId(x) => db.struct_data(x).name.to_smol_str(),
- hir_def::AdtId::UnionId(x) => db.union_data(x).name.to_smol_str(),
- hir_def::AdtId::EnumId(x) => db.enum_data(x).name.to_smol_str(),
- };
- (name == "Goal").then_some(x)
- }
- _ => None,
+ let (db, file_ids) = TestDB::with_many_files(&ra_fixture);
+ let (adt_or_type_alias_id, module_id) = file_ids
+ .into_iter()
+ .find_map(|file_id| {
+ let module_id = db.module_for_file(file_id);
+ let def_map = module_id.def_map(&db);
+ let scope = &def_map[module_id.local_id].scope;
+ let adt_or_type_alias_id = scope.declarations().find_map(|x| match x {
+ hir_def::ModuleDefId::AdtId(x) => {
+ let name = match x {
+ hir_def::AdtId::StructId(x) => db.struct_data(x).name.to_smol_str(),
+ hir_def::AdtId::UnionId(x) => db.union_data(x).name.to_smol_str(),
+ hir_def::AdtId::EnumId(x) => db.enum_data(x).name.to_smol_str(),
+ };
+ (name == "Goal").then_some(Either::Left(x))
+ }
+ hir_def::ModuleDefId::TypeAliasId(x) => {
+ let name = db.type_alias_data(x).name.to_smol_str();
+ (name == "Goal").then_some(Either::Right(x))
+ }
+ _ => None,
+ })?;
+ Some((adt_or_type_alias_id, module_id))
})
.unwrap();
- let goal_ty = TyKind::Adt(AdtId(adt_id), Substitution::empty(Interner)).intern(Interner);
- layout_of_ty(&db, &goal_ty, module_id.krate())
+ let goal_ty = match adt_or_type_alias_id {
+ Either::Left(adt_id) => {
+ TyKind::Adt(AdtId(adt_id), Substitution::empty(Interner)).intern(Interner)
+ }
+ Either::Right(ty_id) => {
+ db.ty(ty_id.into()).substitute(Interner, &Substitution::empty(Interner))
+ }
+ };
+ db.layout_of_ty(goal_ty, module_id.krate())
}
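
A hypothetical smoke test showing how the helper is driven (the fixture syntax matches the existing tests in this file; the expected numbers assume a 64-bit host):

    #[test]
    fn goal_smoke() {
        let layout = eval_goal("struct Goal(i32, i64);", "").unwrap();
        // i64 forces 8-byte alignment, so the i32 field is padded out.
        assert_eq!(layout.size.bytes(), 16);
        assert_eq!(layout.align.abi.bytes(), 8);
    }
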
/// A version of `eval_goal` for types that cannot be expressed in ADTs, like closures and `impl Trait`
-fn eval_expr(ra_fixture: &str, minicore: &str) -> Result<Layout, LayoutError> {
+fn eval_expr(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutError> {
let target_data_layout = current_machine_data_layout();
let ra_fixture = format!(
"{minicore}//- /main.rs crate:test target_data_layout:{target_data_layout}\nfn main(){{let goal = {{{ra_fixture}}};}}",
@@ -68,7 +86,7 @@ fn eval_expr(ra_fixture: &str, minicore: &str) -> Result<Layout, LayoutError> {
let b = hir_body.bindings.iter().find(|x| x.1.name.to_smol_str() == "goal").unwrap().0;
let infer = db.infer(adt_id.into());
let goal_ty = infer.type_of_binding[b].clone();
- layout_of_ty(&db, &goal_ty, module_id.krate())
+ db.layout_of_ty(goal_ty, module_id.krate())
}
#[track_caller]
@@ -81,8 +99,8 @@ fn check_size_and_align(ra_fixture: &str, minicore: &str, size: u64, align: u64)
#[track_caller]
fn check_size_and_align_expr(ra_fixture: &str, minicore: &str, size: u64, align: u64) {
let l = eval_expr(ra_fixture, minicore).unwrap();
- assert_eq!(l.size.bytes(), size);
- assert_eq!(l.align.abi.bytes(), align);
+ assert_eq!(l.size.bytes(), size, "size mismatch");
+ assert_eq!(l.align.abi.bytes(), align, "align mismatch");
}
#[track_caller]
@@ -118,13 +136,31 @@ macro_rules! size_and_align {
};
}
+#[macro_export]
macro_rules! size_and_align_expr {
+ (minicore: $($x:tt),*; stmts: [$($s:tt)*] $($t:tt)*) => {
+ {
+ #[allow(dead_code)]
+ #[allow(unused_must_use)]
+ #[allow(path_statements)]
+ {
+ $($s)*
+ let val = { $($t)* };
+ $crate::layout::tests::check_size_and_align_expr(
+ &format!("{{ {} let val = {{ {} }}; val }}", stringify!($($s)*), stringify!($($t)*)),
+ &format!("//- minicore: {}\n", stringify!($($x),*)),
+ ::std::mem::size_of_val(&val) as u64,
+ ::std::mem::align_of_val(&val) as u64,
+ );
+ }
+ }
+ };
($($t:tt)*) => {
{
#[allow(dead_code)]
{
let val = { $($t)* };
- check_size_and_align_expr(
+ $crate::layout::tests::check_size_and_align_expr(
stringify!($($t)*),
"",
::std::mem::size_of_val(&val) as u64,
@@ -197,6 +233,44 @@ fn generic() {
}
#[test]
+fn associated_types() {
+ size_and_align! {
+ trait Tr {
+ type Ty;
+ }
+
+ impl Tr for i32 {
+ type Ty = i64;
+ }
+
+ struct Foo<A: Tr>(<A as Tr>::Ty);
+ struct Bar<A: Tr>(A::Ty);
+ struct Goal(Foo<i32>, Bar<i32>, <i32 as Tr>::Ty);
+ }
+ check_size_and_align(
+ r#"
+//- /b/mod.rs crate:b
+pub trait Tr {
+ type Ty;
+}
+pub struct Foo<A: Tr>(<A as Tr>::Ty);
+
+//- /a/mod.rs crate:a deps:b
+use b::{Tr, Foo};
+
+struct S;
+impl Tr for S {
+ type Ty = i64;
+}
+struct Goal(Foo<S>);
+ "#,
+ "",
+ 8,
+ 8,
+ );
+}
+
+#[test]
fn return_position_impl_trait() {
size_and_align_expr! {
trait T {}
@@ -213,6 +287,45 @@ fn return_position_impl_trait() {
foo()
}
size_and_align_expr! {
+ minicore: iterators;
+ stmts: []
+ trait Tr {}
+ impl Tr for i32 {}
+ fn foo() -> impl Iterator<Item = impl Tr> {
+ [1, 2, 3].into_iter()
+ }
+ let mut iter = foo();
+ let item = iter.next();
+ (iter, item)
+ }
+ size_and_align_expr! {
+ minicore: future;
+ stmts: []
+ use core::{future::Future, task::{Poll, Context}, pin::pin};
+ use std::{task::Wake, sync::Arc};
+ trait Tr {}
+ impl Tr for i32 {}
+ async fn f() -> impl Tr {
+ 2
+ }
+ fn unwrap_fut<T>(inp: impl Future<Output = T>) -> Poll<T> {
+ // In a normal test we could use `loop {}` or `panic!()` here,
+ // but rustc actually runs this code.
+ let pinned = pin!(inp);
+ struct EmptyWaker;
+ impl Wake for EmptyWaker {
+ fn wake(self: Arc<Self>) {
+ }
+ }
+ let waker = Arc::new(EmptyWaker).into();
+ let mut context = Context::from_waker(&waker);
+ let x = pinned.poll(&mut context);
+ x
+ }
+ let x = unwrap_fut(f());
+ x
+ }
+ size_and_align_expr! {
struct Foo<T>(T, T, (T, T));
trait T {}
impl T for Foo<i32> {}
@@ -277,6 +390,27 @@ fn niche_optimization() {
}
#[test]
+fn const_eval() {
+ size_and_align! {
+ struct Goal([i32; 2 + 2]);
+ }
+ size_and_align! {
+ const X: usize = 5;
+ struct Goal([i32; X]);
+ }
+ size_and_align! {
+ mod foo {
+ pub(super) const BAR: usize = 5;
+ }
+ struct Ar<T>([T; foo::BAR]);
+ struct Goal(Ar<Ar<i32>>);
+ }
+ size_and_align! {
+ type Goal = [u8; 2 + 2];
+ }
+}
+
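+
+The first fixture can be sanity-checked against the host compiler, since `2 + 2` in an array length is evaluated the same way there; a minimal standalone equivalent:
+
+    fn main() {
+        // `struct Goal([i32; 2 + 2])` has the layout of the array itself.
+        assert_eq!(std::mem::size_of::<[i32; 2 + 2]>(), 16);
+        assert_eq!(std::mem::align_of::<[i32; 2 + 2]>(), 4);
+    }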
+#[test]
fn enums_with_discriminants() {
size_and_align! {
enum Goal {
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests/closure.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests/closure.rs
new file mode 100644
index 000000000..576e7f3fc
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests/closure.rs
@@ -0,0 +1,257 @@
+use crate::size_and_align_expr;
+
+#[test]
+fn zero_capture_simple() {
+ size_and_align_expr! {
+ |x: i32| x + 2
+ }
+}
+
+#[test]
+fn move_simple() {
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: []
+ let y: i32 = 5;
+ move |x: i32| {
+ x + y
+ }
+ }
+}
+
+#[test]
+fn ref_simple() {
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ let y: i32 = 5;
+ ]
+ |x: i32| {
+ x + y
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ let mut y: i32 = 5;
+ ]
+ |x: i32| {
+ y = y + x;
+ y
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy, deref_mut;
+ stmts: [
+ let y: &mut i32 = &mut 5;
+ ]
+ |x: i32| {
+ *y += x;
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ struct X(i32, i64);
+ let x: X = X(2, 6);
+ ]
+ || {
+ x
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy, deref_mut;
+ stmts: [
+ struct X(i32, i64);
+ let x: &mut X = &mut X(2, 6);
+ ]
+ || {
+ (*x).0 as i64 + x.1
+ }
+ }
+}
+
+#[test]
+fn ref_then_mut_then_move() {
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ struct X(i32, i64);
+ let mut x: X = X(2, 6);
+ ]
+ || {
+ &x;
+ &mut x;
+ x;
+ }
+ }
+}
+
+#[test]
+fn nested_closures() {
+ size_and_align_expr! {
+ || {
+ || {
+ || {
+ let x = 2;
+ move || {
+ move || {
+ x
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn capture_specific_fields2() {
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ let x = &mut 2;
+ ]
+ || {
+ *x = 5;
+ &x;
+ }
+ }
+}
+
+#[test]
+fn capture_specific_fields() {
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ move |x: i64| {
+ y.0 + x + (y.2 .0 as i64)
+ }
+ }
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ move |x: i64| {
+ let _ = &y;
+ y.0 + x + (y.2 .0 as i64)
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ ]
+ let y = &y;
+ move |x: i64| {
+ y.0 + x + (y.2 .0 as i64)
+ }
+ }
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ move |x: i64| {
+ let X(a, _, (b, _)) = y;
+ a + x + (b as i64)
+ }
+ }
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y = &&X(2, 5, (7, 3));
+ move |x: i64| {
+ let X(a, _, (b, _)) = y;
+ *a + x + (*b as i64)
+ }
+ }
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ move |x: i64| {
+ match y {
+ X(a, _, (b, _)) => a + x + (b as i64),
+ }
+ }
+ }
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ move |x: i64| {
+ let X(a @ 2, _, (b, _)) = y else { return 5 };
+ a + x + (b as i64)
+ }
+ }
+}
+
+#[test]
+fn match_pattern() {
+ size_and_align_expr! {
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ move |x: i64| {
+ match y {
+ _ => x,
+ }
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ ]
+ |x: i64| {
+ match y {
+ X(_a, _, _c) => x,
+ }
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ ]
+ |x: i64| {
+ match y {
+ _y => x,
+ }
+ }
+ }
+ size_and_align_expr! {
+ minicore: copy;
+ stmts: [
+ struct X(i64, i32, (u8, i128));
+ let y: X = X(2, 5, (7, 3));
+ ]
+ |x: i64| {
+ match y {
+ ref _y => x,
+ }
+ }
+ }
+}
+
+#[test]
+fn ellipsis_pattern() {
+ size_and_align_expr! {
+ struct X(i8, u16, i32, u64, i128, u8);
+ let y: X = X(1, 2, 3, 4, 5, 6);
+ move |_: i64| {
+ let X(_a, .., _b, _c) = y;
+ }
+ }
+ size_and_align_expr! {
+ struct X { a: i32, b: u8, c: i128}
+ let y: X = X { a: 1, b: 2, c: 3 };
+ move |_: i64| {
+ let X { a, b, .. } = y;
+ _ = (a, b);
+ }
+ }
+ size_and_align_expr! {
+ let y: (&&&(i8, u16, i32, u64, i128, u8), u16, i32, u64, i128, u8) = (&&&(1, 2, 3, 4, 5, 6), 2, 3, 4, 5, 6);
+ move |_: i64| {
+ let ((_a, .., _b, _c), .., _e, _f) = y;
+ }
+ }
+}
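
Everything these fixtures assert can be reproduced with the host compiler, because the macro compares rust-analyzer's layout against `size_of_val` of a real closure. Closure layout is not a language guarantee, but a minimal illustration of the behavior the tests rely on:

    fn main() {
        let y: i32 = 5;
        // A `move` closure stores its capture inline: a single i32 here.
        let by_value = move |x: i32| x + y;
        assert_eq!(std::mem::size_of_val(&by_value), 4);

        // A borrowing closure stores a reference to the capture instead.
        let z: i32 = 7;
        let by_ref = |x: i32| x + z;
        assert_eq!(std::mem::size_of_val(&by_ref), std::mem::size_of::<&i32>());
    }
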
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs b/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs
index 9c63d67ab..1a4d003bf 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs
@@ -1,6 +1,5 @@
//! The type system. We currently use this to infer types for completion, hover
//! information and various assists.
-
#![warn(rust_2018_idioms, unused_lifetimes, semicolon_in_expressions_from_macros)]
#[allow(unused)]
@@ -8,12 +7,9 @@ macro_rules! eprintln {
($($tt:tt)*) => { stdx::eprintln!($($tt)*) };
}
-mod autoderef;
mod builder;
mod chalk_db;
mod chalk_ext;
-pub mod consteval;
-pub mod mir;
mod infer;
mod inhabitedness;
mod interner;
@@ -21,21 +17,28 @@ mod lower;
mod mapping;
mod tls;
mod utils;
+
+pub mod autoderef;
+pub mod consteval;
pub mod db;
pub mod diagnostics;
pub mod display;
+pub mod lang_items;
+pub mod layout;
pub mod method_resolution;
+pub mod mir;
pub mod primitive;
pub mod traits;
-pub mod layout;
-pub mod lang_items;
#[cfg(test)]
mod tests;
#[cfg(test)]
mod test_db;
-use std::{collections::HashMap, hash::Hash, sync::Arc};
+use std::{
+ collections::{hash_map::Entry, HashMap},
+ hash::Hash,
+};
use chalk_ir::{
fold::{Shift, TypeFoldable},
@@ -44,12 +47,13 @@ use chalk_ir::{
NoSolution, TyData,
};
use either::Either;
-use hir_def::{expr::ExprId, type_ref::Rawness, TypeOrConstParamId};
+use hir_def::{hir::ExprId, type_ref::Rawness, GeneralConstId, TypeOrConstParamId};
use hir_expand::name;
use la_arena::{Arena, Idx};
-use mir::MirEvalError;
+use mir::{MirEvalError, VTableMap};
use rustc_hash::FxHashSet;
use traits::FnTrait;
+use triomphe::Arc;
use utils::Generics;
use crate::{
@@ -60,6 +64,7 @@ pub use autoderef::autoderef;
pub use builder::{ParamKind, TyBuilder};
pub use chalk_ext::*;
pub use infer::{
+ closure::{CaptureKind, CapturedItem},
could_coerce, could_unify, Adjust, Adjustment, AutoBorrow, BindingMode, InferenceDiagnostic,
InferenceResult, OverloadedDeref, PointerCast,
};
@@ -148,14 +153,26 @@ pub type Guidance = chalk_solve::Guidance<Interner>;
pub type WhereClause = chalk_ir::WhereClause<Interner>;
/// A constant can have reference to other things. Memory map job is holding
-/// the neccessary bits of memory of the const eval session to keep the constant
+/// the necessary bits of memory of the const eval session to keep the constant
/// meaningful.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
-pub struct MemoryMap(pub HashMap<usize, Vec<u8>>);
+pub struct MemoryMap {
+ pub memory: HashMap<usize, Vec<u8>>,
+ pub vtable: VTableMap,
+}
impl MemoryMap {
fn insert(&mut self, addr: usize, x: Vec<u8>) {
- self.0.insert(addr, x);
+ match self.memory.entry(addr) {
+ Entry::Occupied(mut e) => {
+ if e.get().len() < x.len() {
+ e.insert(x);
+ }
+ }
+ Entry::Vacant(e) => {
+ e.insert(x);
+ }
+ }
}
/// This function converts each address with a function `f`, which gets the byte interval and assigns an address
@@ -165,7 +182,15 @@ impl MemoryMap {
&self,
mut f: impl FnMut(&[u8]) -> Result<usize, MirEvalError>,
) -> Result<HashMap<usize, usize>, MirEvalError> {
- self.0.iter().map(|x| Ok((*x.0, f(x.1)?))).collect()
+ self.memory.iter().map(|x| Ok((*x.0, f(x.1)?))).collect()
+ }
+
+ fn get<'a>(&'a self, addr: usize, size: usize) -> Option<&'a [u8]> {
+ if size == 0 {
+ Some(&[])
+ } else {
+ self.memory.get(&addr)?.get(0..size)
+ }
}
}
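
The new `insert` keeps the longer byte buffer when the same address is written twice, so a larger constant slice is never clobbered by a smaller one, and the zero-size fast path in `get` avoids requiring an entry for empty reads. A self-contained sketch of the upsert, using a plain `HashMap` as a stand-in for `MemoryMap`:

    use std::collections::{hash_map::Entry, HashMap};

    fn insert_keep_longer(memory: &mut HashMap<usize, Vec<u8>>, addr: usize, x: Vec<u8>) {
        match memory.entry(addr) {
            // Replace an existing buffer only if the new one covers more bytes.
            Entry::Occupied(mut e) => {
                if e.get().len() < x.len() {
                    e.insert(x);
                }
            }
            Entry::Vacant(e) => {
                e.insert(x);
            }
        }
    }

    fn main() {
        let mut memory = HashMap::new();
        insert_keep_longer(&mut memory, 0x10, vec![1, 2, 3, 4]);
        insert_keep_longer(&mut memory, 0x10, vec![9]); // shorter: ignored
        assert_eq!(memory[&0x10], vec![1, 2, 3, 4]);
    }
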
@@ -173,6 +198,9 @@ impl MemoryMap {
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConstScalar {
Bytes(Vec<u8>, MemoryMap),
+ // FIXME: this is a hack to get around chalk not being able to represent unevaluatable
+ // constants
+ UnevaluatedConst(GeneralConstId, Substitution),
/// Case of an unknown value that rustc might know but we don't
// FIXME: this is a hack to get around chalk not being able to represent unevaluatable
// constants
@@ -283,16 +311,19 @@ impl CallableSig {
pub fn from_fn_ptr(fn_ptr: &FnPointer) -> CallableSig {
CallableSig {
// FIXME: what to do about lifetime params? -> return PolyFnSig
- params_and_return: fn_ptr
- .substitution
- .clone()
- .shifted_out_to(Interner, DebruijnIndex::ONE)
- .expect("unexpected lifetime vars in fn ptr")
- .0
- .as_slice(Interner)
- .iter()
- .map(|arg| arg.assert_ty_ref(Interner).clone())
- .collect(),
+ // FIXME: use `Arc::from_iter` when it becomes available
+ params_and_return: Arc::from(
+ fn_ptr
+ .substitution
+ .clone()
+ .shifted_out_to(Interner, DebruijnIndex::ONE)
+ .expect("unexpected lifetime vars in fn ptr")
+ .0
+ .as_slice(Interner)
+ .iter()
+ .map(|arg| arg.assert_ty_ref(Interner).clone())
+ .collect::<Vec<_>>(),
+ ),
is_varargs: fn_ptr.sig.variadic,
safety: fn_ptr.sig.safety,
}
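
The `FIXME: use Arc::from_iter` workaround collects into a `Vec` first because the `Arc<[T]>` slice type did not yet implement `FromIterator`. The conversion itself, shown with std's `Arc` (the patch uses `triomphe::Arc`, which offers the same `From<Vec<T>>` route):

    use std::sync::Arc;

    fn main() {
        let params: Vec<i32> = (1..=3).collect();
        // `From<Vec<T>>` moves the elements into a single slice allocation.
        let shared: Arc<[i32]> = Arc::from(params);
        assert_eq!(&*shared, &[1, 2, 3]);
    }
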
@@ -576,15 +607,19 @@ where
}
pub fn callable_sig_from_fnonce(
- self_ty: &Ty,
+ mut self_ty: &Ty,
env: Arc<TraitEnvironment>,
db: &dyn HirDatabase,
) -> Option<CallableSig> {
+ if let Some((ty, _, _)) = self_ty.as_reference() {
+        // This happens when the receiver type implements `Fn` or `FnMut`, since we add an autoborrow adjustment
+ self_ty = ty;
+ }
let krate = env.krate;
let fn_once_trait = FnTrait::FnOnce.get_id(db, krate)?;
let output_assoc_type = db.trait_data(fn_once_trait).associated_type_by_name(&name![Output])?;
- let mut table = InferenceTable::new(db, env.clone());
+ let mut table = InferenceTable::new(db, env);
let b = TyBuilder::trait_ref(db, fn_once_trait);
if b.remaining() != 2 {
return None;
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs b/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs
index 23b15087e..9951a1c75 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs
@@ -8,7 +8,6 @@
use std::{
cell::{Cell, RefCell, RefMut},
iter,
- sync::Arc,
};
use base_db::CrateId;
@@ -18,19 +17,21 @@ use chalk_ir::{
use either::Either;
use hir_def::{
- adt::StructKind,
- body::{Expander, LowerCtx},
builtin_type::BuiltinType,
+ data::adt::StructKind,
+ expander::Expander,
generics::{
TypeOrConstParamData, TypeParamProvenance, WherePredicate, WherePredicateTypeTarget,
},
lang_item::{lang_attr, LangItem},
- path::{GenericArg, ModPath, Path, PathKind, PathSegment, PathSegments},
+ nameres::MacroSubNs,
+ path::{GenericArg, GenericArgs, ModPath, Path, PathKind, PathSegment, PathSegments},
resolver::{HasResolver, Resolver, TypeNs},
- type_ref::{ConstRefOrPath, TraitBoundModifier, TraitRef as HirTraitRef, TypeBound, TypeRef},
- AdtId, AssocItemId, ConstId, ConstParamId, EnumId, EnumVariantId, FunctionId, GenericDefId,
- HasModule, ImplId, ItemContainerId, LocalFieldId, Lookup, ModuleDefId, StaticId, StructId,
- TraitId, TypeAliasId, TypeOrConstParamId, TypeParamId, UnionId, VariantId,
+ type_ref::{ConstRef, TraitBoundModifier, TraitRef as HirTraitRef, TypeBound, TypeRef},
+ AdtId, AssocItemId, ConstId, ConstParamId, DefWithBodyId, EnumId, EnumVariantId, FunctionId,
+ GenericDefId, HasModule, ImplId, InTypeConstLoc, ItemContainerId, LocalFieldId, Lookup,
+ ModuleDefId, StaticId, StructId, TraitId, TypeAliasId, TypeOrConstParamId, TypeOwnerId,
+ TypeParamId, UnionId, VariantId,
};
use hir_expand::{name::Name, ExpandResult};
use intern::Interned;
@@ -39,20 +40,28 @@ use rustc_hash::FxHashSet;
use smallvec::SmallVec;
use stdx::{impl_from, never};
use syntax::ast;
+use triomphe::Arc;
use crate::{
all_super_traits,
- consteval::{intern_const_ref, path_to_const, unknown_const, unknown_const_as_generic},
+ consteval::{
+ intern_const_ref, intern_const_scalar, path_to_const, unknown_const,
+ unknown_const_as_generic,
+ },
db::HirDatabase,
make_binders,
mapping::{from_chalk_trait_id, ToChalk},
static_lifetime, to_assoc_type_id, to_chalk_trait_id, to_placeholder_idx,
utils::Generics,
- utils::{all_super_trait_refs, associated_type_by_name_including_super_traits, generics},
- AliasEq, AliasTy, Binders, BoundVar, CallableSig, Const, DebruijnIndex, DynTy, FnPointer,
- FnSig, FnSubst, GenericArgData, ImplTraitId, Interner, ParamKind, PolyFnSig, ProjectionTy,
- QuantifiedWhereClause, QuantifiedWhereClauses, ReturnTypeImplTrait, ReturnTypeImplTraits,
- Substitution, TraitEnvironment, TraitRef, TraitRefExt, Ty, TyBuilder, TyKind, WhereClause,
+ utils::{
+ all_super_trait_refs, associated_type_by_name_including_super_traits, generics,
+ InTypeConstIdMetadata,
+ },
+ AliasEq, AliasTy, Binders, BoundVar, CallableSig, Const, ConstScalar, DebruijnIndex, DynTy,
+ FnPointer, FnSig, FnSubst, GenericArgData, ImplTraitId, Interner, ParamKind, PolyFnSig,
+ ProjectionTy, QuantifiedWhereClause, QuantifiedWhereClauses, ReturnTypeImplTrait,
+ ReturnTypeImplTraits, Substitution, TraitEnvironment, TraitRef, TraitRefExt, Ty, TyBuilder,
+ TyKind, WhereClause,
};
#[derive(Debug)]
@@ -103,8 +112,9 @@ impl ImplTraitLoweringState {
#[derive(Debug)]
pub struct TyLoweringContext<'a> {
pub db: &'a dyn HirDatabase,
- pub resolver: &'a Resolver,
+ resolver: &'a Resolver,
in_binders: DebruijnIndex,
+ owner: TypeOwnerId,
/// Note: Conceptually, it's thinkable that we could be in a location where
/// some type params should be represented as placeholders, and others
/// should be converted to variables. I think in practice, this isn't
@@ -117,13 +127,14 @@ pub struct TyLoweringContext<'a> {
}
impl<'a> TyLoweringContext<'a> {
- pub fn new(db: &'a dyn HirDatabase, resolver: &'a Resolver) -> Self {
+ pub fn new(db: &'a dyn HirDatabase, resolver: &'a Resolver, owner: TypeOwnerId) -> Self {
let impl_trait_mode = ImplTraitLoweringState::Disallowed;
let type_param_mode = ParamLoweringMode::Placeholder;
let in_binders = DebruijnIndex::INNERMOST;
Self {
db,
resolver,
+ owner,
in_binders,
impl_trait_mode,
type_param_mode,
@@ -234,6 +245,7 @@ impl<'a> TyLoweringContext<'a> {
let const_len = const_or_path_to_chalk(
self.db,
self.resolver,
+ self.owner,
TyBuilder::usize(),
len,
self.type_param_mode,
@@ -378,10 +390,19 @@ impl<'a> TyLoweringContext<'a> {
};
let ty = {
let macro_call = macro_call.to_node(self.db.upcast());
- match expander.enter_expand::<ast::Type>(self.db.upcast(), macro_call) {
+ let resolver = |path| {
+ self.resolver.resolve_path_as_macro(
+ self.db.upcast(),
+ &path,
+ Some(MacroSubNs::Bang),
+ )
+ };
+ match expander.enter_expand::<ast::Type>(self.db.upcast(), macro_call, resolver)
+ {
Ok(ExpandResult { value: Some((mark, expanded)), .. }) => {
- let ctx = LowerCtx::new(self.db.upcast(), expander.current_file_id());
- let type_ref = TypeRef::from_ast(&ctx, expanded);
+ let ctx = expander.ctx(self.db.upcast());
+ // FIXME: Report syntax errors in expansion here
+ let type_ref = TypeRef::from_ast(&ctx, expanded.tree());
drop(expander);
let ty = self.lower_ty(&type_ref);
@@ -425,11 +446,10 @@ impl<'a> TyLoweringContext<'a> {
if path.segments().len() > 1 {
return None;
}
- let resolution =
- match self.resolver.resolve_path_in_type_ns(self.db.upcast(), path.mod_path()) {
- Some((it, None)) => it,
- _ => return None,
- };
+ let resolution = match self.resolver.resolve_path_in_type_ns(self.db.upcast(), path) {
+ Some((it, None)) => it,
+ _ => return None,
+ };
match resolution {
TypeNs::GenericParam(param_id) => Some(param_id.into()),
_ => None,
@@ -608,7 +628,7 @@ impl<'a> TyLoweringContext<'a> {
}
let (resolution, remaining_index) =
- match self.resolver.resolve_path_in_type_ns(self.db.upcast(), path.mod_path()) {
+ match self.resolver.resolve_path_in_type_ns(self.db.upcast(), path) {
Some(it) => it,
None => return (TyKind::Error.intern(Interner), None),
};
@@ -716,7 +736,7 @@ impl<'a> TyLoweringContext<'a> {
resolved: ValueTyDefId,
infer_args: bool,
) -> Substitution {
- let last = path.segments().last().expect("path should have at least one segment");
+ let last = path.segments().last();
let (segment, generic_def) = match resolved {
ValueTyDefId::FunctionId(it) => (last, Some(it.into())),
ValueTyDefId::StructId(it) => (last, Some(it.into())),
@@ -732,13 +752,20 @@ impl<'a> TyLoweringContext<'a> {
let len = path.segments().len();
let penultimate = len.checked_sub(2).and_then(|idx| path.segments().get(idx));
let segment = match penultimate {
- Some(segment) if segment.args_and_bindings.is_some() => segment,
+ Some(segment) if segment.args_and_bindings.is_some() => Some(segment),
_ => last,
};
(segment, Some(var.parent.into()))
}
};
- self.substs_from_path_segment(segment, generic_def, infer_args, None)
+ if let Some(segment) = segment {
+ self.substs_from_path_segment(segment, generic_def, infer_args, None)
+ } else if let Some(generic_def) = generic_def {
+ // lang item
+ self.substs_from_args_and_bindings(None, Some(generic_def), infer_args, None)
+ } else {
+ Substitution::empty(Interner)
+ }
}
fn substs_from_path_segment(
@@ -748,6 +775,21 @@ impl<'a> TyLoweringContext<'a> {
infer_args: bool,
explicit_self_ty: Option<Ty>,
) -> Substitution {
+ self.substs_from_args_and_bindings(
+ segment.args_and_bindings,
+ def,
+ infer_args,
+ explicit_self_ty,
+ )
+ }
+
+ fn substs_from_args_and_bindings(
+ &self,
+ args_and_bindings: Option<&GenericArgs>,
+ def: Option<GenericDefId>,
+ infer_args: bool,
+ explicit_self_ty: Option<Ty>,
+ ) -> Substitution {
// Remember that the item's own generic args come before its parent's.
let mut substs = Vec::new();
let def = if let Some(d) = def {
@@ -780,7 +822,7 @@ impl<'a> TyLoweringContext<'a> {
};
let mut had_explicit_args = false;
- if let Some(generic_args) = &segment.args_and_bindings {
+ if let Some(generic_args) = &args_and_bindings {
if !generic_args.has_self_type {
fill_self_params();
}
@@ -809,6 +851,7 @@ impl<'a> TyLoweringContext<'a> {
const_or_path_to_chalk(
self.db,
self.resolver,
+ self.owner,
ty,
c,
self.type_param_mode,
@@ -879,12 +922,11 @@ impl<'a> TyLoweringContext<'a> {
path: &Path,
explicit_self_ty: Option<Ty>,
) -> Option<TraitRef> {
- let resolved =
- match self.resolver.resolve_path_in_type_ns_fully(self.db.upcast(), path.mod_path())? {
- // FIXME(trait_alias): We need to handle trait alias here.
- TypeNs::TraitId(tr) => tr,
- _ => return None,
- };
+ let resolved = match self.resolver.resolve_path_in_type_ns_fully(self.db.upcast(), path)? {
+ // FIXME(trait_alias): We need to handle trait alias here.
+ TypeNs::TraitId(tr) => tr,
+ _ => return None,
+ };
let segment = path.segments().last().expect("path should have at least one segment");
Some(self.lower_trait_ref_from_resolved_path(resolved, segment, explicit_self_ty))
}
@@ -968,7 +1010,7 @@ impl<'a> TyLoweringContext<'a> {
// ignore `T: Drop` or `T: Destruct` bounds.
// - `T: ~const Drop` has a special meaning in Rust 1.61 that we don't implement.
// (So ideally, we'd only ignore `~const Drop` here)
- // - `Destruct` impls are built-in in 1.62 (current nightlies as of 08-04-2022), so until
+ // - `Destruct` impls are built-in in 1.62 (current nightly as of 08-04-2022), so until
// the builtin impls are supported by Chalk, we ignore them here.
if let Some(lang) = lang_attr(self.db.upcast(), tr.hir_trait_id()) {
if matches!(lang, LangItem::Drop | LangItem::Destruct) {
@@ -1062,23 +1104,23 @@ impl<'a> TyLoweringContext<'a> {
associated_ty_id: to_assoc_type_id(associated_ty),
substitution,
};
- let mut preds: SmallVec<[_; 1]> = SmallVec::with_capacity(
+ let mut predicates: SmallVec<[_; 1]> = SmallVec::with_capacity(
binding.type_ref.as_ref().map_or(0, |_| 1) + binding.bounds.len(),
);
if let Some(type_ref) = &binding.type_ref {
let ty = self.lower_ty(type_ref);
let alias_eq =
AliasEq { alias: AliasTy::Projection(projection_ty.clone()), ty };
- preds.push(crate::wrap_empty_binders(WhereClause::AliasEq(alias_eq)));
+ predicates.push(crate::wrap_empty_binders(WhereClause::AliasEq(alias_eq)));
}
for bound in binding.bounds.iter() {
- preds.extend(self.lower_type_bound(
+ predicates.extend(self.lower_type_bound(
bound,
TyKind::Alias(AliasTy::Projection(projection_ty.clone())).intern(Interner),
false,
));
}
- preds
+ predicates
})
}
@@ -1145,7 +1187,7 @@ impl<'a> TyLoweringContext<'a> {
return None;
}
- // As multiple occurrences of the same auto traits *are* permitted, we dedulicate the
+ // As multiple occurrences of the same auto traits *are* permitted, we deduplicate the
// bounds. We shouldn't have repeated elements besides auto traits at this point.
bounds.dedup();
@@ -1326,8 +1368,8 @@ pub(crate) fn field_types_query(
};
let generics = generics(db.upcast(), def);
let mut res = ArenaMap::default();
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, GenericDefId::from(variant_id.adt_id()).into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
for (field_id, field_data) in var_data.fields().iter() {
res.insert(field_id, make_binders(db, &generics, ctx.lower_ty(&field_data.type_ref)));
}
@@ -1349,8 +1391,8 @@ pub(crate) fn generic_predicates_for_param_query(
assoc_name: Option<Name>,
) -> Arc<[Binders<QuantifiedWhereClause>]> {
let resolver = def.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, def.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
let generics = generics(db.upcast(), def);
let mut predicates: Vec<_> = resolver
.where_predicates_in_scope()
@@ -1381,9 +1423,7 @@ pub(crate) fn generic_predicates_for_param_query(
Some(it) => it,
None => return true,
};
- let tr = match resolver
- .resolve_path_in_type_ns_fully(db.upcast(), path.mod_path())
- {
+ let tr = match resolver.resolve_path_in_type_ns_fully(db.upcast(), path) {
Some(TypeNs::TraitId(tr)) => tr,
_ => return false,
};
@@ -1420,7 +1460,19 @@ pub(crate) fn generic_predicates_for_param_recover(
_param_id: &TypeOrConstParamId,
_assoc_name: &Option<Name>,
) -> Arc<[Binders<QuantifiedWhereClause>]> {
- Arc::new([])
+ // FIXME: use `Arc::from_iter` when it becomes available
+ Arc::from(vec![])
+}
+
+pub(crate) fn trait_environment_for_body_query(
+ db: &dyn HirDatabase,
+ def: DefWithBodyId,
+) -> Arc<TraitEnvironment> {
+ let Some(def) = def.as_generic_def_id() else {
+ let krate = def.module(db.upcast()).krate();
+ return Arc::new(TraitEnvironment::empty(krate));
+ };
+ db.trait_environment(def)
}
pub(crate) fn trait_environment_query(
@@ -1428,8 +1480,8 @@ pub(crate) fn trait_environment_query(
def: GenericDefId,
) -> Arc<TraitEnvironment> {
let resolver = def.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Placeholder);
+ let ctx = TyLoweringContext::new(db, &resolver, def.into())
+ .with_type_param_mode(ParamLoweringMode::Placeholder);
let mut traits_in_scope = Vec::new();
let mut clauses = Vec::new();
for pred in resolver.where_predicates_in_scope() {
@@ -1478,7 +1530,7 @@ pub(crate) fn trait_environment_query(
let env = chalk_ir::Environment::new(Interner).add_clauses(Interner, clauses);
- Arc::new(TraitEnvironment { krate, traits_from_clauses: traits_in_scope, env })
+ Arc::new(TraitEnvironment { krate, block: None, traits_from_clauses: traits_in_scope, env })
}
/// Resolve the where clause(s) of an item with generics.
@@ -1487,8 +1539,8 @@ pub(crate) fn generic_predicates_query(
def: GenericDefId,
) -> Arc<[Binders<QuantifiedWhereClause>]> {
let resolver = def.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, def.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
let generics = generics(db.upcast(), def);
let mut predicates = resolver
@@ -1542,35 +1594,38 @@ pub(crate) fn generic_defaults_query(
def: GenericDefId,
) -> Arc<[Binders<chalk_ir::GenericArg<Interner>>]> {
let resolver = def.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, def.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
let generic_params = generics(db.upcast(), def);
let parent_start_idx = generic_params.len_self();
- let defaults = generic_params
- .iter()
- .enumerate()
- .map(|(idx, (id, p))| {
- let p = match p {
- TypeOrConstParamData::TypeParamData(p) => p,
- TypeOrConstParamData::ConstParamData(_) => {
- // FIXME: implement const generic defaults
- let val = unknown_const_as_generic(
- db.const_param_ty(ConstParamId::from_unchecked(id)),
- );
- return make_binders(db, &generic_params, val);
- }
- };
- let mut ty =
- p.default.as_ref().map_or(TyKind::Error.intern(Interner), |t| ctx.lower_ty(t));
-
- // Each default can only refer to previous parameters.
- // Type variable default referring to parameter coming
- // after it is forbidden (FIXME: report diagnostic)
- ty = fallback_bound_vars(ty, idx, parent_start_idx);
- crate::make_binders(db, &generic_params, ty.cast(Interner))
- })
- .collect();
+ let defaults = Arc::from(
+ generic_params
+ .iter()
+ .enumerate()
+ .map(|(idx, (id, p))| {
+ let p = match p {
+ TypeOrConstParamData::TypeParamData(p) => p,
+ TypeOrConstParamData::ConstParamData(_) => {
+ // FIXME: implement const generic defaults
+ let val = unknown_const_as_generic(
+ db.const_param_ty(ConstParamId::from_unchecked(id)),
+ );
+ return make_binders(db, &generic_params, val);
+ }
+ };
+ let mut ty =
+ p.default.as_ref().map_or(TyKind::Error.intern(Interner), |t| ctx.lower_ty(t));
+
+ // Each default can only refer to previous parameters.
+ // Type variable default referring to parameter coming
+ // after it is forbidden (FIXME: report diagnostic)
+ ty = fallback_bound_vars(ty, idx, parent_start_idx);
+ crate::make_binders(db, &generic_params, ty.cast(Interner))
+ })
+ // FIXME: use `Arc::from_iter` when it becomes available
+ .collect::<Vec<_>>(),
+ );
defaults
}
@@ -1583,18 +1638,21 @@ pub(crate) fn generic_defaults_recover(
let generic_params = generics(db.upcast(), *def);
// FIXME: this code is not covered in tests.
// we still need one default per parameter
- let defaults = generic_params
- .iter_id()
- .map(|id| {
- let val = match id {
- Either::Left(_) => {
- GenericArgData::Ty(TyKind::Error.intern(Interner)).intern(Interner)
- }
- Either::Right(id) => unknown_const_as_generic(db.const_param_ty(id)),
- };
- crate::make_binders(db, &generic_params, val)
- })
- .collect();
+ let defaults = Arc::from(
+ generic_params
+ .iter_id()
+ .map(|id| {
+ let val = match id {
+ Either::Left(_) => {
+ GenericArgData::Ty(TyKind::Error.intern(Interner)).intern(Interner)
+ }
+ Either::Right(id) => unknown_const_as_generic(db.const_param_ty(id)),
+ };
+ crate::make_binders(db, &generic_params, val)
+ })
+ // FIXME: use `Arc::from_iter` when it becomes available
+ .collect::<Vec<_>>(),
+ );
defaults
}
@@ -1602,11 +1660,11 @@ pub(crate) fn generic_defaults_recover(
fn fn_sig_for_fn(db: &dyn HirDatabase, def: FunctionId) -> PolyFnSig {
let data = db.function_data(def);
let resolver = def.resolver(db.upcast());
- let ctx_params = TyLoweringContext::new(db, &resolver)
+ let ctx_params = TyLoweringContext::new(db, &resolver, def.into())
.with_impl_trait_mode(ImplTraitLoweringMode::Variable)
.with_type_param_mode(ParamLoweringMode::Variable);
- let params = data.params.iter().map(|(_, tr)| ctx_params.lower_ty(tr)).collect::<Vec<_>>();
- let ctx_ret = TyLoweringContext::new(db, &resolver)
+ let params = data.params.iter().map(|tr| ctx_params.lower_ty(tr)).collect::<Vec<_>>();
+ let ctx_ret = TyLoweringContext::new(db, &resolver, def.into())
.with_impl_trait_mode(ImplTraitLoweringMode::Opaque)
.with_type_param_mode(ParamLoweringMode::Variable);
let ret = ctx_ret.lower_ty(&data.ret_type);
@@ -1637,8 +1695,8 @@ fn type_for_const(db: &dyn HirDatabase, def: ConstId) -> Binders<Ty> {
let data = db.const_data(def);
let generics = generics(db.upcast(), def.into());
let resolver = def.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, def.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
make_binders(db, &generics, ctx.lower_ty(&data.type_ref))
}
@@ -1647,7 +1705,7 @@ fn type_for_const(db: &dyn HirDatabase, def: ConstId) -> Binders<Ty> {
fn type_for_static(db: &dyn HirDatabase, def: StaticId) -> Binders<Ty> {
let data = db.static_data(def);
let resolver = def.resolver(db.upcast());
- let ctx = TyLoweringContext::new(db, &resolver);
+ let ctx = TyLoweringContext::new(db, &resolver, def.into());
Binders::empty(Interner, ctx.lower_ty(&data.type_ref))
}
@@ -1656,8 +1714,8 @@ fn fn_sig_for_struct_constructor(db: &dyn HirDatabase, def: StructId) -> PolyFnS
let struct_data = db.struct_data(def);
let fields = struct_data.variant_data.fields();
let resolver = def.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, AdtId::from(def).into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
let params = fields.iter().map(|(_, field)| ctx.lower_ty(&field.type_ref)).collect::<Vec<_>>();
let (ret, binders) = type_for_adt(db, def.into()).into_value_and_skipped_binders();
Binders::new(binders, CallableSig::from_params_and_return(params, ret, false, Safety::Safe))
@@ -1669,7 +1727,7 @@ fn type_for_struct_constructor(db: &dyn HirDatabase, def: StructId) -> Binders<T
if let StructKind::Unit = struct_data.variant_data.kind() {
return type_for_adt(db, def.into());
}
- let generics = generics(db.upcast(), def.into());
+ let generics = generics(db.upcast(), AdtId::from(def).into());
let substs = generics.bound_vars_subst(db, DebruijnIndex::INNERMOST);
make_binders(
db,
@@ -1683,8 +1741,8 @@ fn fn_sig_for_enum_variant_constructor(db: &dyn HirDatabase, def: EnumVariantId)
let var_data = &enum_data.variants[def.local_id];
let fields = var_data.variant_data.fields();
let resolver = def.parent.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, DefWithBodyId::VariantId(def).into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
let params = fields.iter().map(|(_, field)| ctx.lower_ty(&field.type_ref)).collect::<Vec<_>>();
let (ret, binders) = type_for_adt(db, def.parent.into()).into_value_and_skipped_binders();
Binders::new(binders, CallableSig::from_params_and_return(params, ret, false, Safety::Safe))
@@ -1716,8 +1774,8 @@ fn type_for_adt(db: &dyn HirDatabase, adt: AdtId) -> Binders<Ty> {
fn type_for_type_alias(db: &dyn HirDatabase, t: TypeAliasId) -> Binders<Ty> {
let generics = generics(db.upcast(), t.into());
let resolver = t.resolver(db.upcast());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, t.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
if db.type_alias_data(t).is_extern {
Binders::empty(Interner, TyKind::Foreign(crate::to_foreign_def_id(t)).intern(Interner))
} else {
@@ -1838,8 +1896,8 @@ pub(crate) fn impl_self_ty_query(db: &dyn HirDatabase, impl_id: ImplId) -> Binde
"impl_self_ty_query({impl_id:?} -> {impl_loc:?} -> {impl_data:?})"
));
let generics = generics(db.upcast(), impl_id.into());
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, impl_id.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
make_binders(db, &generics, ctx.lower_ty(&impl_data.self_ty))
}
@@ -1848,7 +1906,7 @@ pub(crate) fn const_param_ty_query(db: &dyn HirDatabase, def: ConstParamId) -> T
let parent_data = db.generic_params(def.parent());
let data = &parent_data.type_or_consts[def.local_id()];
let resolver = def.parent().resolver(db.upcast());
- let ctx = TyLoweringContext::new(db, &resolver);
+ let ctx = TyLoweringContext::new(db, &resolver, def.parent().into());
match data {
TypeOrConstParamData::TypeParamData(_) => {
never!();
@@ -1874,8 +1932,8 @@ pub(crate) fn impl_trait_query(db: &dyn HirDatabase, impl_id: ImplId) -> Option<
let _cx = stdx::panic_context::enter(format!(
"impl_trait_query({impl_id:?} -> {impl_loc:?} -> {impl_data:?})"
));
- let ctx =
- TyLoweringContext::new(db, &resolver).with_type_param_mode(ParamLoweringMode::Variable);
+ let ctx = TyLoweringContext::new(db, &resolver, impl_id.into())
+ .with_type_param_mode(ParamLoweringMode::Variable);
let (self_ty, binders) = db.impl_self_ty(impl_id).into_value_and_skipped_binders();
let target_trait = impl_data.target_trait.as_ref()?;
Some(Binders::new(binders, ctx.lower_trait_ref(target_trait, Some(self_ty))?))
@@ -1888,7 +1946,7 @@ pub(crate) fn return_type_impl_traits(
// FIXME unify with fn_sig_for_fn instead of doing lowering twice, maybe
let data = db.function_data(def);
let resolver = def.resolver(db.upcast());
- let ctx_ret = TyLoweringContext::new(db, &resolver)
+ let ctx_ret = TyLoweringContext::new(db, &resolver, def.into())
.with_impl_trait_mode(ImplTraitLoweringMode::Opaque)
.with_type_param_mode(ParamLoweringMode::Variable);
let _ret = ctx_ret.lower_ty(&data.ret_type);
@@ -1923,7 +1981,7 @@ pub(crate) fn generic_arg_to_chalk<'a, T>(
arg: &'a GenericArg,
this: &mut T,
for_type: impl FnOnce(&mut T, &TypeRef) -> Ty + 'a,
- for_const: impl FnOnce(&mut T, &ConstRefOrPath, Ty) -> Const + 'a,
+ for_const: impl FnOnce(&mut T, &ConstRef, Ty) -> Const + 'a,
) -> Option<crate::GenericArg> {
let kind = match kind_id {
Either::Left(_) => ParamKind::Type,
@@ -1948,10 +2006,10 @@ pub(crate) fn generic_arg_to_chalk<'a, T>(
// as types. Maybe here is not the best place to do it, but
// it works.
if let TypeRef::Path(p) = t {
- let p = p.mod_path();
+ let p = p.mod_path()?;
if p.kind == PathKind::Plain {
if let [n] = p.segments() {
- let c = ConstRefOrPath::Path(n.clone());
+ let c = ConstRef::Path(n.clone());
return Some(
GenericArgData::Const(for_const(this, &c, c_ty)).intern(Interner),
);
@@ -1967,18 +2025,47 @@ pub(crate) fn generic_arg_to_chalk<'a, T>(
pub(crate) fn const_or_path_to_chalk(
db: &dyn HirDatabase,
resolver: &Resolver,
+ owner: TypeOwnerId,
expected_ty: Ty,
- value: &ConstRefOrPath,
+ value: &ConstRef,
mode: ParamLoweringMode,
args: impl FnOnce() -> Generics,
debruijn: DebruijnIndex,
) -> Const {
match value {
- ConstRefOrPath::Scalar(s) => intern_const_ref(db, s, expected_ty, resolver.krate()),
- ConstRefOrPath::Path(n) => {
+ ConstRef::Scalar(s) => intern_const_ref(db, s, expected_ty, resolver.krate()),
+ ConstRef::Path(n) => {
let path = ModPath::from_segments(PathKind::Plain, Some(n.clone()));
- path_to_const(db, resolver, &path, mode, args, debruijn)
- .unwrap_or_else(|| unknown_const(expected_ty))
+ path_to_const(
+ db,
+ resolver,
+ &Path::from_known_path_with_no_generic(path),
+ mode,
+ args,
+ debruijn,
+ expected_ty.clone(),
+ )
+ .unwrap_or_else(|| unknown_const(expected_ty))
+ }
+ &ConstRef::Complex(it) => {
+ let crate_data = &db.crate_graph()[owner.module(db.upcast()).krate()];
+ if crate_data.env.get("__ra_is_test_fixture").is_none() && crate_data.origin.is_local()
+ {
+                // FIXME: the current `InTypeConstId` is very unstable, so we only use it in non-local crates
+                // that are unlikely to be edited.
+ return unknown_const(expected_ty);
+ }
+ let c = db
+ .intern_in_type_const(InTypeConstLoc {
+ id: it,
+ owner,
+ thing: Box::new(InTypeConstIdMetadata(expected_ty.clone())),
+ })
+ .into();
+ intern_const_scalar(
+ ConstScalar::UnevaluatedConst(c, Substitution::empty(Interner)),
+ expected_ty,
+ )
}
}
}
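
Schematically, the lowering now distinguishes three shapes of constant reference; the enum below is an illustrative stand-in for `ConstRef`, not the real hir-def type:

    enum ConstRefSketch {
        Scalar(u128),
        Path(String),
        Complex(String),
    }

    fn lower(c: &ConstRefSketch) -> String {
        match c {
            // Literals are interned directly as concrete constants.
            ConstRefSketch::Scalar(s) => format!("const {s}"),
            // Bare names go through name resolution, falling back to unknown.
            ConstRefSketch::Path(n) => format!("resolve({n})"),
            // Anything else becomes an unevaluated in-type const, evaluated lazily.
            ConstRefSketch::Complex(e) => format!("unevaluated({e})"),
        }
    }

    fn main() {
        assert_eq!(lower(&ConstRefSketch::Scalar(4)), "const 4");
        assert_eq!(lower(&ConstRefSketch::Path("X".into())), "resolve(X)");
        assert_eq!(lower(&ConstRefSketch::Complex("2 + 2".into())), "unevaluated(2 + 2)");
    }
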
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs b/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs
index f3a27632b..ab6430e8f 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs
@@ -2,25 +2,28 @@
//! For details about how this works in rustc, see the method lookup page in the
//! [rustc guide](https://rust-lang.github.io/rustc-guide/method-lookup.html)
//! and the corresponding code mostly in rustc_hir_analysis/check/method/probe.rs.
-use std::{ops::ControlFlow, sync::Arc};
+use std::ops::ControlFlow;
use base_db::{CrateId, Edition};
-use chalk_ir::{cast::Cast, Mutability, TyKind, UniverseIndex};
+use chalk_ir::{cast::Cast, Mutability, TyKind, UniverseIndex, WhereClause};
use hir_def::{
- data::ImplData, item_scope::ItemScope, lang_item::LangItem, nameres::DefMap, AssocItemId,
- BlockId, ConstId, FunctionId, HasModule, ImplId, ItemContainerId, Lookup, ModuleDefId,
- ModuleId, TraitId,
+ data::{adt::StructFlags, ImplData},
+ item_scope::ItemScope,
+ nameres::DefMap,
+ AssocItemId, BlockId, ConstId, FunctionId, HasModule, ImplId, ItemContainerId, Lookup,
+ ModuleDefId, ModuleId, TraitId,
};
use hir_expand::name::Name;
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{smallvec, SmallVec};
use stdx::never;
+use triomphe::Arc;
use crate::{
autoderef::{self, AutoderefKind},
db::HirDatabase,
from_chalk_trait_id, from_foreign_def_id,
- infer::{unify::InferenceTable, Adjust, Adjustment, AutoBorrow, OverloadedDeref, PointerCast},
+ infer::{unify::InferenceTable, Adjust, Adjustment, OverloadedDeref, PointerCast},
primitive::{FloatTy, IntTy, UintTy},
static_lifetime, to_chalk_trait_id,
utils::all_super_traits,
@@ -147,31 +150,30 @@ impl TraitImpls {
Arc::new(impls)
}
- pub(crate) fn trait_impls_in_block_query(
- db: &dyn HirDatabase,
- block: BlockId,
- ) -> Option<Arc<Self>> {
+ pub(crate) fn trait_impls_in_block_query(db: &dyn HirDatabase, block: BlockId) -> Arc<Self> {
let _p = profile::span("trait_impls_in_block_query");
let mut impls = Self { map: FxHashMap::default() };
- let block_def_map = db.block_def_map(block)?;
+ let block_def_map = db.block_def_map(block);
impls.collect_def_map(db, &block_def_map);
impls.shrink_to_fit();
- Some(Arc::new(impls))
+ Arc::new(impls)
}
- pub(crate) fn trait_impls_in_deps_query(db: &dyn HirDatabase, krate: CrateId) -> Arc<Self> {
+ pub(crate) fn trait_impls_in_deps_query(
+ db: &dyn HirDatabase,
+ krate: CrateId,
+ ) -> Arc<[Arc<Self>]> {
let _p = profile::span("trait_impls_in_deps_query").detail(|| format!("{krate:?}"));
let crate_graph = db.crate_graph();
- let mut res = Self { map: FxHashMap::default() };
-
- for krate in crate_graph.transitive_deps(krate) {
- res.merge(&db.trait_impls_in_crate(krate));
- }
- res.shrink_to_fit();
-
- Arc::new(res)
+ // FIXME: use `Arc::from_iter` when it becomes available
+ Arc::from(
+ crate_graph
+ .transitive_deps(krate)
+ .map(|krate| db.trait_impls_in_crate(krate))
+ .collect::<Vec<_>>(),
+ )
}
fn shrink_to_fit(&mut self) {
@@ -185,6 +187,15 @@ impl TraitImpls {
fn collect_def_map(&mut self, db: &dyn HirDatabase, def_map: &DefMap) {
for (_module_id, module_data) in def_map.modules() {
for impl_id in module_data.scope.impls() {
+ // Reservation impls should be ignored during trait resolution, so we never need
+ // them during type analysis. See rust-lang/rust#64631 for details.
+ //
+ // FIXME: Reservation impls should be considered during coherence checks. If we are
+ // (ever) to implement coherence checks, this filtering should be done by the trait
+ // solver.
+ if db.attrs(impl_id.into()).by_key("rustc_reservation_impl").exists() {
+ continue;
+ }
let target_trait = match db.impl_trait(impl_id) {
Some(tr) => tr.skip_binders().hir_trait_id(),
None => continue,
@@ -210,15 +221,6 @@ impl TraitImpls {
}
}
- fn merge(&mut self, other: &Self) {
- for (trait_, other_map) in &other.map {
- let map = self.map.entry(*trait_).or_default();
- for (fp, impls) in other_map {
- map.entry(*fp).or_default().extend(impls);
- }
- }
- }
-
/// Queries all trait impls for the given type.
pub fn for_self_ty_without_blanket_impls(
&self,
@@ -271,6 +273,7 @@ pub struct InherentImpls {
impl InherentImpls {
pub(crate) fn inherent_impls_in_crate_query(db: &dyn HirDatabase, krate: CrateId) -> Arc<Self> {
+ let _p = profile::span("inherent_impls_in_crate_query").detail(|| format!("{krate:?}"));
let mut impls = Self { map: FxHashMap::default(), invalid_impls: Vec::default() };
let crate_def_map = db.crate_def_map(krate);
@@ -280,17 +283,15 @@ impl InherentImpls {
Arc::new(impls)
}
- pub(crate) fn inherent_impls_in_block_query(
- db: &dyn HirDatabase,
- block: BlockId,
- ) -> Option<Arc<Self>> {
+ pub(crate) fn inherent_impls_in_block_query(db: &dyn HirDatabase, block: BlockId) -> Arc<Self> {
+ let _p = profile::span("inherent_impls_in_block_query");
let mut impls = Self { map: FxHashMap::default(), invalid_impls: Vec::default() };
- if let Some(block_def_map) = db.block_def_map(block) {
- impls.collect_def_map(db, &block_def_map);
- impls.shrink_to_fit();
- return Some(Arc::new(impls));
- }
- None
+
+ let block_def_map = db.block_def_map(block);
+ impls.collect_def_map(db, &block_def_map);
+ impls.shrink_to_fit();
+
+ Arc::new(impls)
}
fn shrink_to_fit(&mut self) {
@@ -404,12 +405,14 @@ pub fn def_crates(
match ty.kind(Interner) {
&TyKind::Adt(AdtId(def_id), _) => {
let rustc_has_incoherent_inherent_impls = match def_id {
- hir_def::AdtId::StructId(id) => {
- db.struct_data(id).rustc_has_incoherent_inherent_impls
- }
- hir_def::AdtId::UnionId(id) => {
- db.union_data(id).rustc_has_incoherent_inherent_impls
- }
+ hir_def::AdtId::StructId(id) => db
+ .struct_data(id)
+ .flags
+ .contains(StructFlags::IS_RUSTC_HAS_INCOHERENT_INHERENT_IMPL),
+ hir_def::AdtId::UnionId(id) => db
+ .union_data(id)
+ .flags
+ .contains(StructFlags::IS_RUSTC_HAS_INCOHERENT_INHERENT_IMPL),
hir_def::AdtId::EnumId(id) => db.enum_data(id).rustc_has_incoherent_inherent_impls,
};
Some(if rustc_has_incoherent_inherent_impls {
@@ -449,55 +452,6 @@ pub fn def_crates(
}
}
-pub fn lang_items_for_bin_op(op: syntax::ast::BinaryOp) -> Option<(Name, LangItem)> {
- use hir_expand::name;
- use syntax::ast::{ArithOp, BinaryOp, CmpOp, Ordering};
- Some(match op {
- BinaryOp::LogicOp(_) => return None,
- BinaryOp::ArithOp(aop) => match aop {
- ArithOp::Add => (name![add], LangItem::Add),
- ArithOp::Mul => (name![mul], LangItem::Mul),
- ArithOp::Sub => (name![sub], LangItem::Sub),
- ArithOp::Div => (name![div], LangItem::Div),
- ArithOp::Rem => (name![rem], LangItem::Rem),
- ArithOp::Shl => (name![shl], LangItem::Shl),
- ArithOp::Shr => (name![shr], LangItem::Shr),
- ArithOp::BitXor => (name![bitxor], LangItem::BitXor),
- ArithOp::BitOr => (name![bitor], LangItem::BitOr),
- ArithOp::BitAnd => (name![bitand], LangItem::BitAnd),
- },
- BinaryOp::Assignment { op: Some(aop) } => match aop {
- ArithOp::Add => (name![add_assign], LangItem::AddAssign),
- ArithOp::Mul => (name![mul_assign], LangItem::MulAssign),
- ArithOp::Sub => (name![sub_assign], LangItem::SubAssign),
- ArithOp::Div => (name![div_assign], LangItem::DivAssign),
- ArithOp::Rem => (name![rem_assign], LangItem::RemAssign),
- ArithOp::Shl => (name![shl_assign], LangItem::ShlAssign),
- ArithOp::Shr => (name![shr_assign], LangItem::ShrAssign),
- ArithOp::BitXor => (name![bitxor_assign], LangItem::BitXorAssign),
- ArithOp::BitOr => (name![bitor_assign], LangItem::BitOrAssign),
- ArithOp::BitAnd => (name![bitand_assign], LangItem::BitAndAssign),
- },
- BinaryOp::CmpOp(cop) => match cop {
- CmpOp::Eq { negated: false } => (name![eq], LangItem::PartialEq),
- CmpOp::Eq { negated: true } => (name![ne], LangItem::PartialEq),
- CmpOp::Ord { ordering: Ordering::Less, strict: false } => {
- (name![le], LangItem::PartialOrd)
- }
- CmpOp::Ord { ordering: Ordering::Less, strict: true } => {
- (name![lt], LangItem::PartialOrd)
- }
- CmpOp::Ord { ordering: Ordering::Greater, strict: false } => {
- (name![ge], LangItem::PartialOrd)
- }
- CmpOp::Ord { ordering: Ordering::Greater, strict: true } => {
- (name![gt], LangItem::PartialOrd)
- }
- },
- BinaryOp::Assignment { op: None } => return None,
- })
-}
-
/// Look up the method with the given name.
pub(crate) fn lookup_method(
db: &dyn HirDatabase,
@@ -600,9 +554,9 @@ impl ReceiverAdjustments {
}
}
if let Some(m) = self.autoref {
- ty = TyKind::Ref(m, static_lifetime(), ty).intern(Interner);
- adjust
- .push(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(m)), target: ty.clone() });
+ let a = Adjustment::borrow(m, ty);
+ ty = a.target.clone();
+ adjust.push(a);
}
if self.unsize_array {
ty = 'x: {
@@ -616,7 +570,7 @@ impl ReceiverAdjustments {
.intern(Interner);
}
}
- never!("unsize_array with non-reference-to-array {:?}", ty);
+ // FIXME: report diagnostic if array unsizing happens without indirection.
ty
};
adjust.push(Adjustment {
@@ -692,6 +646,39 @@ pub fn lookup_impl_const(
.unwrap_or((const_id, subs))
}
+/// Checks whether the self parameter of a `Trait` method is `dyn Trait`, in which case the
+/// method should be called through the vtable.
+pub fn is_dyn_method(
+ db: &dyn HirDatabase,
+ _env: Arc<TraitEnvironment>,
+ func: FunctionId,
+ fn_subst: Substitution,
+) -> Option<usize> {
+ let ItemContainerId::TraitId(trait_id) = func.lookup(db.upcast()).container else {
+ return None;
+ };
+ let trait_params = db.generic_params(trait_id.into()).type_or_consts.len();
+ let fn_params = fn_subst.len(Interner) - trait_params;
+ let trait_ref = TraitRef {
+ trait_id: to_chalk_trait_id(trait_id),
+ substitution: Substitution::from_iter(Interner, fn_subst.iter(Interner).skip(fn_params)),
+ };
+ let self_ty = trait_ref.self_type_parameter(Interner);
+ if let TyKind::Dyn(d) = self_ty.kind(Interner) {
+ let is_my_trait_in_bounds =
+ d.bounds.skip_binders().as_slice(Interner).iter().any(|x| match x.skip_binders() {
+                // rustc doesn't accept `impl Foo<2> for dyn Foo<5>`, so if the trait ids are equal, no matter
+                // what the generics are, we can be sure that the method comes from the vtable.
+ WhereClause::Implemented(tr) => tr.trait_id == trait_ref.trait_id,
+ _ => false,
+ });
+ if is_my_trait_in_bounds {
+ return Some(fn_params);
+ }
+ }
+ None
+}
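+
+`is_dyn_method` exists because a call whose receiver is `dyn Trait` must be dispatched through the vtable rather than a statically resolved impl; the distinction it detects is the ordinary one below:
+
+    trait Speak {
+        fn speak(&self) -> &'static str;
+    }
+
+    struct Dog;
+    impl Speak for Dog {
+        fn speak(&self) -> &'static str { "woof" }
+    }
+
+    fn main() {
+        let d = Dog;
+        // Static receiver: the impl method is resolved at compile time.
+        assert_eq!(d.speak(), "woof");
+        // `dyn Trait` receiver: the call goes through the vtable, which is
+        // the case this helper reports to the MIR evaluator.
+        let dynamic: &dyn Speak = &d;
+        assert_eq!(dynamic.speak(), "woof");
+    }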
+
/// Looks up the impl method that actually runs for the trait method `func`.
///
/// Returns `func` if it's not a method defined in a trait or the lookup failed.
@@ -701,9 +688,8 @@ pub fn lookup_impl_method(
func: FunctionId,
fn_subst: Substitution,
) -> (FunctionId, Substitution) {
- let trait_id = match func.lookup(db.upcast()).container {
- ItemContainerId::TraitId(id) => id,
- _ => return (func, fn_subst),
+ let ItemContainerId::TraitId(trait_id) = func.lookup(db.upcast()).container else {
+ return (func, fn_subst)
};
let trait_params = db.generic_params(trait_id.into()).type_or_consts.len();
let fn_params = fn_subst.len(Interner) - trait_params;
@@ -713,7 +699,7 @@ pub fn lookup_impl_method(
};
let name = &db.function_data(func).name;
- lookup_impl_assoc_item_for_trait_ref(trait_ref, db, env, name)
+ let Some((impl_fn, impl_subst)) = lookup_impl_assoc_item_for_trait_ref(trait_ref, db, env, name)
.and_then(|assoc| {
if let (AssocItemId::FunctionId(id), subst) = assoc {
Some((id, subst))
@@ -721,7 +707,16 @@ pub fn lookup_impl_method(
None
}
})
- .unwrap_or((func, fn_subst))
+ else {
+ return (func, fn_subst);
+ };
+ (
+ impl_fn,
+ Substitution::from_iter(
+ Interner,
+ fn_subst.iter(Interner).take(fn_params).chain(impl_subst.iter(Interner)),
+ ),
+ )
}
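
The returned substitution is now stitched together from the call's own function arguments followed by the matched impl's arguments, instead of reusing `fn_subst` wholesale. With plain slices the composition looks like this (the names are illustrative):

    fn main() {
        // fn_subst lays out the method's own args first, then the trait's args.
        let fn_subst = ["MethodArg", "TraitSelf", "TraitArg"];
        let fn_params = 1; // fn_subst.len() - trait_params
        let impl_subst = ["ImplSelf", "ImplArg"];

        // Keep the method's own args, replace the trait args with the impl's.
        let composed: Vec<&str> = fn_subst
            .iter()
            .take(fn_params)
            .chain(impl_subst.iter())
            .copied()
            .collect();
        assert_eq!(composed, ["MethodArg", "ImplSelf", "ImplArg"]);
    }
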
fn lookup_impl_assoc_item_for_trait_ref(
@@ -730,10 +725,20 @@ fn lookup_impl_assoc_item_for_trait_ref(
env: Arc<TraitEnvironment>,
name: &Name,
) -> Option<(AssocItemId, Substitution)> {
+ let hir_trait_id = trait_ref.hir_trait_id();
let self_ty = trait_ref.self_type_parameter(Interner);
let self_ty_fp = TyFingerprint::for_trait_impl(&self_ty)?;
let impls = db.trait_impls_in_deps(env.krate);
- let impls = impls.for_trait_and_self_ty(trait_ref.hir_trait_id(), self_ty_fp);
+ let self_impls = match self_ty.kind(Interner) {
+ TyKind::Adt(id, _) => {
+ id.0.module(db.upcast()).containing_block().map(|x| db.trait_impls_in_block(x))
+ }
+ _ => None,
+ };
+ let impls = impls
+ .iter()
+ .chain(self_impls.as_ref())
+ .flat_map(|impls| impls.for_trait_and_self_ty(hir_trait_id, self_ty_fp));
let table = InferenceTable::new(db, env);
@@ -759,9 +764,8 @@ fn find_matching_impl(
actual_trait_ref: TraitRef,
) -> Option<(Arc<ImplData>, Substitution)> {
let db = table.db;
- loop {
- let impl_ = impls.next()?;
- let r = table.run_in_snapshot(|table| {
+ impls.find_map(|impl_| {
+ table.run_in_snapshot(|table| {
let impl_data = db.impl_data(impl_);
let impl_substs =
TyBuilder::subst_for_def(db, impl_, None).fill_with_inference_vars(table).build();
@@ -778,12 +782,11 @@ fn find_matching_impl(
.into_iter()
.map(|b| b.cast(Interner));
let goal = crate::Goal::all(Interner, wcs);
- table.try_obligation(goal).map(|_| (impl_data, table.resolve_completely(impl_substs)))
- });
- if r.is_some() {
- break r;
- }
- }
+ table.try_obligation(goal.clone())?;
+ table.register_obligation(goal);
+ Some((impl_data, table.resolve_completely(impl_substs)))
+ })
+ })
}
fn is_inherent_impl_coherent(
@@ -824,12 +827,14 @@ fn is_inherent_impl_coherent(
| TyKind::Scalar(_) => true,
&TyKind::Adt(AdtId(adt), _) => match adt {
- hir_def::AdtId::StructId(it) => {
- db.struct_data(it).rustc_has_incoherent_inherent_impls
- }
- hir_def::AdtId::UnionId(it) => {
- db.union_data(it).rustc_has_incoherent_inherent_impls
- }
+ hir_def::AdtId::StructId(id) => db
+ .struct_data(id)
+ .flags
+ .contains(StructFlags::IS_RUSTC_HAS_INCOHERENT_INHERENT_IMPL),
+ hir_def::AdtId::UnionId(id) => db
+ .union_data(id)
+ .flags
+ .contains(StructFlags::IS_RUSTC_HAS_INCOHERENT_INHERENT_IMPL),
hir_def::AdtId::EnumId(it) => db.enum_data(it).rustc_has_incoherent_inherent_impls,
},
TyKind::Dyn(it) => it.principal().map_or(false, |trait_ref| {
@@ -963,7 +968,14 @@ fn iterate_method_candidates_with_autoref(
)
};
- iterate_method_candidates_by_receiver(receiver_ty, first_adjustment.clone())?;
+ let mut maybe_reborrowed = first_adjustment.clone();
+ if let Some((_, _, m)) = receiver_ty.value.as_reference() {
+        // Prefer reborrowing references over moving them
+ maybe_reborrowed.autoref = Some(m);
+ maybe_reborrowed.autoderefs += 1;
+ }
+
+ iterate_method_candidates_by_receiver(receiver_ty, maybe_reborrowed)?;
let refed = Canonical {
value: TyKind::Ref(Mutability::Not, static_lifetime(), receiver_ty.value.clone())
@@ -1108,7 +1120,7 @@ fn iterate_trait_method_candidates(
};
if !known_implemented {
let goal = generic_implements_goal(db, env.clone(), t, &canonical_self_ty);
- if db.trait_solve(env.krate, goal.cast(Interner)).is_none() {
+ if db.trait_solve(env.krate, env.block, goal.cast(Interner)).is_none() {
continue 'traits;
}
}
@@ -1180,23 +1192,19 @@ fn iterate_inherent_methods(
};
while let Some(block_id) = block {
- if let Some(impls) = db.inherent_impls_in_block(block_id) {
- impls_for_self_ty(
- &impls,
- self_ty,
- table,
- name,
- receiver_ty,
- receiver_adjustments.clone(),
- module,
- callback,
- )?;
- }
+ let impls = db.inherent_impls_in_block(block_id);
+ impls_for_self_ty(
+ &impls,
+ self_ty,
+ table,
+ name,
+ receiver_ty,
+ receiver_adjustments.clone(),
+ module,
+ callback,
+ )?;
- block = db
- .block_def_map(block_id)
- .and_then(|map| map.parent())
- .and_then(|module| module.containing_block());
+ block = db.block_def_map(block_id).parent().and_then(|module| module.containing_block());
}
for krate in def_crates {
@@ -1274,7 +1282,7 @@ fn iterate_inherent_methods(
}
/// Returns the receiver type for the index trait call.
-pub fn resolve_indexing_op(
+pub(crate) fn resolve_indexing_op(
db: &dyn HirDatabase,
env: Arc<TraitEnvironment>,
ty: Canonical<Ty>,
@@ -1284,8 +1292,11 @@ pub fn resolve_indexing_op(
let ty = table.instantiate_canonical(ty);
let deref_chain = autoderef_method_receiver(&mut table, ty);
for (ty, adj) in deref_chain {
- let goal = generic_implements_goal(db, env.clone(), index_trait, &ty);
- if db.trait_solve(env.krate, goal.cast(Interner)).is_some() {
+ let goal = generic_implements_goal(db, table.trait_env.clone(), index_trait, &ty);
+ if db
+ .trait_solve(table.trait_env.krate, table.trait_env.block, goal.cast(Interner))
+ .is_some()
+ {
return Some(adj);
}
}
@@ -1310,14 +1321,12 @@ fn is_valid_candidate(
) -> IsValidCandidate {
let db = table.db;
match item {
- AssocItemId::FunctionId(m) => {
- is_valid_fn_candidate(table, m, name, receiver_ty, self_ty, visible_from_module)
+ AssocItemId::FunctionId(f) => {
+ is_valid_fn_candidate(table, f, name, receiver_ty, self_ty, visible_from_module)
}
AssocItemId::ConstId(c) => {
- let data = db.const_data(c);
check_that!(receiver_ty.is_none());
-
- check_that!(name.map_or(true, |n| data.name.as_ref() == Some(n)));
+ check_that!(name.map_or(true, |n| db.const_data(c).name.as_ref() == Some(n)));
if let Some(from_module) = visible_from_module {
if !db.const_visibility(c).is_visible_from(db.upcast(), from_module) {
@@ -1441,7 +1450,7 @@ pub fn implements_trait(
trait_: TraitId,
) -> bool {
let goal = generic_implements_goal(db, env.clone(), trait_, ty);
- let solution = db.trait_solve(env.krate, goal.cast(Interner));
+ let solution = db.trait_solve(env.krate, env.block, goal.cast(Interner));
solution.is_some()
}
@@ -1453,7 +1462,7 @@ pub fn implements_trait_unique(
trait_: TraitId,
) -> bool {
let goal = generic_implements_goal(db, env.clone(), trait_, ty);
- let solution = db.trait_solve(env.krate, goal.cast(Interner));
+ let solution = db.trait_solve(env.krate, env.block, goal.cast(Interner));
matches!(solution, Some(crate::Solution::Unique(_)))
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs
index 7c1cbbdf5..2345bab0b 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs
@@ -3,12 +3,15 @@
use std::{fmt::Display, iter};
use crate::{
- infer::PointerCast, Const, ConstScalar, InferenceResult, Interner, MemoryMap, Substitution, Ty,
+ consteval::usize_const, db::HirDatabase, display::HirDisplay, infer::PointerCast,
+ lang_items::is_box, mapping::ToChalk, CallableDefId, ClosureId, Const, ConstScalar,
+ InferenceResult, Interner, MemoryMap, Substitution, Ty, TyKind,
};
+use base_db::CrateId;
use chalk_ir::Mutability;
use hir_def::{
- expr::{BindingId, Expr, ExprId, Ordering, PatId},
- DefWithBodyId, FieldId, UnionId, VariantId,
+ hir::{BindingId, Expr, ExprId, Ordering, PatId},
+ DefWithBodyId, FieldId, StaticId, UnionId, VariantId,
};
use la_arena::{Arena, ArenaMap, Idx, RawIdx};
@@ -16,12 +19,19 @@ mod eval;
mod lower;
mod borrowck;
mod pretty;
+mod monomorphization;
pub use borrowck::{borrowck_query, BorrowckResult, MutabilityReason};
-pub use eval::{interpret_mir, pad16, Evaluator, MirEvalError};
-pub use lower::{lower_to_mir, mir_body_query, mir_body_recover, MirLowerError};
+pub use eval::{interpret_mir, pad16, Evaluator, MirEvalError, VTableMap};
+pub use lower::{
+ lower_to_mir, mir_body_for_closure_query, mir_body_query, mir_body_recover, MirLowerError,
+};
+pub use monomorphization::{
+ monomorphize_mir_body_bad, monomorphized_mir_body_for_closure_query,
+ monomorphized_mir_body_query, monomorphized_mir_body_recover,
+};
use smallvec::{smallvec, SmallVec};
-use stdx::impl_from;
+use stdx::{impl_from, never};
use super::consteval::{intern_const_scalar, try_const_usize};
@@ -32,7 +42,7 @@ fn return_slot() -> LocalId {
LocalId::from_raw(RawIdx::from(0))
}
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Local {
pub ty: Ty,
}
@@ -52,7 +62,7 @@ pub struct Local {
/// This is what is implemented in miri today. Are these the semantics we want for MIR? Is this
/// something we can even decide without knowing more about Rust's memory model?
///
-/// **Needs clarifiation:** Is loading a place that has its variant index set well-formed? Miri
+/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
/// currently implements it, but it seems like this may be something to check against in the
/// validator.
#[derive(Debug, PartialEq, Eq, Clone)]
@@ -73,6 +83,9 @@ pub enum Operand {
Move(Place),
/// Constants are already semantically values, and remain unchanged.
Constant(Const),
+    /// NON STANDARD: This kind of operand returns an immutable reference to the static's memory.
+    /// Rustc handles it with the `Constant` variant somehow.
+ Static(StaticId),
}
impl Operand {
@@ -87,31 +100,141 @@ impl Operand {
fn const_zst(ty: Ty) -> Operand {
Self::from_bytes(vec![], ty)
}
+
+ fn from_fn(
+ db: &dyn HirDatabase,
+ func_id: hir_def::FunctionId,
+ generic_args: Substitution,
+ ) -> Operand {
+ let ty =
+ chalk_ir::TyKind::FnDef(CallableDefId::FunctionId(func_id).to_chalk(db), generic_args)
+ .intern(Interner);
+ Operand::from_bytes(vec![], ty)
+ }
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ProjectionElem<V, T> {
Deref,
Field(FieldId),
- TupleField(usize),
+ // FIXME: get rid of this, and use FieldId for tuples and closures
+ TupleOrClosureField(usize),
Index(V),
- ConstantIndex { offset: u64, min_length: u64, from_end: bool },
- Subslice { from: u64, to: u64, from_end: bool },
+ ConstantIndex { offset: u64, from_end: bool },
+ Subslice { from: u64, to: u64 },
//Downcast(Option<Symbol>, VariantIdx),
OpaqueCast(T),
}
+impl<V, T> ProjectionElem<V, T> {
+ pub fn projected_ty(
+ &self,
+ base: Ty,
+ db: &dyn HirDatabase,
+ closure_field: impl FnOnce(ClosureId, &Substitution, usize) -> Ty,
+ krate: CrateId,
+ ) -> Ty {
+ match self {
+ ProjectionElem::Deref => match &base.data(Interner).kind {
+ TyKind::Raw(_, inner) | TyKind::Ref(_, _, inner) => inner.clone(),
+ TyKind::Adt(adt, subst) if is_box(db, adt.0) => {
+ subst.at(Interner, 0).assert_ty_ref(Interner).clone()
+ }
+ _ => {
+ never!("Overloaded deref on type {} is not a projection", base.display(db));
+ return TyKind::Error.intern(Interner);
+ }
+ },
+ ProjectionElem::Field(f) => match &base.data(Interner).kind {
+ TyKind::Adt(_, subst) => {
+ db.field_types(f.parent)[f.local_id].clone().substitute(Interner, subst)
+ }
+ _ => {
+                    never!("Only ADTs have fields");
+ return TyKind::Error.intern(Interner);
+ }
+ },
+ ProjectionElem::TupleOrClosureField(f) => match &base.data(Interner).kind {
+ TyKind::Tuple(_, subst) => subst
+ .as_slice(Interner)
+ .get(*f)
+ .map(|x| x.assert_ty_ref(Interner))
+ .cloned()
+ .unwrap_or_else(|| {
+                        never!("Out-of-bounds tuple field");
+ TyKind::Error.intern(Interner)
+ }),
+ TyKind::Closure(id, subst) => closure_field(*id, subst, *f),
+ _ => {
+                    never!("Only tuples and closures have tuple or closure fields");
+ return TyKind::Error.intern(Interner);
+ }
+ },
+ ProjectionElem::ConstantIndex { .. } | ProjectionElem::Index(_) => {
+ match &base.data(Interner).kind {
+ TyKind::Array(inner, _) | TyKind::Slice(inner) => inner.clone(),
+ _ => {
+ never!("Overloaded index is not a projection");
+ return TyKind::Error.intern(Interner);
+ }
+ }
+ }
+ &ProjectionElem::Subslice { from, to } => match &base.data(Interner).kind {
+ TyKind::Array(inner, c) => {
+ let next_c = usize_const(
+ db,
+ match try_const_usize(db, c) {
+ None => None,
+ Some(x) => x.checked_sub(u128::from(from + to)),
+ },
+ krate,
+ );
+ TyKind::Array(inner.clone(), next_c).intern(Interner)
+ }
+ TyKind::Slice(_) => base.clone(),
+ _ => {
+                    never!("Subslice projection should only happen on slices and arrays");
+ return TyKind::Error.intern(Interner);
+ }
+ },
+ ProjectionElem::OpaqueCast(_) => {
+ never!("We don't emit these yet");
+ return TyKind::Error.intern(Interner);
+ }
+ }
+ }
+}
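
// Illustrative sketch, not part of this change: `projected_ty` maps a base
// type through one projection step; a standalone analogue over a toy type
// enum shows the shape of the logic.
#[derive(Clone, Debug, PartialEq)]
enum ToyTy {
    Ref(Box<ToyTy>),
    Tuple(Vec<ToyTy>),
    U8,
    I64,
    Error,
}
enum ToyProj {
    Deref,
    TupleField(usize),
}
fn toy_projected_ty(base: &ToyTy, proj: &ToyProj) -> ToyTy {
    match (proj, base) {
        // Deref peels one reference: &T -> T
        (ToyProj::Deref, ToyTy::Ref(inner)) => (**inner).clone(),
        // A field projection indexes into the tuple's element types
        (ToyProj::TupleField(i), ToyTy::Tuple(ts)) => {
            ts.get(*i).cloned().unwrap_or(ToyTy::Error)
        }
        // Anything else is a lowering bug; the real code calls `never!`
        _ => ToyTy::Error,
    }
}
fn main() {
    assert_eq!(toy_projected_ty(&ToyTy::Ref(Box::new(ToyTy::U8)), &ToyProj::Deref), ToyTy::U8);
    let pair = ToyTy::Tuple(vec![ToyTy::U8, ToyTy::I64]);
    assert_eq!(toy_projected_ty(&pair, &ToyProj::TupleField(1)), ToyTy::I64);
}
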
+
type PlaceElem = ProjectionElem<LocalId, Ty>;
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Place {
pub local: LocalId,
- pub projection: Vec<PlaceElem>,
+ pub projection: Box<[PlaceElem]>,
+}
+
+impl Place {
+ fn is_parent(&self, child: &Place) -> bool {
+ self.local == child.local && child.projection.starts_with(&self.projection)
+ }
+
+ fn iterate_over_parents(&self) -> impl Iterator<Item = Place> + '_ {
+ (0..self.projection.len())
+ .map(|x| &self.projection[0..x])
+ .map(|x| Place { local: self.local, projection: x.to_vec().into() })
+ }
+
+ fn project(&self, projection: PlaceElem) -> Place {
+ Place {
+ local: self.local,
+ projection: self.projection.iter().cloned().chain([projection]).collect(),
+ }
+ }
}
impl From<LocalId> for Place {
fn from(local: LocalId) -> Self {
- Self { local, projection: vec![] }
+ Self { local, projection: vec![].into() }
}
}
@@ -123,7 +246,7 @@ pub enum AggregateKind {
Tuple(Ty),
Adt(VariantId, Substitution),
Union(UnionId, FieldId),
- //Closure(LocalDefId, SubstsRef),
+ Closure(Ty),
//Generator(LocalDefId, SubstsRef, Movability),
}
@@ -197,7 +320,13 @@ impl SwitchTargets {
}
#[derive(Debug, PartialEq, Eq, Clone)]
-pub enum Terminator {
+pub struct Terminator {
+ span: MirSpan,
+ kind: TerminatorKind,
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum TerminatorKind {
/// Block has one successor; we continue execution there.
Goto { target: BasicBlockId },
@@ -320,7 +449,7 @@ pub enum Terminator {
/// These are owned by the callee, which is free to modify them.
/// This allows the memory occupied by "by-value" arguments to be
/// reused across function calls without duplicating the contents.
- args: Vec<Operand>,
+ args: Box<[Operand]>,
/// Where the returned value will be written
destination: Place,
/// Where to go after this call returns. If none, the call necessarily diverges.
@@ -418,7 +547,7 @@ pub enum Terminator {
},
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub enum BorrowKind {
/// Data must be immutable and is aliasable.
Shared,
@@ -564,6 +693,20 @@ pub enum BinOp {
Offset,
}
+impl BinOp {
+ fn run_compare<T: PartialEq + PartialOrd>(&self, l: T, r: T) -> bool {
+ match self {
+ BinOp::Ge => l >= r,
+ BinOp::Gt => l > r,
+ BinOp::Le => l <= r,
+ BinOp::Lt => l < r,
+ BinOp::Eq => l == r,
+ BinOp::Ne => l != r,
+ x => panic!("`run_compare` called on operator {x:?}"),
+ }
+ }
+}
+
impl Display for BinOp {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
@@ -588,32 +731,32 @@ impl Display for BinOp {
}
}
-impl From<hir_def::expr::ArithOp> for BinOp {
- fn from(value: hir_def::expr::ArithOp) -> Self {
+impl From<hir_def::hir::ArithOp> for BinOp {
+ fn from(value: hir_def::hir::ArithOp) -> Self {
match value {
- hir_def::expr::ArithOp::Add => BinOp::Add,
- hir_def::expr::ArithOp::Mul => BinOp::Mul,
- hir_def::expr::ArithOp::Sub => BinOp::Sub,
- hir_def::expr::ArithOp::Div => BinOp::Div,
- hir_def::expr::ArithOp::Rem => BinOp::Rem,
- hir_def::expr::ArithOp::Shl => BinOp::Shl,
- hir_def::expr::ArithOp::Shr => BinOp::Shr,
- hir_def::expr::ArithOp::BitXor => BinOp::BitXor,
- hir_def::expr::ArithOp::BitOr => BinOp::BitOr,
- hir_def::expr::ArithOp::BitAnd => BinOp::BitAnd,
+ hir_def::hir::ArithOp::Add => BinOp::Add,
+ hir_def::hir::ArithOp::Mul => BinOp::Mul,
+ hir_def::hir::ArithOp::Sub => BinOp::Sub,
+ hir_def::hir::ArithOp::Div => BinOp::Div,
+ hir_def::hir::ArithOp::Rem => BinOp::Rem,
+ hir_def::hir::ArithOp::Shl => BinOp::Shl,
+ hir_def::hir::ArithOp::Shr => BinOp::Shr,
+ hir_def::hir::ArithOp::BitXor => BinOp::BitXor,
+ hir_def::hir::ArithOp::BitOr => BinOp::BitOr,
+ hir_def::hir::ArithOp::BitAnd => BinOp::BitAnd,
}
}
}
-impl From<hir_def::expr::CmpOp> for BinOp {
- fn from(value: hir_def::expr::CmpOp) -> Self {
+impl From<hir_def::hir::CmpOp> for BinOp {
+ fn from(value: hir_def::hir::CmpOp) -> Self {
match value {
- hir_def::expr::CmpOp::Eq { negated: false } => BinOp::Eq,
- hir_def::expr::CmpOp::Eq { negated: true } => BinOp::Ne,
- hir_def::expr::CmpOp::Ord { ordering: Ordering::Greater, strict: false } => BinOp::Ge,
- hir_def::expr::CmpOp::Ord { ordering: Ordering::Greater, strict: true } => BinOp::Gt,
- hir_def::expr::CmpOp::Ord { ordering: Ordering::Less, strict: false } => BinOp::Le,
- hir_def::expr::CmpOp::Ord { ordering: Ordering::Less, strict: true } => BinOp::Lt,
+ hir_def::hir::CmpOp::Eq { negated: false } => BinOp::Eq,
+ hir_def::hir::CmpOp::Eq { negated: true } => BinOp::Ne,
+ hir_def::hir::CmpOp::Ord { ordering: Ordering::Greater, strict: false } => BinOp::Ge,
+ hir_def::hir::CmpOp::Ord { ordering: Ordering::Greater, strict: true } => BinOp::Gt,
+ hir_def::hir::CmpOp::Ord { ordering: Ordering::Less, strict: false } => BinOp::Le,
+ hir_def::hir::CmpOp::Ord { ordering: Ordering::Less, strict: true } => BinOp::Lt,
}
}
}
@@ -642,7 +785,6 @@ pub enum CastKind {
FloatToInt,
FloatToFloat,
IntToFloat,
- PtrToPtr,
FnPtrToPtr,
}
@@ -653,13 +795,8 @@ pub enum Rvalue {
/// Creates an array where each element is the value of the operand.
///
- /// This is the cause of a bug in the case where the repetition count is zero because the value
- /// is not dropped, see [#74836].
- ///
/// Corresponds to source code like `[x; 32]`.
- ///
- /// [#74836]: https://github.com/rust-lang/rust/issues/74836
- //Repeat(Operand, ty::Const),
+ Repeat(Operand, Const),
/// Creates a reference of the indicated kind to the place.
///
@@ -768,7 +905,7 @@ pub enum Rvalue {
///
/// Disallowed after deaggregation for all aggregate kinds except `Array` and `Generator`. After
/// generator lowering, `Generator` aggregate kinds are disallowed too.
- Aggregate(AggregateKind, Vec<Operand>),
+ Aggregate(AggregateKind, Box<[Operand]>),
/// Transmutes a `*mut u8` into shallow-initialized `Box<T>`.
///
@@ -777,6 +914,9 @@ pub enum Rvalue {
/// affects alias analysis.
ShallowInitBox(Operand, Ty),
+    /// NON STANDARD: allocates memory with the type's layout, and shallow-initializes the box with the resulting pointer.
+ ShallowInitBoxWithAlloc(Ty),
+
/// A CopyForDeref is equivalent to a read from a place at the
/// codegen level, but is treated specially by drop elaboration. When such a read happens, it
/// is guaranteed (via nature of the mir_opt `Derefer` in rustc_mir_transform/src/deref_separator)
@@ -816,7 +956,7 @@ pub struct Statement {
pub span: MirSpan,
}
-#[derive(Debug, Default, PartialEq, Eq)]
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct BasicBlock {
/// List of statements in this block.
pub statements: Vec<Statement>,
@@ -838,19 +978,118 @@ pub struct BasicBlock {
pub is_cleanup: bool,
}
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MirBody {
pub basic_blocks: Arena<BasicBlock>,
pub locals: Arena<Local>,
pub start_block: BasicBlockId,
pub owner: DefWithBodyId,
- pub arg_count: usize,
pub binding_locals: ArenaMap<BindingId, LocalId>,
pub param_locals: Vec<LocalId>,
+ /// This field stores the closures directly owned by this body. It is used
+    /// when traversing every MIR body.
+ pub closures: Vec<ClosureId>,
}
-fn const_as_usize(c: &Const) -> usize {
- try_const_usize(c).unwrap() as usize
+impl MirBody {
+ fn walk_places(&mut self, mut f: impl FnMut(&mut Place)) {
+ fn for_operand(op: &mut Operand, f: &mut impl FnMut(&mut Place)) {
+ match op {
+ Operand::Copy(p) | Operand::Move(p) => {
+ f(p);
+ }
+ Operand::Constant(_) | Operand::Static(_) => (),
+ }
+ }
+ for (_, block) in self.basic_blocks.iter_mut() {
+ for statement in &mut block.statements {
+ match &mut statement.kind {
+ StatementKind::Assign(p, r) => {
+ f(p);
+ match r {
+ Rvalue::ShallowInitBoxWithAlloc(_) => (),
+ Rvalue::ShallowInitBox(o, _)
+ | Rvalue::UnaryOp(_, o)
+ | Rvalue::Cast(_, o, _)
+ | Rvalue::Repeat(o, _)
+ | Rvalue::Use(o) => for_operand(o, &mut f),
+ Rvalue::CopyForDeref(p)
+ | Rvalue::Discriminant(p)
+ | Rvalue::Len(p)
+ | Rvalue::Ref(_, p) => f(p),
+ Rvalue::CheckedBinaryOp(_, o1, o2) => {
+ for_operand(o1, &mut f);
+ for_operand(o2, &mut f);
+ }
+ Rvalue::Aggregate(_, ops) => {
+ for op in ops.iter_mut() {
+ for_operand(op, &mut f);
+ }
+ }
+ }
+ }
+ StatementKind::Deinit(p) => f(p),
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Nop => (),
+ }
+ }
+ match &mut block.terminator {
+ Some(x) => match &mut x.kind {
+ TerminatorKind::SwitchInt { discr, .. } => for_operand(discr, &mut f),
+ TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable => (),
+ TerminatorKind::Drop { place, .. } => {
+ f(place);
+ }
+ TerminatorKind::DropAndReplace { place, value, .. } => {
+ f(place);
+ for_operand(value, &mut f);
+ }
+ TerminatorKind::Call { func, args, destination, .. } => {
+ for_operand(func, &mut f);
+ args.iter_mut().for_each(|x| for_operand(x, &mut f));
+ f(destination);
+ }
+ TerminatorKind::Assert { cond, .. } => {
+ for_operand(cond, &mut f);
+ }
+ TerminatorKind::Yield { value, resume_arg, .. } => {
+ for_operand(value, &mut f);
+ f(resume_arg);
+ }
+ },
+ None => (),
+ }
+ }
+ }
+
+ fn shrink_to_fit(&mut self) {
+ let MirBody {
+ basic_blocks,
+ locals,
+ start_block: _,
+ owner: _,
+ binding_locals,
+ param_locals,
+ closures,
+ } = self;
+ basic_blocks.shrink_to_fit();
+ locals.shrink_to_fit();
+ binding_locals.shrink_to_fit();
+ param_locals.shrink_to_fit();
+ closures.shrink_to_fit();
+ for (_, b) in basic_blocks.iter_mut() {
+ let BasicBlock { statements, terminator: _, is_cleanup: _ } = b;
+ statements.shrink_to_fit();
+ }
+ }
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs
index c8729af86..a5dd0182e 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs
@@ -3,17 +3,20 @@
// Currently it is an ad-hoc implementation, only useful for mutability analysis. Feel free to remove all of these
// if needed for implementing a proper borrow checker.
-use std::sync::Arc;
+use std::iter;
-use hir_def::DefWithBodyId;
+use hir_def::{DefWithBodyId, HasModule};
use la_arena::ArenaMap;
use stdx::never;
+use triomphe::Arc;
-use crate::db::HirDatabase;
+use crate::{
+ db::HirDatabase, mir::Operand, utils::ClosureSubst, ClosureId, Interner, Ty, TyExt, TypeFlags,
+};
use super::{
BasicBlockId, BorrowKind, LocalId, MirBody, MirLowerError, MirSpan, Place, ProjectionElem,
- Rvalue, StatementKind, Terminator,
+ Rvalue, StatementKind, TerminatorKind,
};
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -24,25 +27,166 @@ pub enum MutabilityReason {
}
#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct MovedOutOfRef {
+ pub ty: Ty,
+ pub span: MirSpan,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BorrowckResult {
pub mir_body: Arc<MirBody>,
pub mutability_of_locals: ArenaMap<LocalId, MutabilityReason>,
+ pub moved_out_of_ref: Vec<MovedOutOfRef>,
+}
+
+fn all_mir_bodies(
+ db: &dyn HirDatabase,
+ def: DefWithBodyId,
+) -> Box<dyn Iterator<Item = Result<Arc<MirBody>, MirLowerError>> + '_> {
+ fn for_closure(
+ db: &dyn HirDatabase,
+ c: ClosureId,
+ ) -> Box<dyn Iterator<Item = Result<Arc<MirBody>, MirLowerError>> + '_> {
+ match db.mir_body_for_closure(c) {
+ Ok(body) => {
+ let closures = body.closures.clone();
+ Box::new(
+ iter::once(Ok(body))
+ .chain(closures.into_iter().flat_map(|x| for_closure(db, x))),
+ )
+ }
+ Err(e) => Box::new(iter::once(Err(e))),
+ }
+ }
+ match db.mir_body(def) {
+ Ok(body) => {
+ let closures = body.closures.clone();
+ Box::new(
+ iter::once(Ok(body)).chain(closures.into_iter().flat_map(|x| for_closure(db, x))),
+ )
+ }
+ Err(e) => Box::new(iter::once(Err(e))),
+ }
}
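
// Illustrative sketch, not part of this change: `all_mir_bodies` flattens a
// body and its transitively owned closures into a single iterator; a
// standalone analogue over a toy tree of bodies.
struct ToyBody {
    closures: Vec<ToyBody>,
}
fn all_bodies(b: &ToyBody) -> Box<dyn Iterator<Item = &ToyBody> + '_> {
    Box::new(std::iter::once(b).chain(b.closures.iter().flat_map(all_bodies)))
}
fn main() {
    let root =
        ToyBody { closures: vec![ToyBody { closures: vec![] }, ToyBody { closures: vec![] }] };
    assert_eq!(all_bodies(&root).count(), 3); // the root body plus two closures
}
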
pub fn borrowck_query(
db: &dyn HirDatabase,
def: DefWithBodyId,
-) -> Result<Arc<BorrowckResult>, MirLowerError> {
+) -> Result<Arc<[BorrowckResult]>, MirLowerError> {
let _p = profile::span("borrowck_query");
- let body = db.mir_body(def)?;
- let r = BorrowckResult { mutability_of_locals: mutability_of_locals(&body), mir_body: body };
- Ok(Arc::new(r))
+ let r = all_mir_bodies(db, def)
+ .map(|body| {
+ let body = body?;
+ Ok(BorrowckResult {
+ mutability_of_locals: mutability_of_locals(db, &body),
+ moved_out_of_ref: moved_out_of_ref(db, &body),
+ mir_body: body,
+ })
+ })
+ .collect::<Result<Vec<_>, MirLowerError>>()?;
+ Ok(r.into())
}
-fn is_place_direct(lvalue: &Place) -> bool {
- !lvalue.projection.iter().any(|x| *x == ProjectionElem::Deref)
+fn moved_out_of_ref(db: &dyn HirDatabase, body: &MirBody) -> Vec<MovedOutOfRef> {
+ let mut result = vec![];
+ let mut for_operand = |op: &Operand, span: MirSpan| match op {
+ Operand::Copy(p) | Operand::Move(p) => {
+ let mut ty: Ty = body.locals[p.local].ty.clone();
+ let mut is_dereference_of_ref = false;
+ for proj in &*p.projection {
+ if *proj == ProjectionElem::Deref && ty.as_reference().is_some() {
+ is_dereference_of_ref = true;
+ }
+ ty = proj.projected_ty(
+ ty,
+ db,
+ |c, subst, f| {
+ let (def, _) = db.lookup_intern_closure(c.into());
+ let infer = db.infer(def);
+ let (captures, _) = infer.closure_info(&c);
+ let parent_subst = ClosureSubst(subst).parent_subst();
+ captures
+ .get(f)
+ .expect("broken closure field")
+ .ty
+ .clone()
+ .substitute(Interner, parent_subst)
+ },
+ body.owner.module(db.upcast()).krate(),
+ );
+ }
+ if is_dereference_of_ref
+ && !ty.clone().is_copy(db, body.owner)
+ && !ty.data(Interner).flags.intersects(TypeFlags::HAS_ERROR)
+ {
+ result.push(MovedOutOfRef { span, ty });
+ }
+ }
+ Operand::Constant(_) | Operand::Static(_) => (),
+ };
+ for (_, block) in body.basic_blocks.iter() {
+ for statement in &block.statements {
+ match &statement.kind {
+ StatementKind::Assign(_, r) => match r {
+ Rvalue::ShallowInitBoxWithAlloc(_) => (),
+ Rvalue::ShallowInitBox(o, _)
+ | Rvalue::UnaryOp(_, o)
+ | Rvalue::Cast(_, o, _)
+ | Rvalue::Repeat(o, _)
+ | Rvalue::Use(o) => for_operand(o, statement.span),
+ Rvalue::CopyForDeref(_)
+ | Rvalue::Discriminant(_)
+ | Rvalue::Len(_)
+ | Rvalue::Ref(_, _) => (),
+ Rvalue::CheckedBinaryOp(_, o1, o2) => {
+ for_operand(o1, statement.span);
+ for_operand(o2, statement.span);
+ }
+ Rvalue::Aggregate(_, ops) => {
+ for op in ops.iter() {
+ for_operand(op, statement.span);
+ }
+ }
+ },
+ StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Nop => (),
+ }
+ }
+ match &block.terminator {
+ Some(terminator) => match &terminator.kind {
+ TerminatorKind::SwitchInt { discr, .. } => for_operand(discr, terminator.span),
+ TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. } => (),
+ TerminatorKind::DropAndReplace { value, .. } => {
+ for_operand(value, terminator.span);
+ }
+ TerminatorKind::Call { func, args, .. } => {
+ for_operand(func, terminator.span);
+ args.iter().for_each(|x| for_operand(x, terminator.span));
+ }
+ TerminatorKind::Assert { cond, .. } => {
+ for_operand(cond, terminator.span);
+ }
+ TerminatorKind::Yield { value, .. } => {
+ for_operand(value, terminator.span);
+ }
+ },
+ None => (),
+ }
+ }
+ result
}
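
// Illustrative sketch, not part of this change: the pattern this pass
// records. Moving a non-Copy value out from behind a reference is rejected by
// rustc, so the snippet below deliberately does not compile:
//
//     fn take(s: String) {}
//     fn bad(r: &String) {
//         take(*r); // `moved_out_of_ref` records a `MovedOutOfRef` for `String` here
//     }
//
// Copy types and types containing errors are skipped by the checks above.
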
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ProjectionCase {
/// Projection is a local
Direct,
@@ -52,20 +196,39 @@ enum ProjectionCase {
Indirect,
}
-fn place_case(lvalue: &Place) -> ProjectionCase {
+fn place_case(db: &dyn HirDatabase, body: &MirBody, lvalue: &Place) -> ProjectionCase {
let mut is_part_of = false;
- for proj in lvalue.projection.iter().rev() {
+ let mut ty = body.locals[lvalue.local].ty.clone();
+ for proj in lvalue.projection.iter() {
match proj {
- ProjectionElem::Deref => return ProjectionCase::Indirect, // It's indirect
- ProjectionElem::ConstantIndex { .. }
+            ProjectionElem::Deref if ty.as_adt().is_none() => return ProjectionCase::Indirect, // It's indirect in the case of references and raw pointers
+            ProjectionElem::Deref // It's direct in the case of `Box<T>`
+ | ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Field(_)
- | ProjectionElem::TupleField(_)
+ | ProjectionElem::TupleOrClosureField(_)
| ProjectionElem::Index(_) => {
is_part_of = true;
}
ProjectionElem::OpaqueCast(_) => (),
}
+ ty = proj.projected_ty(
+ ty,
+ db,
+ |c, subst, f| {
+ let (def, _) = db.lookup_intern_closure(c.into());
+ let infer = db.infer(def);
+ let (captures, _) = infer.closure_info(&c);
+ let parent_subst = ClosureSubst(subst).parent_subst();
+ captures
+ .get(f)
+ .expect("broken closure field")
+ .ty
+ .clone()
+ .substitute(Interner, parent_subst)
+ },
+ body.owner.module(db.upcast()).krate(),
+ );
}
if is_part_of {
ProjectionCase::DirectPart
@@ -76,11 +239,15 @@ fn place_case(lvalue: &Place) -> ProjectionCase {
/// Returns a map from basic blocks to the set of locals that might be ever initialized before
/// the start of the block. Only `StorageDead` can remove something from this map, and we ignore
-/// `Uninit` and `drop` and similars after initialization.
-fn ever_initialized_map(body: &MirBody) -> ArenaMap<BasicBlockId, ArenaMap<LocalId, bool>> {
+/// `Uninit`, `drop`, and similar operations after initialization.
+fn ever_initialized_map(
+ db: &dyn HirDatabase,
+ body: &MirBody,
+) -> ArenaMap<BasicBlockId, ArenaMap<LocalId, bool>> {
let mut result: ArenaMap<BasicBlockId, ArenaMap<LocalId, bool>> =
body.basic_blocks.iter().map(|x| (x.0, ArenaMap::default())).collect();
fn dfs(
+ db: &dyn HirDatabase,
body: &MirBody,
b: BasicBlockId,
l: LocalId,
@@ -104,29 +271,31 @@ fn ever_initialized_map(body: &MirBody) -> ArenaMap<BasicBlockId, ArenaMap<Local
}
}
let Some(terminator) = &block.terminator else {
- never!("Terminator should be none only in construction");
+ never!("Terminator should be none only in construction.\nThe body:\n{}", body.pretty_print(db));
return;
};
- let targets = match terminator {
- Terminator::Goto { target } => vec![*target],
- Terminator::SwitchInt { targets, .. } => targets.all_targets().to_vec(),
- Terminator::Resume
- | Terminator::Abort
- | Terminator::Return
- | Terminator::Unreachable => vec![],
- Terminator::Call { target, cleanup, destination, .. } => {
+ let targets = match &terminator.kind {
+ TerminatorKind::Goto { target } => vec![*target],
+ TerminatorKind::SwitchInt { targets, .. } => targets.all_targets().to_vec(),
+ TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable => vec![],
+ TerminatorKind::Call { target, cleanup, destination, .. } => {
if destination.projection.len() == 0 && destination.local == l {
is_ever_initialized = true;
}
target.into_iter().chain(cleanup.into_iter()).copied().collect()
}
- Terminator::Drop { .. }
- | Terminator::DropAndReplace { .. }
- | Terminator::Assert { .. }
- | Terminator::Yield { .. }
- | Terminator::GeneratorDrop
- | Terminator::FalseEdge { .. }
- | Terminator::FalseUnwind { .. } => {
+ TerminatorKind::Drop { target, unwind, place: _ } => {
+ Some(target).into_iter().chain(unwind.into_iter()).copied().collect()
+ }
+ TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {
never!("We don't emit these MIR terminators yet");
vec![]
}
@@ -134,37 +303,40 @@ fn ever_initialized_map(body: &MirBody) -> ArenaMap<BasicBlockId, ArenaMap<Local
for target in targets {
if !result[target].contains_idx(l) || !result[target][l] && is_ever_initialized {
result[target].insert(l, is_ever_initialized);
- dfs(body, target, l, result);
+ dfs(db, body, target, l, result);
}
}
}
for &l in &body.param_locals {
result[body.start_block].insert(l, true);
- dfs(body, body.start_block, l, &mut result);
+ dfs(db, body, body.start_block, l, &mut result);
}
for l in body.locals.iter().map(|x| x.0) {
if !result[body.start_block].contains_idx(l) {
result[body.start_block].insert(l, false);
- dfs(body, body.start_block, l, &mut result);
+ dfs(db, body, body.start_block, l, &mut result);
}
}
result
}
-fn mutability_of_locals(body: &MirBody) -> ArenaMap<LocalId, MutabilityReason> {
+fn mutability_of_locals(
+ db: &dyn HirDatabase,
+ body: &MirBody,
+) -> ArenaMap<LocalId, MutabilityReason> {
let mut result: ArenaMap<LocalId, MutabilityReason> =
body.locals.iter().map(|x| (x.0, MutabilityReason::Not)).collect();
let mut push_mut_span = |local, span| match &mut result[local] {
MutabilityReason::Mut { spans } => spans.push(span),
x @ MutabilityReason::Not => *x = MutabilityReason::Mut { spans: vec![span] },
};
- let ever_init_maps = ever_initialized_map(body);
+ let ever_init_maps = ever_initialized_map(db, body);
for (block_id, mut ever_init_map) in ever_init_maps.into_iter() {
let block = &body.basic_blocks[block_id];
for statement in &block.statements {
match &statement.kind {
StatementKind::Assign(place, value) => {
- match place_case(place) {
+ match place_case(db, body, place) {
ProjectionCase::Direct => {
if ever_init_map.get(place.local).copied().unwrap_or_default() {
push_mut_span(place.local, statement.span);
@@ -179,7 +351,7 @@ fn mutability_of_locals(body: &MirBody) -> ArenaMap<LocalId, MutabilityReason> {
ProjectionCase::Indirect => (),
}
if let Rvalue::Ref(BorrowKind::Mut { .. }, p) = value {
- if is_place_direct(p) {
+ if place_case(db, body, p) != ProjectionCase::Indirect {
push_mut_span(p.local, statement.span);
}
}
@@ -194,21 +366,21 @@ fn mutability_of_locals(body: &MirBody) -> ArenaMap<LocalId, MutabilityReason> {
never!("Terminator should be none only in construction");
continue;
};
- match terminator {
- Terminator::Goto { .. }
- | Terminator::Resume
- | Terminator::Abort
- | Terminator::Return
- | Terminator::Unreachable
- | Terminator::FalseEdge { .. }
- | Terminator::FalseUnwind { .. }
- | Terminator::GeneratorDrop
- | Terminator::SwitchInt { .. }
- | Terminator::Drop { .. }
- | Terminator::DropAndReplace { .. }
- | Terminator::Assert { .. }
- | Terminator::Yield { .. } => (),
- Terminator::Call { destination, .. } => {
+ match &terminator.kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Yield { .. } => (),
+ TerminatorKind::Call { destination, .. } => {
if destination.projection.len() == 0 {
if ever_init_map.get(destination.local).copied().unwrap_or_default() {
push_mut_span(destination.local, MirSpan::Unknown);
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs
index c5d843d9e..9acf9d39e 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs
@@ -1,41 +1,134 @@
//! This module provides a MIR interpreter, which is used in const eval.
-use std::{borrow::Cow, collections::HashMap, iter};
+use std::{borrow::Cow, collections::HashMap, fmt::Write, iter, ops::Range};
-use base_db::CrateId;
-use chalk_ir::{
- fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable},
- DebruijnIndex, TyKind,
-};
+use base_db::{CrateId, FileId};
+use chalk_ir::Mutability;
+use either::Either;
use hir_def::{
builtin_type::BuiltinType,
+ data::adt::{StructFlags, VariantData},
lang_item::{lang_attr, LangItem},
- layout::{Layout, LayoutError, RustcEnumVariantIdx, TagEncoding, Variants},
- AdtId, DefWithBodyId, EnumVariantId, FunctionId, HasModule, Lookup, VariantId,
+ layout::{TagEncoding, Variants},
+ AdtId, DefWithBodyId, EnumVariantId, FunctionId, HasModule, ItemContainerId, Lookup, StaticId,
+ VariantId,
};
+use hir_expand::InFile;
use intern::Interned;
use la_arena::ArenaMap;
+use rustc_hash::{FxHashMap, FxHashSet};
+use stdx::never;
+use syntax::{SyntaxNodePtr, TextRange};
+use triomphe::Arc;
use crate::{
- consteval::{intern_const_scalar, ConstEvalError},
+ consteval::{intern_const_scalar, try_const_usize, ConstEvalError},
db::HirDatabase,
- from_placeholder_idx,
- infer::{normalize, PointerCast},
- layout::layout_of_ty,
+ display::{ClosureStyle, HirDisplay},
+ infer::PointerCast,
+ layout::{Layout, LayoutError, RustcEnumVariantIdx},
mapping::from_chalk,
- method_resolution::lookup_impl_method,
- CallableDefId, Const, ConstScalar, Interner, MemoryMap, Substitution, Ty, TyBuilder, TyExt,
+ method_resolution::{is_dyn_method, lookup_impl_method},
+ name, static_lifetime,
+ traits::FnTrait,
+ utils::{detect_variant_from_bytes, ClosureSubst},
+ CallableDefId, ClosureId, Const, ConstScalar, FnDefId, GenericArgData, Interner, MemoryMap,
+ Substitution, TraitEnvironment, Ty, TyBuilder, TyExt, TyKind,
};
use super::{
- const_as_usize, return_slot, AggregateKind, BinOp, CastKind, LocalId, MirBody, MirLowerError,
- Operand, Place, ProjectionElem, Rvalue, StatementKind, Terminator, UnOp,
+ return_slot, AggregateKind, BinOp, CastKind, LocalId, MirBody, MirLowerError, MirSpan, Operand,
+ Place, ProjectionElem, Rvalue, StatementKind, TerminatorKind, UnOp,
};
+mod shim;
+#[cfg(test)]
+mod tests;
+
+macro_rules! from_bytes {
+ ($ty:tt, $value:expr) => {
+ ($ty::from_le_bytes(match ($value).try_into() {
+ Ok(x) => x,
+ Err(_) => return Err(MirEvalError::TypeError(stringify!(mismatched size in constructing $ty))),
+ }))
+ };
+}
+
+macro_rules! not_supported {
+ ($x: expr) => {
+ return Err(MirEvalError::NotSupported(format!($x)))
+ };
+}
+
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
+pub struct VTableMap {
+ ty_to_id: FxHashMap<Ty, usize>,
+ id_to_ty: Vec<Ty>,
+}
+
+impl VTableMap {
+ fn id(&mut self, ty: Ty) -> usize {
+ if let Some(x) = self.ty_to_id.get(&ty) {
+ return *x;
+ }
+ let id = self.id_to_ty.len();
+ self.id_to_ty.push(ty.clone());
+ self.ty_to_id.insert(ty, id);
+ id
+ }
+
+ pub(crate) fn ty(&self, id: usize) -> Result<&Ty> {
+ self.id_to_ty.get(id).ok_or(MirEvalError::InvalidVTableId(id))
+ }
+
+ fn ty_of_bytes(&self, bytes: &[u8]) -> Result<&Ty> {
+ let id = from_bytes!(usize, bytes);
+ self.ty(id)
+ }
+}
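
// Illustrative sketch, not part of this change: `VTableMap` is a small
// interner; `id` hands out one stable id per distinct type, and `ty` recovers
// the type from the id. A standalone analogue with strings in place of `Ty`:
use std::collections::HashMap;

struct ToyVTableMap {
    to_id: HashMap<String, usize>,
    from_id: Vec<String>,
}
impl ToyVTableMap {
    fn id(&mut self, ty: String) -> usize {
        if let Some(&i) = self.to_id.get(&ty) {
            return i; // already interned: same type, same id
        }
        let i = self.from_id.len();
        self.from_id.push(ty.clone());
        self.to_id.insert(ty, i);
        i
    }
}
fn main() {
    let mut v = ToyVTableMap { to_id: HashMap::new(), from_id: Vec::new() };
    let a = v.id("dyn Fn(u8)".to_string());
    assert_eq!(a, v.id("dyn Fn(u8)".to_string()));
    assert_eq!(v.from_id[a], "dyn Fn(u8)");
}
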
+
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
+struct TlsData {
+ keys: Vec<u128>,
+}
+
+impl TlsData {
+ fn create_key(&mut self) -> usize {
+ self.keys.push(0);
+ self.keys.len() - 1
+ }
+
+ fn get_key(&mut self, key: usize) -> Result<u128> {
+ let r = self.keys.get(key).ok_or_else(|| {
+ MirEvalError::UndefinedBehavior(format!("Getting invalid tls key {key}"))
+ })?;
+ Ok(*r)
+ }
+
+ fn set_key(&mut self, key: usize, value: u128) -> Result<()> {
+ let r = self.keys.get_mut(key).ok_or_else(|| {
+ MirEvalError::UndefinedBehavior(format!("Setting invalid tls key {key}"))
+ })?;
+ *r = value;
+ Ok(())
+ }
+}
+
pub struct Evaluator<'a> {
db: &'a dyn HirDatabase,
+ trait_env: Arc<TraitEnvironment>,
stack: Vec<u8>,
heap: Vec<u8>,
+    /// Stores the global locations of statics. We const-evaluate each static the first time we
+    /// need it and find it missing, then record its location here for reuse.
+ static_locations: FxHashMap<StaticId, Address>,
+    /// We don't really have function pointers, i.e. pointers to some assembly instructions that we can run. Instead, we
+    /// store the type as an interned id in place of function and vtable pointers, and we recover
+    /// the type at the time of use.
+ vtable_map: VTableMap,
+ thread_local_storage: TlsData,
+ stdout: Vec<u8>,
+ stderr: Vec<u8>,
crate_id: CrateId,
// FIXME: This is a workaround, see the comment on `interpret_mir`
assert_placeholder_ty_is_unused: bool,
@@ -45,19 +138,27 @@ pub struct Evaluator<'a> {
stack_depth_limit: usize,
}
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum Address {
Stack(usize),
Heap(usize),
+ Invalid(usize),
}
use Address::*;
+#[derive(Debug, Clone, Copy)]
struct Interval {
addr: Address,
size: usize,
}
+#[derive(Debug, Clone)]
+struct IntervalAndTy {
+ interval: Interval,
+ ty: Ty,
+}
+
impl Interval {
fn new(addr: Address, size: usize) -> Self {
Self { addr, size }
@@ -66,12 +167,49 @@ impl Interval {
fn get<'a>(&self, memory: &'a Evaluator<'a>) -> Result<&'a [u8]> {
memory.read_memory(self.addr, self.size)
}
+
+ fn write_from_bytes(&self, memory: &mut Evaluator<'_>, bytes: &[u8]) -> Result<()> {
+ memory.write_memory(self.addr, bytes)
+ }
+
+ fn write_from_interval(&self, memory: &mut Evaluator<'_>, interval: Interval) -> Result<()> {
+ // FIXME: this could be more efficient
+ let bytes = &interval.get(memory)?.to_vec();
+ memory.write_memory(self.addr, bytes)
+ }
+
+ fn slice(self, range: Range<usize>) -> Interval {
+ Interval { addr: self.addr.offset(range.start), size: range.len() }
+ }
+}
+
+impl IntervalAndTy {
+ fn get<'a>(&self, memory: &'a Evaluator<'a>) -> Result<&'a [u8]> {
+ memory.read_memory(self.interval.addr, self.interval.size)
+ }
+
+ fn new(
+ addr: Address,
+ ty: Ty,
+ evaluator: &Evaluator<'_>,
+ locals: &Locals<'_>,
+ ) -> Result<IntervalAndTy> {
+ let size = evaluator.size_of_sized(&ty, locals, "type of interval")?;
+ Ok(IntervalAndTy { interval: Interval { addr, size }, ty })
+ }
}
enum IntervalOrOwned {
Owned(Vec<u8>),
Borrowed(Interval),
}
+
+impl From<Interval> for IntervalOrOwned {
+ fn from(it: Interval) -> IntervalOrOwned {
+ IntervalOrOwned::Borrowed(it)
+ }
+}
+
impl IntervalOrOwned {
pub(crate) fn to_vec(self, memory: &Evaluator<'_>) -> Result<Vec<u8>> {
Ok(match self {
@@ -79,15 +217,13 @@ impl IntervalOrOwned {
IntervalOrOwned::Borrowed(b) => b.get(memory)?.to_vec(),
})
}
-}
-macro_rules! from_bytes {
- ($ty:tt, $value:expr) => {
- ($ty::from_le_bytes(match ($value).try_into() {
- Ok(x) => x,
- Err(_) => return Err(MirEvalError::TypeError("mismatched size")),
- }))
- };
+ fn get<'a>(&'a self, memory: &'a Evaluator<'a>) -> Result<&'a [u8]> {
+ Ok(match self {
+ IntervalOrOwned::Owned(o) => o,
+ IntervalOrOwned::Borrowed(b) => b.get(memory)?,
+ })
+ }
}
impl Address {
@@ -97,9 +233,11 @@ impl Address {
fn from_usize(x: usize) -> Self {
if x > usize::MAX / 2 {
- Stack(usize::MAX - x)
+ Stack(x - usize::MAX / 2)
+ } else if x > usize::MAX / 4 {
+ Heap(x - usize::MAX / 4)
} else {
- Heap(x)
+ Invalid(x)
}
}
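
// Illustrative sketch, not part of this change: a standalone analogue of the
// tagged flat-address encoding above. Stack offsets are stored above
// usize::MAX / 2 and heap offsets between usize::MAX / 4 and usize::MAX / 2,
// so `to_usize`/`from_usize` round-trip the tag for in-range offsets.
#[derive(Debug, PartialEq)]
enum ToyAddr {
    Stack(usize),
    Heap(usize),
    Invalid(usize),
}
fn toy_from_usize(x: usize) -> ToyAddr {
    if x > usize::MAX / 2 {
        ToyAddr::Stack(x - usize::MAX / 2)
    } else if x > usize::MAX / 4 {
        ToyAddr::Heap(x - usize::MAX / 4)
    } else {
        ToyAddr::Invalid(x)
    }
}
fn toy_to_usize(a: &ToyAddr) -> usize {
    match a {
        ToyAddr::Stack(x) => x + usize::MAX / 2,
        ToyAddr::Heap(x) => x + usize::MAX / 4,
        ToyAddr::Invalid(x) => *x,
    }
}
fn main() {
    for a in [ToyAddr::Stack(8), ToyAddr::Heap(8), ToyAddr::Invalid(8)] {
        assert_eq!(toy_from_usize(toy_to_usize(&a)), a);
    }
}
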
@@ -109,8 +247,9 @@ impl Address {
fn to_usize(&self) -> usize {
let as_num = match self {
- Stack(x) => usize::MAX - *x,
- Heap(x) => *x,
+ Stack(x) => *x + usize::MAX / 2,
+ Heap(x) => *x + usize::MAX / 4,
+ Invalid(x) => *x,
};
as_num
}
@@ -119,6 +258,7 @@ impl Address {
match self {
Stack(x) => Stack(f(*x)),
Heap(x) => Heap(f(*x)),
+ Invalid(x) => Invalid(f(*x)),
}
}
@@ -129,28 +269,123 @@ impl Address {
#[derive(Clone, PartialEq, Eq)]
pub enum MirEvalError {
- ConstEvalError(Box<ConstEvalError>),
+ ConstEvalError(String, Box<ConstEvalError>),
LayoutError(LayoutError, Ty),
    /// Means that code had type errors (or mismatched args) and we shouldn't generate MIR in the first place.
TypeError(&'static str),
    /// Means that code had undefined behavior. We don't try to actively detect UB, but when it
    /// is detected, this type of error is used.
- UndefinedBehavior(&'static str),
- Panic,
+ UndefinedBehavior(String),
+ Panic(String),
MirLowerError(FunctionId, MirLowerError),
+ MirLowerErrorForClosure(ClosureId, MirLowerError),
TypeIsUnsized(Ty, &'static str),
NotSupported(String),
InvalidConst(Const),
- InFunction(FunctionId, Box<MirEvalError>),
+ InFunction(Either<FunctionId, ClosureId>, Box<MirEvalError>, MirSpan, DefWithBodyId),
ExecutionLimitExceeded,
StackOverflow,
TargetDataLayoutNotAvailable,
+ InvalidVTableId(usize),
+ CoerceUnsizedError(Ty),
+ LangItemNotFound(LangItem),
+}
+
+impl MirEvalError {
+ pub fn pretty_print(
+ &self,
+ f: &mut String,
+ db: &dyn HirDatabase,
+ span_formatter: impl Fn(FileId, TextRange) -> String,
+ ) -> std::result::Result<(), std::fmt::Error> {
+ writeln!(f, "Mir eval error:")?;
+ let mut err = self;
+ while let MirEvalError::InFunction(func, e, span, def) = err {
+ err = e;
+ match func {
+ Either::Left(func) => {
+ let function_name = db.function_data(*func);
+ writeln!(
+ f,
+ "In function {} ({:?})",
+ function_name.name.display(db.upcast()),
+ func
+ )?;
+ }
+ Either::Right(clos) => {
+ writeln!(f, "In {:?}", clos)?;
+ }
+ }
+ let source_map = db.body_with_source_map(*def).1;
+ let span: InFile<SyntaxNodePtr> = match span {
+ MirSpan::ExprId(e) => match source_map.expr_syntax(*e) {
+ Ok(s) => s.map(|x| x.into()),
+ Err(_) => continue,
+ },
+ MirSpan::PatId(p) => match source_map.pat_syntax(*p) {
+ Ok(s) => s.map(|x| match x {
+ Either::Left(e) => e.into(),
+ Either::Right(e) => e.into(),
+ }),
+ Err(_) => continue,
+ },
+ MirSpan::Unknown => continue,
+ };
+ let file_id = span.file_id.original_file(db.upcast());
+ let text_range = span.value.text_range();
+ writeln!(f, "{}", span_formatter(file_id, text_range))?;
+ }
+ match err {
+ MirEvalError::InFunction(..) => unreachable!(),
+ MirEvalError::LayoutError(err, ty) => {
+ write!(
+ f,
+                    "Layout for type `{}` is not available due to {err:?}",
+ ty.display(db).with_closure_style(ClosureStyle::ClosureWithId).to_string()
+ )?;
+ }
+ MirEvalError::MirLowerError(func, err) => {
+ let function_name = db.function_data(*func);
+ writeln!(
+ f,
+                    "MIR lowering for function `{}` ({:?}) failed due to:",
+ function_name.name.display(db.upcast()),
+ func
+ )?;
+ err.pretty_print(f, db, span_formatter)?;
+ }
+ MirEvalError::ConstEvalError(name, err) => {
+ MirLowerError::ConstEvalError(name.clone(), err.clone()).pretty_print(
+ f,
+ db,
+ span_formatter,
+ )?;
+ }
+ MirEvalError::TypeError(_)
+ | MirEvalError::UndefinedBehavior(_)
+ | MirEvalError::Panic(_)
+ | MirEvalError::MirLowerErrorForClosure(_, _)
+ | MirEvalError::TypeIsUnsized(_, _)
+ | MirEvalError::NotSupported(_)
+ | MirEvalError::InvalidConst(_)
+ | MirEvalError::ExecutionLimitExceeded
+ | MirEvalError::StackOverflow
+ | MirEvalError::TargetDataLayoutNotAvailable
+ | MirEvalError::CoerceUnsizedError(_)
+ | MirEvalError::LangItemNotFound(_)
+ | MirEvalError::InvalidVTableId(_) => writeln!(f, "{:?}", err)?,
+ }
+ Ok(())
+ }
}
impl std::fmt::Debug for MirEvalError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
- Self::ConstEvalError(arg0) => f.debug_tuple("ConstEvalError").field(arg0).finish(),
+ Self::ConstEvalError(arg0, arg1) => {
+ f.debug_tuple("ConstEvalError").field(arg0).field(arg1).finish()
+ }
+ Self::LangItemNotFound(arg0) => f.debug_tuple("LangItemNotFound").field(arg0).finish(),
Self::LayoutError(arg0, arg1) => {
f.debug_tuple("LayoutError").field(arg0).field(arg1).finish()
}
@@ -158,7 +393,7 @@ impl std::fmt::Debug for MirEvalError {
Self::UndefinedBehavior(arg0) => {
f.debug_tuple("UndefinedBehavior").field(arg0).finish()
}
- Self::Panic => write!(f, "Panic"),
+ Self::Panic(msg) => write!(f, "Panic with message:\n{msg:?}"),
Self::TargetDataLayoutNotAvailable => write!(f, "TargetDataLayoutNotAvailable"),
Self::TypeIsUnsized(ty, it) => write!(f, "{ty:?} is unsized. {it} should be sized."),
Self::ExecutionLimitExceeded => write!(f, "execution limit exceeded"),
@@ -166,17 +401,24 @@ impl std::fmt::Debug for MirEvalError {
Self::MirLowerError(arg0, arg1) => {
f.debug_tuple("MirLowerError").field(arg0).field(arg1).finish()
}
+ Self::MirLowerErrorForClosure(arg0, arg1) => {
+ f.debug_tuple("MirLowerError").field(arg0).field(arg1).finish()
+ }
+ Self::CoerceUnsizedError(arg0) => {
+ f.debug_tuple("CoerceUnsizedError").field(arg0).finish()
+ }
+ Self::InvalidVTableId(arg0) => f.debug_tuple("InvalidVTableId").field(arg0).finish(),
Self::NotSupported(arg0) => f.debug_tuple("NotSupported").field(arg0).finish(),
Self::InvalidConst(arg0) => {
let data = &arg0.data(Interner);
f.debug_struct("InvalidConst").field("ty", &data.ty).field("value", &arg0).finish()
}
- Self::InFunction(func, e) => {
+ Self::InFunction(func, e, span, _) => {
let mut e = &**e;
- let mut stack = vec![*func];
- while let Self::InFunction(f, next_e) = e {
+ let mut stack = vec![(*func, *span)];
+ while let Self::InFunction(f, next_e, span, _) = e {
e = &next_e;
- stack.push(*f);
+ stack.push((*f, *span));
}
f.debug_struct("WithStack").field("error", e).field("stack", &stack).finish()
}
@@ -184,26 +426,33 @@ impl std::fmt::Debug for MirEvalError {
}
}
-macro_rules! not_supported {
- ($x: expr) => {
- return Err(MirEvalError::NotSupported(format!($x)))
- };
+type Result<T> = std::result::Result<T, MirEvalError>;
+
+#[derive(Debug, Default)]
+struct DropFlags {
+ need_drop: FxHashSet<Place>,
}
-impl From<ConstEvalError> for MirEvalError {
- fn from(value: ConstEvalError) -> Self {
- match value {
- _ => MirEvalError::ConstEvalError(Box::new(value)),
+impl DropFlags {
+ fn add_place(&mut self, p: Place) {
+ if p.iterate_over_parents().any(|x| self.need_drop.contains(&x)) {
+ return;
}
+ self.need_drop.retain(|x| !p.is_parent(x));
+ self.need_drop.insert(p);
}
-}
-type Result<T> = std::result::Result<T, MirEvalError>;
+ fn remove_place(&mut self, p: &Place) -> bool {
+ // FIXME: replace parents with parts
+ self.need_drop.remove(p)
+ }
+}
+#[derive(Debug)]
struct Locals<'a> {
- ptr: &'a ArenaMap<LocalId, Address>,
+ ptr: &'a ArenaMap<LocalId, Interval>,
body: &'a MirBody,
- subst: &'a Substitution,
+ drop_flags: DropFlags,
}
pub fn interpret_mir(
@@ -215,38 +464,65 @@ pub fn interpret_mir(
// a zero size, hoping that they are all outside of our current body. Even without a fix for #7434, we can
// (and probably should) do better here, for example by excluding bindings outside of the target expression.
assert_placeholder_ty_is_unused: bool,
-) -> Result<Const> {
+) -> (Result<Const>, String, String) {
let ty = body.locals[return_slot()].ty.clone();
- let mut evaluator =
- Evaluator::new(db, body.owner.module(db.upcast()).krate(), assert_placeholder_ty_is_unused);
- let bytes = evaluator.interpret_mir_with_no_arg(&body)?;
- let memory_map = evaluator.create_memory_map(
- &bytes,
- &ty,
- &Locals { ptr: &ArenaMap::new(), body: &body, subst: &Substitution::empty(Interner) },
- )?;
- return Ok(intern_const_scalar(ConstScalar::Bytes(bytes, memory_map), ty));
+ let mut evaluator = Evaluator::new(db, body, assert_placeholder_ty_is_unused);
+ let x: Result<Const> = (|| {
+ let bytes = evaluator.interpret_mir(&body, None.into_iter())?;
+ let mut memory_map = evaluator.create_memory_map(
+ &bytes,
+ &ty,
+ &Locals { ptr: &ArenaMap::new(), body: &body, drop_flags: DropFlags::default() },
+ )?;
+ memory_map.vtable = evaluator.vtable_map.clone();
+ return Ok(intern_const_scalar(ConstScalar::Bytes(bytes, memory_map), ty));
+ })();
+ (
+ x,
+ String::from_utf8_lossy(&evaluator.stdout).into_owned(),
+ String::from_utf8_lossy(&evaluator.stderr).into_owned(),
+ )
}
impl Evaluator<'_> {
pub fn new<'a>(
db: &'a dyn HirDatabase,
- crate_id: CrateId,
+ body: &MirBody,
assert_placeholder_ty_is_unused: bool,
) -> Evaluator<'a> {
+ let crate_id = body.owner.module(db.upcast()).krate();
+ let trait_env = db.trait_environment_for_body(body.owner);
Evaluator {
stack: vec![0],
heap: vec![0],
+ vtable_map: VTableMap::default(),
+ thread_local_storage: TlsData::default(),
+ static_locations: HashMap::default(),
db,
+ trait_env,
crate_id,
+ stdout: vec![],
+ stderr: vec![],
assert_placeholder_ty_is_unused,
stack_depth_limit: 100,
- execution_limit: 100_000,
+            execution_limit: 1_000_000,
}
}
fn place_addr(&self, p: &Place, locals: &Locals<'_>) -> Result<Address> {
- Ok(self.place_addr_and_ty(p, locals)?.0)
+ Ok(self.place_addr_and_ty_and_metadata(p, locals)?.0)
+ }
+
+ fn place_interval(&self, p: &Place, locals: &Locals<'_>) -> Result<Interval> {
+ let place_addr_and_ty = self.place_addr_and_ty_and_metadata(p, locals)?;
+ Ok(Interval {
+ addr: place_addr_and_ty.0,
+ size: self.size_of_sized(
+ &place_addr_and_ty.1,
+ locals,
+                "type of the place whose interval we need",
+ )?,
+ })
}
fn ptr_size(&self) -> usize {
@@ -256,125 +532,170 @@ impl Evaluator<'_> {
}
}
- fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<(Address, Ty)> {
- let mut addr = locals.ptr[p.local];
- let mut ty: Ty =
- self.ty_filler(&locals.body.locals[p.local].ty, locals.subst, locals.body.owner)?;
- for proj in &p.projection {
+ fn place_addr_and_ty_and_metadata<'a>(
+ &'a self,
+ p: &Place,
+ locals: &'a Locals<'a>,
+ ) -> Result<(Address, Ty, Option<IntervalOrOwned>)> {
+ let mut addr = locals.ptr[p.local].addr;
+ let mut ty: Ty = locals.body.locals[p.local].ty.clone();
+ let mut metadata: Option<IntervalOrOwned> = None; // locals are always sized
+ for proj in &*p.projection {
+ let prev_ty = ty.clone();
+ ty = proj.projected_ty(
+ ty,
+ self.db,
+ |c, subst, f| {
+ let (def, _) = self.db.lookup_intern_closure(c.into());
+ let infer = self.db.infer(def);
+ let (captures, _) = infer.closure_info(&c);
+ let parent_subst = ClosureSubst(subst).parent_subst();
+ captures
+ .get(f)
+ .expect("broken closure field")
+ .ty
+ .clone()
+ .substitute(Interner, parent_subst)
+ },
+ self.crate_id,
+ );
match proj {
ProjectionElem::Deref => {
- ty = match &ty.data(Interner).kind {
- TyKind::Raw(_, inner) | TyKind::Ref(_, _, inner) => inner.clone(),
- _ => {
- return Err(MirEvalError::TypeError(
- "Overloaded deref in MIR is disallowed",
- ))
- }
+ metadata = if self.size_align_of(&ty, locals)?.is_none() {
+ Some(
+ Interval { addr: addr.offset(self.ptr_size()), size: self.ptr_size() }
+ .into(),
+ )
+ } else {
+ None
};
let x = from_bytes!(usize, self.read_memory(addr, self.ptr_size())?);
addr = Address::from_usize(x);
}
ProjectionElem::Index(op) => {
- let offset =
- from_bytes!(usize, self.read_memory(locals.ptr[*op], self.ptr_size())?);
- match &ty.data(Interner).kind {
- TyKind::Ref(_, _, inner) => match &inner.data(Interner).kind {
- TyKind::Slice(inner) => {
- ty = inner.clone();
- let ty_size = self.size_of_sized(
- &ty,
- locals,
- "slice inner type should be sized",
- )?;
- let value = self.read_memory(addr, self.ptr_size() * 2)?;
- addr = Address::from_bytes(&value[0..8])?.offset(ty_size * offset);
- }
- x => not_supported!("MIR index for ref type {x:?}"),
- },
- TyKind::Array(inner, _) | TyKind::Slice(inner) => {
- ty = inner.clone();
- let ty_size = self.size_of_sized(
- &ty,
- locals,
- "array inner type should be sized",
- )?;
- addr = addr.offset(ty_size * offset);
- }
- x => not_supported!("MIR index for type {x:?}"),
- }
+ let offset = from_bytes!(
+ usize,
+ self.read_memory(locals.ptr[*op].addr, self.ptr_size())?
+ );
+ metadata = None; // Result of index is always sized
+ let ty_size =
+ self.size_of_sized(&ty, locals, "array inner type should be sized")?;
+ addr = addr.offset(ty_size * offset);
}
- &ProjectionElem::TupleField(f) => match &ty.data(Interner).kind {
- TyKind::Tuple(_, subst) => {
- let layout = self.layout(&ty)?;
- ty = subst
- .as_slice(Interner)
- .get(f)
- .ok_or(MirEvalError::TypeError("not enough tuple fields"))?
- .assert_ty_ref(Interner)
- .clone();
- let offset = layout.fields.offset(f).bytes_usize();
- addr = addr.offset(offset);
- }
- _ => return Err(MirEvalError::TypeError("Only tuple has tuple fields")),
- },
- ProjectionElem::Field(f) => match &ty.data(Interner).kind {
- TyKind::Adt(adt, subst) => {
- let layout = self.layout_adt(adt.0, subst.clone())?;
- let variant_layout = match &layout.variants {
- Variants::Single { .. } => &layout,
- Variants::Multiple { variants, .. } => {
- &variants[match f.parent {
- hir_def::VariantId::EnumVariantId(x) => {
- RustcEnumVariantIdx(x.local_id)
- }
- _ => {
- return Err(MirEvalError::TypeError(
- "Multivariant layout only happens for enums",
- ))
- }
- }]
- }
+ &ProjectionElem::ConstantIndex { from_end, offset } => {
+ let offset = if from_end {
+ let len = match prev_ty.kind(Interner) {
+ TyKind::Array(_, c) => match try_const_usize(self.db, c) {
+ Some(x) => x as u64,
+ None => {
+ not_supported!("indexing array with unknown const from end")
+ }
+ },
+ TyKind::Slice(_) => match metadata {
+ Some(x) => from_bytes!(u64, x.get(self)?),
+ None => not_supported!("slice place without metadata"),
+ },
+ _ => not_supported!("bad type for const index"),
};
- ty = self.db.field_types(f.parent)[f.local_id]
- .clone()
- .substitute(Interner, subst);
- let offset = variant_layout
- .fields
- .offset(u32::from(f.local_id.into_raw()) as usize)
- .bytes_usize();
- addr = addr.offset(offset);
- }
- _ => return Err(MirEvalError::TypeError("Only adt has fields")),
- },
- ProjectionElem::ConstantIndex { .. } => {
- not_supported!("constant index")
+ (len - offset - 1) as usize
+ } else {
+ offset as usize
+ };
+ metadata = None; // Result of index is always sized
+ let ty_size =
+ self.size_of_sized(&ty, locals, "array inner type should be sized")?;
+ addr = addr.offset(ty_size * offset);
+ }
+ &ProjectionElem::Subslice { from, to } => {
+ let inner_ty = match &ty.data(Interner).kind {
+ TyKind::Array(inner, _) | TyKind::Slice(inner) => inner.clone(),
+ _ => TyKind::Error.intern(Interner),
+ };
+ metadata = match metadata {
+ Some(x) => {
+ let prev_len = from_bytes!(u64, x.get(self)?);
+ Some(IntervalOrOwned::Owned(
+ (prev_len - from - to).to_le_bytes().to_vec(),
+ ))
+ }
+ None => None,
+ };
+ let ty_size =
+ self.size_of_sized(&inner_ty, locals, "array inner type should be sized")?;
+ addr = addr.offset(ty_size * (from as usize));
+ }
+ &ProjectionElem::TupleOrClosureField(f) => {
+ let layout = self.layout(&prev_ty)?;
+ let offset = layout.fields.offset(f).bytes_usize();
+ addr = addr.offset(offset);
+ metadata = None; // tuple field is always sized
+ }
+ ProjectionElem::Field(f) => {
+ let layout = self.layout(&prev_ty)?;
+ let variant_layout = match &layout.variants {
+ Variants::Single { .. } => &layout,
+ Variants::Multiple { variants, .. } => {
+ &variants[match f.parent {
+ hir_def::VariantId::EnumVariantId(x) => {
+ RustcEnumVariantIdx(x.local_id)
+ }
+ _ => {
+ return Err(MirEvalError::TypeError(
+ "Multivariant layout only happens for enums",
+ ))
+ }
+ }]
+ }
+ };
+ let offset = variant_layout
+ .fields
+ .offset(u32::from(f.local_id.into_raw()) as usize)
+ .bytes_usize();
+ addr = addr.offset(offset);
+ // FIXME: support structs with unsized fields
+ metadata = None;
}
- ProjectionElem::Subslice { .. } => not_supported!("subslice"),
ProjectionElem::OpaqueCast(_) => not_supported!("opaque cast"),
}
}
- Ok((addr, ty))
+ Ok((addr, ty, metadata))
}
- fn layout(&self, ty: &Ty) -> Result<Layout> {
- layout_of_ty(self.db, ty, self.crate_id)
+ fn layout(&self, ty: &Ty) -> Result<Arc<Layout>> {
+ self.db
+ .layout_of_ty(ty.clone(), self.crate_id)
.map_err(|e| MirEvalError::LayoutError(e, ty.clone()))
}
- fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Layout> {
- self.db.layout_of_adt(adt, subst.clone()).map_err(|e| {
+ fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Arc<Layout>> {
+ self.db.layout_of_adt(adt, subst.clone(), self.crate_id).map_err(|e| {
MirEvalError::LayoutError(e, TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner))
})
}
fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<Ty> {
- Ok(self.place_addr_and_ty(p, locals)?.1)
+ Ok(self.place_addr_and_ty_and_metadata(p, locals)?.1)
}
- fn operand_ty<'a>(&'a self, o: &'a Operand, locals: &'a Locals<'a>) -> Result<Ty> {
+ fn operand_ty(&self, o: &Operand, locals: &Locals<'_>) -> Result<Ty> {
Ok(match o {
Operand::Copy(p) | Operand::Move(p) => self.place_ty(p, locals)?,
Operand::Constant(c) => c.data(Interner).ty.clone(),
+ &Operand::Static(s) => {
+ let ty = self.db.infer(s.into())[self.db.body(s.into()).body_expr].clone();
+ TyKind::Ref(Mutability::Not, static_lifetime(), ty).intern(Interner)
+ }
+ })
+ }
+
+ fn operand_ty_and_eval(
+ &mut self,
+ o: &Operand,
+ locals: &mut Locals<'_>,
+ ) -> Result<IntervalAndTy> {
+ Ok(IntervalAndTy {
+ interval: self.eval_operand(o, locals)?,
+ ty: self.operand_ty(o, locals)?,
})
}
@@ -382,7 +703,6 @@ impl Evaluator<'_> {
&mut self,
body: &MirBody,
args: impl Iterator<Item = Vec<u8>>,
- subst: Substitution,
) -> Result<Vec<u8>> {
if let Some(x) = self.stack_depth_limit.checked_sub(1) {
self.stack_depth_limit = x;
@@ -390,7 +710,8 @@ impl Evaluator<'_> {
return Err(MirEvalError::StackOverflow);
}
let mut current_block_idx = body.start_block;
- let mut locals = Locals { ptr: &ArenaMap::new(), body: &body, subst: &subst };
+ let mut locals =
+ Locals { ptr: &ArenaMap::new(), body: &body, drop_flags: DropFlags::default() };
let (locals_ptr, stack_size) = {
let mut stack_ptr = self.stack.len();
let addr = body
@@ -401,7 +722,7 @@ impl Evaluator<'_> {
self.size_of_sized(&x.ty, &locals, "no unsized local in extending stack")?;
let my_ptr = stack_ptr;
stack_ptr += size;
- Ok((id, Stack(my_ptr)))
+ Ok((id, Interval { addr: Stack(my_ptr), size }))
})
.collect::<Result<ArenaMap<LocalId, _>>>()?;
let stack_size = stack_ptr - self.stack.len();
@@ -409,9 +730,10 @@ impl Evaluator<'_> {
};
locals.ptr = &locals_ptr;
self.stack.extend(iter::repeat(0).take(stack_size));
- let mut remain_args = body.arg_count;
- for ((_, addr), value) in locals_ptr.iter().skip(1).zip(args) {
- self.write_memory(*addr, &value)?;
+ let mut remain_args = body.param_locals.len();
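+        // Local 0 is the return place, so the argument values are written into
+        // the locals starting from index 1 and marked initialized for drop tracking.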
+ for ((l, interval), value) in locals_ptr.iter().skip(1).zip(args) {
+ locals.drop_flags.add_place(l.into());
+ interval.write_from_bytes(self, &value)?;
if remain_args == 0 {
return Err(MirEvalError::TypeError("more arguments provided"));
}
@@ -431,8 +753,9 @@ impl Evaluator<'_> {
match &statement.kind {
StatementKind::Assign(l, r) => {
let addr = self.place_addr(l, &locals)?;
- let result = self.eval_rvalue(r, &locals)?.to_vec(&self)?;
+ let result = self.eval_rvalue(r, &mut locals)?.to_vec(&self)?;
self.write_memory(addr, &result)?;
+ locals.drop_flags.add_place(l.clone());
}
StatementKind::Deinit(_) => not_supported!("de-init statement"),
StatementKind::StorageLive(_)
@@ -443,11 +766,11 @@ impl Evaluator<'_> {
let Some(terminator) = current_block.terminator.as_ref() else {
not_supported!("block without terminator");
};
- match terminator {
- Terminator::Goto { target } => {
+ match &terminator.kind {
+ TerminatorKind::Goto { target } => {
current_block_idx = *target;
}
- Terminator::Call {
+ TerminatorKind::Call {
func,
args,
destination,
@@ -455,155 +778,84 @@ impl Evaluator<'_> {
cleanup: _,
from_hir_call: _,
} => {
+ let destination_interval = self.place_interval(destination, &locals)?;
let fn_ty = self.operand_ty(func, &locals)?;
+ let args = args
+ .iter()
+ .map(|x| self.operand_ty_and_eval(x, &mut locals))
+ .collect::<Result<Vec<_>>>()?;
match &fn_ty.data(Interner).kind {
+ TyKind::Function(_) => {
+ let bytes = self.eval_operand(func, &mut locals)?;
+ self.exec_fn_pointer(
+ bytes,
+ destination_interval,
+ &args,
+ &locals,
+ terminator.span,
+ )?;
+ }
TyKind::FnDef(def, generic_args) => {
- let def: CallableDefId = from_chalk(self.db, *def);
- let generic_args = self.subst_filler(generic_args, &locals);
- match def {
- CallableDefId::FunctionId(def) => {
- let arg_bytes = args
- .iter()
- .map(|x| {
- Ok(self
- .eval_operand(x, &locals)?
- .get(&self)?
- .to_owned())
- })
- .collect::<Result<Vec<_>>>()?
- .into_iter();
- let function_data = self.db.function_data(def);
- let is_intrinsic = match &function_data.abi {
- Some(abi) => *abi == Interned::new_str("rust-intrinsic"),
- None => match def.lookup(self.db.upcast()).container {
- hir_def::ItemContainerId::ExternBlockId(block) => {
- let id = block.lookup(self.db.upcast()).id;
- id.item_tree(self.db.upcast())[id.value]
- .abi
- .as_deref()
- == Some("rust-intrinsic")
- }
- _ => false,
- },
- };
- let result = if is_intrinsic {
- self.exec_intrinsic(
- function_data
- .name
- .as_text()
- .unwrap_or_default()
- .as_str(),
- arg_bytes,
- generic_args,
- &locals,
- )?
- } else if let Some(x) = self.detect_lang_function(def) {
- self.exec_lang_item(x, arg_bytes)?
- } else {
- let trait_env = {
- let Some(d) = body.owner.as_generic_def_id() else {
- not_supported!("trait resolving in non generic def id");
- };
- self.db.trait_environment(d)
- };
- let (imp, generic_args) = lookup_impl_method(
- self.db,
- trait_env,
- def,
- generic_args.clone(),
- );
- let generic_args =
- self.subst_filler(&generic_args, &locals);
- let def = imp.into();
- let mir_body = self
- .db
- .mir_body(def)
- .map_err(|e| MirEvalError::MirLowerError(imp, e))?;
- self.interpret_mir(&mir_body, arg_bytes, generic_args)
- .map_err(|e| {
- MirEvalError::InFunction(imp, Box::new(e))
- })?
- };
- let dest_addr = self.place_addr(destination, &locals)?;
- self.write_memory(dest_addr, &result)?;
- }
- CallableDefId::StructId(id) => {
- let (size, variant_layout, tag) = self.layout_of_variant(
- id.into(),
- generic_args.clone(),
- &locals,
- )?;
- let result = self.make_by_layout(
- size,
- &variant_layout,
- tag,
- args,
- &locals,
- )?;
- let dest_addr = self.place_addr(destination, &locals)?;
- self.write_memory(dest_addr, &result)?;
- }
- CallableDefId::EnumVariantId(id) => {
- let (size, variant_layout, tag) = self.layout_of_variant(
- id.into(),
- generic_args.clone(),
- &locals,
- )?;
- let result = self.make_by_layout(
- size,
- &variant_layout,
- tag,
- args,
- &locals,
- )?;
- let dest_addr = self.place_addr(destination, &locals)?;
- self.write_memory(dest_addr, &result)?;
- }
- }
- current_block_idx =
- target.expect("broken mir, function without target");
+ self.exec_fn_def(
+ *def,
+ generic_args,
+ destination_interval,
+ &args,
+ &locals,
+ terminator.span,
+ )?;
}
- _ => not_supported!("unknown function type"),
+ x => not_supported!("unknown function type {x:?}"),
}
+ locals.drop_flags.add_place(destination.clone());
+ current_block_idx = target.expect("broken mir, function without target");
}
- Terminator::SwitchInt { discr, targets } => {
+ TerminatorKind::SwitchInt { discr, targets } => {
let val = u128::from_le_bytes(pad16(
- self.eval_operand(discr, &locals)?.get(&self)?,
+ self.eval_operand(discr, &mut locals)?.get(&self)?,
false,
));
current_block_idx = targets.target_for_value(val);
}
- Terminator::Return => {
- let ty = body.locals[return_slot()].ty.clone();
+ TerminatorKind::Return => {
self.stack_depth_limit += 1;
- return Ok(self
- .read_memory(
- locals.ptr[return_slot()],
- self.size_of_sized(&ty, &locals, "return type")?,
- )?
- .to_owned());
+ return Ok(locals.ptr[return_slot()].get(self)?.to_vec());
+ }
+ TerminatorKind::Unreachable => {
+ return Err(MirEvalError::UndefinedBehavior("unreachable executed".to_owned()));
}
- Terminator::Unreachable => {
- return Err(MirEvalError::UndefinedBehavior("unreachable executed"))
+ TerminatorKind::Drop { place, target, unwind: _ } => {
+ self.drop_place(place, &mut locals, terminator.span)?;
+ current_block_idx = *target;
}
_ => not_supported!("unknown terminator"),
}
}
}
- fn eval_rvalue<'a>(
- &'a mut self,
- r: &'a Rvalue,
- locals: &'a Locals<'a>,
- ) -> Result<IntervalOrOwned> {
+ fn eval_rvalue(&mut self, r: &Rvalue, locals: &mut Locals<'_>) -> Result<IntervalOrOwned> {
use IntervalOrOwned::*;
Ok(match r {
Rvalue::Use(x) => Borrowed(self.eval_operand(x, locals)?),
Rvalue::Ref(_, p) => {
- let addr = self.place_addr(p, locals)?;
- Owned(addr.to_bytes())
+ let (addr, _, metadata) = self.place_addr_and_ty_and_metadata(p, locals)?;
+ let mut r = addr.to_bytes();
+ if let Some(metadata) = metadata {
+ r.extend(metadata.get(self)?);
+ }
+ Owned(r)
+ }
+ Rvalue::Len(p) => {
+ let (_, _, metadata) = self.place_addr_and_ty_and_metadata(p, locals)?;
+ match metadata {
+ Some(m) => m,
+ None => {
+ return Err(MirEvalError::TypeError(
+ "type without metadata is used for Rvalue::Len",
+ ));
+ }
+ }
}
- Rvalue::Len(_) => not_supported!("rvalue len"),
Rvalue::UnaryOp(op, val) => {
let mut c = self.eval_operand(val, locals)?.get(&self)?;
let mut ty = self.operand_ty(val, locals)?;
@@ -612,27 +864,40 @@ impl Evaluator<'_> {
let size = self.size_of_sized(&ty, locals, "operand of unary op")?;
c = self.read_memory(Address::from_bytes(c)?, size)?;
}
- let mut c = c.to_vec();
- if ty.as_builtin() == Some(BuiltinType::Bool) {
- c[0] = 1 - c[0];
+ if let TyKind::Scalar(chalk_ir::Scalar::Float(f)) = ty.kind(Interner) {
+ match f {
+ chalk_ir::FloatTy::F32 => {
+ let c = -from_bytes!(f32, c);
+ Owned(c.to_le_bytes().into())
+ }
+ chalk_ir::FloatTy::F64 => {
+ let c = -from_bytes!(f64, c);
+ Owned(c.to_le_bytes().into())
+ }
+ }
} else {
- match op {
- UnOp::Not => c.iter_mut().for_each(|x| *x = !*x),
- UnOp::Neg => {
- c.iter_mut().for_each(|x| *x = !*x);
- for k in c.iter_mut() {
- let o;
- (*k, o) = k.overflowing_add(1);
- if !o {
- break;
+ let mut c = c.to_vec();
+ if ty.as_builtin() == Some(BuiltinType::Bool) {
+ c[0] = 1 - c[0];
+ } else {
+ match op {
+ UnOp::Not => c.iter_mut().for_each(|x| *x = !*x),
+ UnOp::Neg => {
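+                            // Two's complement negation: flip every bit, then add one
+                            // with manual carry propagation across the little-endian bytes.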
+ c.iter_mut().for_each(|x| *x = !*x);
+ for k in c.iter_mut() {
+ let o;
+ (*k, o) = k.overflowing_add(1);
+ if !o {
+ break;
+ }
}
}
}
}
+ Owned(c)
}
- Owned(c)
}
- Rvalue::CheckedBinaryOp(op, lhs, rhs) => {
+ Rvalue::CheckedBinaryOp(op, lhs, rhs) => 'binary_op: {
let lc = self.eval_operand(lhs, locals)?;
let rc = self.eval_operand(rhs, locals)?;
let mut lc = lc.get(&self)?;
@@ -640,79 +905,170 @@ impl Evaluator<'_> {
let mut ty = self.operand_ty(lhs, locals)?;
while let TyKind::Ref(_, _, z) = ty.kind(Interner) {
ty = z.clone();
- let size = self.size_of_sized(&ty, locals, "operand of binary op")?;
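+                // `&str` is a fat pointer (address + length). Only `==` is built in:
+                // lengths must match first, then the pointed-to bytes are compared.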
+ let size = if ty.kind(Interner) == &TyKind::Str {
+ if *op != BinOp::Eq {
+ never!("Only eq is builtin for `str`");
+ }
+ let ls = from_bytes!(usize, &lc[self.ptr_size()..self.ptr_size() * 2]);
+ let rs = from_bytes!(usize, &rc[self.ptr_size()..self.ptr_size() * 2]);
+ if ls != rs {
+ break 'binary_op Owned(vec![0]);
+ }
+ lc = &lc[..self.ptr_size()];
+ rc = &rc[..self.ptr_size()];
+ ls
+ } else {
+ self.size_of_sized(&ty, locals, "operand of binary op")?
+ };
lc = self.read_memory(Address::from_bytes(lc)?, size)?;
rc = self.read_memory(Address::from_bytes(rc)?, size)?;
}
- let is_signed = matches!(ty.as_builtin(), Some(BuiltinType::Int(_)));
- let l128 = i128::from_le_bytes(pad16(lc, is_signed));
- let r128 = i128::from_le_bytes(pad16(rc, is_signed));
- match op {
- BinOp::Ge | BinOp::Gt | BinOp::Le | BinOp::Lt | BinOp::Eq | BinOp::Ne => {
- let r = match op {
- BinOp::Ge => l128 >= r128,
- BinOp::Gt => l128 > r128,
- BinOp::Le => l128 <= r128,
- BinOp::Lt => l128 < r128,
- BinOp::Eq => l128 == r128,
- BinOp::Ne => l128 != r128,
- _ => unreachable!(),
- };
- let r = r as u8;
- Owned(vec![r])
+ if let TyKind::Scalar(chalk_ir::Scalar::Float(f)) = ty.kind(Interner) {
+ match f {
+ chalk_ir::FloatTy::F32 => {
+ let l = from_bytes!(f32, lc);
+ let r = from_bytes!(f32, rc);
+ match op {
+ BinOp::Ge
+ | BinOp::Gt
+ | BinOp::Le
+ | BinOp::Lt
+ | BinOp::Eq
+ | BinOp::Ne => {
+ let r = op.run_compare(l, r) as u8;
+ Owned(vec![r])
+ }
+ BinOp::Add | BinOp::Sub | BinOp::Mul | BinOp::Div => {
+ let r = match op {
+ BinOp::Add => l + r,
+ BinOp::Sub => l - r,
+ BinOp::Mul => l * r,
+ BinOp::Div => l / r,
+ _ => unreachable!(),
+ };
+ Owned(r.to_le_bytes().into())
+ }
+ x => not_supported!(
+ "invalid binop {x:?} on floating point operators"
+ ),
+ }
+ }
+ chalk_ir::FloatTy::F64 => {
+ let l = from_bytes!(f64, lc);
+ let r = from_bytes!(f64, rc);
+ match op {
+ BinOp::Ge
+ | BinOp::Gt
+ | BinOp::Le
+ | BinOp::Lt
+ | BinOp::Eq
+ | BinOp::Ne => {
+ let r = op.run_compare(l, r) as u8;
+ Owned(vec![r])
+ }
+ BinOp::Add | BinOp::Sub | BinOp::Mul | BinOp::Div => {
+ let r = match op {
+ BinOp::Add => l + r,
+ BinOp::Sub => l - r,
+ BinOp::Mul => l * r,
+ BinOp::Div => l / r,
+ _ => unreachable!(),
+ };
+ Owned(r.to_le_bytes().into())
+ }
+ x => not_supported!(
+ "invalid binop {x:?} on floating point operators"
+ ),
+ }
+ }
}
- BinOp::BitAnd
- | BinOp::BitOr
- | BinOp::BitXor
- | BinOp::Add
- | BinOp::Mul
- | BinOp::Div
- | BinOp::Rem
- | BinOp::Sub => {
- let r = match op {
- BinOp::Add => l128.overflowing_add(r128).0,
- BinOp::Mul => l128.overflowing_mul(r128).0,
- BinOp::Div => l128.checked_div(r128).ok_or(MirEvalError::Panic)?,
- BinOp::Rem => l128.checked_rem(r128).ok_or(MirEvalError::Panic)?,
- BinOp::Sub => l128.overflowing_sub(r128).0,
- BinOp::BitAnd => l128 & r128,
- BinOp::BitOr => l128 | r128,
- BinOp::BitXor => l128 ^ r128,
- _ => unreachable!(),
- };
+ } else {
+ let is_signed = matches!(ty.as_builtin(), Some(BuiltinType::Int(_)));
+ let l128 = i128::from_le_bytes(pad16(lc, is_signed));
+ let r128 = i128::from_le_bytes(pad16(rc, is_signed));
+ let check_overflow = |r: i128| {
+ // FIXME: this is not very correct, and only catches the basic cases.
let r = r.to_le_bytes();
for &k in &r[lc.len()..] {
if k != 0 && (k != 255 || !is_signed) {
- return Err(MirEvalError::Panic);
+ return Err(MirEvalError::Panic(format!("Overflow in {op:?}")));
}
}
- Owned(r[0..lc.len()].into())
- }
- BinOp::Shl | BinOp::Shr => {
- let shift_amout = if r128 < 0 {
- return Err(MirEvalError::Panic);
- } else if r128 > 128 {
- return Err(MirEvalError::Panic);
- } else {
- r128 as u8
- };
- let r = match op {
- BinOp::Shl => l128 << shift_amout,
- BinOp::Shr => l128 >> shift_amout,
- _ => unreachable!(),
- };
- Owned(r.to_le_bytes()[0..lc.len()].into())
+ Ok(Owned(r[0..lc.len()].into()))
+ };
+ match op {
+ BinOp::Ge | BinOp::Gt | BinOp::Le | BinOp::Lt | BinOp::Eq | BinOp::Ne => {
+ let r = op.run_compare(l128, r128) as u8;
+ Owned(vec![r])
+ }
+ BinOp::BitAnd
+ | BinOp::BitOr
+ | BinOp::BitXor
+ | BinOp::Add
+ | BinOp::Mul
+ | BinOp::Div
+ | BinOp::Rem
+ | BinOp::Sub => {
+ let r = match op {
+ BinOp::Add => l128.overflowing_add(r128).0,
+ BinOp::Mul => l128.overflowing_mul(r128).0,
+ BinOp::Div => l128.checked_div(r128).ok_or_else(|| {
+ MirEvalError::Panic(format!("Overflow in {op:?}"))
+ })?,
+ BinOp::Rem => l128.checked_rem(r128).ok_or_else(|| {
+ MirEvalError::Panic(format!("Overflow in {op:?}"))
+ })?,
+ BinOp::Sub => l128.overflowing_sub(r128).0,
+ BinOp::BitAnd => l128 & r128,
+ BinOp::BitOr => l128 | r128,
+ BinOp::BitXor => l128 ^ r128,
+ _ => unreachable!(),
+ };
+ check_overflow(r)?
+ }
+ BinOp::Shl | BinOp::Shr => {
+ let r = 'b: {
+ if let Ok(shift_amount) = u32::try_from(r128) {
+ let r = match op {
+ BinOp::Shl => l128.checked_shl(shift_amount),
+ BinOp::Shr => l128.checked_shr(shift_amount),
+ _ => unreachable!(),
+ };
+ if let Some(r) = r {
+ break 'b r;
+ }
+ };
+ return Err(MirEvalError::Panic(format!("Overflow in {op:?}")));
+ };
+ check_overflow(r)?
+ }
+ BinOp::Offset => not_supported!("offset binop"),
}
- BinOp::Offset => not_supported!("offset binop"),
}
}
Rvalue::Discriminant(p) => {
let ty = self.place_ty(p, locals)?;
let bytes = self.eval_place(p, locals)?.get(&self)?;
let layout = self.layout(&ty)?;
- match layout.variants {
- Variants::Single { .. } => Owned(0u128.to_le_bytes().to_vec()),
- Variants::Multiple { tag, tag_encoding, .. } => {
+ let enum_id = 'b: {
+ match ty.kind(Interner) {
+ TyKind::Adt(e, _) => match e.0 {
+ AdtId::EnumId(e) => break 'b e,
+ _ => (),
+ },
+ _ => (),
+ }
+ return Ok(Owned(0u128.to_le_bytes().to_vec()));
+ };
+ match &layout.variants {
+ Variants::Single { index } => {
+ let r = self.const_eval_discriminant(EnumVariantId {
+ parent: enum_id,
+ local_id: index.0,
+ })?;
+ Owned(r.to_le_bytes().to_vec())
+ }
+ Variants::Multiple { tag, tag_encoding, variants, .. } => {
let Some(target_data_layout) = self.db.target_data_layout(self.crate_id) else {
not_supported!("missing target data layout");
};
@@ -725,120 +1081,142 @@ impl Evaluator<'_> {
}
TagEncoding::Niche { untagged_variant, niche_start, .. } => {
let tag = &bytes[offset..offset + size];
- let candidate_discriminant = i128::from_le_bytes(pad16(tag, false))
- .wrapping_sub(niche_start as i128);
- let enum_id = match ty.kind(Interner) {
- TyKind::Adt(e, _) => match e.0 {
- AdtId::EnumId(e) => e,
- _ => not_supported!("Non enum with multi variant layout"),
- },
- _ => not_supported!("Non adt with multi variant layout"),
- };
- let enum_data = self.db.enum_data(enum_id);
- let result = 'b: {
- for (local_id, _) in enum_data.variants.iter() {
- if candidate_discriminant
- == self.db.const_eval_discriminant(EnumVariantId {
- parent: enum_id,
- local_id,
- })?
- {
- break 'b candidate_discriminant;
- }
- }
- self.db.const_eval_discriminant(EnumVariantId {
- parent: enum_id,
- local_id: untagged_variant.0,
- })?
- };
+ let candidate_tag = i128::from_le_bytes(pad16(tag, false))
+ .wrapping_sub(*niche_start as i128)
+ as usize;
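+                            // With niche encoding, the tag is an index into the variants
+                            // other than the untagged one; anything out of range decodes
+                            // to the untagged variant.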
+ let variant = variants
+ .iter_enumerated()
+ .map(|(x, _)| x)
+ .filter(|x| x != untagged_variant)
+ .nth(candidate_tag)
+ .unwrap_or(*untagged_variant)
+ .0;
+ let result = self.const_eval_discriminant(EnumVariantId {
+ parent: enum_id,
+ local_id: variant,
+ })?;
Owned(result.to_le_bytes().to_vec())
}
}
}
}
}
+ Rvalue::Repeat(x, len) => {
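+                // `[x; len]` is materialized by cycling the element's bytes `len` times.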
+ let len = match try_const_usize(self.db, &len) {
+ Some(x) => x as usize,
+                None => not_supported!("non-evaluatable array len in repeat Rvalue"),
+ };
+ let val = self.eval_operand(x, locals)?.get(self)?;
+ let size = len * val.len();
+ Owned(val.iter().copied().cycle().take(size).collect())
+ }
Rvalue::ShallowInitBox(_, _) => not_supported!("shallow init box"),
+ Rvalue::ShallowInitBoxWithAlloc(ty) => {
+ let Some((size, align)) = self.size_align_of(ty, locals)? else {
+ not_supported!("unsized box initialization");
+ };
+ let addr = self.heap_allocate(size, align);
+ Owned(addr.to_bytes())
+ }
Rvalue::CopyForDeref(_) => not_supported!("copy for deref"),
- Rvalue::Aggregate(kind, values) => match kind {
- AggregateKind::Array(_) => {
- let mut r = vec![];
- for x in values {
- let value = self.eval_operand(x, locals)?.get(&self)?;
- r.extend(value);
+ Rvalue::Aggregate(kind, values) => {
+ let values = values
+ .iter()
+ .map(|x| self.eval_operand(x, locals))
+ .collect::<Result<Vec<_>>>()?;
+ match kind {
+ AggregateKind::Array(_) => {
+ let mut r = vec![];
+ for x in values {
+ let value = x.get(&self)?;
+ r.extend(value);
+ }
+ Owned(r)
+ }
+ AggregateKind::Tuple(ty) => {
+ let layout = self.layout(&ty)?;
+ Owned(self.make_by_layout(
+ layout.size.bytes_usize(),
+ &layout,
+ None,
+ values.iter().map(|&x| x.into()),
+ )?)
+ }
+ AggregateKind::Union(x, f) => {
+ let layout = self.layout_adt((*x).into(), Substitution::empty(Interner))?;
+ let offset = layout
+ .fields
+ .offset(u32::from(f.local_id.into_raw()) as usize)
+ .bytes_usize();
+ let op = values[0].get(&self)?;
+ let mut result = vec![0; layout.size.bytes_usize()];
+ result[offset..offset + op.len()].copy_from_slice(op);
+ Owned(result)
+ }
+ AggregateKind::Adt(x, subst) => {
+ let (size, variant_layout, tag) =
+ self.layout_of_variant(*x, subst.clone(), locals)?;
+ Owned(self.make_by_layout(
+ size,
+ &variant_layout,
+ tag,
+ values.iter().map(|&x| x.into()),
+ )?)
+ }
+ AggregateKind::Closure(ty) => {
+ let layout = self.layout(&ty)?;
+ Owned(self.make_by_layout(
+ layout.size.bytes_usize(),
+ &layout,
+ None,
+ values.iter().map(|&x| x.into()),
+ )?)
}
- Owned(r)
- }
- AggregateKind::Tuple(ty) => {
- let layout = self.layout(&ty)?;
- Owned(self.make_by_layout(
- layout.size.bytes_usize(),
- &layout,
- None,
- values,
- locals,
- )?)
- }
- AggregateKind::Union(x, f) => {
- let layout = self.layout_adt((*x).into(), Substitution::empty(Interner))?;
- let offset = layout
- .fields
- .offset(u32::from(f.local_id.into_raw()) as usize)
- .bytes_usize();
- let op = self.eval_operand(&values[0], locals)?.get(&self)?;
- let mut result = vec![0; layout.size.bytes_usize()];
- result[offset..offset + op.len()].copy_from_slice(op);
- Owned(result)
- }
- AggregateKind::Adt(x, subst) => {
- let (size, variant_layout, tag) =
- self.layout_of_variant(*x, subst.clone(), locals)?;
- Owned(self.make_by_layout(size, &variant_layout, tag, values, locals)?)
}
- },
+ }
Rvalue::Cast(kind, operand, target_ty) => match kind {
- CastKind::PointerExposeAddress => not_supported!("exposing pointer address"),
- CastKind::PointerFromExposedAddress => {
- not_supported!("creating pointer from exposed address")
- }
CastKind::Pointer(cast) => match cast {
- PointerCast::Unsize => {
+ PointerCast::ReifyFnPointer | PointerCast::ClosureFnPointer(_) => {
let current_ty = self.operand_ty(operand, locals)?;
- match &target_ty.data(Interner).kind {
- TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => {
- match &ty.data(Interner).kind {
- TyKind::Slice(_) => match &current_ty.data(Interner).kind {
- TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => {
- match &ty.data(Interner).kind {
- TyKind::Array(_, size) => {
- let addr = self
- .eval_operand(operand, locals)?
- .get(&self)?;
- let len = const_as_usize(size);
- let mut r = Vec::with_capacity(16);
- r.extend(addr.iter().copied());
- r.extend(len.to_le_bytes().into_iter());
- Owned(r)
- }
- _ => {
- not_supported!("slice unsizing from non arrays")
- }
- }
- }
- _ => not_supported!("slice unsizing from non pointers"),
- },
- TyKind::Dyn(_) => not_supported!("dyn pointer unsize cast"),
- _ => not_supported!("unknown unsized cast"),
- }
- }
- _ => not_supported!("unsized cast on unknown pointer type"),
+ if let TyKind::FnDef(_, _) | TyKind::Closure(_, _) =
+ &current_ty.data(Interner).kind
+ {
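+                            // There are no real code addresses in this evaluator, so a
+                            // fn pointer is encoded as an id in `vtable_map`.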
+ let id = self.vtable_map.id(current_ty);
+ let ptr_size = self.ptr_size();
+ Owned(id.to_le_bytes()[0..ptr_size].to_vec())
+ } else {
+ not_supported!(
+ "creating a fn pointer from a non FnDef or Closure type"
+ );
}
}
- x => not_supported!("pointer cast {x:?}"),
+ PointerCast::Unsize => {
+ let current_ty = self.operand_ty(operand, locals)?;
+ let addr = self.eval_operand(operand, locals)?;
+ self.coerce_unsized(addr, &current_ty, target_ty)?
+ }
+ PointerCast::MutToConstPointer | PointerCast::UnsafeFnPointer => {
+                        // This is a no-op.
+ Borrowed(self.eval_operand(operand, locals)?)
+ }
+ PointerCast::ArrayToPointer => {
+                        // We should remove the metadata part if the current type is a slice.
+ Borrowed(self.eval_operand(operand, locals)?.slice(0..self.ptr_size()))
+ }
},
CastKind::DynStar => not_supported!("dyn star cast"),
- CastKind::IntToInt => {
- // FIXME: handle signed cast
- let current = pad16(self.eval_operand(operand, locals)?.get(&self)?, false);
+ CastKind::IntToInt
+ | CastKind::PointerExposeAddress
+ | CastKind::PointerFromExposedAddress => {
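+                    // Extend to 16 bytes, sign-extending iff the source type is a
+                    // signed integer, then truncate to the destination size.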
+ let current_ty = self.operand_ty(operand, locals)?;
+ let is_signed = match current_ty.kind(Interner) {
+ TyKind::Scalar(s) => match s {
+ chalk_ir::Scalar::Int(_) => true,
+ _ => false,
+ },
+ _ => false,
+ };
+ let current = pad16(self.eval_operand(operand, locals)?.get(&self)?, is_signed);
let dest_size =
self.size_of_sized(target_ty, locals, "destination of int to int cast")?;
Owned(current[0..dest_size].to_vec())
@@ -846,31 +1224,106 @@ impl Evaluator<'_> {
CastKind::FloatToInt => not_supported!("float to int cast"),
CastKind::FloatToFloat => not_supported!("float to float cast"),
            CastKind::IntToFloat => not_supported!("int to float cast"),
- CastKind::PtrToPtr => not_supported!("ptr to ptr cast"),
CastKind::FnPtrToPtr => not_supported!("fn ptr to ptr cast"),
},
})
}
+ fn coerce_unsized_look_through_fields<T>(
+ &self,
+ ty: &Ty,
+ goal: impl Fn(&TyKind) -> Option<T>,
+ ) -> Result<T> {
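+        // Unsizing may happen behind wrapper structs (e.g. `Box<T>` wrapping a raw
+        // pointer): if `ty` itself doesn't satisfy `goal`, recurse into the first
+        // field of the struct.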
+ let kind = ty.kind(Interner);
+ if let Some(x) = goal(kind) {
+ return Ok(x);
+ }
+ if let TyKind::Adt(id, subst) = kind {
+ if let AdtId::StructId(struct_id) = id.0 {
+ let field_types = self.db.field_types(struct_id.into());
+ let mut field_types = field_types.iter();
+ if let Some(ty) =
+ field_types.next().map(|x| x.1.clone().substitute(Interner, subst))
+ {
+ return self.coerce_unsized_look_through_fields(&ty, goal);
+ }
+ }
+ }
+ Err(MirEvalError::CoerceUnsizedError(ty.clone()))
+ }
+
+ fn coerce_unsized(
+ &mut self,
+ addr: Interval,
+ current_ty: &Ty,
+ target_ty: &Ty,
+ ) -> Result<IntervalOrOwned> {
+ use IntervalOrOwned::*;
+ fn for_ptr(x: &TyKind) -> Option<Ty> {
+ match x {
+ TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => Some(ty.clone()),
+ _ => None,
+ }
+ }
+ Ok(match self.coerce_unsized_look_through_fields(target_ty, for_ptr)? {
+ ty => match &ty.data(Interner).kind {
+ TyKind::Slice(_) => {
+ match self.coerce_unsized_look_through_fields(current_ty, for_ptr)? {
+ ty => match &ty.data(Interner).kind {
+ TyKind::Array(_, size) => {
+ let len = match try_const_usize(self.db, size) {
+ None => not_supported!(
+ "unevaluatble len of array in coerce unsized"
+ ),
+ Some(x) => x as usize,
+ };
+ let mut r = Vec::with_capacity(16);
+ let addr = addr.get(self)?;
+ r.extend(addr.iter().copied());
+ r.extend(len.to_le_bytes().into_iter());
+ Owned(r)
+ }
+ t => {
+ not_supported!("slice unsizing from non array type {t:?}")
+ }
+ },
+ }
+ }
+ TyKind::Dyn(_) => match &current_ty.data(Interner).kind {
+ TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => {
+ let vtable = self.vtable_map.id(ty.clone());
+ let mut r = Vec::with_capacity(16);
+ let addr = addr.get(self)?;
+ r.extend(addr.iter().copied());
+ r.extend(vtable.to_le_bytes().into_iter());
+ Owned(r)
+ }
+ _ => not_supported!("dyn unsizing from non pointers"),
+ },
+ _ => not_supported!("unknown unsized cast"),
+ },
+ })
+ }
+
fn layout_of_variant(
&mut self,
x: VariantId,
subst: Substitution,
locals: &Locals<'_>,
- ) -> Result<(usize, Layout, Option<(usize, usize, i128)>)> {
+ ) -> Result<(usize, Arc<Layout>, Option<(usize, usize, i128)>)> {
let adt = x.adt_id();
if let DefWithBodyId::VariantId(f) = locals.body.owner {
if let VariantId::EnumVariantId(x) = x {
if AdtId::from(f.parent) == adt {
                    // Computing the exact size of enums requires resolving the enum discriminants. In order to prevent loops (and
                    // infinite-sized type errors) we use a dummy layout
- let i = self.db.const_eval_discriminant(x)?;
+ let i = self.const_eval_discriminant(x)?;
return Ok((16, self.layout(&TyBuilder::unit())?, Some((0, 16, i))));
}
}
}
let layout = self.layout_adt(adt, subst)?;
- Ok(match layout.variants {
+ Ok(match &layout.variants {
Variants::Single { .. } => (layout.size.bytes_usize(), layout, None),
Variants::Multiple { variants, tag, tag_encoding, .. } => {
let cx = self
@@ -882,18 +1335,27 @@ impl Evaluator<'_> {
_ => not_supported!("multi variant layout for non-enums"),
};
let rustc_enum_variant_idx = RustcEnumVariantIdx(enum_variant_id.local_id);
- let mut discriminant = self.db.const_eval_discriminant(enum_variant_id)?;
+ let mut discriminant = self.const_eval_discriminant(enum_variant_id)?;
let variant_layout = variants[rustc_enum_variant_idx].clone();
let have_tag = match tag_encoding {
TagEncoding::Direct => true,
TagEncoding::Niche { untagged_variant, niche_variants: _, niche_start } => {
- discriminant = discriminant.wrapping_add(niche_start as i128);
- untagged_variant != rustc_enum_variant_idx
+ if *untagged_variant == rustc_enum_variant_idx {
+ false
+ } else {
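+                            // Mirror of the niche decoding above: the stored tag is the
+                            // variant's position among the non-untagged variants, offset
+                            // by `niche_start`.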
+ discriminant = (variants
+ .iter_enumerated()
+ .filter(|(x, _)| x != untagged_variant)
+ .position(|(x, _)| x == rustc_enum_variant_idx)
+ .unwrap() as i128)
+ .wrapping_add(*niche_start as i128);
+ true
+ }
}
};
(
layout.size.bytes_usize(),
- variant_layout,
+ Arc::new(variant_layout),
if have_tag {
Some((
layout.fields.offset(0).bytes_usize(),
@@ -910,71 +1372,84 @@ impl Evaluator<'_> {
fn make_by_layout(
&mut self,
- size: usize, // Not neccessarily equal to variant_layout.size
+ size: usize, // Not necessarily equal to variant_layout.size
variant_layout: &Layout,
tag: Option<(usize, usize, i128)>,
- values: &Vec<Operand>,
- locals: &Locals<'_>,
+ values: impl Iterator<Item = IntervalOrOwned>,
) -> Result<Vec<u8>> {
let mut result = vec![0; size];
if let Some((offset, size, value)) = tag {
result[offset..offset + size].copy_from_slice(&value.to_le_bytes()[0..size]);
}
- for (i, op) in values.iter().enumerate() {
+ for (i, op) in values.enumerate() {
let offset = variant_layout.fields.offset(i).bytes_usize();
- let op = self.eval_operand(op, locals)?.get(&self)?;
+ let op = op.get(&self)?;
result[offset..offset + op.len()].copy_from_slice(op);
}
Ok(result)
}
- fn eval_operand(&mut self, x: &Operand, locals: &Locals<'_>) -> Result<Interval> {
+ fn eval_operand(&mut self, x: &Operand, locals: &mut Locals<'_>) -> Result<Interval> {
Ok(match x {
- Operand::Copy(p) | Operand::Move(p) => self.eval_place(p, locals)?,
+ Operand::Copy(p) | Operand::Move(p) => {
+ locals.drop_flags.remove_place(p);
+ self.eval_place(p, locals)?
+ }
+ Operand::Static(st) => {
+ let addr = self.eval_static(*st, locals)?;
+ Interval::new(addr, self.ptr_size())
+ }
Operand::Constant(konst) => {
let data = &konst.data(Interner);
match &data.value {
- chalk_ir::ConstValue::BoundVar(b) => {
- let c = locals
- .subst
- .as_slice(Interner)
- .get(b.index)
- .ok_or(MirEvalError::TypeError("missing generic arg"))?
- .assert_const_ref(Interner);
- self.eval_operand(&Operand::Constant(c.clone()), locals)?
- }
+ chalk_ir::ConstValue::BoundVar(_) => not_supported!("bound var constant"),
chalk_ir::ConstValue::InferenceVar(_) => {
not_supported!("inference var constant")
}
chalk_ir::ConstValue::Placeholder(_) => not_supported!("placeholder constant"),
- chalk_ir::ConstValue::Concrete(c) => match &c.interned {
- ConstScalar::Bytes(v, memory_map) => {
- let mut v: Cow<'_, [u8]> = Cow::Borrowed(v);
- let patch_map = memory_map.transform_addresses(|b| {
- let addr = self.heap_allocate(b.len());
- self.write_memory(addr, b)?;
- Ok(addr.to_usize())
- })?;
- let size = self.size_of(&data.ty, locals)?.unwrap_or(v.len());
- if size != v.len() {
- // Handle self enum
- if size == 16 && v.len() < 16 {
- v = Cow::Owned(pad16(&v, false).to_vec());
- } else if size < 16 && v.len() == 16 {
- v = Cow::Owned(v[0..size].to_vec());
- } else {
- return Err(MirEvalError::InvalidConst(konst.clone()));
- }
- }
- let addr = self.heap_allocate(size);
- self.write_memory(addr, &v)?;
- self.patch_addresses(&patch_map, addr, &data.ty, locals)?;
- Interval::new(addr, size)
- }
- ConstScalar::Unknown => not_supported!("evaluating unknown const"),
- },
+ chalk_ir::ConstValue::Concrete(c) => {
+ self.allocate_const_in_heap(c, &data.ty, locals, konst)?
+ }
+ }
+ }
+ })
+ }
+
+ fn allocate_const_in_heap(
+ &mut self,
+ c: &chalk_ir::ConcreteConst<Interner>,
+ ty: &Ty,
+ locals: &Locals<'_>,
+ konst: &chalk_ir::Const<Interner>,
+ ) -> Result<Interval> {
+ Ok(match &c.interned {
+ ConstScalar::Bytes(v, memory_map) => {
+ let mut v: Cow<'_, [u8]> = Cow::Borrowed(v);
+ let patch_map = memory_map.transform_addresses(|b| {
+ let addr = self.heap_allocate(b.len(), 1); // FIXME: align is wrong
+ self.write_memory(addr, b)?;
+ Ok(addr.to_usize())
+ })?;
+ let (size, align) = self.size_align_of(ty, locals)?.unwrap_or((v.len(), 1));
+ if size != v.len() {
+                    // Handle the dummy 16-byte layout used for enums in their own discriminant bodies
+ if size == 16 && v.len() < 16 {
+ v = Cow::Owned(pad16(&v, false).to_vec());
+ } else if size < 16 && v.len() == 16 {
+ v = Cow::Owned(v[0..size].to_vec());
+ } else {
+ return Err(MirEvalError::InvalidConst(konst.clone()));
+ }
}
+ let addr = self.heap_allocate(size, align);
+ self.write_memory(addr, &v)?;
+ self.patch_addresses(&patch_map, &memory_map.vtable, addr, ty, locals)?;
+ Interval::new(addr, size)
+ }
+ ConstScalar::UnevaluatedConst(..) => {
+ not_supported!("unevaluated const present in monomorphized mir");
}
+ ConstScalar::Unknown => not_supported!("evaluating unknown const"),
})
}
@@ -987,197 +1462,213 @@ impl Evaluator<'_> {
}
fn read_memory(&self, addr: Address, size: usize) -> Result<&[u8]> {
+ if size == 0 {
+ return Ok(&[]);
+ }
let (mem, pos) = match addr {
Stack(x) => (&self.stack, x),
Heap(x) => (&self.heap, x),
+ Invalid(x) => {
+ return Err(MirEvalError::UndefinedBehavior(format!(
+ "read invalid memory address {x} with size {size}"
+ )));
+ }
};
- mem.get(pos..pos + size).ok_or(MirEvalError::UndefinedBehavior("out of bound memory read"))
+ mem.get(pos..pos + size)
+ .ok_or_else(|| MirEvalError::UndefinedBehavior("out of bound memory read".to_string()))
}
fn write_memory(&mut self, addr: Address, r: &[u8]) -> Result<()> {
+ if r.is_empty() {
+ return Ok(());
+ }
let (mem, pos) = match addr {
Stack(x) => (&mut self.stack, x),
Heap(x) => (&mut self.heap, x),
+ Invalid(x) => {
+ return Err(MirEvalError::UndefinedBehavior(format!(
+ "write invalid memory address {x} with content {r:?}"
+ )));
+ }
};
mem.get_mut(pos..pos + r.len())
- .ok_or(MirEvalError::UndefinedBehavior("out of bound memory write"))?
+ .ok_or_else(|| {
+ MirEvalError::UndefinedBehavior("out of bound memory write".to_string())
+ })?
.copy_from_slice(r);
Ok(())
}
- fn size_of(&self, ty: &Ty, locals: &Locals<'_>) -> Result<Option<usize>> {
+ fn size_align_of(&self, ty: &Ty, locals: &Locals<'_>) -> Result<Option<(usize, usize)>> {
if let DefWithBodyId::VariantId(f) = locals.body.owner {
if let Some((adt, _)) = ty.as_adt() {
if AdtId::from(f.parent) == adt {
                // Computing the exact size of enums requires resolving the enum discriminants. In order to prevent loops (and
                // infinite-sized type errors) we use a dummy size
- return Ok(Some(16));
+ return Ok(Some((16, 16)));
}
}
}
- let ty = &self.ty_filler(ty, locals.subst, locals.body.owner)?;
let layout = self.layout(ty);
if self.assert_placeholder_ty_is_unused {
if matches!(layout, Err(MirEvalError::LayoutError(LayoutError::HasPlaceholder, _))) {
- return Ok(Some(0));
+ return Ok(Some((0, 1)));
}
}
let layout = layout?;
- Ok(layout.is_sized().then(|| layout.size.bytes_usize()))
+ Ok(layout
+ .is_sized()
+ .then(|| (layout.size.bytes_usize(), layout.align.abi.bytes() as usize)))
}
    /// A version of `self.size_align_of` which returns an error if the type is unsized. The `what` argument should
    /// be something that completes this sentence: `error: type {ty} was unsized. {what} should be sized`
fn size_of_sized(&self, ty: &Ty, locals: &Locals<'_>, what: &'static str) -> Result<usize> {
- match self.size_of(ty, locals)? {
- Some(x) => Ok(x),
+ match self.size_align_of(ty, locals)? {
+ Some(x) => Ok(x.0),
None => Err(MirEvalError::TypeIsUnsized(ty.clone(), what)),
}
}
- /// Uses `ty_filler` to fill an entire subst
- fn subst_filler(&self, subst: &Substitution, locals: &Locals<'_>) -> Substitution {
- Substitution::from_iter(
- Interner,
- subst.iter(Interner).map(|x| match x.data(Interner) {
- chalk_ir::GenericArgData::Ty(ty) => {
- let Ok(ty) = self.ty_filler(ty, locals.subst, locals.body.owner) else {
- return x.clone();
- };
- chalk_ir::GenericArgData::Ty(ty).intern(Interner)
- }
- _ => x.clone(),
- }),
- )
- }
-
- /// This function substitutes placeholders of the body with the provided subst, effectively plays
- /// the rule of monomorphization. In addition to placeholders, it substitutes opaque types (return
- /// position impl traits) with their underlying type.
- fn ty_filler(&self, ty: &Ty, subst: &Substitution, owner: DefWithBodyId) -> Result<Ty> {
- struct Filler<'a> {
- db: &'a dyn HirDatabase,
- subst: &'a Substitution,
- skip_params: usize,
- }
- impl FallibleTypeFolder<Interner> for Filler<'_> {
- type Error = MirEvalError;
-
- fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder<Interner, Error = Self::Error> {
- self
- }
-
- fn interner(&self) -> Interner {
- Interner
- }
-
- fn try_fold_ty(
- &mut self,
- ty: Ty,
- outer_binder: DebruijnIndex,
- ) -> std::result::Result<Ty, Self::Error> {
- match ty.kind(Interner) {
- TyKind::OpaqueType(id, subst) => {
- let impl_trait_id = self.db.lookup_intern_impl_trait_id((*id).into());
- match impl_trait_id {
- crate::ImplTraitId::ReturnTypeImplTrait(func, idx) => {
- let infer = self.db.infer(func.into());
- let filler = &mut Filler { db: self.db, subst, skip_params: 0 };
- filler.try_fold_ty(infer.type_of_rpit[idx].clone(), outer_binder)
- }
- crate::ImplTraitId::AsyncBlockTypeImplTrait(_, _) => {
- not_supported!("async block impl trait");
- }
- }
- }
- _ => ty.try_super_fold_with(self.as_dyn(), outer_binder),
- }
- }
-
- fn try_fold_free_placeholder_ty(
- &mut self,
- idx: chalk_ir::PlaceholderIndex,
- _outer_binder: DebruijnIndex,
- ) -> std::result::Result<Ty, Self::Error> {
- let x = from_placeholder_idx(self.db, idx);
- Ok(self
- .subst
- .as_slice(Interner)
- .get((u32::from(x.local_id.into_raw()) as usize) + self.skip_params)
- .and_then(|x| x.ty(Interner))
- .ok_or(MirEvalError::TypeError("Generic arg not provided"))?
- .clone())
- }
- }
- let filler = &mut Filler { db: self.db, subst, skip_params: 0 };
- Ok(normalize(self.db, owner, ty.clone().try_fold_with(filler, DebruijnIndex::INNERMOST)?))
- }
-
- fn heap_allocate(&mut self, s: usize) -> Address {
+ fn heap_allocate(&mut self, size: usize, _align: usize) -> Address {
let pos = self.heap.len();
- self.heap.extend(iter::repeat(0).take(s));
+ self.heap.extend(iter::repeat(0).take(size));
Address::Heap(pos)
}
- pub fn interpret_mir_with_no_arg(&mut self, body: &MirBody) -> Result<Vec<u8>> {
- self.interpret_mir(&body, vec![].into_iter(), Substitution::empty(Interner))
- }
-
- fn detect_lang_function(&self, def: FunctionId) -> Option<LangItem> {
- let candidate = lang_attr(self.db.upcast(), def)?;
- // filter normal lang functions out
- if [LangItem::IntoIterIntoIter, LangItem::IteratorNext].contains(&candidate) {
+ fn detect_fn_trait(&self, def: FunctionId) -> Option<FnTrait> {
+ use LangItem::*;
+ let ItemContainerId::TraitId(parent) = self.db.lookup_intern_function(def).container else {
return None;
+ };
+ let l = lang_attr(self.db.upcast(), parent)?;
+ match l {
+ FnOnce => Some(FnTrait::FnOnce),
+ FnMut => Some(FnTrait::FnMut),
+ Fn => Some(FnTrait::Fn),
+ _ => None,
}
- Some(candidate)
}
fn create_memory_map(&self, bytes: &[u8], ty: &Ty, locals: &Locals<'_>) -> Result<MemoryMap> {
- // FIXME: support indirect references
- let mut mm = MemoryMap::default();
- match ty.kind(Interner) {
- TyKind::Ref(_, _, t) => {
- let size = self.size_of(t, locals)?;
- match size {
- Some(size) => {
- let addr_usize = from_bytes!(usize, bytes);
- mm.insert(
- addr_usize,
- self.read_memory(Address::from_usize(addr_usize), size)?.to_vec(),
- )
- }
- None => {
- let element_size = match t.kind(Interner) {
- TyKind::Str => 1,
- TyKind::Slice(t) => {
- self.size_of_sized(t, locals, "slice inner type")?
+ fn rec(
+ this: &Evaluator<'_>,
+ bytes: &[u8],
+ ty: &Ty,
+ locals: &Locals<'_>,
+ mm: &mut MemoryMap,
+ ) -> Result<()> {
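+            // Walk the value recursively and record every memory region reachable
+            // through references, so a constant keeps its pointees with it.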
+ match ty.kind(Interner) {
+ TyKind::Ref(_, _, t) => {
+ let size = this.size_align_of(t, locals)?;
+ match size {
+ Some((size, _)) => {
+ let addr_usize = from_bytes!(usize, bytes);
+ mm.insert(
+ addr_usize,
+ this.read_memory(Address::from_usize(addr_usize), size)?.to_vec(),
+ )
+ }
+ None => {
+ let mut check_inner = None;
+ let (addr, meta) = bytes.split_at(bytes.len() / 2);
+ let element_size = match t.kind(Interner) {
+ TyKind::Str => 1,
+ TyKind::Slice(t) => {
+ check_inner = Some(t);
+ this.size_of_sized(t, locals, "slice inner type")?
+ }
+ TyKind::Dyn(_) => {
+ let t = this.vtable_map.ty_of_bytes(meta)?;
+ check_inner = Some(t);
+ this.size_of_sized(t, locals, "dyn concrete type")?
+ }
+ _ => return Ok(()),
+ };
+ let count = match t.kind(Interner) {
+ TyKind::Dyn(_) => 1,
+ _ => from_bytes!(usize, meta),
+ };
+ let size = element_size * count;
+ let addr = Address::from_bytes(addr)?;
+ let b = this.read_memory(addr, size)?;
+ mm.insert(addr.to_usize(), b.to_vec());
+ if let Some(ty) = check_inner {
+ for i in 0..count {
+ let offset = element_size * i;
+ rec(this, &b[offset..offset + element_size], &ty, locals, mm)?;
+ }
}
- _ => return Ok(mm), // FIXME: support other kind of unsized types
- };
- let (addr, meta) = bytes.split_at(bytes.len() / 2);
- let size = element_size * from_bytes!(usize, meta);
- let addr = Address::from_bytes(addr)?;
- mm.insert(addr.to_usize(), self.read_memory(addr, size)?.to_vec());
+ }
}
}
+ chalk_ir::TyKind::Tuple(_, subst) => {
+ let layout = this.layout(ty)?;
+ for (id, ty) in subst.iter(Interner).enumerate() {
+                    let ty = ty.assert_ty_ref(Interner); // Tuples only have type arguments
+ let offset = layout.fields.offset(id).bytes_usize();
+ let size = this.layout(ty)?.size.bytes_usize();
+ rec(this, &bytes[offset..offset + size], ty, locals, mm)?;
+ }
+ }
+ chalk_ir::TyKind::Adt(adt, subst) => match adt.0 {
+ AdtId::StructId(s) => {
+ let data = this.db.struct_data(s);
+ let layout = this.layout(ty)?;
+ let field_types = this.db.field_types(s.into());
+ for (f, _) in data.variant_data.fields().iter() {
+ let offset = layout
+ .fields
+ .offset(u32::from(f.into_raw()) as usize)
+ .bytes_usize();
+ let ty = &field_types[f].clone().substitute(Interner, subst);
+ let size = this.layout(ty)?.size.bytes_usize();
+ rec(this, &bytes[offset..offset + size], ty, locals, mm)?;
+ }
+ }
+ AdtId::EnumId(e) => {
+ let layout = this.layout(ty)?;
+ if let Some((v, l)) =
+ detect_variant_from_bytes(&layout, this.db, this.crate_id, bytes, e)
+ {
+ let data = &this.db.enum_data(e).variants[v].variant_data;
+ let field_types = this
+ .db
+ .field_types(EnumVariantId { parent: e, local_id: v }.into());
+ for (f, _) in data.fields().iter() {
+ let offset =
+ l.fields.offset(u32::from(f.into_raw()) as usize).bytes_usize();
+ let ty = &field_types[f].clone().substitute(Interner, subst);
+ let size = this.layout(ty)?.size.bytes_usize();
+ rec(this, &bytes[offset..offset + size], ty, locals, mm)?;
+ }
+ }
+ }
+ AdtId::UnionId(_) => (),
+ },
+ _ => (),
}
- _ => (),
+ Ok(())
}
+ let mut mm = MemoryMap::default();
+ rec(self, bytes, ty, locals, &mut mm)?;
Ok(mm)
}
fn patch_addresses(
&mut self,
patch_map: &HashMap<usize, usize>,
+ old_vtable: &VTableMap,
addr: Address,
ty: &Ty,
locals: &Locals<'_>,
) -> Result<()> {
// FIXME: support indirect references
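+        // Addresses inside a relocated constant are rewritten via `patch_map`, and
+        // function-pointer ids are re-registered from `old_vtable` into this
+        // evaluator's own vtable map.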
+ let layout = self.layout(ty)?;
let my_size = self.size_of_sized(ty, locals, "value to patch address")?;
match ty.kind(Interner) {
TyKind::Ref(_, _, t) => {
- let size = self.size_of(t, locals)?;
+ let size = self.size_align_of(t, locals)?;
match size {
Some(_) => {
let current = from_bytes!(usize, self.read_memory(addr, my_size)?);
@@ -1193,51 +1684,441 @@ impl Evaluator<'_> {
}
}
}
- _ => (),
+ TyKind::Function(_) => {
+ let ty = old_vtable.ty_of_bytes(self.read_memory(addr, my_size)?)?.clone();
+ let new_id = self.vtable_map.id(ty);
+ self.write_memory(addr, &new_id.to_le_bytes())?;
+ }
+ TyKind::Adt(id, subst) => match id.0 {
+ AdtId::StructId(s) => {
+ for (i, (_, ty)) in self.db.field_types(s.into()).iter().enumerate() {
+ let offset = layout.fields.offset(i).bytes_usize();
+ let ty = ty.clone().substitute(Interner, subst);
+ self.patch_addresses(
+ patch_map,
+ old_vtable,
+ addr.offset(offset),
+ &ty,
+ locals,
+ )?;
+ }
+ }
+ AdtId::UnionId(_) => (),
+ AdtId::EnumId(_) => (),
+ },
+ TyKind::AssociatedType(_, _)
+ | TyKind::Scalar(_)
+ | TyKind::Tuple(_, _)
+ | TyKind::Array(_, _)
+ | TyKind::Slice(_)
+ | TyKind::Raw(_, _)
+ | TyKind::OpaqueType(_, _)
+ | TyKind::FnDef(_, _)
+ | TyKind::Str
+ | TyKind::Never
+ | TyKind::Closure(_, _)
+ | TyKind::Generator(_, _)
+ | TyKind::GeneratorWitness(_, _)
+ | TyKind::Foreign(_)
+ | TyKind::Error
+ | TyKind::Placeholder(_)
+ | TyKind::Dyn(_)
+ | TyKind::Alias(_)
+ | TyKind::BoundVar(_)
+ | TyKind::InferenceVar(_, _) => (),
}
Ok(())
}
- fn exec_intrinsic(
- &self,
- as_str: &str,
- _arg_bytes: impl Iterator<Item = Vec<u8>>,
- generic_args: Substitution,
+ fn exec_fn_pointer(
+ &mut self,
+ bytes: Interval,
+ destination: Interval,
+ args: &[IntervalAndTy],
locals: &Locals<'_>,
- ) -> Result<Vec<u8>> {
- match as_str {
- "size_of" => {
- let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
- return Err(MirEvalError::TypeError("size_of generic arg is not provided"));
- };
- let size = self.size_of(ty, locals)?;
- match size {
- Some(x) => Ok(x.to_le_bytes().to_vec()),
- None => return Err(MirEvalError::TypeError("size_of arg is unsized")),
+ span: MirSpan,
+ ) -> Result<()> {
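+        // The "pointer" bytes are a `vtable_map` id; look up the concrete type it
+        // encodes and dispatch to the corresponding `FnDef` or closure.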
+ let id = from_bytes!(usize, bytes.get(self)?);
+ let next_ty = self.vtable_map.ty(id)?.clone();
+ match &next_ty.data(Interner).kind {
+ TyKind::FnDef(def, generic_args) => {
+ self.exec_fn_def(*def, generic_args, destination, args, &locals, span)?;
+ }
+ TyKind::Closure(id, subst) => {
+ self.exec_closure(*id, bytes.slice(0..0), subst, destination, args, locals, span)?;
+ }
+ _ => return Err(MirEvalError::TypeError("function pointer to non function")),
+ }
+ Ok(())
+ }
+
+ fn exec_closure(
+ &mut self,
+ closure: ClosureId,
+ closure_data: Interval,
+ generic_args: &Substitution,
+ destination: Interval,
+ args: &[IntervalAndTy],
+ locals: &Locals<'_>,
+ span: MirSpan,
+ ) -> Result<()> {
+ let mir_body = self
+ .db
+ .monomorphized_mir_body_for_closure(
+ closure,
+ generic_args.clone(),
+ self.trait_env.clone(),
+ )
+ .map_err(|x| MirEvalError::MirLowerErrorForClosure(closure, x))?;
+ let closure_data = if mir_body.locals[mir_body.param_locals[0]].ty.as_reference().is_some()
+ {
+ closure_data.addr.to_bytes()
+ } else {
+ closure_data.get(self)?.to_owned()
+ };
+ let arg_bytes = iter::once(Ok(closure_data))
+ .chain(args.iter().map(|x| Ok(x.get(&self)?.to_owned())))
+ .collect::<Result<Vec<_>>>()?;
+ let bytes = self.interpret_mir(&mir_body, arg_bytes.into_iter()).map_err(|e| {
+ MirEvalError::InFunction(Either::Right(closure), Box::new(e), span, locals.body.owner)
+ })?;
+ destination.write_from_bytes(self, &bytes)
+ }
+
+ fn exec_fn_def(
+ &mut self,
+ def: FnDefId,
+ generic_args: &Substitution,
+ destination: Interval,
+ args: &[IntervalAndTy],
+ locals: &Locals<'_>,
+ span: MirSpan,
+ ) -> Result<()> {
+ let def: CallableDefId = from_chalk(self.db, def);
+ let generic_args = generic_args.clone();
+ match def {
+ CallableDefId::FunctionId(def) => {
+ if let Some(_) = self.detect_fn_trait(def) {
+ self.exec_fn_trait(&args, destination, locals, span)?;
+ return Ok(());
}
+ self.exec_fn_with_args(def, args, generic_args, locals, destination, span)?;
+ }
+ CallableDefId::StructId(id) => {
+ let (size, variant_layout, tag) =
+ self.layout_of_variant(id.into(), generic_args, &locals)?;
+ let result = self.make_by_layout(
+ size,
+ &variant_layout,
+ tag,
+ args.iter().map(|x| x.interval.into()),
+ )?;
+ destination.write_from_bytes(self, &result)?;
+ }
+ CallableDefId::EnumVariantId(id) => {
+ let (size, variant_layout, tag) =
+ self.layout_of_variant(id.into(), generic_args, &locals)?;
+ let result = self.make_by_layout(
+ size,
+ &variant_layout,
+ tag,
+ args.iter().map(|x| x.interval.into()),
+ )?;
+ destination.write_from_bytes(self, &result)?;
}
- _ => not_supported!("unknown intrinsic {as_str}"),
}
+ Ok(())
}
- pub(crate) fn exec_lang_item(
- &self,
- x: LangItem,
- mut args: std::vec::IntoIter<Vec<u8>>,
- ) -> Result<Vec<u8>> {
- use LangItem::*;
- match x {
- PanicFmt | BeginPanic => Err(MirEvalError::Panic),
- SliceLen => {
- let arg = args
- .next()
- .ok_or(MirEvalError::TypeError("argument of <[T]>::len() is not provided"))?;
- let ptr_size = arg.len() / 2;
- Ok(arg[ptr_size..].into())
+ fn exec_fn_with_args(
+ &mut self,
+ def: FunctionId,
+ args: &[IntervalAndTy],
+ generic_args: Substitution,
+ locals: &Locals<'_>,
+ destination: Interval,
+ span: MirSpan,
+ ) -> Result<()> {
+ if self.detect_and_exec_special_function(
+ def,
+ args,
+ &generic_args,
+ locals,
+ destination,
+ span,
+ )? {
+ return Ok(());
+ }
+ let arg_bytes =
+ args.iter().map(|x| Ok(x.get(&self)?.to_owned())).collect::<Result<Vec<_>>>()?;
+ if let Some(self_ty_idx) =
+ is_dyn_method(self.db, self.trait_env.clone(), def, generic_args.clone())
+ {
+            // In the layout of the currently possible receivers, which at the moment of writing this code are
+            // `&T`, `&mut T`, `Box<T>`, `Rc<T>`, `Arc<T>`, and `Pin<P>` where `P` is one of the possible receivers,
+            // the vtable is exactly in the `[ptr_size..2 * ptr_size]` bytes, so we can use it without branching on
+            // the type.
+ let ty =
+ self.vtable_map.ty_of_bytes(&arg_bytes[0][self.ptr_size()..self.ptr_size() * 2])?;
+ let mut args_for_target = args.to_vec();
+ args_for_target[0] = IntervalAndTy {
+ interval: args_for_target[0].interval.slice(0..self.ptr_size()),
+ ty: ty.clone(),
+ };
+ let ty = GenericArgData::Ty(ty.clone()).intern(Interner);
+ let generics_for_target =
+ Substitution::from_iter(
+ Interner,
+ generic_args.iter(Interner).enumerate().map(|(i, x)| {
+ if i == self_ty_idx {
+ &ty
+ } else {
+ x
+ }
+ }),
+ );
+ return self.exec_fn_with_args(
+ def,
+ &args_for_target,
+ generics_for_target,
+ locals,
+ destination,
+ span,
+ );
+ }
+ let (imp, generic_args) =
+ lookup_impl_method(self.db, self.trait_env.clone(), def, generic_args);
+ self.exec_looked_up_function(generic_args, locals, imp, arg_bytes, span, destination)
+ }
+
+ fn exec_looked_up_function(
+ &mut self,
+ generic_args: Substitution,
+ locals: &Locals<'_>,
+ imp: FunctionId,
+ arg_bytes: Vec<Vec<u8>>,
+ span: MirSpan,
+ destination: Interval,
+ ) -> Result<()> {
+ let def = imp.into();
+ let mir_body = self
+ .db
+ .monomorphized_mir_body(def, generic_args, self.trait_env.clone())
+ .map_err(|e| {
+ MirEvalError::InFunction(
+ Either::Left(imp),
+ Box::new(MirEvalError::MirLowerError(imp, e)),
+ span,
+ locals.body.owner,
+ )
+ })?;
+ let result = self.interpret_mir(&mir_body, arg_bytes.iter().cloned()).map_err(|e| {
+ MirEvalError::InFunction(Either::Left(imp), Box::new(e), span, locals.body.owner)
+ })?;
+ destination.write_from_bytes(self, &result)?;
+ Ok(())
+ }
+
+ fn exec_fn_trait(
+ &mut self,
+ args: &[IntervalAndTy],
+ destination: Interval,
+ locals: &Locals<'_>,
+ span: MirSpan,
+ ) -> Result<()> {
+ let func = args.get(0).ok_or(MirEvalError::TypeError("fn trait with no arg"))?;
+ let mut func_ty = func.ty.clone();
+ let mut func_data = func.interval;
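+        // Peel references off the callee. A `dyn Fn*` object carries the concrete
+        // callable type in its vtable word, which replaces `func_ty` once found.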
+ while let TyKind::Ref(_, _, z) = func_ty.kind(Interner) {
+ func_ty = z.clone();
+ if matches!(func_ty.kind(Interner), TyKind::Dyn(_)) {
+ let id =
+ from_bytes!(usize, &func_data.get(self)?[self.ptr_size()..self.ptr_size() * 2]);
+ func_data = func_data.slice(0..self.ptr_size());
+ func_ty = self.vtable_map.ty(id)?.clone();
+ }
+ let size = self.size_of_sized(&func_ty, locals, "self type of fn trait")?;
+ func_data = Interval { addr: Address::from_bytes(func_data.get(self)?)?, size };
+ }
+ match &func_ty.data(Interner).kind {
+ TyKind::FnDef(def, subst) => {
+ self.exec_fn_def(*def, subst, destination, &args[1..], locals, span)?;
+ }
+ TyKind::Function(_) => {
+ self.exec_fn_pointer(func_data, destination, &args[1..], locals, span)?;
+ }
+ TyKind::Closure(closure, subst) => {
+ self.exec_closure(
+ *closure,
+ func_data,
+ &Substitution::from_iter(Interner, ClosureSubst(subst).parent_subst()),
+ destination,
+ &args[1..],
+ locals,
+ span,
+ )?;
+ }
+ x => not_supported!("Call FnTrait methods with type {x:?}"),
+ }
+ Ok(())
+ }
+
+ fn eval_static(&mut self, st: StaticId, locals: &Locals<'_>) -> Result<Address> {
+ if let Some(o) = self.static_locations.get(&st) {
+ return Ok(*o);
+ };
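+        // Statics are evaluated once and cached. The returned address points at a
+        // pointer-sized slot which in turn holds the address of the static's data.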
+ let static_data = self.db.static_data(st);
+ let result = if !static_data.is_extern {
+ let konst = self.db.const_eval_static(st).map_err(|e| {
+ MirEvalError::ConstEvalError(
+ static_data.name.as_str().unwrap_or("_").to_owned(),
+ Box::new(e),
+ )
+ })?;
+ let data = &konst.data(Interner);
+ if let chalk_ir::ConstValue::Concrete(c) = &data.value {
+ self.allocate_const_in_heap(&c, &data.ty, locals, &konst)?
+ } else {
+ not_supported!("unevaluatable static");
+ }
+ } else {
+ let ty = &self.db.infer(st.into())[self.db.body(st.into()).body_expr];
+ let Some((size, align)) = self.size_align_of(&ty, locals)? else {
+ not_supported!("unsized extern static");
+ };
+ let addr = self.heap_allocate(size, align);
+ Interval::new(addr, size)
+ };
+ let addr = self.heap_allocate(self.ptr_size(), self.ptr_size());
+ self.write_memory(addr, &result.addr.to_bytes())?;
+ self.static_locations.insert(st, addr);
+ Ok(addr)
+ }
+
+ fn const_eval_discriminant(&self, variant: EnumVariantId) -> Result<i128> {
+ let r = self.db.const_eval_discriminant(variant);
+ match r {
+ Ok(r) => Ok(r),
+ Err(e) => {
+ let data = self.db.enum_data(variant.parent);
+ let name = format!(
+ "{}::{}",
+ data.name.display(self.db.upcast()),
+ data.variants[variant.local_id].name.display(self.db.upcast())
+ );
+ Err(MirEvalError::ConstEvalError(name, Box::new(e)))
}
- x => not_supported!("Executing lang item {x:?}"),
}
}
+
+ fn drop_place(&mut self, place: &Place, locals: &mut Locals<'_>, span: MirSpan) -> Result<()> {
+ let (addr, ty, metadata) = self.place_addr_and_ty_and_metadata(place, locals)?;
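+        // A place whose drop flag was already cleared has been moved out of and
+        // must not be dropped again.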
+ if !locals.drop_flags.remove_place(place) {
+ return Ok(());
+ }
+ let metadata = match metadata {
+ Some(x) => x.get(self)?.to_vec(),
+ None => vec![],
+ };
+ self.run_drop_glue_deep(ty, locals, addr, &metadata, span)
+ }
+
+ fn run_drop_glue_deep(
+ &mut self,
+ ty: Ty,
+ locals: &Locals<'_>,
+ addr: Address,
+ _metadata: &[u8],
+ span: MirSpan,
+ ) -> Result<()> {
+ let Some(drop_fn) = (|| {
+ let drop_trait = self.db.lang_item(self.crate_id, LangItem::Drop)?.as_trait()?;
+ self.db.trait_data(drop_trait).method_by_name(&name![drop])
+ })() else {
+            // In some tests we don't have the drop trait in minicore, so
+            // we can ignore drop in them.
+ return Ok(());
+ };
+ let (impl_drop_candidate, subst) = lookup_impl_method(
+ self.db,
+ self.trait_env.clone(),
+ drop_fn,
+ Substitution::from1(Interner, ty.clone()),
+ );
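+        // `lookup_impl_method` falls back to the trait method itself when there is
+        // no `Drop` impl for the type, in which case no user drop code runs.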
+ if impl_drop_candidate != drop_fn {
+ self.exec_looked_up_function(
+ subst,
+ locals,
+ impl_drop_candidate,
+ vec![addr.to_bytes()],
+ span,
+ Interval { addr: Address::Invalid(0), size: 0 },
+ )?;
+ }
+ match ty.kind(Interner) {
+ TyKind::Adt(id, subst) => {
+ match id.0 {
+ AdtId::StructId(s) => {
+ let data = self.db.struct_data(s);
+ if data.flags.contains(StructFlags::IS_MANUALLY_DROP) {
+ return Ok(());
+ }
+ let layout = self.layout_adt(id.0, subst.clone())?;
+ match data.variant_data.as_ref() {
+ VariantData::Record(fields) | VariantData::Tuple(fields) => {
+ let field_types = self.db.field_types(s.into());
+ for (field, _) in fields.iter() {
+ let offset = layout
+ .fields
+ .offset(u32::from(field.into_raw()) as usize)
+ .bytes_usize();
+ let addr = addr.offset(offset);
+ let ty = field_types[field].clone().substitute(Interner, subst);
+ self.run_drop_glue_deep(ty, locals, addr, &[], span)?;
+ }
+ }
+ VariantData::Unit => (),
+ }
+ }
+ AdtId::UnionId(_) => (), // union fields don't need drop
+ AdtId::EnumId(_) => (),
+ }
+ }
+ TyKind::AssociatedType(_, _)
+ | TyKind::Scalar(_)
+ | TyKind::Tuple(_, _)
+ | TyKind::Array(_, _)
+ | TyKind::Slice(_)
+ | TyKind::Raw(_, _)
+ | TyKind::Ref(_, _, _)
+ | TyKind::OpaqueType(_, _)
+ | TyKind::FnDef(_, _)
+ | TyKind::Str
+ | TyKind::Never
+ | TyKind::Closure(_, _)
+ | TyKind::Generator(_, _)
+ | TyKind::GeneratorWitness(_, _)
+ | TyKind::Foreign(_)
+ | TyKind::Error
+ | TyKind::Placeholder(_)
+ | TyKind::Dyn(_)
+ | TyKind::Alias(_)
+ | TyKind::Function(_)
+ | TyKind::BoundVar(_)
+ | TyKind::InferenceVar(_, _) => (),
+ };
+ Ok(())
+ }
+
+ fn write_to_stdout(&mut self, interval: Interval) -> Result<()> {
+ self.stdout.extend(interval.get(self)?.to_vec());
+ Ok(())
+ }
+
+ fn write_to_stderr(&mut self, interval: Interval) -> Result<()> {
+ self.stderr.extend(interval.get(self)?.to_vec());
+ Ok(())
+ }
}
pub fn pad16(x: &[u8], is_signed: bool) -> [u8; 16] {
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs
new file mode 100644
index 000000000..3b9ef03c3
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs
@@ -0,0 +1,792 @@
+//! Interpret intrinsics, lang items and well-known `extern "C"` functions whose
+//! implementation is not available.
+
+use std::cmp;
+
+use super::*;
+
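+// Reads a little-endian `$ty` from a byte slice, e.g. `from_bytes!(usize, size.get(self)?)`;
+// errors with a type mismatch if the slice length doesn't equal the target size.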
+macro_rules! from_bytes {
+ ($ty:tt, $value:expr) => {
+ ($ty::from_le_bytes(match ($value).try_into() {
+ Ok(x) => x,
+ Err(_) => return Err(MirEvalError::TypeError("mismatched size")),
+ }))
+ };
+}
+
+macro_rules! not_supported {
+ ($x: expr) => {
+ return Err(MirEvalError::NotSupported(format!($x)))
+ };
+}
+
+impl Evaluator<'_> {
+ pub(super) fn detect_and_exec_special_function(
+ &mut self,
+ def: FunctionId,
+ args: &[IntervalAndTy],
+ generic_args: &Substitution,
+ locals: &Locals<'_>,
+ destination: Interval,
+ span: MirSpan,
+ ) -> Result<bool> {
+ let function_data = self.db.function_data(def);
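+        // An intrinsic is recognized either by the function's own "rust-intrinsic" ABI
+        // or by it being declared inside an `extern "rust-intrinsic"` block.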
+ let is_intrinsic = match &function_data.abi {
+ Some(abi) => *abi == Interned::new_str("rust-intrinsic"),
+ None => match def.lookup(self.db.upcast()).container {
+ hir_def::ItemContainerId::ExternBlockId(block) => {
+ let id = block.lookup(self.db.upcast()).id;
+ id.item_tree(self.db.upcast())[id.value].abi.as_deref()
+ == Some("rust-intrinsic")
+ }
+ _ => false,
+ },
+ };
+ if is_intrinsic {
+ self.exec_intrinsic(
+ function_data.name.as_text().unwrap_or_default().as_str(),
+ args,
+ generic_args,
+ destination,
+ &locals,
+ span,
+ )?;
+ return Ok(true);
+ }
+ let is_extern_c = match def.lookup(self.db.upcast()).container {
+ hir_def::ItemContainerId::ExternBlockId(block) => {
+ let id = block.lookup(self.db.upcast()).id;
+ id.item_tree(self.db.upcast())[id.value].abi.as_deref() == Some("C")
+ }
+ _ => false,
+ };
+ if is_extern_c {
+ self.exec_extern_c(
+ function_data.name.as_text().unwrap_or_default().as_str(),
+ args,
+ generic_args,
+ destination,
+ &locals,
+ span,
+ )?;
+ return Ok(true);
+ }
+ let alloc_fn = function_data
+ .attrs
+ .iter()
+ .filter_map(|x| x.path().as_ident())
+ .filter_map(|x| x.as_str())
+ .find(|x| {
+ [
+ "rustc_allocator",
+ "rustc_deallocator",
+ "rustc_reallocator",
+ "rustc_allocator_zeroed",
+ ]
+ .contains(x)
+ });
+ if let Some(alloc_fn) = alloc_fn {
+ self.exec_alloc_fn(alloc_fn, args, destination)?;
+ return Ok(true);
+ }
+ if let Some(x) = self.detect_lang_function(def) {
+ let arg_bytes =
+ args.iter().map(|x| Ok(x.get(&self)?.to_owned())).collect::<Result<Vec<_>>>()?;
+ let result = self.exec_lang_item(x, generic_args, &arg_bytes, locals, span)?;
+ destination.write_from_bytes(self, &result)?;
+ return Ok(true);
+ }
+ Ok(false)
+ }
+
+ fn exec_alloc_fn(
+ &mut self,
+ alloc_fn: &str,
+ args: &[IntervalAndTy],
+ destination: Interval,
+ ) -> Result<()> {
+ match alloc_fn {
+ "rustc_allocator_zeroed" | "rustc_allocator" => {
+ let [size, align] = args else {
+ return Err(MirEvalError::TypeError("rustc_allocator args are not provided"));
+ };
+ let size = from_bytes!(usize, size.get(self)?);
+ let align = from_bytes!(usize, align.get(self)?);
+ let result = self.heap_allocate(size, align);
+ destination.write_from_bytes(self, &result.to_bytes())?;
+ }
+ "rustc_deallocator" => { /* no-op for now */ }
+ "rustc_reallocator" => {
+ let [ptr, old_size, align, new_size] = args else {
+ return Err(MirEvalError::TypeError("rustc_allocator args are not provided"));
+ };
+ let ptr = Address::from_bytes(ptr.get(self)?)?;
+ let old_size = from_bytes!(usize, old_size.get(self)?);
+ let new_size = from_bytes!(usize, new_size.get(self)?);
+ let align = from_bytes!(usize, align.get(self)?);
+ let result = self.heap_allocate(new_size, align);
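+                // Reallocation is modeled as a fresh allocation plus a copy of the old
+                // contents; the old block is never freed (the deallocator above is a no-op).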
+ Interval { addr: result, size: old_size }
+ .write_from_interval(self, Interval { addr: ptr, size: old_size })?;
+ destination.write_from_bytes(self, &result.to_bytes())?;
+ }
+ _ => not_supported!("unknown alloc function"),
+ }
+ Ok(())
+ }
+
+ fn detect_lang_function(&self, def: FunctionId) -> Option<LangItem> {
+ use LangItem::*;
+ let candidate = lang_attr(self.db.upcast(), def)?;
+ // We want to execute these functions with special logic
+ if [PanicFmt, BeginPanic, SliceLen, DropInPlace].contains(&candidate) {
+ return Some(candidate);
+ }
+ None
+ }
+
+ fn exec_lang_item(
+ &mut self,
+ x: LangItem,
+ generic_args: &Substitution,
+ args: &[Vec<u8>],
+ locals: &Locals<'_>,
+ span: MirSpan,
+ ) -> Result<Vec<u8>> {
+ use LangItem::*;
+ let mut args = args.iter();
+ match x {
+ BeginPanic => Err(MirEvalError::Panic("<unknown-panic-payload>".to_string())),
+ PanicFmt => {
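+                // Best effort: read the `pieces` field of `core::fmt::Arguments` and
+                // concatenate its literal string pieces; the formatted arguments
+                // themselves are not rendered.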
+ let message = (|| {
+ let arguments_struct =
+ self.db.lang_item(self.crate_id, LangItem::FormatArguments)?.as_struct()?;
+ let arguments_layout = self
+ .layout_adt(arguments_struct.into(), Substitution::empty(Interner))
+ .ok()?;
+ let arguments_field_pieces =
+ self.db.struct_data(arguments_struct).variant_data.field(&name![pieces])?;
+ let pieces_offset = arguments_layout
+ .fields
+ .offset(u32::from(arguments_field_pieces.into_raw()) as usize)
+ .bytes_usize();
+ let ptr_size = self.ptr_size();
+ let arg = args.next()?;
+ let pieces_array_addr =
+ Address::from_bytes(&arg[pieces_offset..pieces_offset + ptr_size]).ok()?;
+ let pieces_array_len = usize::from_le_bytes(
+ (&arg[pieces_offset + ptr_size..pieces_offset + 2 * ptr_size])
+ .try_into()
+ .ok()?,
+ );
+ let mut message = "".to_string();
+ for i in 0..pieces_array_len {
+ let piece_ptr_addr = pieces_array_addr.offset(2 * i * ptr_size);
+ let piece_addr =
+ Address::from_bytes(self.read_memory(piece_ptr_addr, ptr_size).ok()?)
+ .ok()?;
+ let piece_len = usize::from_le_bytes(
+ self.read_memory(piece_ptr_addr.offset(ptr_size), ptr_size)
+ .ok()?
+ .try_into()
+ .ok()?,
+ );
+ let piece_data = self.read_memory(piece_addr, piece_len).ok()?;
+ message += &std::string::String::from_utf8_lossy(piece_data);
+ }
+ Some(message)
+ })()
+ .unwrap_or_else(|| "<format-args-evaluation-failed>".to_string());
+ Err(MirEvalError::Panic(message))
+ }
+ SliceLen => {
+ let arg = args
+ .next()
+ .ok_or(MirEvalError::TypeError("argument of <[T]>::len() is not provided"))?;
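+                // The slice argument is a fat pointer `(data_ptr, len)`; its second
+                // pointer-sized half is the length.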
+ let ptr_size = arg.len() / 2;
+ Ok(arg[ptr_size..].into())
+ }
+ DropInPlace => {
+ let ty =
+ generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)).ok_or(
+ MirEvalError::TypeError(
+ "generic argument of drop_in_place is not provided",
+ ),
+ )?;
+ let arg = args
+ .next()
+ .ok_or(MirEvalError::TypeError("argument of drop_in_place is not provided"))?;
+ self.run_drop_glue_deep(
+ ty.clone(),
+ locals,
+ Address::from_bytes(&arg[0..self.ptr_size()])?,
+ &arg[self.ptr_size()..],
+ span,
+ )?;
+ Ok(vec![])
+ }
+ x => not_supported!("Executing lang item {x:?}"),
+ }
+ }
+
+ fn exec_extern_c(
+ &mut self,
+ as_str: &str,
+ args: &[IntervalAndTy],
+ _generic_args: &Substitution,
+ destination: Interval,
+ locals: &Locals<'_>,
+ _span: MirSpan,
+ ) -> Result<()> {
+ match as_str {
+ "memcmp" => {
+ let [ptr1, ptr2, size] = args else {
+ return Err(MirEvalError::TypeError("memcmp args are not provided"));
+ };
+ let addr1 = Address::from_bytes(ptr1.get(self)?)?;
+ let addr2 = Address::from_bytes(ptr2.get(self)?)?;
+ let size = from_bytes!(usize, size.get(self)?);
+ let slice1 = self.read_memory(addr1, size)?;
+ let slice2 = self.read_memory(addr2, size)?;
+ let r: i128 = match slice1.cmp(slice2) {
+ cmp::Ordering::Less => -1,
+ cmp::Ordering::Equal => 0,
+ cmp::Ordering::Greater => 1,
+ };
+ destination.write_from_bytes(self, &r.to_le_bytes()[..destination.size])
+ }
+ "write" => {
+ let [fd, ptr, len] = args else {
+ return Err(MirEvalError::TypeError("libc::write args are not provided"));
+ };
+ let fd = u128::from_le_bytes(pad16(fd.get(self)?, false));
+ let interval = Interval {
+ addr: Address::from_bytes(ptr.get(self)?)?,
+ size: from_bytes!(usize, len.get(self)?),
+ };
+ match fd {
+ 1 => {
+ self.write_to_stdout(interval)?;
+ }
+ 2 => {
+ self.write_to_stderr(interval)?;
+ }
+ _ => not_supported!("write to arbitrary file descriptor"),
+ }
+ destination.write_from_interval(self, len.interval)?;
+ Ok(())
+ }
+ "pthread_key_create" => {
+ let key = self.thread_local_storage.create_key();
+ let Some(arg0) = args.get(0) else {
+ return Err(MirEvalError::TypeError("pthread_key_create arg0 is not provided"));
+ };
+ let arg0_addr = Address::from_bytes(arg0.get(self)?)?;
+ let key_ty = if let Some((ty, ..)) = arg0.ty.as_reference_or_ptr() {
+ ty
+ } else {
+ return Err(MirEvalError::TypeError(
+ "pthread_key_create arg0 is not a pointer",
+ ));
+ };
+ let arg0_interval = Interval::new(
+ arg0_addr,
+ self.size_of_sized(key_ty, locals, "pthread_key_create key arg")?,
+ );
+ arg0_interval.write_from_bytes(self, &key.to_le_bytes()[0..arg0_interval.size])?;
+ // return 0 as success
+ destination.write_from_bytes(self, &0u64.to_le_bytes()[0..destination.size])?;
+ Ok(())
+ }
+ "pthread_getspecific" => {
+ let Some(arg0) = args.get(0) else {
+ return Err(MirEvalError::TypeError("pthread_getspecific arg0 is not provided"));
+ };
+ let key = from_bytes!(usize, &pad16(arg0.get(self)?, false)[0..8]);
+ let value = self.thread_local_storage.get_key(key)?;
+ destination.write_from_bytes(self, &value.to_le_bytes()[0..destination.size])?;
+ Ok(())
+ }
+ "pthread_setspecific" => {
+ let Some(arg0) = args.get(0) else {
+ return Err(MirEvalError::TypeError("pthread_setspecific arg0 is not provided"));
+ };
+ let key = from_bytes!(usize, &pad16(arg0.get(self)?, false)[0..8]);
+ let Some(arg1) = args.get(1) else {
+ return Err(MirEvalError::TypeError("pthread_setspecific arg1 is not provided"));
+ };
+ let value = from_bytes!(u128, pad16(arg1.get(self)?, false));
+ self.thread_local_storage.set_key(key, value)?;
+ // return 0 as success
+ destination.write_from_bytes(self, &0u64.to_le_bytes()[0..destination.size])?;
+ Ok(())
+ }
+ "pthread_key_delete" => {
+ // we ignore this currently
+ // return 0 as success
+ destination.write_from_bytes(self, &0u64.to_le_bytes()[0..destination.size])?;
+ Ok(())
+ }
+ _ => not_supported!("unknown external function {as_str}"),
+ }
+ }
+
+ fn exec_intrinsic(
+ &mut self,
+ name: &str,
+ args: &[IntervalAndTy],
+ generic_args: &Substitution,
+ destination: Interval,
+ locals: &Locals<'_>,
+ span: MirSpan,
+ ) -> Result<()> {
+ if let Some(name) = name.strip_prefix("atomic_") {
+ return self.exec_atomic_intrinsic(name, args, generic_args, destination, locals, span);
+ }
+ if let Some(name) = name.strip_suffix("f64") {
+ let result = match name {
+ "sqrt" | "sin" | "cos" | "exp" | "exp2" | "log" | "log10" | "log2" | "fabs"
+ | "floor" | "ceil" | "trunc" | "rint" | "nearbyint" | "round" | "roundeven" => {
+ let [arg] = args else {
+ return Err(MirEvalError::TypeError("f64 intrinsic signature doesn't match fn (f64) -> f64"));
+ };
+ let arg = from_bytes!(f64, arg.get(self)?);
+ match name {
+ "sqrt" => arg.sqrt(),
+ "sin" => arg.sin(),
+ "cos" => arg.cos(),
+ "exp" => arg.exp(),
+ "exp2" => arg.exp2(),
+ "log" => arg.ln(),
+ "log10" => arg.log10(),
+ "log2" => arg.log2(),
+ "fabs" => arg.abs(),
+ "floor" => arg.floor(),
+ "ceil" => arg.ceil(),
+ "trunc" => arg.trunc(),
+ // FIXME: these rounds should be different, but only `.round()` is stable now.
+ "rint" => arg.round(),
+ "nearbyint" => arg.round(),
+ "round" => arg.round(),
+ "roundeven" => arg.round(),
+ _ => unreachable!(),
+ }
+ }
+ "pow" | "minnum" | "maxnum" | "copysign" => {
+ let [arg1, arg2] = args else {
+ return Err(MirEvalError::TypeError("f64 intrinsic signature doesn't match fn (f64, f64) -> f64"));
+ };
+ let arg1 = from_bytes!(f64, arg1.get(self)?);
+ let arg2 = from_bytes!(f64, arg2.get(self)?);
+ match name {
+ "pow" => arg1.powf(arg2),
+ "minnum" => arg1.min(arg2),
+ "maxnum" => arg1.max(arg2),
+ "copysign" => arg1.copysign(arg2),
+ _ => unreachable!(),
+ }
+ }
+ "powi" => {
+ let [arg1, arg2] = args else {
+ return Err(MirEvalError::TypeError("powif64 signature doesn't match fn (f64, i32) -> f64"));
+ };
+ let arg1 = from_bytes!(f64, arg1.get(self)?);
+ let arg2 = from_bytes!(i32, arg2.get(self)?);
+ arg1.powi(arg2)
+ }
+ "fma" => {
+ let [arg1, arg2, arg3] = args else {
+ return Err(MirEvalError::TypeError("fmaf64 signature doesn't match fn (f64, f64, f64) -> f64"));
+ };
+ let arg1 = from_bytes!(f64, arg1.get(self)?);
+ let arg2 = from_bytes!(f64, arg2.get(self)?);
+ let arg3 = from_bytes!(f64, arg3.get(self)?);
+ arg1.mul_add(arg2, arg3)
+ }
+ _ => not_supported!("unknown f64 intrinsic {name}"),
+ };
+ return destination.write_from_bytes(self, &result.to_le_bytes());
+ }
+ if let Some(name) = name.strip_suffix("f32") {
+ let result = match name {
+ "sqrt" | "sin" | "cos" | "exp" | "exp2" | "log" | "log10" | "log2" | "fabs"
+ | "floor" | "ceil" | "trunc" | "rint" | "nearbyint" | "round" | "roundeven" => {
+ let [arg] = args else {
+ return Err(MirEvalError::TypeError("f32 intrinsic signature doesn't match fn (f32) -> f32"));
+ };
+ let arg = from_bytes!(f32, arg.get(self)?);
+ match name {
+ "sqrt" => arg.sqrt(),
+ "sin" => arg.sin(),
+ "cos" => arg.cos(),
+ "exp" => arg.exp(),
+ "exp2" => arg.exp2(),
+ "log" => arg.ln(),
+ "log10" => arg.log10(),
+ "log2" => arg.log2(),
+ "fabs" => arg.abs(),
+ "floor" => arg.floor(),
+ "ceil" => arg.ceil(),
+ "trunc" => arg.trunc(),
+ // FIXME: these rounds should be different, but only `.round()` is stable now.
+ "rint" => arg.round(),
+ "nearbyint" => arg.round(),
+ "round" => arg.round(),
+ "roundeven" => arg.round(),
+ _ => unreachable!(),
+ }
+ }
+ "pow" | "minnum" | "maxnum" | "copysign" => {
+ let [arg1, arg2] = args else {
+ return Err(MirEvalError::TypeError("f32 intrinsic signature doesn't match fn (f32, f32) -> f32"));
+ };
+ let arg1 = from_bytes!(f32, arg1.get(self)?);
+ let arg2 = from_bytes!(f32, arg2.get(self)?);
+ match name {
+ "pow" => arg1.powf(arg2),
+ "minnum" => arg1.min(arg2),
+ "maxnum" => arg1.max(arg2),
+ "copysign" => arg1.copysign(arg2),
+ _ => unreachable!(),
+ }
+ }
+ "powi" => {
+ let [arg1, arg2] = args else {
+ return Err(MirEvalError::TypeError("powif32 signature doesn't match fn (f32, i32) -> f32"));
+ };
+ let arg1 = from_bytes!(f32, arg1.get(self)?);
+ let arg2 = from_bytes!(i32, arg2.get(self)?);
+ arg1.powi(arg2)
+ }
+ "fma" => {
+ let [arg1, arg2, arg3] = args else {
+ return Err(MirEvalError::TypeError("fmaf32 signature doesn't match fn (f32, f32, f32) -> f32"));
+ };
+ let arg1 = from_bytes!(f32, arg1.get(self)?);
+ let arg2 = from_bytes!(f32, arg2.get(self)?);
+ let arg3 = from_bytes!(f32, arg3.get(self)?);
+ arg1.mul_add(arg2, arg3)
+ }
+ _ => not_supported!("unknown f32 intrinsic {name}"),
+ };
+ return destination.write_from_bytes(self, &result.to_le_bytes());
+ }
+ match name {
+ "size_of" => {
+ let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
+ return Err(MirEvalError::TypeError("size_of generic arg is not provided"));
+ };
+ let size = self.size_of_sized(ty, locals, "size_of arg")?;
+ destination.write_from_bytes(self, &size.to_le_bytes()[0..destination.size])
+ }
+ "min_align_of" | "pref_align_of" => {
+ let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
+ return Err(MirEvalError::TypeError("align_of generic arg is not provided"));
+ };
+ let align = self.layout(ty)?.align.abi.bytes();
+ destination.write_from_bytes(self, &align.to_le_bytes()[0..destination.size])
+ }
+ "needs_drop" => {
+ let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
+ return Err(MirEvalError::TypeError("size_of generic arg is not provided"));
+ };
+ let result = !ty.clone().is_copy(self.db, locals.body.owner);
+ destination.write_from_bytes(self, &[u8::from(result)])
+ }
+ "ptr_guaranteed_cmp" => {
+ // FIXME: this is wrong for const eval, it should return 2 in some
+ // cases.
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("wrapping_add args are not provided"));
+ };
+ let ans = lhs.get(self)? == rhs.get(self)?;
+ destination.write_from_bytes(self, &[u8::from(ans)])
+ }
+ "saturating_add" => {
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("saturating_add args are not provided"));
+ };
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let ans = lhs.saturating_add(rhs);
+ let bits = destination.size * 8;
+ // FIXME: signed
+ let is_signed = false;
+ let mx: u128 = if is_signed { (1 << (bits - 1)) - 1 } else { (1 << bits) - 1 };
+ // FIXME: signed
+ let mn: u128 = 0;
+ let ans = cmp::min(mx, cmp::max(mn, ans));
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "wrapping_add" | "unchecked_add" => {
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("wrapping_add args are not provided"));
+ };
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let ans = lhs.wrapping_add(rhs);
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "wrapping_sub" | "unchecked_sub" | "ptr_offset_from_unsigned" | "ptr_offset_from" => {
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("wrapping_sub args are not provided"));
+ };
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let ans = lhs.wrapping_sub(rhs);
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "wrapping_mul" | "unchecked_mul" => {
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("wrapping_mul args are not provided"));
+ };
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let ans = lhs.wrapping_mul(rhs);
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "unchecked_rem" => {
+ // FIXME: signed
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("unchecked_rem args are not provided"));
+ };
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let ans = lhs.checked_rem(rhs).ok_or_else(|| {
+ MirEvalError::UndefinedBehavior("unchecked_rem with bad inputs".to_owned())
+ })?;
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "unchecked_div" | "exact_div" => {
+ // FIXME: signed
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("unchecked_div args are not provided"));
+ };
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let ans = lhs.checked_div(rhs).ok_or_else(|| {
+ MirEvalError::UndefinedBehavior("unchecked_rem with bad inputs".to_owned())
+ })?;
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
+ let [lhs, rhs] = args else {
+ return Err(MirEvalError::TypeError("const_eval_select args are not provided"));
+ };
+ let result_ty = TyKind::Tuple(
+ 2,
+ Substitution::from_iter(Interner, [lhs.ty.clone(), TyBuilder::bool()]),
+ )
+ .intern(Interner);
+ let op_size =
+ self.size_of_sized(&lhs.ty, locals, "operand of add_with_overflow")?;
+ let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
+ let (ans, u128overflow) = match name {
+ "add_with_overflow" => lhs.overflowing_add(rhs),
+ "sub_with_overflow" => lhs.overflowing_sub(rhs),
+ "mul_with_overflow" => lhs.overflowing_mul(rhs),
+ _ => unreachable!(),
+ };
+ let is_overflow = u128overflow
+ || ans.to_le_bytes()[op_size..].iter().any(|&x| x != 0 && x != 255);
+ let is_overflow = vec![u8::from(is_overflow)];
+ let layout = self.layout(&result_ty)?;
+ let result = self.make_by_layout(
+ layout.size.bytes_usize(),
+ &layout,
+ None,
+ [ans.to_le_bytes()[0..op_size].to_vec(), is_overflow]
+ .into_iter()
+ .map(IntervalOrOwned::Owned),
+ )?;
+ destination.write_from_bytes(self, &result)
+ }
+ "copy" | "copy_nonoverlapping" => {
+ let [src, dst, offset] = args else {
+ return Err(MirEvalError::TypeError("copy_nonoverlapping args are not provided"));
+ };
+ let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
+ return Err(MirEvalError::TypeError("copy_nonoverlapping generic arg is not provided"));
+ };
+ let src = Address::from_bytes(src.get(self)?)?;
+ let dst = Address::from_bytes(dst.get(self)?)?;
+ let offset = from_bytes!(usize, offset.get(self)?);
+ let size = self.size_of_sized(ty, locals, "copy_nonoverlapping ptr type")?;
+ let size = offset * size;
+ let src = Interval { addr: src, size };
+ let dst = Interval { addr: dst, size };
+ dst.write_from_interval(self, src)
+ }
+ "offset" | "arith_offset" => {
+ let [ptr, offset] = args else {
+ return Err(MirEvalError::TypeError("offset args are not provided"));
+ };
+ let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
+ return Err(MirEvalError::TypeError("offset generic arg is not provided"));
+ };
+ let ptr = u128::from_le_bytes(pad16(ptr.get(self)?, false));
+ let offset = u128::from_le_bytes(pad16(offset.get(self)?, false));
+ let size = self.size_of_sized(ty, locals, "offset ptr type")? as u128;
+ let ans = ptr + offset * size;
+ destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
+ }
+ "assert_inhabited" | "assert_zero_valid" | "assert_uninit_valid" | "assume" => {
+ // FIXME: We should actually implement these checks
+ Ok(())
+ }
+ "forget" => {
+ // We don't call any drop glue yet, so there is nothing here
+ Ok(())
+ }
+ "transmute" => {
+ let [arg] = args else {
+ return Err(MirEvalError::TypeError("trasmute arg is not provided"));
+ };
+ destination.write_from_interval(self, arg.interval)
+ }
+ "likely" | "unlikely" => {
+ let [arg] = args else {
+ return Err(MirEvalError::TypeError("likely arg is not provided"));
+ };
+ destination.write_from_interval(self, arg.interval)
+ }
+ "ctpop" => {
+ let [arg] = args else {
+ return Err(MirEvalError::TypeError("likely arg is not provided"));
+ };
+ let result = u128::from_le_bytes(pad16(arg.get(self)?, false)).count_ones();
+ destination
+ .write_from_bytes(self, &(result as u128).to_le_bytes()[0..destination.size])
+ }
+ "cttz" | "cttz_nonzero" => {
+ let [arg] = args else {
+ return Err(MirEvalError::TypeError("likely arg is not provided"));
+ };
+ let result = u128::from_le_bytes(pad16(arg.get(self)?, false)).trailing_zeros();
+ destination
+ .write_from_bytes(self, &(result as u128).to_le_bytes()[0..destination.size])
+ }
+ "const_eval_select" => {
+ let [tuple, const_fn, _] = args else {
+ return Err(MirEvalError::TypeError("const_eval_select args are not provided"));
+ };
+ let mut args = vec![const_fn.clone()];
+ let TyKind::Tuple(_, fields) = tuple.ty.kind(Interner) else {
+ return Err(MirEvalError::TypeError("const_eval_select arg[0] is not a tuple"));
+ };
+ let layout = self.layout(&tuple.ty)?;
+ for (i, field) in fields.iter(Interner).enumerate() {
+ let field = field.assert_ty_ref(Interner).clone();
+ let offset = layout.fields.offset(i).bytes_usize();
+ let addr = tuple.interval.addr.offset(offset);
+ args.push(IntervalAndTy::new(addr, field, self, locals)?);
+ }
+ self.exec_fn_trait(&args, destination, locals, span)
+ }
+ _ => not_supported!("unknown intrinsic {name}"),
+ }
+ }
+
+ fn exec_atomic_intrinsic(
+ &mut self,
+ name: &str,
+ args: &[IntervalAndTy],
+ generic_args: &Substitution,
+ destination: Interval,
+ locals: &Locals<'_>,
+ _span: MirSpan,
+ ) -> Result<()> {
+        // We are a single-threaded runtime with no UB checking and no optimization, so
+ // we can implement these as normal functions.
+ let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
+ return Err(MirEvalError::TypeError("atomic intrinsic generic arg is not provided"));
+ };
+ let Some(arg0) = args.get(0) else {
+ return Err(MirEvalError::TypeError("atomic intrinsic arg0 is not provided"));
+ };
+ let arg0_addr = Address::from_bytes(arg0.get(self)?)?;
+ let arg0_interval =
+ Interval::new(arg0_addr, self.size_of_sized(ty, locals, "atomic intrinsic type arg")?);
+ if name.starts_with("load_") {
+ return destination.write_from_interval(self, arg0_interval);
+ }
+ let Some(arg1) = args.get(1) else {
+ return Err(MirEvalError::TypeError("atomic intrinsic arg1 is not provided"));
+ };
+ if name.starts_with("store_") {
+ return arg0_interval.write_from_interval(self, arg1.interval);
+ }
+ if name.starts_with("xchg_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ return arg0_interval.write_from_interval(self, arg1.interval);
+ }
+ if name.starts_with("xadd_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ let lhs = u128::from_le_bytes(pad16(arg0_interval.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(arg1.get(self)?, false));
+ let ans = lhs.wrapping_add(rhs);
+ return arg0_interval.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]);
+ }
+ if name.starts_with("xsub_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ let lhs = u128::from_le_bytes(pad16(arg0_interval.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(arg1.get(self)?, false));
+ let ans = lhs.wrapping_sub(rhs);
+ return arg0_interval.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]);
+ }
+ if name.starts_with("and_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ let lhs = u128::from_le_bytes(pad16(arg0_interval.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(arg1.get(self)?, false));
+ let ans = lhs & rhs;
+ return arg0_interval.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]);
+ }
+ if name.starts_with("or_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ let lhs = u128::from_le_bytes(pad16(arg0_interval.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(arg1.get(self)?, false));
+ let ans = lhs | rhs;
+ return arg0_interval.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]);
+ }
+ if name.starts_with("xor_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ let lhs = u128::from_le_bytes(pad16(arg0_interval.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(arg1.get(self)?, false));
+ let ans = lhs ^ rhs;
+ return arg0_interval.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]);
+ }
+ if name.starts_with("nand_") {
+ destination.write_from_interval(self, arg0_interval)?;
+ let lhs = u128::from_le_bytes(pad16(arg0_interval.get(self)?, false));
+ let rhs = u128::from_le_bytes(pad16(arg1.get(self)?, false));
+ let ans = !(lhs & rhs);
+ return arg0_interval.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]);
+ }
+ let Some(arg2) = args.get(2) else {
+ return Err(MirEvalError::TypeError("atomic intrinsic arg2 is not provided"));
+ };
+ if name.starts_with("cxchg_") || name.starts_with("cxchgweak_") {
+ let dest = if arg1.get(self)? == arg0_interval.get(self)? {
+ arg0_interval.write_from_interval(self, arg2.interval)?;
+ (arg1.interval, true)
+ } else {
+ (arg0_interval, false)
+ };
+ let result_ty = TyKind::Tuple(
+ 2,
+ Substitution::from_iter(Interner, [ty.clone(), TyBuilder::bool()]),
+ )
+ .intern(Interner);
+ let layout = self.layout(&result_ty)?;
+ let result = self.make_by_layout(
+ layout.size.bytes_usize(),
+ &layout,
+ None,
+ [IntervalOrOwned::Borrowed(dest.0), IntervalOrOwned::Owned(vec![u8::from(dest.1)])]
+ .into_iter(),
+ )?;
+ return destination.write_from_bytes(self, &result);
+ }
+ not_supported!("unknown atomic intrinsic {name}");
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs
new file mode 100644
index 000000000..ca4268b8f
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs
@@ -0,0 +1,676 @@
+use base_db::{fixture::WithFixture, FileId};
+use hir_def::db::DefDatabase;
+use syntax::{TextRange, TextSize};
+
+use crate::{db::HirDatabase, test_db::TestDB, Interner, Substitution};
+
+use super::{interpret_mir, MirEvalError};
+
+fn eval_main(db: &TestDB, file_id: FileId) -> Result<(String, String), MirEvalError> {
+ let module_id = db.module_for_file(file_id);
+ let def_map = module_id.def_map(db);
+ let scope = &def_map[module_id.local_id].scope;
+ let func_id = scope
+ .declarations()
+ .find_map(|x| match x {
+ hir_def::ModuleDefId::FunctionId(x) => {
+ if db.function_data(x).name.display(db).to_string() == "main" {
+ Some(x)
+ } else {
+ None
+ }
+ }
+ _ => None,
+ })
+ .expect("no main function found");
+ let body = db
+ .monomorphized_mir_body(
+ func_id.into(),
+ Substitution::empty(Interner),
+ db.trait_environment(func_id.into()),
+ )
+ .map_err(|e| MirEvalError::MirLowerError(func_id.into(), e))?;
+ let (result, stdout, stderr) = interpret_mir(db, &body, false);
+ result?;
+ Ok((stdout, stderr))
+}
+
+fn check_pass(ra_fixture: &str) {
+ check_pass_and_stdio(ra_fixture, "", "");
+}
+
+fn check_pass_and_stdio(ra_fixture: &str, expected_stdout: &str, expected_stderr: &str) {
+ let (db, file_ids) = TestDB::with_many_files(ra_fixture);
+ let file_id = *file_ids.last().unwrap();
+ let x = eval_main(&db, file_id);
+ match x {
+ Err(e) => {
+ let mut err = String::new();
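+            // Maps a byte offset in the fixture to an approximate (line, column) pair
+            // for error spans; newline bytes aren't counted, so columns may drift slightly.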
+ let line_index = |size: TextSize| {
+ let mut size = u32::from(size) as usize;
+ let mut lines = ra_fixture.lines().enumerate();
+ while let Some((i, l)) = lines.next() {
+ if let Some(x) = size.checked_sub(l.len()) {
+ size = x;
+ } else {
+ return (i, size);
+ }
+ }
+ (usize::MAX, size)
+ };
+ let span_formatter = |file, range: TextRange| {
+ format!("{:?} {:?}..{:?}", file, line_index(range.start()), line_index(range.end()))
+ };
+ e.pretty_print(&mut err, &db, span_formatter).unwrap();
+ panic!("Error in interpreting: {err}");
+ }
+ Ok((stdout, stderr)) => {
+ assert_eq!(stdout, expected_stdout);
+ assert_eq!(stderr, expected_stderr);
+ }
+ }
+}
+
+#[test]
+fn function_with_extern_c_abi() {
+ check_pass(
+ r#"
+extern "C" fn foo(a: i32, b: i32) -> i32 {
+ a + b
+}
+
+fn main() {
+ let x = foo(2, 3);
+}
+ "#,
+ );
+}
+
+#[test]
+fn drop_basic() {
+ check_pass(
+ r#"
+//- minicore: drop, add
+
+struct X<'a>(&'a mut i32);
+impl<'a> Drop for X<'a> {
+ fn drop(&mut self) {
+ *self.0 += 1;
+ }
+}
+
+struct NestedX<'a> { f1: X<'a>, f2: X<'a> }
+
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+fn my_drop2(x: X<'_>) {
+ return;
+}
+
+fn my_drop(x: X<'_>) {
+ drop(x);
+}
+
+fn main() {
+ let mut s = 10;
+ let mut x = X(&mut s);
+ my_drop(x);
+ x = X(&mut s);
+ my_drop2(x);
+ X(&mut s); // dropped immediately
+ let x = X(&mut s);
+ NestedX { f1: x, f2: X(&mut s) };
+ if s != 15 {
+ should_not_reach();
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn drop_if_let() {
+ check_pass(
+ r#"
+//- minicore: drop, add, option, cell, builtin_impls
+
+use core::cell::Cell;
+
+struct X<'a>(&'a Cell<i32>);
+impl<'a> Drop for X<'a> {
+ fn drop(&mut self) {
+ self.0.set(self.0.get() + 1)
+ }
+}
+
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+#[test]
+fn main() {
+ let s = Cell::new(0);
+ let x = Some(X(&s));
+ if let Some(y) = x {
+ if s.get() != 0 {
+ should_not_reach();
+ }
+ if s.get() != 0 {
+ should_not_reach();
+ }
+ } else {
+ should_not_reach();
+ }
+ if s.get() != 1 {
+ should_not_reach();
+ }
+ let x = Some(X(&s));
+ if let None = x {
+ should_not_reach();
+ } else {
+ if s.get() != 1 {
+ should_not_reach();
+ }
+ }
+ if s.get() != 1 {
+ should_not_reach();
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn drop_in_place() {
+ check_pass(
+ r#"
+//- minicore: drop, add, coerce_unsized
+use core::ptr::drop_in_place;
+
+struct X<'a>(&'a mut i32);
+impl<'a> Drop for X<'a> {
+ fn drop(&mut self) {
+ *self.0 += 1;
+ }
+}
+
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+fn main() {
+ let mut s = 2;
+ let x = X(&mut s);
+ drop_in_place(&mut x);
+ drop(x);
+ if s != 4 {
+ should_not_reach();
+ }
+ let p: &mut [X] = &mut [X(&mut 2)];
+ drop_in_place(p);
+}
+ "#,
+ );
+}
+
+#[test]
+fn manually_drop() {
+ check_pass(
+ r#"
+//- minicore: manually_drop
+use core::mem::ManuallyDrop;
+
+struct X;
+impl Drop for X {
+ fn drop(&mut self) {
+ should_not_reach();
+ }
+}
+
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+fn main() {
+ let x = ManuallyDrop::new(X);
+}
+ "#,
+ );
+}
+
+#[test]
+fn generic_impl_for_trait_with_generic_method() {
+ check_pass(
+ r#"
+//- minicore: drop
+struct S<T>(T);
+
+trait Tr {
+ fn f<F>(&self, x: F);
+}
+
+impl<T> Tr for S<T> {
+ fn f<F>(&self, x: F) {
+ }
+}
+
+fn main() {
+ let s = S(1u8);
+ s.f(5i64);
+}
+ "#,
+ );
+}
+
+#[test]
+fn index_of_slice_should_preserve_len() {
+ check_pass(
+ r#"
+//- minicore: index, slice, coerce_unsized
+
+struct X;
+
+impl core::ops::Index<X> for [i32] {
+ type Output = i32;
+
+ fn index(&self, _: X) -> &i32 {
+ if self.len() != 3 {
+ should_not_reach();
+ }
+ &self[0]
+ }
+}
+
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+fn main() {
+ let x: &[i32] = &[1, 2, 3];
+ &x[X];
+}
+ "#,
+ );
+}
+
+#[test]
+fn memcmp() {
+ check_pass(
+ r#"
+//- minicore: slice, coerce_unsized, index
+
+fn should_not_reach() -> bool {
+ _ // FIXME: replace this function with panic when that works
+}
+
+extern "C" {
+ fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
+}
+
+fn my_cmp(x: &[u8], y: &[u8]) -> i32 {
+ memcmp(x as *const u8, y as *const u8, x.len())
+}
+
+fn main() {
+ if my_cmp(&[1, 2, 3], &[1, 2, 3]) != 0 {
+ should_not_reach();
+ }
+ if my_cmp(&[1, 20, 3], &[1, 2, 3]) <= 0 {
+ should_not_reach();
+ }
+ if my_cmp(&[1, 2, 3], &[1, 20, 3]) >= 0 {
+ should_not_reach();
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn unix_write_stdout() {
+ check_pass_and_stdio(
+ r#"
+//- minicore: slice, index, coerce_unsized
+
+type pthread_key_t = u32;
+type c_void = u8;
+type c_int = i32;
+
+extern "C" {
+ pub fn write(fd: i32, buf: *const u8, count: usize) -> usize;
+}
+
+fn main() {
+ let stdout = b"stdout";
+ let stderr = b"stderr";
+ write(1, &stdout[0], 6);
+ write(2, &stderr[0], 6);
+}
+ "#,
+ "stdout",
+ "stderr",
+ );
+}
+
+#[test]
+fn closure_layout_in_rpit() {
+ check_pass(
+ r#"
+//- minicore: fn
+
+fn f<F: Fn()>(x: F) {
+ fn g(x: impl Fn()) -> impl FnOnce() {
+ move || {
+ x();
+ }
+ }
+ g(x)();
+}
+
+fn main() {
+ f(|| {});
+}
+ "#,
+ );
+}
+
+#[test]
+fn from_fn() {
+ check_pass(
+ r#"
+//- minicore: fn, iterator
+struct FromFn<F>(F);
+
+impl<T, F: FnMut() -> Option<T>> Iterator for FromFn<F> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ (self.0)()
+ }
+}
+
+fn main() {
+ let mut tokenize = {
+ FromFn(move || Some(2))
+ };
+ let s = tokenize.next();
+}
+ "#,
+ );
+}
+
+#[test]
+fn for_loop() {
+ check_pass(
+ r#"
+//- minicore: iterator, add
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+struct X;
+struct XIter(i32);
+
+impl IntoIterator for X {
+ type Item = i32;
+
+ type IntoIter = XIter;
+
+ fn into_iter(self) -> Self::IntoIter {
+ XIter(0)
+ }
+}
+
+impl Iterator for XIter {
+ type Item = i32;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.0 == 5 {
+ None
+ } else {
+ self.0 += 1;
+ Some(self.0)
+ }
+ }
+}
+
+fn main() {
+ let mut s = 0;
+ for x in X {
+ s += x;
+ }
+ if s != 15 {
+ should_not_reach();
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn field_with_associated_type() {
+ check_pass(
+ r#"
+//- /b/mod.rs crate:b
+pub trait Tr {
+ fn f(self);
+}
+
+pub trait Tr2 {
+ type Ty: Tr;
+}
+
+pub struct S<T: Tr2> {
+ pub t: T::Ty,
+}
+
+impl<T: Tr2> S<T> {
+ pub fn g(&self) {
+ let k = (self.t, self.t);
+ self.t.f();
+ }
+}
+
+//- /a/mod.rs crate:a deps:b
+use b::{Tr, Tr2, S};
+
+struct A(i32);
+struct B(u8);
+
+impl Tr for A {
+ fn f(&self) {
+ }
+}
+
+impl Tr2 for B {
+ type Ty = A;
+}
+
+#[test]
+fn main() {
+ let s: S<B> = S { t: A(2) };
+ s.g();
+}
+ "#,
+ );
+}
+
+#[test]
+fn specialization_array_clone() {
+ check_pass(
+ r#"
+//- minicore: copy, derive, slice, index, coerce_unsized
+impl<T: Clone, const N: usize> Clone for [T; N] {
+ #[inline]
+ fn clone(&self) -> Self {
+ SpecArrayClone::clone(self)
+ }
+}
+
+trait SpecArrayClone: Clone {
+ fn clone<const N: usize>(array: &[Self; N]) -> [Self; N];
+}
+
+impl<T: Clone> SpecArrayClone for T {
+ #[inline]
+ default fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
+ // FIXME: panic here when we actually implement specialization.
+ from_slice(array)
+ }
+}
+
+fn from_slice<T, const N: usize>(s: &[T]) -> [T; N] {
+ [s[0]; N]
+}
+
+impl<T: Copy> SpecArrayClone for T {
+ #[inline]
+ fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
+ *array
+ }
+}
+
+#[derive(Clone, Copy)]
+struct X(i32);
+
+fn main() {
+ let ar = [X(1), X(2)];
+ ar.clone();
+}
+ "#,
+ );
+}
+
+#[test]
+fn short_circuit_operator() {
+ check_pass(
+ r#"
+fn should_not_reach() -> bool {
+ _ // FIXME: replace this function with panic when that works
+}
+
+fn main() {
+ if false && should_not_reach() {
+ should_not_reach();
+ }
+ true || should_not_reach();
+
+}
+ "#,
+ );
+}
+
+#[test]
+fn closure_state() {
+ check_pass(
+ r#"
+//- minicore: fn, add, copy
+fn should_not_reach() {
+ _ // FIXME: replace this function with panic when that works
+}
+
+fn main() {
+ let mut x = 2;
+ let mut c = move || {
+ x += 1;
+ x
+ };
+ c();
+ c();
+ c();
+ if x != 2 {
+ should_not_reach();
+ }
+ if c() != 6 {
+ should_not_reach();
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn closure_capture_array_const_generic() {
+ check_pass(
+ r#"
+//- minicore: fn, add, copy
+struct X(i32);
+
+fn f<const N: usize>(mut x: [X; N]) { // -> impl FnOnce() {
+ let c = || {
+ x;
+ };
+ c();
+}
+
+fn main() {
+ let s = f([X(1)]);
+ //s();
+}
+ "#,
+ );
+}
+
+#[test]
+fn posix_tls() {
+ check_pass(
+ r#"
+//- minicore: option
+
+type pthread_key_t = u32;
+type c_void = u8;
+type c_int = i32;
+
+extern "C" {
+ pub fn pthread_key_create(
+ key: *mut pthread_key_t,
+ dtor: Option<unsafe extern "C" fn(*mut c_void)>,
+ ) -> c_int;
+ pub fn pthread_key_delete(key: pthread_key_t) -> c_int;
+ pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void;
+ pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int;
+}
+
+fn main() {
+ let mut key = 2;
+ pthread_key_create(&mut key, None);
+}
+ "#,
+ );
+}
+
+#[test]
+fn regression_14966() {
+ check_pass(
+ r#"
+//- minicore: fn, copy, coerce_unsized
+trait A<T> {
+ fn a(&self) {}
+}
+impl A<()> for () {}
+
+struct B;
+impl B {
+ pub fn b<T>(s: &dyn A<T>) -> Self {
+ B
+ }
+}
+struct C;
+impl C {
+ fn c<T>(a: &dyn A<T>) -> Self {
+ let mut c = C;
+ let b = B::b(a);
+ c.d(|| a.a());
+ c
+ }
+ fn d(&mut self, f: impl FnOnce()) {}
+}
+
+fn main() {
+ C::c(&());
+}
+"#,
+ );
+}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs
index c4dd7c0ac..2cb29b4ab 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs
@@ -1,60 +1,88 @@
//! This module generates a polymorphic MIR from a hir body
-use std::{iter, mem, sync::Arc};
+use std::{fmt::Write, iter, mem};
+use base_db::FileId;
use chalk_ir::{BoundVar, ConstData, DebruijnIndex, TyKind};
use hir_def::{
body::Body,
- expr::{
- Array, BindingAnnotation, BindingId, ExprId, LabelId, Literal, MatchArm, Pat, PatId,
- RecordLitField,
+ data::adt::{StructKind, VariantData},
+ hir::{
+ ArithOp, Array, BinaryOp, BindingAnnotation, BindingId, ExprId, LabelId, Literal,
+ LiteralOrConst, MatchArm, Pat, PatId, RecordFieldPat, RecordLitField,
},
lang_item::{LangItem, LangItemTarget},
- layout::LayoutError,
path::Path,
- resolver::{resolver_for_expr, ResolveValueResult, ValueNs},
- DefWithBodyId, EnumVariantId, HasModule,
+ resolver::{resolver_for_expr, HasResolver, ResolveValueResult, ValueNs},
+ AdtId, DefWithBodyId, EnumVariantId, GeneralConstId, HasModule, ItemContainerId, LocalFieldId,
+ TraitId, TypeOrConstParamId,
};
use hir_expand::name::Name;
use la_arena::ArenaMap;
+use rustc_hash::FxHashMap;
+use syntax::TextRange;
+use triomphe::Arc;
use crate::{
- consteval::ConstEvalError, db::HirDatabase, display::HirDisplay, infer::TypeMismatch,
- inhabitedness::is_ty_uninhabited_from, layout::layout_of_ty, mapping::ToChalk, static_lifetime,
- utils::generics, Adjust, Adjustment, AutoBorrow, CallableDefId, TyBuilder, TyExt,
+ consteval::ConstEvalError,
+ db::HirDatabase,
+ display::HirDisplay,
+ infer::{CaptureKind, CapturedItem, TypeMismatch},
+ inhabitedness::is_ty_uninhabited_from,
+ layout::LayoutError,
+ mapping::ToChalk,
+ static_lifetime,
+ traits::FnTrait,
+ utils::{generics, ClosureSubst},
+ Adjust, Adjustment, AutoBorrow, CallableDefId, TyBuilder, TyExt,
};
use super::*;
mod as_place;
+mod pattern_matching;
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone)]
struct LoopBlocks {
begin: BasicBlockId,
/// `None` for loops that are not terminating
end: Option<BasicBlockId>,
+ place: Place,
+ drop_scope_index: usize,
+}
+
+#[derive(Debug, Clone, Default)]
+struct DropScope {
+ /// locals, in order of definition (so we should run drop glues in reverse order)
+ locals: Vec<LocalId>,
}
struct MirLowerCtx<'a> {
result: MirBody,
owner: DefWithBodyId,
current_loop_blocks: Option<LoopBlocks>,
+ labeled_loop_blocks: FxHashMap<LabelId, LoopBlocks>,
discr_temp: Option<Place>,
db: &'a dyn HirDatabase,
body: &'a Body,
infer: &'a InferenceResult,
+ drop_scopes: Vec<DropScope>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MirLowerError {
- ConstEvalError(Box<ConstEvalError>),
+ ConstEvalError(String, Box<ConstEvalError>),
LayoutError(LayoutError),
IncompleteExpr,
+ IncompletePattern,
+ /// Trying to lower a trait function, instead of an implementation
+ TraitFunctionDefinition(TraitId, Name),
UnresolvedName(String),
RecordLiteralWithoutPath,
- UnresolvedMethod,
+ UnresolvedMethod(String),
UnresolvedField,
- MissingFunctionDefinition,
+ UnsizedTemporary(Ty),
+ MissingFunctionDefinition(DefWithBodyId, ExprId),
TypeMismatch(TypeMismatch),
    /// This should never happen. Type mismatch should catch everything.
TypeError(&'static str),
@@ -63,9 +91,114 @@ pub enum MirLowerError {
BreakWithoutLoop,
Loop,
/// Something that should never happen and is definitely a bug, but we don't want to panic if it happened
- ImplementationError(&'static str),
+ ImplementationError(String),
LangItemNotFound(LangItem),
MutatingRvalue,
+ UnresolvedLabel,
+ UnresolvedUpvar(Place),
+ UnaccessableLocal,
+
+ // monomorphization errors:
+ GenericArgNotProvided(TypeOrConstParamId, Substitution),
+}
+
+/// A token ensuring that each drop scope is popped at most once, relying on the compiler's move checking.
+struct DropScopeToken;
+impl DropScopeToken {
+ fn pop_and_drop(self, ctx: &mut MirLowerCtx<'_>, current: BasicBlockId) -> BasicBlockId {
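+        // Consuming `self` without running its (potential) drop bomb marks the scope
+        // as handled; see the commented-out `Drop` impl below.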
+ std::mem::forget(self);
+ ctx.pop_drop_scope_internal(current)
+ }
+
+    /// Useful when we want a drop scope to be syntactically closed without executing any drop
+    /// code: either the control flow is diverging (so the drop code is never reached), or drop is
+    /// handled for us (for example a block that ends with a return statement; return drops everything,
+    /// so the block shouldn't do anything).
+ fn pop_assume_dropped(self, ctx: &mut MirLowerCtx<'_>) {
+ std::mem::forget(self);
+ ctx.pop_drop_scope_assume_dropped_internal();
+ }
+}
+
+// Uncomment this to make `DropScopeToken` a drop bomb. Unfortunately we can't do this in release, since
+// in cases where MIR lowering fails we don't handle (and don't need to handle) drop scopes, so the bomb
+// would actually go off. `pop_drop_scope_assert_finished` also detects this case, but doesn't show a
+// useful stack trace.
+//
+// impl Drop for DropScopeToken {
+// fn drop(&mut self) {
+// never!("Drop scope doesn't popped");
+// }
+// }
+
+impl MirLowerError {
+ pub fn pretty_print(
+ &self,
+ f: &mut String,
+ db: &dyn HirDatabase,
+ span_formatter: impl Fn(FileId, TextRange) -> String,
+ ) -> std::result::Result<(), std::fmt::Error> {
+ match self {
+ MirLowerError::ConstEvalError(name, e) => {
+ writeln!(f, "In evaluating constant {name}")?;
+ match &**e {
+ ConstEvalError::MirLowerError(e) => e.pretty_print(f, db, span_formatter)?,
+ ConstEvalError::MirEvalError(e) => e.pretty_print(f, db, span_formatter)?,
+ }
+ }
+ MirLowerError::MissingFunctionDefinition(owner, x) => {
+ let body = db.body(*owner);
+ writeln!(
+ f,
+ "Missing function definition for {}",
+ body.pretty_print_expr(db.upcast(), *owner, *x)
+ )?;
+ }
+ MirLowerError::TypeMismatch(e) => {
+ writeln!(
+ f,
+ "Type mismatch: Expected {}, found {}",
+ e.expected.display(db),
+ e.actual.display(db),
+ )?;
+ }
+ MirLowerError::GenericArgNotProvided(id, subst) => {
+ let parent = id.parent;
+ let param = &db.generic_params(parent).type_or_consts[id.local_id];
+ writeln!(
+ f,
+ "Generic arg not provided for {}",
+ param.name().unwrap_or(&Name::missing()).display(db.upcast())
+ )?;
+ writeln!(f, "Provided args: [")?;
+ for g in subst.iter(Interner) {
+ write!(f, " {},", g.display(db).to_string())?;
+ }
+ writeln!(f, "]")?;
+ }
+ MirLowerError::LayoutError(_)
+ | MirLowerError::UnsizedTemporary(_)
+ | MirLowerError::IncompleteExpr
+ | MirLowerError::IncompletePattern
+ | MirLowerError::UnaccessableLocal
+ | MirLowerError::TraitFunctionDefinition(_, _)
+ | MirLowerError::UnresolvedName(_)
+ | MirLowerError::RecordLiteralWithoutPath
+ | MirLowerError::UnresolvedMethod(_)
+ | MirLowerError::UnresolvedField
+ | MirLowerError::TypeError(_)
+ | MirLowerError::NotSupported(_)
+ | MirLowerError::ContinueWithoutLoop
+ | MirLowerError::BreakWithoutLoop
+ | MirLowerError::Loop
+ | MirLowerError::ImplementationError(_)
+ | MirLowerError::LangItemNotFound(_)
+ | MirLowerError::MutatingRvalue
+ | MirLowerError::UnresolvedLabel
+ | MirLowerError::UnresolvedUpvar(_) => writeln!(f, "{:?}", self)?,
+ }
+ Ok(())
+ }
}
macro_rules! not_supported {
@@ -76,20 +209,11 @@ macro_rules! not_supported {
macro_rules! implementation_error {
($x: expr) => {{
- ::stdx::never!("MIR lower implementation bug: {}", $x);
- return Err(MirLowerError::ImplementationError($x));
+ ::stdx::never!("MIR lower implementation bug: {}", format!($x));
+ return Err(MirLowerError::ImplementationError(format!($x)));
}};
}
-impl From<ConstEvalError> for MirLowerError {
- fn from(value: ConstEvalError) -> Self {
- match value {
- ConstEvalError::MirLowerError(e) => e,
- _ => MirLowerError::ConstEvalError(Box::new(value)),
- }
- }
-}
-
impl From<LayoutError> for MirLowerError {
fn from(value: LayoutError) -> Self {
MirLowerError::LayoutError(value)
@@ -104,12 +228,51 @@ impl MirLowerError {
type Result<T> = std::result::Result<T, MirLowerError>;
-impl MirLowerCtx<'_> {
- fn temp(&mut self, ty: Ty) -> Result<LocalId> {
+impl<'ctx> MirLowerCtx<'ctx> {
+ fn new(
+ db: &'ctx dyn HirDatabase,
+ owner: DefWithBodyId,
+ body: &'ctx Body,
+ infer: &'ctx InferenceResult,
+ ) -> Self {
+ let mut basic_blocks = Arena::new();
+ let start_block = basic_blocks.alloc(BasicBlock {
+ statements: vec![],
+ terminator: None,
+ is_cleanup: false,
+ });
+ let locals = Arena::new();
+ let binding_locals: ArenaMap<BindingId, LocalId> = ArenaMap::new();
+ let mir = MirBody {
+ basic_blocks,
+ locals,
+ start_block,
+ binding_locals,
+ param_locals: vec![],
+ owner,
+ closures: vec![],
+ };
+ let ctx = MirLowerCtx {
+ result: mir,
+ db,
+ infer,
+ body,
+ owner,
+ current_loop_blocks: None,
+ labeled_loop_blocks: Default::default(),
+ discr_temp: None,
+ drop_scopes: vec![DropScope::default()],
+ };
+ ctx
+ }
+
+ fn temp(&mut self, ty: Ty, current: BasicBlockId, span: MirSpan) -> Result<LocalId> {
if matches!(ty.kind(Interner), TyKind::Slice(_) | TyKind::Dyn(_)) {
- implementation_error!("unsized temporaries");
+ return Err(MirLowerError::UnsizedTemporary(ty));
}
- Ok(self.result.locals.alloc(Local { ty }))
+ let l = self.result.locals.alloc(Local { ty });
+ self.push_storage_live_for_local(l, current, span)?;
+ Ok(l)
}
fn lower_expr_to_some_operand(
@@ -120,7 +283,7 @@ impl MirLowerCtx<'_> {
if !self.has_adjustments(expr_id) {
match &self.body.exprs[expr_id] {
Expr::Literal(l) => {
- let ty = self.expr_ty(expr_id);
+ let ty = self.expr_ty_without_adjust(expr_id);
return Ok(Some((self.lower_literal_to_operand(ty, l)?, current)));
}
_ => (),
@@ -142,7 +305,8 @@ impl MirLowerCtx<'_> {
match adjustments.split_last() {
Some((last, rest)) => match &last.kind {
Adjust::NeverToAny => {
- let temp = self.temp(TyKind::Never.intern(Interner))?;
+ let temp =
+ self.temp(TyKind::Never.intern(Interner), current, MirSpan::Unknown)?;
self.lower_expr_to_place_with_adjust(expr_id, temp.into(), current, rest)
}
Adjust::Deref(_) => {
@@ -200,65 +364,82 @@ impl MirLowerCtx<'_> {
mut current: BasicBlockId,
) -> Result<Option<BasicBlockId>> {
match &self.body.exprs[expr_id] {
- Expr::Missing => Err(MirLowerError::IncompleteExpr),
+ Expr::Missing => {
+ if let DefWithBodyId::FunctionId(f) = self.owner {
+ let assoc = self.db.lookup_intern_function(f);
+ if let ItemContainerId::TraitId(t) = assoc.container {
+ let name = &self.db.function_data(f).name;
+ return Err(MirLowerError::TraitFunctionDefinition(t, name.clone()));
+ }
+ }
+ Err(MirLowerError::IncompleteExpr)
+ },
Expr::Path(p) => {
- let unresolved_name = || MirLowerError::unresolved_path(self.db, p);
- let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id);
- let pr = resolver
- .resolve_path_in_value_ns(self.db.upcast(), p.mod_path())
- .ok_or_else(unresolved_name)?;
- let pr = match pr {
- ResolveValueResult::ValueNs(v) => v,
- ResolveValueResult::Partial(..) => {
- if let Some(assoc) = self
- .infer
- .assoc_resolutions_for_expr(expr_id)
- {
- match assoc.0 {
- hir_def::AssocItemId::ConstId(c) => {
- self.lower_const(c, current, place, expr_id.into())?;
- return Ok(Some(current))
- },
- _ => not_supported!("associated functions and types"),
- }
- } else if let Some(variant) = self
- .infer
- .variant_resolution_for_expr(expr_id)
- {
- match variant {
- VariantId::EnumVariantId(e) => ValueNs::EnumVariantId(e),
- VariantId::StructId(s) => ValueNs::StructId(s),
- VariantId::UnionId(_) => implementation_error!("Union variant as path"),
- }
- } else {
- return Err(unresolved_name());
+ let pr = if let Some((assoc, subst)) = self
+ .infer
+ .assoc_resolutions_for_expr(expr_id)
+ {
+ match assoc {
+ hir_def::AssocItemId::ConstId(c) => {
+ self.lower_const(c.into(), current, place, subst, expr_id.into(), self.expr_ty_without_adjust(expr_id))?;
+ return Ok(Some(current))
+ },
+ hir_def::AssocItemId::FunctionId(_) => {
+                        // FnDefs are zero-sized, so no action is needed.
+ return Ok(Some(current))
}
+ hir_def::AssocItemId::TypeAliasId(_) => {
+ // FIXME: If it is unreachable, use proper error instead of `not_supported`.
+ not_supported!("associated functions and types")
+ },
}
+ } else if let Some(variant) = self
+ .infer
+ .variant_resolution_for_expr(expr_id)
+ {
+ match variant {
+ VariantId::EnumVariantId(e) => ValueNs::EnumVariantId(e),
+ VariantId::StructId(s) => ValueNs::StructId(s),
+ VariantId::UnionId(_) => implementation_error!("Union variant as path"),
+ }
+ } else {
+ let unresolved_name = || MirLowerError::unresolved_path(self.db, p);
+ let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id);
+ resolver
+ .resolve_path_in_value_ns_fully(self.db.upcast(), p)
+ .ok_or_else(unresolved_name)?
};
match pr {
- ValueNs::LocalBinding(pat_id) => {
+ ValueNs::LocalBinding(_) | ValueNs::StaticId(_) => {
+ let Some((temp, current)) = self.lower_expr_as_place_without_adjust(current, expr_id, false)? else {
+ return Ok(None);
+ };
self.push_assignment(
current,
place,
- Operand::Copy(self.result.binding_locals[pat_id].into()).into(),
+ Operand::Copy(temp).into(),
expr_id.into(),
);
Ok(Some(current))
}
ValueNs::ConstId(const_id) => {
- self.lower_const(const_id, current, place, expr_id.into())?;
+ self.lower_const(const_id.into(), current, place, Substitution::empty(Interner), expr_id.into(), self.expr_ty_without_adjust(expr_id))?;
Ok(Some(current))
}
ValueNs::EnumVariantId(variant_id) => {
- let ty = self.infer.type_of_expr[expr_id].clone();
- let current = self.lower_enum_variant(
- variant_id,
- current,
- place,
- ty,
- vec![],
- expr_id.into(),
- )?;
+ let variant_data = &self.db.enum_data(variant_id.parent).variants[variant_id.local_id];
+ if variant_data.variant_data.kind() == StructKind::Unit {
+ let ty = self.infer.type_of_expr[expr_id].clone();
+ current = self.lower_enum_variant(
+ variant_id,
+ current,
+ place,
+ ty,
+ Box::new([]),
+ expr_id.into(),
+ )?;
+ }
+                    // Otherwise it's a tuple-like enum variant, treated like a zero-sized function, so no action is needed.
Ok(Some(current))
}
ValueNs::GenericParam(p) => {
@@ -266,7 +447,7 @@ impl MirLowerCtx<'_> {
not_supported!("owner without generic def id");
};
let gen = generics(self.db.upcast(), def);
- let ty = self.expr_ty(expr_id);
+ let ty = self.expr_ty_without_adjust(expr_id);
self.push_assignment(
current,
place,
@@ -287,7 +468,7 @@ impl MirLowerCtx<'_> {
);
Ok(Some(current))
}
- ValueNs::StructId(_) => {
+ ValueNs::FunctionId(_) | ValueNs::StructId(_) => {
// It's probably a unit struct or a zero sized function, so no action is needed.
Ok(Some(current))
}
@@ -311,12 +492,13 @@ impl MirLowerCtx<'_> {
};
self.set_terminator(
current,
- Terminator::SwitchInt {
+ TerminatorKind::SwitchInt {
discr,
targets: SwitchTargets::static_if(1, start_of_then, start_of_else),
},
+ expr_id.into(),
);
- Ok(self.merge_blocks(end_of_then, end_of_else))
+ Ok(self.merge_blocks(end_of_then, end_of_else, expr_id.into()))
}
Expr::Let { pat, expr } => {
let Some((cond_place, current)) = self.lower_expr_as_place(current, *expr, true)? else {
@@ -326,9 +508,7 @@ impl MirLowerCtx<'_> {
current,
None,
cond_place,
- self.expr_ty_after_adjustments(*expr),
*pat,
- BindingAnnotation::Unannotated,
)?;
self.write_bytes_to_place(
then_target,
@@ -346,141 +526,107 @@ impl MirLowerCtx<'_> {
MirSpan::Unknown,
)?;
}
- Ok(self.merge_blocks(Some(then_target), else_target))
+ Ok(self.merge_blocks(Some(then_target), else_target, expr_id.into()))
}
Expr::Unsafe { id: _, statements, tail } => {
- self.lower_block_to_place(None, statements, current, *tail, place)
+ self.lower_block_to_place(statements, current, *tail, place, expr_id.into())
}
Expr::Block { id: _, statements, tail, label } => {
- self.lower_block_to_place(*label, statements, current, *tail, place)
+ if let Some(label) = label {
+ self.lower_loop(current, place.clone(), Some(*label), expr_id.into(), |this, begin| {
+ if let Some(current) = this.lower_block_to_place(statements, begin, *tail, place, expr_id.into())? {
+ let end = this.current_loop_end()?;
+ this.set_goto(current, end, expr_id.into());
+ }
+ Ok(())
+ })
+ } else {
+ self.lower_block_to_place(statements, current, *tail, place, expr_id.into())
+ }
}
- Expr::Loop { body, label } => self.lower_loop(current, *label, |this, begin| {
- if let Some((_, block)) = this.lower_expr_as_place(begin, *body, true)? {
- this.set_goto(block, begin);
+ Expr::Loop { body, label } => self.lower_loop(current, place, *label, expr_id.into(), |this, begin| {
+ let scope = this.push_drop_scope();
+ if let Some((_, mut current)) = this.lower_expr_as_place(begin, *body, true)? {
+ current = scope.pop_and_drop(this, current);
+ this.set_goto(current, begin, expr_id.into());
+ } else {
+ scope.pop_assume_dropped(this);
}
Ok(())
}),
Expr::While { condition, body, label } => {
- self.lower_loop(current, *label, |this, begin| {
+ self.lower_loop(current, place, *label, expr_id.into(),|this, begin| {
+ let scope = this.push_drop_scope();
let Some((discr, to_switch)) = this.lower_expr_to_some_operand(*condition, begin)? else {
return Ok(());
};
- let end = this.current_loop_end()?;
+ let fail_cond = this.new_basic_block();
let after_cond = this.new_basic_block();
this.set_terminator(
to_switch,
- Terminator::SwitchInt {
+ TerminatorKind::SwitchInt {
discr,
- targets: SwitchTargets::static_if(1, after_cond, end),
+ targets: SwitchTargets::static_if(1, after_cond, fail_cond),
},
+ expr_id.into(),
);
+ let fail_cond = this.drop_until_scope(this.drop_scopes.len() - 1, fail_cond);
+ let end = this.current_loop_end()?;
+ this.set_goto(fail_cond, end, expr_id.into());
if let Some((_, block)) = this.lower_expr_as_place(after_cond, *body, true)? {
- this.set_goto(block, begin);
+ let block = scope.pop_and_drop(this, block);
+ this.set_goto(block, begin, expr_id.into());
+ } else {
+ scope.pop_assume_dropped(this);
}
Ok(())
})
}
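
// The while-lowering above is the classic desugaring: test the condition,
// branch to the body on success (discriminant 1) and to `fail_cond` on
// failure, then jump back to the test. Equivalent surface Rust:
fn main() {
    let mut n = 0;
    loop {
        if !(n < 3) {
            break; // the fail_cond path: drop scope locals, then goto the end
        }
        n += 1; // the body, followed by a goto back to the condition
    }
    assert_eq!(n, 3);
}
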
- &Expr::For { iterable, pat, body, label } => {
- let into_iter_fn = self.resolve_lang_item(LangItem::IntoIterIntoIter)?
- .as_function().ok_or(MirLowerError::LangItemNotFound(LangItem::IntoIterIntoIter))?;
- let iter_next_fn = self.resolve_lang_item(LangItem::IteratorNext)?
- .as_function().ok_or(MirLowerError::LangItemNotFound(LangItem::IteratorNext))?;
- let option_some = self.resolve_lang_item(LangItem::OptionSome)?
- .as_enum_variant().ok_or(MirLowerError::LangItemNotFound(LangItem::OptionSome))?;
- let option = option_some.parent;
- let into_iter_fn_op = Operand::const_zst(
- TyKind::FnDef(
- self.db.intern_callable_def(CallableDefId::FunctionId(into_iter_fn)).into(),
- Substitution::from1(Interner, self.expr_ty(iterable))
- ).intern(Interner));
- let iter_next_fn_op = Operand::const_zst(
- TyKind::FnDef(
- self.db.intern_callable_def(CallableDefId::FunctionId(iter_next_fn)).into(),
- Substitution::from1(Interner, self.expr_ty(iterable))
- ).intern(Interner));
- let &Some(iterator_ty) = &self.infer.type_of_for_iterator.get(&expr_id) else {
- return Err(MirLowerError::TypeError("unknown for loop iterator type"));
- };
- let ref_mut_iterator_ty = TyKind::Ref(Mutability::Mut, static_lifetime(), iterator_ty.clone()).intern(Interner);
- let item_ty = &self.infer.type_of_pat[pat];
- let option_item_ty = TyKind::Adt(chalk_ir::AdtId(option.into()), Substitution::from1(Interner, item_ty.clone())).intern(Interner);
- let iterator_place: Place = self.temp(iterator_ty.clone())?.into();
- let option_item_place: Place = self.temp(option_item_ty.clone())?.into();
- let ref_mut_iterator_place: Place = self.temp(ref_mut_iterator_ty)?.into();
- let Some(current) = self.lower_call_and_args(into_iter_fn_op, Some(iterable).into_iter(), iterator_place.clone(), current, false)?
- else {
- return Ok(None);
- };
- self.push_assignment(current, ref_mut_iterator_place.clone(), Rvalue::Ref(BorrowKind::Mut { allow_two_phase_borrow: false }, iterator_place), expr_id.into());
- self.lower_loop(current, label, |this, begin| {
- let Some(current) = this.lower_call(iter_next_fn_op, vec![Operand::Copy(ref_mut_iterator_place)], option_item_place.clone(), begin, false)?
- else {
- return Ok(());
- };
- let end = this.current_loop_end()?;
- let (current, _) = this.pattern_matching_variant(
- option_item_ty.clone(),
- BindingAnnotation::Unannotated,
- option_item_place.into(),
- option_some.into(),
- current,
- pat.into(),
- Some(end),
- &[pat], &None)?;
- if let Some((_, block)) = this.lower_expr_as_place(current, body, true)? {
- this.set_goto(block, begin);
- }
- Ok(())
- })
- },
Expr::Call { callee, args, .. } => {
+ if let Some((func_id, generic_args)) =
+ self.infer.method_resolution(expr_id) {
+ let ty = chalk_ir::TyKind::FnDef(
+ CallableDefId::FunctionId(func_id).to_chalk(self.db),
+ generic_args,
+ )
+ .intern(Interner);
+ let func = Operand::from_bytes(vec![], ty);
+ return self.lower_call_and_args(
+ func,
+ iter::once(*callee).chain(args.iter().copied()),
+ place,
+ current,
+ self.is_uninhabited(expr_id),
+ expr_id.into(),
+ );
+ }
let callee_ty = self.expr_ty_after_adjustments(*callee);
match &callee_ty.data(Interner).kind {
chalk_ir::TyKind::FnDef(..) => {
let func = Operand::from_bytes(vec![], callee_ty.clone());
- self.lower_call_and_args(func, args.iter().copied(), place, current, self.is_uninhabited(expr_id))
+ self.lower_call_and_args(func, args.iter().copied(), place, current, self.is_uninhabited(expr_id), expr_id.into())
}
- TyKind::Scalar(_)
- | TyKind::Tuple(_, _)
- | TyKind::Array(_, _)
- | TyKind::Adt(_, _)
- | TyKind::Str
- | TyKind::Foreign(_)
- | TyKind::Slice(_) => {
- return Err(MirLowerError::TypeError("function call on data type"))
+ chalk_ir::TyKind::Function(_) => {
+ let Some((func, current)) = self.lower_expr_to_some_operand(*callee, current)? else {
+ return Ok(None);
+ };
+ self.lower_call_and_args(func, args.iter().copied(), place, current, self.is_uninhabited(expr_id), expr_id.into())
}
- TyKind::Error => return Err(MirLowerError::MissingFunctionDefinition),
- TyKind::AssociatedType(_, _)
- | TyKind::Raw(_, _)
- | TyKind::Ref(_, _, _)
- | TyKind::OpaqueType(_, _)
- | TyKind::Never
- | TyKind::Closure(_, _)
- | TyKind::Generator(_, _)
- | TyKind::GeneratorWitness(_, _)
- | TyKind::Placeholder(_)
- | TyKind::Dyn(_)
- | TyKind::Alias(_)
- | TyKind::Function(_)
- | TyKind::BoundVar(_)
- | TyKind::InferenceVar(_, _) => not_supported!("dynamic function call"),
+ TyKind::Error => return Err(MirLowerError::MissingFunctionDefinition(self.owner, expr_id)),
+ _ => return Err(MirLowerError::TypeError("function call on bad type")),
}
}
- Expr::MethodCall { receiver, args, .. } => {
+ Expr::MethodCall { receiver, args, method_name, .. } => {
let (func_id, generic_args) =
- self.infer.method_resolution(expr_id).ok_or(MirLowerError::UnresolvedMethod)?;
- let ty = chalk_ir::TyKind::FnDef(
- CallableDefId::FunctionId(func_id).to_chalk(self.db),
- generic_args,
- )
- .intern(Interner);
- let func = Operand::from_bytes(vec![], ty);
+ self.infer.method_resolution(expr_id).ok_or_else(|| MirLowerError::UnresolvedMethod(method_name.display(self.db.upcast()).to_string()))?;
+ let func = Operand::from_fn(self.db, func_id, generic_args);
self.lower_call_and_args(
func,
iter::once(*receiver).chain(args.iter().copied()),
place,
current,
self.is_uninhabited(expr_id),
+ expr_id.into(),
)
}
Expr::Match { expr, arms } => {
@@ -488,23 +634,27 @@ impl MirLowerCtx<'_> {
else {
return Ok(None);
};
- let cond_ty = self.expr_ty_after_adjustments(*expr);
let mut end = None;
for MatchArm { pat, guard, expr } in arms.iter() {
- if guard.is_some() {
- not_supported!("pattern matching with guard");
- }
- let (then, otherwise) = self.pattern_match(
+ let (then, mut otherwise) = self.pattern_match(
current,
None,
cond_place.clone(),
- cond_ty.clone(),
*pat,
- BindingAnnotation::Unannotated,
)?;
+ let then = if let &Some(guard) = guard {
+ let next = self.new_basic_block();
+ let o = otherwise.get_or_insert_with(|| self.new_basic_block());
+ if let Some((discr, c)) = self.lower_expr_to_some_operand(guard, then)? {
+ self.set_terminator(c, TerminatorKind::SwitchInt { discr, targets: SwitchTargets::static_if(1, next, *o) }, expr_id.into());
+ }
+ next
+ } else {
+ then
+ };
if let Some(block) = self.lower_expr_to_place(*expr, place.clone(), then)? {
let r = end.get_or_insert_with(|| self.new_basic_block());
- self.set_goto(block, *r);
+ self.set_goto(block, *r, expr_id.into());
}
match otherwise {
Some(o) => current = o,
@@ -516,32 +666,43 @@ impl MirLowerCtx<'_> {
}
}
if self.is_unterminated(current) {
- self.set_terminator(current, Terminator::Unreachable);
+ self.set_terminator(current, TerminatorKind::Unreachable, expr_id.into());
}
Ok(end)
}
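
// When a guard fails, control must fall through to the next arm, which is why
// the lowering above threads `otherwise` from arm to arm and routes the
// guard's false edge into it. A sketch of the semantics being implemented:
fn main() {
    let x = 5;
    let r = match x {
        n if n > 10 => "big",   // guard fails, fall through
        n if n > 3 => "medium", // guard succeeds
        _ => "small",
    };
    assert_eq!(r, "medium");
}
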
- Expr::Continue { label } => match label {
- Some(_) => not_supported!("continue with label"),
- None => {
- let loop_data =
- self.current_loop_blocks.ok_or(MirLowerError::ContinueWithoutLoop)?;
- self.set_goto(current, loop_data.begin);
- Ok(None)
- }
+ Expr::Continue { label } => {
+ let loop_data = match label {
+ Some(l) => self.labeled_loop_blocks.get(l).ok_or(MirLowerError::UnresolvedLabel)?,
+ None => self.current_loop_blocks.as_ref().ok_or(MirLowerError::ContinueWithoutLoop)?,
+ };
+ let begin = loop_data.begin;
+ current = self.drop_until_scope(loop_data.drop_scope_index, current);
+ self.set_goto(current, begin, expr_id.into());
+ Ok(None)
},
- Expr::Break { expr, label } => {
- if expr.is_some() {
- not_supported!("break with value");
+ &Expr::Break { expr, label } => {
+ if let Some(expr) = expr {
+ let loop_data = match label {
+ Some(l) => self.labeled_loop_blocks.get(&l).ok_or(MirLowerError::UnresolvedLabel)?,
+ None => self.current_loop_blocks.as_ref().ok_or(MirLowerError::BreakWithoutLoop)?,
+ };
+ let Some(c) = self.lower_expr_to_place(expr, loop_data.place.clone(), current)? else {
+ return Ok(None);
+ };
+ current = c;
}
- match label {
- Some(_) => not_supported!("break with label"),
+ let (end, drop_scope) = match label {
+ Some(l) => {
+ let loop_blocks = self.labeled_loop_blocks.get(&l).ok_or(MirLowerError::UnresolvedLabel)?;
+ (loop_blocks.end.expect("We always generate end for labeled loops"), loop_blocks.drop_scope_index)
+ },
None => {
- let end =
- self.current_loop_end()?;
- self.set_goto(current, end);
- Ok(None)
- }
- }
+ (self.current_loop_end()?, self.current_loop_blocks.as_ref().unwrap().drop_scope_index)
+ },
+ };
+ current = self.drop_until_scope(drop_scope, current);
+ self.set_goto(current, end, expr_id.into());
+ Ok(None)
}
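
// `break` with a value first writes into the broken loop's result place, then
// unwinds drop scopes down to that loop and jumps to its end block; a label
// selects which enclosing loop's end and drop scope to use:
fn main() {
    let v = 'outer: loop {
        loop {
            break 'outer 42; // skips the inner loop's end entirely
        }
    };
    assert_eq!(v, 42);
}
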
Expr::Return { expr } => {
if let Some(expr) = expr {
@@ -551,11 +712,22 @@ impl MirLowerCtx<'_> {
return Ok(None);
}
}
- self.set_terminator(current, Terminator::Return);
+ current = self.drop_until_scope(0, current);
+ self.set_terminator(current, TerminatorKind::Return, expr_id.into());
Ok(None)
}
Expr::Yield { .. } => not_supported!("yield"),
- Expr::RecordLit { fields, path, .. } => {
+ Expr::RecordLit { fields, path, spread, ellipsis: _, is_assignee_expr: _ } => {
+ let spread_place = match spread {
+ &Some(x) => {
+ let Some((p, c)) = self.lower_expr_as_place(current, x, true)? else {
+ return Ok(None);
+ };
+ current = c;
+ Some(p)
+ },
+ None => None,
+ };
let variant_id = self
.infer
.variant_resolution_for_expr(expr_id)
@@ -563,7 +735,7 @@ impl MirLowerCtx<'_> {
Some(p) => MirLowerError::UnresolvedName(p.display(self.db).to_string()),
None => MirLowerError::RecordLiteralWithoutPath,
})?;
- let subst = match self.expr_ty(expr_id).kind(Interner) {
+ let subst = match self.expr_ty_without_adjust(expr_id).kind(Interner) {
TyKind::Adt(_, s) => s.clone(),
_ => not_supported!("Non ADT record literal"),
};
@@ -585,9 +757,23 @@ impl MirLowerCtx<'_> {
place,
Rvalue::Aggregate(
AggregateKind::Adt(variant_id, subst),
- operands.into_iter().map(|x| x).collect::<Option<_>>().ok_or(
- MirLowerError::TypeError("missing field in record literal"),
- )?,
+ match spread_place {
+ Some(sp) => operands.into_iter().enumerate().map(|(i, x)| {
+ match x {
+ Some(x) => x,
+ None => {
+ let p = sp.project(ProjectionElem::Field(FieldId {
+ parent: variant_id,
+ local_id: LocalFieldId::from_raw(RawIdx::from(i as u32)),
+ }));
+ Operand::Copy(p)
+ },
+ }
+ }).collect(),
+ None => operands.into_iter().collect::<Option<_>>().ok_or(
+ MirLowerError::TypeError("missing field in record literal"),
+ )?,
+ },
),
expr_id.into(),
);
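
// The spread arm above implements functional update syntax: any field not
// named explicitly is copied out of the spread place, field by field. A
// sketch of the surface behavior (types here are illustrative):
struct Config {
    retries: u32,
    timeout_ms: u64,
    verbose: bool,
}

fn main() {
    let base = Config { retries: 3, timeout_ms: 500, verbose: false };
    // `retries` and `timeout_ms` come from `base` via the spread.
    let noisy = Config { verbose: true, ..base };
    assert_eq!(noisy.retries, 3);
    assert_eq!(noisy.timeout_ms, 500);
    assert!(noisy.verbose);
}
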
@@ -599,20 +785,19 @@ impl MirLowerCtx<'_> {
};
let local_id =
variant_data.field(name).ok_or(MirLowerError::UnresolvedField)?;
- let mut place = place;
- place
- .projection
- .push(PlaceElem::Field(FieldId { parent: union_id.into(), local_id }));
+ let place = place.project(PlaceElem::Field(FieldId { parent: union_id.into(), local_id }));
self.lower_expr_to_place(*expr, place, current)
}
}
}
Expr::Await { .. } => not_supported!("await"),
- Expr::Try { .. } => not_supported!("? operator"),
Expr::Yeet { .. } => not_supported!("yeet"),
- Expr::TryBlock { .. } => not_supported!("try block"),
Expr::Async { .. } => not_supported!("async block"),
- Expr::Const { .. } => not_supported!("anonymous const block"),
+ &Expr::Const(id) => {
+ let subst = self.placeholder_subst();
+ self.lower_const(id.into(), current, place, subst, expr_id.into(), self.expr_ty_without_adjust(expr_id))?;
+ Ok(Some(current))
+ },
Expr::Cast { expr, type_ref: _ } => {
let Some((x, current)) = self.lower_expr_to_some_operand(*expr, current)? else {
return Ok(None);
@@ -635,21 +820,30 @@ impl MirLowerCtx<'_> {
self.push_assignment(current, place, Rvalue::Ref(bk, p), expr_id.into());
Ok(Some(current))
}
- Expr::Box { .. } => not_supported!("box expression"),
- Expr::Field { .. } | Expr::Index { .. } | Expr::UnaryOp { op: hir_def::expr::UnaryOp::Deref, .. } => {
+ Expr::Box { expr } => {
+ let ty = self.expr_ty_after_adjustments(*expr);
+ self.push_assignment(current, place.clone(), Rvalue::ShallowInitBoxWithAlloc(ty), expr_id.into());
+ let Some((operand, current)) = self.lower_expr_to_some_operand(*expr, current)? else {
+ return Ok(None);
+ };
+ let p = place.project(ProjectionElem::Deref);
+ self.push_assignment(current, p, operand.into(), expr_id.into());
+ Ok(Some(current))
+ },
+ Expr::Field { .. } | Expr::Index { .. } | Expr::UnaryOp { op: hir_def::hir::UnaryOp::Deref, .. } => {
let Some((p, current)) = self.lower_expr_as_place_without_adjust(current, expr_id, true)? else {
return Ok(None);
};
self.push_assignment(current, place, Operand::Copy(p).into(), expr_id.into());
Ok(Some(current))
}
- Expr::UnaryOp { expr, op: op @ (hir_def::expr::UnaryOp::Not | hir_def::expr::UnaryOp::Neg) } => {
+ Expr::UnaryOp { expr, op: op @ (hir_def::hir::UnaryOp::Not | hir_def::hir::UnaryOp::Neg) } => {
let Some((operand, current)) = self.lower_expr_to_some_operand(*expr, current)? else {
return Ok(None);
};
let operation = match op {
- hir_def::expr::UnaryOp::Not => UnOp::Not,
- hir_def::expr::UnaryOp::Neg => UnOp::Neg,
+ hir_def::hir::UnaryOp::Not => UnOp::Not,
+ hir_def::hir::UnaryOp::Neg => UnOp::Neg,
_ => unreachable!(),
};
self.push_assignment(
@@ -662,24 +856,93 @@ impl MirLowerCtx<'_> {
},
Expr::BinaryOp { lhs, rhs, op } => {
let op = op.ok_or(MirLowerError::IncompleteExpr)?;
- if let hir_def::expr::BinaryOp::Assignment { op } = op {
- if op.is_some() {
- not_supported!("assignment with arith op (like +=)");
+ let is_builtin = 'b: {
+ // Using the unadjusted types here is a hack. We assume that we know every possible
+ // adjustment for a binary operator, and use the unadjusted types to simplify our conditions.
+ let lhs_ty = self.expr_ty_without_adjust(*lhs);
+ let rhs_ty = self.expr_ty_without_adjust(*rhs);
+ if matches!(op, BinaryOp::CmpOp(syntax::ast::CmpOp::Eq { .. })) {
+ if lhs_ty.as_raw_ptr().is_some() && rhs_ty.as_raw_ptr().is_some() {
+ break 'b true;
+ }
}
- let Some((lhs_place, current)) =
+ let builtin_inequal_impls = matches!(
+ op,
+ BinaryOp::ArithOp(ArithOp::Shl | ArithOp::Shr) | BinaryOp::Assignment { op: Some(ArithOp::Shl | ArithOp::Shr) }
+ );
+ lhs_ty.is_scalar() && rhs_ty.is_scalar() && (lhs_ty == rhs_ty || builtin_inequal_impls)
+ };
+ if !is_builtin {
+ if let Some((func_id, generic_args)) = self.infer.method_resolution(expr_id) {
+ let func = Operand::from_fn(self.db, func_id, generic_args);
+ return self.lower_call_and_args(
+ func,
+ [*lhs, *rhs].into_iter(),
+ place,
+ current,
+ self.is_uninhabited(expr_id),
+ expr_id.into(),
+ );
+ }
+ }
+ if let hir_def::hir::BinaryOp::Assignment { op } = op {
+ if let Some(op) = op {
+ // The last adjustment is `&mut`, which we don't want here.
+ let adjusts = self
+ .infer
+ .expr_adjustments
+ .get(lhs)
+ .and_then(|x| x.split_last())
+ .map(|x| x.1)
+ .ok_or(MirLowerError::TypeError("adjustment of binary op was missing"))?;
+ let Some((lhs_place, current)) =
+ self.lower_expr_as_place_with_adjust(current, *lhs, false, adjusts)?
+ else {
+ return Ok(None);
+ };
+ let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? else {
+ return Ok(None);
+ };
+ let r_value = Rvalue::CheckedBinaryOp(op.into(), Operand::Copy(lhs_place.clone()), rhs_op);
+ self.push_assignment(current, lhs_place, r_value, expr_id.into());
+ return Ok(Some(current));
+ } else {
+ let Some((lhs_place, current)) =
self.lower_expr_as_place(current, *lhs, false)?
- else {
- return Ok(None);
- };
- let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? else {
- return Ok(None);
- };
- self.push_assignment(current, lhs_place, rhs_op.into(), expr_id.into());
- return Ok(Some(current));
+ else {
+ return Ok(None);
+ };
+ let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? else {
+ return Ok(None);
+ };
+ self.push_assignment(current, lhs_place, rhs_op.into(), expr_id.into());
+ return Ok(Some(current));
+ }
}
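
// The `is_builtin` split above means `x += y` on scalars stays a built-in
// CheckedBinaryOp on the place, while on user types it resolves to a method
// call, exactly as if written out. A sketch under that assumption:
use std::ops::AddAssign;

struct Meters(f64);

impl AddAssign<f64> for Meters {
    fn add_assign(&mut self, rhs: f64) {
        self.0 += rhs;
    }
}

fn main() {
    let mut m = Meters(2.0);
    m += 1.5; // lowered as a call to AddAssign::add_assign(&mut m, 1.5)
    let mut i = 1u32;
    i += 1; // built-in: read the place, add, write the place back
    assert_eq!(m.0, 3.5);
    assert_eq!(i, 2);
}
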
let Some((lhs_op, current)) = self.lower_expr_to_some_operand(*lhs, current)? else {
return Ok(None);
};
+ if let hir_def::hir::BinaryOp::LogicOp(op) = op {
+ let value_to_short = match op {
+ syntax::ast::LogicOp::And => 0,
+ syntax::ast::LogicOp::Or => 1,
+ };
+ let start_of_then = self.new_basic_block();
+ self.push_assignment(start_of_then, place.clone(), lhs_op.clone().into(), expr_id.into());
+ let end_of_then = Some(start_of_then);
+ let start_of_else = self.new_basic_block();
+ let end_of_else =
+ self.lower_expr_to_place(*rhs, place, start_of_else)?;
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr: lhs_op,
+ targets: SwitchTargets::static_if(value_to_short, start_of_then, start_of_else),
+ },
+ expr_id.into(),
+ );
+ return Ok(self.merge_blocks(end_of_then, end_of_else, expr_id.into()));
+ }
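
// `value_to_short` encodes the short-circuit value: 0 for `&&`, 1 for `||`.
// If the lhs already equals it, the rhs block is never entered and the lhs
// value is written to the result place directly:
fn side_effect(flag: &mut bool) -> bool {
    *flag = true;
    false
}

fn main() {
    let mut evaluated = false;
    let r = false && side_effect(&mut evaluated);
    assert!(!r);
    assert!(!evaluated); // the rhs was skipped by the SwitchInt
}
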
let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? else {
return Ok(None);
};
@@ -688,13 +951,13 @@ impl MirLowerCtx<'_> {
place,
Rvalue::CheckedBinaryOp(
match op {
- hir_def::expr::BinaryOp::LogicOp(op) => match op {
- hir_def::expr::LogicOp::And => BinOp::BitAnd, // FIXME: make these short circuit
- hir_def::expr::LogicOp::Or => BinOp::BitOr,
+ hir_def::hir::BinaryOp::LogicOp(op) => match op {
+ hir_def::hir::LogicOp::And => BinOp::BitAnd, // FIXME: make these short circuit
+ hir_def::hir::LogicOp::Or => BinOp::BitOr,
},
- hir_def::expr::BinaryOp::ArithOp(op) => BinOp::from(op),
- hir_def::expr::BinaryOp::CmpOp(op) => BinOp::from(op),
- hir_def::expr::BinaryOp::Assignment { .. } => unreachable!(), // handled above
+ hir_def::hir::BinaryOp::ArithOp(op) => BinOp::from(op),
+ hir_def::hir::BinaryOp::CmpOp(op) => BinOp::from(op),
+ hir_def::hir::BinaryOp::Assignment { .. } => unreachable!(), // handled above
},
lhs_op,
rhs_op,
@@ -703,8 +966,96 @@ impl MirLowerCtx<'_> {
);
Ok(Some(current))
}
- Expr::Range { .. } => not_supported!("range"),
- Expr::Closure { .. } => not_supported!("closure"),
+ &Expr::Range { lhs, rhs, range_type: _ } => {
+ let ty = self.expr_ty_without_adjust(expr_id);
+ let Some((adt, subst)) = ty.as_adt() else {
+ return Err(MirLowerError::TypeError("Range type is not adt"));
+ };
+ let AdtId::StructId(st) = adt else {
+ return Err(MirLowerError::TypeError("Range type is not struct"));
+ };
+ let mut lp = None;
+ let mut rp = None;
+ if let Some(x) = lhs {
+ let Some((o, c)) = self.lower_expr_to_some_operand(x, current)? else {
+ return Ok(None);
+ };
+ lp = Some(o);
+ current = c;
+ }
+ if let Some(x) = rhs {
+ let Some((o, c)) = self.lower_expr_to_some_operand(x, current)? else {
+ return Ok(None);
+ };
+ rp = Some(o);
+ current = c;
+ }
+ self.push_assignment(
+ current,
+ place,
+ Rvalue::Aggregate(
+ AggregateKind::Adt(st.into(), subst.clone()),
+ self.db.struct_data(st).variant_data.fields().iter().map(|x| {
+ let o = match x.1.name.as_str() {
+ Some("start") => lp.take(),
+ Some("end") => rp.take(),
+ Some("exhausted") => Some(Operand::from_bytes(vec![0], TyBuilder::bool())),
+ _ => None,
+ };
+ o.ok_or(MirLowerError::UnresolvedField)
+ }).collect::<Result<_>>()?,
+ ),
+ expr_id.into(),
+ );
+ Ok(Some(current))
+ },
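
// A range expression is lowered as a plain struct aggregate over the Range
// type's fields ("start", "end", and for inclusive ranges an "exhausted"
// flag initialized to false). A sketch of the equivalence:
use std::ops::Range;

fn main() {
    let r = 2..7;
    let explicit = Range { start: 2, end: 7 };
    assert_eq!(r, explicit);
}
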
+ Expr::Closure { .. } => {
+ let ty = self.expr_ty_without_adjust(expr_id);
+ let TyKind::Closure(id, _) = ty.kind(Interner) else {
+ not_supported!("closure with non closure type");
+ };
+ self.result.closures.push(*id);
+ let (captures, _) = self.infer.closure_info(id);
+ let mut operands = vec![];
+ for capture in captures.iter() {
+ let p = Place {
+ local: self.binding_local(capture.place.local)?,
+ projection: capture.place.projections.clone().into_iter().map(|x| {
+ match x {
+ ProjectionElem::Deref => ProjectionElem::Deref,
+ ProjectionElem::Field(x) => ProjectionElem::Field(x),
+ ProjectionElem::TupleOrClosureField(x) => ProjectionElem::TupleOrClosureField(x),
+ ProjectionElem::ConstantIndex { offset, from_end } => ProjectionElem::ConstantIndex { offset, from_end },
+ ProjectionElem::Subslice { from, to } => ProjectionElem::Subslice { from, to },
+ ProjectionElem::OpaqueCast(x) => ProjectionElem::OpaqueCast(x),
+ ProjectionElem::Index(x) => match x { },
+ }
+ }).collect(),
+ };
+ match &capture.kind {
+ CaptureKind::ByRef(bk) => {
+ let placeholder_subst = self.placeholder_subst();
+ let tmp_ty = capture.ty.clone().substitute(Interner, &placeholder_subst);
+ let tmp: Place = self.temp(tmp_ty, current, capture.span)?.into();
+ self.push_assignment(
+ current,
+ tmp.clone(),
+ Rvalue::Ref(bk.clone(), p),
+ capture.span,
+ );
+ operands.push(Operand::Move(tmp));
+ },
+ CaptureKind::ByValue => operands.push(Operand::Move(p)),
+ }
+ }
+ self.push_assignment(
+ current,
+ place,
+ Rvalue::Aggregate(AggregateKind::Closure(ty), operands.into()),
+ expr_id.into(),
+ );
+ Ok(Some(current))
+ },
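
// The aggregate built above holds one operand per capture: shared borrows,
// mutable borrows (via a fresh temporary holding the reference), or moved
// values. A sketch of the three capture modes in surface Rust:
fn main() {
    let shared = String::from("read-only");
    let mut counter = 0usize;
    let owned = vec![1, 2, 3];
    let f = || {
        counter += shared.len(); // `shared` by &, `counter` by &mut
        drop(owned); // forces a by-value capture of `owned`
        counter
    };
    assert_eq!(f(), 9);
}
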
Expr::Tuple { exprs, is_assignee_expr: _ } => {
let Some(values) = exprs
.iter()
@@ -720,7 +1071,7 @@ impl MirLowerCtx<'_> {
return Ok(None);
};
let r = Rvalue::Aggregate(
- AggregateKind::Tuple(self.expr_ty(expr_id)),
+ AggregateKind::Tuple(self.expr_ty_without_adjust(expr_id)),
values,
);
self.push_assignment(current, place, r, expr_id.into());
@@ -728,7 +1079,7 @@ impl MirLowerCtx<'_> {
}
Expr::Array(l) => match l {
Array::ElementList { elements, .. } => {
- let elem_ty = match &self.expr_ty(expr_id).data(Interner).kind {
+ let elem_ty = match &self.expr_ty_without_adjust(expr_id).data(Interner).kind {
TyKind::Array(ty, _) => ty.clone(),
_ => {
return Err(MirLowerError::TypeError(
@@ -756,10 +1107,25 @@ impl MirLowerCtx<'_> {
self.push_assignment(current, place, r, expr_id.into());
Ok(Some(current))
}
- Array::Repeat { .. } => not_supported!("array repeat"),
+ Array::Repeat { initializer, .. } => {
+ let Some((init, current)) = self.lower_expr_to_some_operand(*initializer, current)? else {
+ return Ok(None);
+ };
+ let len = match &self.expr_ty_without_adjust(expr_id).data(Interner).kind {
+ TyKind::Array(_, len) => len.clone(),
+ _ => {
+ return Err(MirLowerError::TypeError(
+ "Array repeat expression with non array type",
+ ))
+ }
+ };
+ let r = Rvalue::Repeat(init, len);
+ self.push_assignment(current, place, r, expr_id.into());
+ Ok(Some(current))
+ },
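
// `Rvalue::Repeat(init, len)` evaluates the initializer once and replicates
// it `len` times, with the length taken from the array type rather than
// re-evaluated. Surface equivalent:
fn main() {
    let buf = [0u8; 8]; // one initializer operand, repeated 8 times
    assert_eq!(buf, [0, 0, 0, 0, 0, 0, 0, 0]);
}
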
},
Expr::Literal(l) => {
- let ty = self.expr_ty(expr_id);
+ let ty = self.expr_ty_without_adjust(expr_id);
let op = self.lower_literal_to_operand(ty, l)?;
self.push_assignment(current, place, op.into(), expr_id.into());
Ok(Some(current))
@@ -768,17 +1134,25 @@ impl MirLowerCtx<'_> {
}
}
+ fn placeholder_subst(&mut self) -> Substitution {
+ let placeholder_subst = match self.owner.as_generic_def_id() {
+ Some(x) => TyBuilder::placeholder_subst(self.db, x),
+ None => Substitution::empty(Interner),
+ };
+ placeholder_subst
+ }
+
fn push_field_projection(&self, place: &mut Place, expr_id: ExprId) -> Result<()> {
if let Expr::Field { expr, name } = &self.body[expr_id] {
if let TyKind::Tuple(..) = self.expr_ty_after_adjustments(*expr).kind(Interner) {
let index = name
.as_tuple_index()
.ok_or(MirLowerError::TypeError("named field on tuple"))?;
- place.projection.push(ProjectionElem::TupleField(index))
+ *place = place.project(ProjectionElem::TupleOrClosureField(index))
} else {
let field =
self.infer.field_resolution(expr_id).ok_or(MirLowerError::UnresolvedField)?;
- place.projection.push(ProjectionElem::Field(field));
+ *place = place.project(ProjectionElem::Field(field));
}
} else {
not_supported!("")
@@ -786,33 +1160,75 @@ impl MirLowerCtx<'_> {
Ok(())
}
+ fn lower_literal_or_const_to_operand(
+ &mut self,
+ ty: Ty,
+ loc: &LiteralOrConst,
+ ) -> Result<Operand> {
+ match loc {
+ LiteralOrConst::Literal(l) => self.lower_literal_to_operand(ty, l),
+ LiteralOrConst::Const(c) => {
+ let unresolved_name = || MirLowerError::unresolved_path(self.db, c);
+ let resolver = self.owner.resolver(self.db.upcast());
+ let pr = resolver
+ .resolve_path_in_value_ns(self.db.upcast(), c)
+ .ok_or_else(unresolved_name)?;
+ match pr {
+ ResolveValueResult::ValueNs(v) => {
+ if let ValueNs::ConstId(c) = v {
+ self.lower_const_to_operand(Substitution::empty(Interner), c.into(), ty)
+ } else {
+ not_supported!("bad path in range pattern");
+ }
+ }
+ ResolveValueResult::Partial(_, _) => {
+ not_supported!("associated constants in range pattern")
+ }
+ }
+ }
+ }
+ }
+
fn lower_literal_to_operand(&mut self, ty: Ty, l: &Literal) -> Result<Operand> {
- let size = layout_of_ty(self.db, &ty, self.owner.module(self.db.upcast()).krate())?
+ let size = self
+ .db
+ .layout_of_ty(ty.clone(), self.owner.module(self.db.upcast()).krate())?
.size
.bytes_usize();
let bytes = match l {
- hir_def::expr::Literal::String(b) => {
+ hir_def::hir::Literal::String(b) => {
let b = b.as_bytes();
- let mut data = vec![];
+ let mut data = Vec::with_capacity(mem::size_of::<usize>() * 2);
data.extend(0usize.to_le_bytes());
data.extend(b.len().to_le_bytes());
let mut mm = MemoryMap::default();
mm.insert(0, b.to_vec());
return Ok(Operand::from_concrete_const(data, mm, ty));
}
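
// A &str constant is stored as a fat pointer: (address, length), both as
// little-endian usizes, with the actual bytes living at address 0 of the
// constant's memory map. A sketch of the data layout being built above:
fn main() {
    let s = "hi";
    let mut data = Vec::with_capacity(std::mem::size_of::<usize>() * 2);
    data.extend(0usize.to_le_bytes()); // address within the memory map
    data.extend(s.len().to_le_bytes()); // length of the string
    assert_eq!(data.len(), std::mem::size_of::<usize>() * 2);
}
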
- hir_def::expr::Literal::ByteString(b) => {
- let mut data = vec![];
+ hir_def::hir::Literal::CString(b) => {
+ let b = b.as_bytes();
+ let bytes = b.iter().copied().chain(iter::once(0)).collect::<Vec<_>>();
+
+ let mut data = Vec::with_capacity(mem::size_of::<usize>() * 2);
+ data.extend(0usize.to_le_bytes());
+ data.extend(bytes.len().to_le_bytes());
+ let mut mm = MemoryMap::default();
+ mm.insert(0, bytes);
+ return Ok(Operand::from_concrete_const(data, mm, ty));
+ }
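
// A C string literal uses the same fat-pointer layout as a byte string, but
// with a NUL terminator appended and counted in the stored length:
fn main() {
    let b = b"hi";
    let bytes: Vec<u8> = b.iter().copied().chain(std::iter::once(0)).collect();
    assert_eq!(bytes, vec![104, 105, 0]);
}
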
+ hir_def::hir::Literal::ByteString(b) => {
+ let mut data = Vec::with_capacity(mem::size_of::<usize>() * 2);
data.extend(0usize.to_le_bytes());
data.extend(b.len().to_le_bytes());
let mut mm = MemoryMap::default();
mm.insert(0, b.to_vec());
return Ok(Operand::from_concrete_const(data, mm, ty));
}
- hir_def::expr::Literal::Char(c) => u32::from(*c).to_le_bytes().into(),
- hir_def::expr::Literal::Bool(b) => vec![*b as u8],
- hir_def::expr::Literal::Int(x, _) => x.to_le_bytes()[0..size].into(),
- hir_def::expr::Literal::Uint(x, _) => x.to_le_bytes()[0..size].into(),
- hir_def::expr::Literal::Float(f, _) => match size {
+ hir_def::hir::Literal::Char(c) => u32::from(*c).to_le_bytes().into(),
+ hir_def::hir::Literal::Bool(b) => vec![*b as u8],
+ hir_def::hir::Literal::Int(x, _) => x.to_le_bytes()[0..size].into(),
+ hir_def::hir::Literal::Uint(x, _) => x.to_le_bytes()[0..size].into(),
+ hir_def::hir::Literal::Float(f, _) => match size {
8 => f.into_f64().to_le_bytes().into(),
4 => f.into_f32().to_le_bytes().into(),
_ => {
@@ -829,24 +1245,34 @@ impl MirLowerCtx<'_> {
fn lower_const(
&mut self,
- const_id: hir_def::ConstId,
+ const_id: GeneralConstId,
prev_block: BasicBlockId,
place: Place,
+ subst: Substitution,
span: MirSpan,
+ ty: Ty,
) -> Result<()> {
- let c = self.db.const_eval(const_id)?;
- self.write_const_to_place(c, prev_block, place, span)
+ let c = self.lower_const_to_operand(subst, const_id, ty)?;
+ self.push_assignment(prev_block, place, c.into(), span);
+ Ok(())
}
- fn write_const_to_place(
+ fn lower_const_to_operand(
&mut self,
- c: Const,
- prev_block: BasicBlockId,
- place: Place,
- span: MirSpan,
- ) -> Result<()> {
- self.push_assignment(prev_block, place, Operand::Constant(c).into(), span);
- Ok(())
+ subst: Substitution,
+ const_id: GeneralConstId,
+ ty: Ty,
+ ) -> Result<Operand> {
+ let c = if subst.len(Interner) != 0 {
+ // We can't evaluate a constant with a substitution yet, as generics are not monomorphized during lowering.
+ intern_const_scalar(ConstScalar::UnevaluatedConst(const_id, subst), ty)
+ } else {
+ let name = const_id.name(self.db.upcast());
+ self.db
+ .const_eval(const_id.into(), subst)
+ .map_err(|e| MirLowerError::ConstEvalError(name, Box::new(e)))?
+ };
+ Ok(Operand::Constant(c))
}
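
// Why substituted constants stay unevaluated: inside a generic body a
// constant can depend on type parameters, so it can only be computed once
// the parameters are substituted with concrete types. For example:
struct Wrapper<T>(T);

impl<T> Wrapper<T> {
    // Depends on T; evaluable only after monomorphization.
    const SIZE: usize = std::mem::size_of::<T>();
}

fn main() {
    assert_eq!(Wrapper::<u64>::SIZE, 8);
    assert_eq!(Wrapper::<(u8, u8)>::SIZE, 2);
}
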
fn write_bytes_to_place(
@@ -867,12 +1293,12 @@ impl MirLowerCtx<'_> {
prev_block: BasicBlockId,
place: Place,
ty: Ty,
- fields: Vec<Operand>,
+ fields: Box<[Operand]>,
span: MirSpan,
) -> Result<BasicBlockId> {
let subst = match ty.kind(Interner) {
TyKind::Adt(_, subst) => subst.clone(),
- _ => not_supported!("Non ADT enum"),
+ _ => implementation_error!("Non ADT enum"),
};
self.push_assignment(
prev_block,
@@ -890,6 +1316,7 @@ impl MirLowerCtx<'_> {
place: Place,
mut current: BasicBlockId,
is_uninhabited: bool,
+ span: MirSpan,
) -> Result<Option<BasicBlockId>> {
let Some(args) = args
.map(|arg| {
@@ -904,21 +1331,22 @@ impl MirLowerCtx<'_> {
else {
return Ok(None);
};
- self.lower_call(func, args, place, current, is_uninhabited)
+ self.lower_call(func, args.into(), place, current, is_uninhabited, span)
}
fn lower_call(
&mut self,
func: Operand,
- args: Vec<Operand>,
+ args: Box<[Operand]>,
place: Place,
current: BasicBlockId,
is_uninhabited: bool,
+ span: MirSpan,
) -> Result<Option<BasicBlockId>> {
let b = if is_uninhabited { None } else { Some(self.new_basic_block()) };
self.set_terminator(
current,
- Terminator::Call {
+ TerminatorKind::Call {
func,
args,
destination: place,
@@ -926,6 +1354,7 @@ impl MirLowerCtx<'_> {
cleanup: None,
from_hir_call: true,
},
+ span,
);
Ok(b)
}
@@ -934,15 +1363,15 @@ impl MirLowerCtx<'_> {
self.result.basic_blocks[source].terminator.is_none()
}
- fn set_terminator(&mut self, source: BasicBlockId, terminator: Terminator) {
- self.result.basic_blocks[source].terminator = Some(terminator);
+ fn set_terminator(&mut self, source: BasicBlockId, terminator: TerminatorKind, span: MirSpan) {
+ self.result.basic_blocks[source].terminator = Some(Terminator { span, kind: terminator });
}
- fn set_goto(&mut self, source: BasicBlockId, target: BasicBlockId) {
- self.set_terminator(source, Terminator::Goto { target });
+ fn set_goto(&mut self, source: BasicBlockId, target: BasicBlockId, span: MirSpan) {
+ self.set_terminator(source, TerminatorKind::Goto { target }, span);
}
- fn expr_ty(&self, e: ExprId) -> Ty {
+ fn expr_ty_without_adjust(&self, e: ExprId) -> Ty {
self.infer[e].clone()
}
@@ -953,7 +1382,7 @@ impl MirLowerCtx<'_> {
ty = Some(x.target.clone());
}
}
- ty.unwrap_or_else(|| self.expr_ty(e))
+ ty.unwrap_or_else(|| self.expr_ty_without_adjust(e))
}
fn push_statement(&mut self, block: BasicBlockId, statement: Statement) {
@@ -970,293 +1399,14 @@ impl MirLowerCtx<'_> {
self.push_statement(block, StatementKind::Assign(place, rvalue).with_span(span));
}
- /// It gets a `current` unterminated block, appends some statements and possibly a terminator to it to check if
- /// the pattern matches and write bindings, and returns two unterminated blocks, one for the matched path (which
- /// can be the `current` block) and one for the mismatched path. If the input pattern is irrefutable, the
- /// mismatched path block is `None`.
- ///
- /// By default, it will create a new block for mismatched path. If you already have one, you can provide it with
- /// `current_else` argument to save an unneccessary jump. If `current_else` isn't `None`, the result mismatched path
- /// wouldn't be `None` as well. Note that this function will add jumps to the beginning of the `current_else` block,
- /// so it should be an empty block.
- fn pattern_match(
- &mut self,
- mut current: BasicBlockId,
- mut current_else: Option<BasicBlockId>,
- mut cond_place: Place,
- mut cond_ty: Ty,
- pattern: PatId,
- mut binding_mode: BindingAnnotation,
- ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
- Ok(match &self.body.pats[pattern] {
- Pat::Missing => return Err(MirLowerError::IncompleteExpr),
- Pat::Wild => (current, current_else),
- Pat::Tuple { args, ellipsis } => {
- pattern_matching_dereference(&mut cond_ty, &mut binding_mode, &mut cond_place);
- let subst = match cond_ty.kind(Interner) {
- TyKind::Tuple(_, s) => s,
- _ => {
- return Err(MirLowerError::TypeError(
- "non tuple type matched with tuple pattern",
- ))
- }
- };
- self.pattern_match_tuple_like(
- current,
- current_else,
- args.iter().enumerate().map(|(i, x)| {
- (
- PlaceElem::TupleField(i),
- *x,
- subst.at(Interner, i).assert_ty_ref(Interner).clone(),
- )
- }),
- *ellipsis,
- &cond_place,
- binding_mode,
- )?
- }
- Pat::Or(pats) => {
- let then_target = self.new_basic_block();
- let mut finished = false;
- for pat in &**pats {
- let (next, next_else) = self.pattern_match(
- current,
- None,
- cond_place.clone(),
- cond_ty.clone(),
- *pat,
- binding_mode,
- )?;
- self.set_goto(next, then_target);
- match next_else {
- Some(t) => {
- current = t;
- }
- None => {
- finished = true;
- break;
- }
- }
- }
- if !finished {
- let ce = *current_else.get_or_insert_with(|| self.new_basic_block());
- self.set_goto(current, ce);
- }
- (then_target, current_else)
- }
- Pat::Record { .. } => not_supported!("record pattern"),
- Pat::Range { .. } => not_supported!("range pattern"),
- Pat::Slice { .. } => not_supported!("slice pattern"),
- Pat::Path(_) => {
- let Some(variant) = self.infer.variant_resolution_for_pat(pattern) else {
- not_supported!("unresolved variant");
- };
- self.pattern_matching_variant(
- cond_ty,
- binding_mode,
- cond_place,
- variant,
- current,
- pattern.into(),
- current_else,
- &[],
- &None,
- )?
- }
- Pat::Lit(l) => {
- let then_target = self.new_basic_block();
- let else_target = current_else.unwrap_or_else(|| self.new_basic_block());
- match &self.body.exprs[*l] {
- Expr::Literal(l) => match l {
- hir_def::expr::Literal::Int(x, _) => {
- self.set_terminator(
- current,
- Terminator::SwitchInt {
- discr: Operand::Copy(cond_place),
- targets: SwitchTargets::static_if(
- *x as u128,
- then_target,
- else_target,
- ),
- },
- );
- }
- hir_def::expr::Literal::Uint(x, _) => {
- self.set_terminator(
- current,
- Terminator::SwitchInt {
- discr: Operand::Copy(cond_place),
- targets: SwitchTargets::static_if(*x, then_target, else_target),
- },
- );
- }
- _ => not_supported!("non int path literal"),
- },
- _ => not_supported!("expression path literal"),
- }
- (then_target, Some(else_target))
- }
- Pat::Bind { id, subpat } => {
- let target_place = self.result.binding_locals[*id];
- let mode = self.body.bindings[*id].mode;
- if let Some(subpat) = subpat {
- (current, current_else) = self.pattern_match(
- current,
- current_else,
- cond_place.clone(),
- cond_ty,
- *subpat,
- binding_mode,
- )?
- }
- if matches!(mode, BindingAnnotation::Ref | BindingAnnotation::RefMut) {
- binding_mode = mode;
- }
- self.push_storage_live(*id, current);
- self.push_assignment(
- current,
- target_place.into(),
- match binding_mode {
- BindingAnnotation::Unannotated | BindingAnnotation::Mutable => {
- Operand::Copy(cond_place).into()
- }
- BindingAnnotation::Ref => Rvalue::Ref(BorrowKind::Shared, cond_place),
- BindingAnnotation::RefMut => Rvalue::Ref(
- BorrowKind::Mut { allow_two_phase_borrow: false },
- cond_place,
- ),
- },
- pattern.into(),
- );
- (current, current_else)
- }
- Pat::TupleStruct { path: _, args, ellipsis } => {
- let Some(variant) = self.infer.variant_resolution_for_pat(pattern) else {
- not_supported!("unresolved variant");
- };
- self.pattern_matching_variant(
- cond_ty,
- binding_mode,
- cond_place,
- variant,
- current,
- pattern.into(),
- current_else,
- args,
- ellipsis,
- )?
- }
- Pat::Ref { .. } => not_supported!("& pattern"),
- Pat::Box { .. } => not_supported!("box pattern"),
- Pat::ConstBlock(_) => not_supported!("const block pattern"),
- })
- }
-
- fn pattern_matching_variant(
- &mut self,
- mut cond_ty: Ty,
- mut binding_mode: BindingAnnotation,
- mut cond_place: Place,
- variant: VariantId,
- current: BasicBlockId,
- span: MirSpan,
- current_else: Option<BasicBlockId>,
- args: &[PatId],
- ellipsis: &Option<usize>,
- ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
- pattern_matching_dereference(&mut cond_ty, &mut binding_mode, &mut cond_place);
- let subst = match cond_ty.kind(Interner) {
- TyKind::Adt(_, s) => s,
- _ => return Err(MirLowerError::TypeError("non adt type matched with tuple struct")),
- };
- let fields_type = self.db.field_types(variant);
- Ok(match variant {
- VariantId::EnumVariantId(v) => {
- let e = self.db.const_eval_discriminant(v)? as u128;
- let next = self.new_basic_block();
- let tmp = self.discr_temp_place();
- self.push_assignment(
- current,
- tmp.clone(),
- Rvalue::Discriminant(cond_place.clone()),
- span,
- );
- let else_target = current_else.unwrap_or_else(|| self.new_basic_block());
- self.set_terminator(
- current,
- Terminator::SwitchInt {
- discr: Operand::Copy(tmp),
- targets: SwitchTargets::static_if(e, next, else_target),
- },
- );
- let enum_data = self.db.enum_data(v.parent);
- let fields =
- enum_data.variants[v.local_id].variant_data.fields().iter().map(|(x, _)| {
- (
- PlaceElem::Field(FieldId { parent: v.into(), local_id: x }),
- fields_type[x].clone().substitute(Interner, subst),
- )
- });
- self.pattern_match_tuple_like(
- next,
- Some(else_target),
- args.iter().zip(fields).map(|(x, y)| (y.0, *x, y.1)),
- *ellipsis,
- &cond_place,
- binding_mode,
- )?
- }
- VariantId::StructId(s) => {
- let struct_data = self.db.struct_data(s);
- let fields = struct_data.variant_data.fields().iter().map(|(x, _)| {
- (
- PlaceElem::Field(FieldId { parent: s.into(), local_id: x }),
- fields_type[x].clone().substitute(Interner, subst),
- )
- });
- self.pattern_match_tuple_like(
- current,
- current_else,
- args.iter().zip(fields).map(|(x, y)| (y.0, *x, y.1)),
- *ellipsis,
- &cond_place,
- binding_mode,
- )?
- }
- VariantId::UnionId(_) => {
- return Err(MirLowerError::TypeError("pattern matching on union"))
- }
- })
- }
-
- fn pattern_match_tuple_like(
- &mut self,
- mut current: BasicBlockId,
- mut current_else: Option<BasicBlockId>,
- args: impl Iterator<Item = (PlaceElem, PatId, Ty)>,
- ellipsis: Option<usize>,
- cond_place: &Place,
- binding_mode: BindingAnnotation,
- ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
- if ellipsis.is_some() {
- not_supported!("tuple like pattern with ellipsis");
- }
- for (proj, arg, ty) in args {
- let mut cond_place = cond_place.clone();
- cond_place.projection.push(proj);
- (current, current_else) =
- self.pattern_match(current, current_else, cond_place, ty, arg, binding_mode)?;
- }
- Ok((current, current_else))
- }
-
- fn discr_temp_place(&mut self) -> Place {
+ fn discr_temp_place(&mut self, current: BasicBlockId) -> Place {
match &self.discr_temp {
Some(x) => x.clone(),
None => {
- let tmp: Place =
- self.temp(TyBuilder::discr_ty()).expect("discr_ty is never unsized").into();
+ let tmp: Place = self
+ .temp(TyBuilder::discr_ty(), current, MirSpan::Unknown)
+ .expect("discr_ty is never unsized")
+ .into();
self.discr_temp = Some(tmp.clone());
tmp
}
@@ -1266,19 +1416,34 @@ impl MirLowerCtx<'_> {
fn lower_loop(
&mut self,
prev_block: BasicBlockId,
+ place: Place,
label: Option<LabelId>,
+ span: MirSpan,
f: impl FnOnce(&mut MirLowerCtx<'_>, BasicBlockId) -> Result<()>,
) -> Result<Option<BasicBlockId>> {
- if label.is_some() {
- not_supported!("loop with label");
- }
let begin = self.new_basic_block();
- let prev =
- mem::replace(&mut self.current_loop_blocks, Some(LoopBlocks { begin, end: None }));
- self.set_goto(prev_block, begin);
+ let prev = mem::replace(
+ &mut self.current_loop_blocks,
+ Some(LoopBlocks { begin, end: None, place, drop_scope_index: self.drop_scopes.len() }),
+ );
+ let prev_label = if let Some(label) = label {
+ // We should generate the end now, to make sure that it won't change later. This is
+ // wasteful, as we may emit an end (an unnecessary unreachable block) for a loop that
+ // never terminates, but it does not affect correctness.
+ self.current_loop_end()?;
+ self.labeled_loop_blocks
+ .insert(label, self.current_loop_blocks.as_ref().unwrap().clone())
+ } else {
+ None
+ };
+ self.set_goto(prev_block, begin, span);
f(self, begin)?;
- let my = mem::replace(&mut self.current_loop_blocks, prev)
- .ok_or(MirLowerError::ImplementationError("current_loop_blocks is corrupt"))?;
+ let my = mem::replace(&mut self.current_loop_blocks, prev).ok_or(
+ MirLowerError::ImplementationError("current_loop_blocks is corrupt".to_string()),
+ )?;
+ if let Some(prev) = prev_label {
+ self.labeled_loop_blocks.insert(label.unwrap(), prev);
+ }
Ok(my.end)
}
@@ -1290,14 +1455,15 @@ impl MirLowerCtx<'_> {
&mut self,
b1: Option<BasicBlockId>,
b2: Option<BasicBlockId>,
+ span: MirSpan,
) -> Option<BasicBlockId> {
match (b1, b2) {
(None, None) => None,
(None, Some(b)) | (Some(b), None) => Some(b),
(Some(b1), Some(b2)) => {
let bm = self.new_basic_block();
- self.set_goto(b1, bm);
- self.set_goto(b2, bm);
+ self.set_goto(b1, bm, span);
+ self.set_goto(b2, bm, span);
Some(bm)
}
}
@@ -1307,7 +1473,9 @@ impl MirLowerCtx<'_> {
let r = match self
.current_loop_blocks
.as_mut()
- .ok_or(MirLowerError::ImplementationError("Current loop access out of loop"))?
+ .ok_or(MirLowerError::ImplementationError(
+ "Current loop access out of loop".to_string(),
+ ))?
.end
{
Some(x) => x,
@@ -1315,7 +1483,9 @@ impl MirLowerCtx<'_> {
let s = self.new_basic_block();
self.current_loop_blocks
.as_mut()
- .ok_or(MirLowerError::ImplementationError("Current loop access out of loop"))?
+ .ok_or(MirLowerError::ImplementationError(
+ "Current loop access out of loop".to_string(),
+ ))?
.end = Some(s);
s
}
@@ -1327,36 +1497,28 @@ impl MirLowerCtx<'_> {
is_ty_uninhabited_from(&self.infer[expr_id], self.owner.module(self.db.upcast()), self.db)
}
- /// This function push `StorageLive` statement for the binding, and applies changes to add `StorageDead` in
- /// the appropriated places.
- fn push_storage_live(&mut self, b: BindingId, current: BasicBlockId) {
- // Current implementation is wrong. It adds no `StorageDead` at the end of scope, and before each break
- // and continue. It just add a `StorageDead` before the `StorageLive`, which is not wrong, but unneeeded in
- // the proper implementation. Due this limitation, implementing a borrow checker on top of this mir will falsely
- // allow this:
- //
- // ```
- // let x;
- // loop {
- // let y = 2;
- // x = &y;
- // if some_condition {
- // break; // we need to add a StorageDead(y) above this to kill the x borrow
- // }
- // }
- // use(x)
- // ```
- // But I think this approach work for mutability analysis, as user can't write code which mutates a binding
- // after StorageDead, except loops, which are handled by this hack.
+ /// This function pushes a `StorageLive` statement for the binding, and applies changes to add `StorageDead` and
+ /// `Drop` in the appropriate places.
+ fn push_storage_live(&mut self, b: BindingId, current: BasicBlockId) -> Result<()> {
let span = self.body.bindings[b]
.definitions
.first()
.copied()
.map(MirSpan::PatId)
.unwrap_or(MirSpan::Unknown);
- let l = self.result.binding_locals[b];
- self.push_statement(current, StatementKind::StorageDead(l).with_span(span));
+ let l = self.binding_local(b)?;
+ self.push_storage_live_for_local(l, current, span)
+ }
+
+ fn push_storage_live_for_local(
+ &mut self,
+ l: LocalId,
+ current: BasicBlockId,
+ span: MirSpan,
+ ) -> Result<()> {
+ self.drop_scopes.last_mut().unwrap().locals.push(l);
self.push_statement(current, StatementKind::StorageLive(l).with_span(span));
+ Ok(())
}
fn resolve_lang_item(&self, item: LangItem) -> Result<LangItemTarget> {
@@ -1366,81 +1528,204 @@ impl MirLowerCtx<'_> {
fn lower_block_to_place(
&mut self,
- label: Option<LabelId>,
- statements: &[hir_def::expr::Statement],
+ statements: &[hir_def::hir::Statement],
mut current: BasicBlockId,
tail: Option<ExprId>,
place: Place,
+ span: MirSpan,
) -> Result<Option<Idx<BasicBlock>>> {
- if label.is_some() {
- not_supported!("block with label");
- }
+ let scope = self.push_drop_scope();
for statement in statements.iter() {
match statement {
- hir_def::expr::Statement::Let { pat, initializer, else_branch, type_ref: _ } => {
+ hir_def::hir::Statement::Let { pat, initializer, else_branch, type_ref: _ } => {
if let Some(expr_id) = initializer {
let else_block;
let Some((init_place, c)) =
self.lower_expr_as_place(current, *expr_id, true)?
else {
+ scope.pop_assume_dropped(self);
return Ok(None);
};
current = c;
- (current, else_block) = self.pattern_match(
- current,
- None,
- init_place,
- self.expr_ty_after_adjustments(*expr_id),
- *pat,
- BindingAnnotation::Unannotated,
- )?;
+ (current, else_block) =
+ self.pattern_match(current, None, init_place, *pat)?;
match (else_block, else_branch) {
(None, _) => (),
(Some(else_block), None) => {
- self.set_terminator(else_block, Terminator::Unreachable);
+ self.set_terminator(else_block, TerminatorKind::Unreachable, span);
}
(Some(else_block), Some(else_branch)) => {
if let Some((_, b)) =
self.lower_expr_as_place(else_block, *else_branch, true)?
{
- self.set_terminator(b, Terminator::Unreachable);
+ self.set_terminator(b, TerminatorKind::Unreachable, span);
}
}
}
} else {
+ let mut err = None;
self.body.walk_bindings_in_pat(*pat, |b| {
- self.push_storage_live(b, current);
+ if let Err(e) = self.push_storage_live(b, current) {
+ err = Some(e);
+ }
});
+ if let Some(e) = err {
+ return Err(e);
+ }
}
}
- hir_def::expr::Statement::Expr { expr, has_semi: _ } => {
+ hir_def::hir::Statement::Expr { expr, has_semi: _ } => {
+ let scope2 = self.push_drop_scope();
let Some((_, c)) = self.lower_expr_as_place(current, *expr, true)? else {
+ scope2.pop_assume_dropped(self);
+ scope.pop_assume_dropped(self);
return Ok(None);
};
- current = c;
+ current = scope2.pop_and_drop(self, c);
}
}
}
- match tail {
- Some(tail) => self.lower_expr_to_place(tail, place, current),
- None => Ok(Some(current)),
+ if let Some(tail) = tail {
+ let Some(c) = self.lower_expr_to_place(tail, place, current)? else {
+ scope.pop_assume_dropped(self);
+ return Ok(None);
+ };
+ current = c;
}
+ current = scope.pop_and_drop(self, current);
+ Ok(Some(current))
}
-}
-fn pattern_matching_dereference(
- cond_ty: &mut Ty,
- binding_mode: &mut BindingAnnotation,
- cond_place: &mut Place,
-) {
- while let Some((ty, _, mu)) = cond_ty.as_reference() {
- if mu == Mutability::Mut && *binding_mode != BindingAnnotation::Ref {
- *binding_mode = BindingAnnotation::RefMut;
- } else {
- *binding_mode = BindingAnnotation::Ref;
+ fn lower_params_and_bindings(
+ &mut self,
+ params: impl Iterator<Item = (PatId, Ty)> + Clone,
+ pick_binding: impl Fn(BindingId) -> bool,
+ ) -> Result<BasicBlockId> {
+ let base_param_count = self.result.param_locals.len();
+ self.result.param_locals.extend(params.clone().map(|(x, ty)| {
+ let local_id = self.result.locals.alloc(Local { ty });
+ self.drop_scopes.last_mut().unwrap().locals.push(local_id);
+ if let Pat::Bind { id, subpat: None } = self.body[x] {
+ if matches!(
+ self.body.bindings[id].mode,
+ BindingAnnotation::Unannotated | BindingAnnotation::Mutable
+ ) {
+ self.result.binding_locals.insert(id, local_id);
+ }
+ }
+ local_id
+ }));
+ // and then the rest of the bindings
+ for (id, _) in self.body.bindings.iter() {
+ if !pick_binding(id) {
+ continue;
+ }
+ if !self.result.binding_locals.contains_idx(id) {
+ self.result
+ .binding_locals
+ .insert(id, self.result.locals.alloc(Local { ty: self.infer[id].clone() }));
+ }
+ }
+ let mut current = self.result.start_block;
+ for ((param, _), local) in
+ params.zip(self.result.param_locals.clone().into_iter().skip(base_param_count))
+ {
+ if let Pat::Bind { id, .. } = self.body[param] {
+ if local == self.binding_local(id)? {
+ continue;
+ }
+ }
+ let r = self.pattern_match(current, None, local.into(), param)?;
+ if let Some(b) = r.1 {
+ self.set_terminator(b, TerminatorKind::Unreachable, param.into());
+ }
+ current = r.0;
+ }
+ Ok(current)
+ }
+
+ fn binding_local(&self, b: BindingId) -> Result<LocalId> {
+ match self.result.binding_locals.get(b) {
+ Some(x) => Ok(*x),
+ None => {
+ // FIXME: This should never happen, but currently it does in the `const_dependent_on_local` test, which
+ // is a HIR lowering problem IMO.
+ // never!("Using an inaccessible local for a binding is always a bug");
+ Err(MirLowerError::UnaccessableLocal)
+ }
+ }
+ }
+
+ fn const_eval_discriminant(&self, variant: EnumVariantId) -> Result<i128> {
+ let r = self.db.const_eval_discriminant(variant);
+ match r {
+ Ok(r) => Ok(r),
+ Err(e) => {
+ let data = self.db.enum_data(variant.parent);
+ let name = format!(
+ "{}::{}",
+ data.name.display(self.db.upcast()),
+ data.variants[variant.local_id].name.display(self.db.upcast())
+ );
+ Err(MirLowerError::ConstEvalError(name, Box::new(e)))
+ }
+ }
+ }
+
+ fn drop_until_scope(&mut self, scope_index: usize, mut current: BasicBlockId) -> BasicBlockId {
+ for scope in self.drop_scopes[scope_index..].to_vec().iter().rev() {
+ self.emit_drop_and_storage_dead_for_scope(scope, &mut current);
+ }
+ current
+ }
+
+ fn push_drop_scope(&mut self) -> DropScopeToken {
+ self.drop_scopes.push(DropScope::default());
+ DropScopeToken
+ }
+
+ /// Don't call directly
+ fn pop_drop_scope_assume_dropped_internal(&mut self) {
+ self.drop_scopes.pop();
+ }
+
+ /// Don't call directly
+ fn pop_drop_scope_internal(&mut self, mut current: BasicBlockId) -> BasicBlockId {
+ let scope = self.drop_scopes.pop().unwrap();
+ self.emit_drop_and_storage_dead_for_scope(&scope, &mut current);
+ current
+ }
+
+ fn pop_drop_scope_assert_finished(
+ &mut self,
+ mut current: BasicBlockId,
+ ) -> Result<BasicBlockId> {
+ current = self.pop_drop_scope_internal(current);
+ if !self.drop_scopes.is_empty() {
+ implementation_error!("Mismatched count between drop scope push and pops");
+ }
+ Ok(current)
+ }
+
+ fn emit_drop_and_storage_dead_for_scope(
+ &mut self,
+ scope: &DropScope,
+ current: &mut Idx<BasicBlock>,
+ ) {
+ for &l in scope.locals.iter().rev() {
+ if !self.result.locals[l].ty.clone().is_copy(self.db, self.owner) {
+ let prev = std::mem::replace(current, self.new_basic_block());
+ self.set_terminator(
+ prev,
+ TerminatorKind::Drop { place: l.into(), target: *current, unwind: None },
+ MirSpan::Unknown,
+ );
+ }
+ self.push_statement(
+ *current,
+ StatementKind::StorageDead(l).with_span(MirSpan::Unknown),
+ );
}
- *cond_ty = ty.clone();
- cond_place.projection.push(ProjectionElem::Deref);
}
}
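
// The scope's locals are iterated with `.rev()` above because Rust drops
// locals in reverse declaration order. A minimal demonstration:
struct Loud(&'static str);

impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let _first = Loud("first");
    let _second = Loud("second");
    // At scope end this prints "dropping second", then "dropping first".
}
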
@@ -1452,6 +1737,26 @@ fn cast_kind(source_ty: &Ty, target_ty: &Ty) -> Result<CastKind> {
(_, chalk_ir::Scalar::Float(_)) => CastKind::IntToFloat,
(_, _) => CastKind::IntToInt,
},
+ (TyKind::Scalar(_), TyKind::Raw(..)) => CastKind::PointerFromExposedAddress,
+ (TyKind::Raw(..), TyKind::Scalar(_)) => CastKind::PointerExposeAddress,
+ (TyKind::Raw(_, a) | TyKind::Ref(_, _, a), TyKind::Raw(_, b) | TyKind::Ref(_, _, b)) => {
+ CastKind::Pointer(if a == b {
+ PointerCast::MutToConstPointer
+ } else if matches!(a.kind(Interner), TyKind::Slice(_) | TyKind::Str)
+ && matches!(b.kind(Interner), TyKind::Slice(_) | TyKind::Str)
+ {
+ // A slice-to-slice cast is a no-op (the metadata is not touched), so we use this.
+ PointerCast::MutToConstPointer
+ } else if matches!(b.kind(Interner), TyKind::Slice(_) | TyKind::Dyn(_)) {
+ PointerCast::Unsize
+ } else if matches!(a.kind(Interner), TyKind::Slice(s) if s == b) {
+ PointerCast::ArrayToPointer
+ } else {
+ // A cast between two sized pointers, like *const i32 to *const i8. There is no specific
+ // variant for it in `PointerCast`, so we use `MutToConstPointer`.
+ PointerCast::MutToConstPointer
+ })
+ }
// Enum to int casts
(TyKind::Scalar(_), TyKind::Adt(..)) | (TyKind::Adt(..), TyKind::Scalar(_)) => {
CastKind::IntToInt
@@ -1460,20 +1765,123 @@ fn cast_kind(source_ty: &Ty, target_ty: &Ty) -> Result<CastKind> {
})
}
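
// The cast kinds classified above, written out in surface Rust (the comments
// name the CastKind/PointerCast each line should map to):
fn main() {
    let x = [1i32, 2, 3];
    let p = x.as_ptr() as *const u8; // sized-to-sized: MutToConstPointer (no-op)
    let s: *const [i32] = &x; // &[i32; 3] unsizes to a slice pointer: Unsize
    let addr = p as usize; // PointerExposeAddress
    let q = addr as *const u8; // PointerFromExposedAddress
    assert_eq!(q, p);
    assert_eq!(unsafe { (*s).len() }, 3);
}
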
+pub fn mir_body_for_closure_query(
+ db: &dyn HirDatabase,
+ closure: ClosureId,
+) -> Result<Arc<MirBody>> {
+ let (owner, expr) = db.lookup_intern_closure(closure.into());
+ let body = db.body(owner);
+ let infer = db.infer(owner);
+ let Expr::Closure { args, body: root, .. } = &body[expr] else {
+ implementation_error!("closure expression is not closure");
+ };
+ let TyKind::Closure(_, substs) = &infer[expr].kind(Interner) else {
+ implementation_error!("closure expression is not closure");
+ };
+ let (captures, kind) = infer.closure_info(&closure);
+ let mut ctx = MirLowerCtx::new(db, owner, &body, &infer);
+ // 0 is return local
+ ctx.result.locals.alloc(Local { ty: infer[*root].clone() });
+ let closure_local = ctx.result.locals.alloc(Local {
+ ty: match kind {
+ FnTrait::FnOnce => infer[expr].clone(),
+ FnTrait::FnMut => TyKind::Ref(Mutability::Mut, static_lifetime(), infer[expr].clone())
+ .intern(Interner),
+ FnTrait::Fn => TyKind::Ref(Mutability::Not, static_lifetime(), infer[expr].clone())
+ .intern(Interner),
+ },
+ });
+ ctx.result.param_locals.push(closure_local);
+ let Some(sig) = ClosureSubst(substs).sig_ty().callable_sig(db) else {
+ implementation_error!("closure has no callable signature");
+ };
+ let current = ctx.lower_params_and_bindings(
+ args.iter().zip(sig.params().iter()).map(|(x, y)| (*x, y.clone())),
+ |_| true,
+ )?;
+ if let Some(current) = ctx.lower_expr_to_place(*root, return_slot().into(), current)? {
+ let current = ctx.pop_drop_scope_assert_finished(current)?;
+ ctx.set_terminator(current, TerminatorKind::Return, (*root).into());
+ }
+ let mut upvar_map: FxHashMap<LocalId, Vec<(&CapturedItem, usize)>> = FxHashMap::default();
+ for (i, capture) in captures.iter().enumerate() {
+ let local = ctx.binding_local(capture.place.local)?;
+ upvar_map.entry(local).or_default().push((capture, i));
+ }
+ let mut err = None;
+ let closure_local = ctx.result.locals.iter().nth(1).unwrap().0;
+ let closure_projection = match kind {
+ FnTrait::FnOnce => vec![],
+ FnTrait::FnMut | FnTrait::Fn => vec![ProjectionElem::Deref],
+ };
+ ctx.result.walk_places(|p| {
+ if let Some(x) = upvar_map.get(&p.local) {
+ let r = x.iter().find(|x| {
+ if p.projection.len() < x.0.place.projections.len() {
+ return false;
+ }
+ for (x, y) in p.projection.iter().zip(x.0.place.projections.iter()) {
+ match (x, y) {
+ (ProjectionElem::Deref, ProjectionElem::Deref) => (),
+ (ProjectionElem::Field(x), ProjectionElem::Field(y)) if x == y => (),
+ (
+ ProjectionElem::TupleOrClosureField(x),
+ ProjectionElem::TupleOrClosureField(y),
+ ) if x == y => (),
+ _ => return false,
+ }
+ }
+ true
+ });
+ match r {
+ Some(x) => {
+ p.local = closure_local;
+ let mut next_projs = closure_projection.clone();
+ next_projs.push(PlaceElem::TupleOrClosureField(x.1));
+ let prev_projs = mem::take(&mut p.projection);
+ if x.0.kind != CaptureKind::ByValue {
+ next_projs.push(ProjectionElem::Deref);
+ }
+ next_projs.extend(prev_projs.iter().cloned().skip(x.0.place.projections.len()));
+ p.projection = next_projs.into();
+ }
+ None => err = Some(p.clone()),
+ }
+ }
+ });
+ ctx.result.binding_locals = ctx
+ .result
+ .binding_locals
+ .into_iter()
+ .filter(|it| ctx.body.binding_owners.get(&it.0).copied() == Some(expr))
+ .collect();
+ if let Some(err) = err {
+ return Err(MirLowerError::UnresolvedUpvar(err));
+ }
+ ctx.result.shrink_to_fit();
+ Ok(Arc::new(ctx.result))
+}
+
pub fn mir_body_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Result<Arc<MirBody>> {
let _p = profile::span("mir_body_query").detail(|| match def {
- DefWithBodyId::FunctionId(it) => db.function_data(it).name.to_string(),
- DefWithBodyId::StaticId(it) => db.static_data(it).name.clone().to_string(),
- DefWithBodyId::ConstId(it) => {
- db.const_data(it).name.clone().unwrap_or_else(Name::missing).to_string()
- }
+ DefWithBodyId::FunctionId(it) => db.function_data(it).name.display(db.upcast()).to_string(),
+ DefWithBodyId::StaticId(it) => db.static_data(it).name.display(db.upcast()).to_string(),
+ DefWithBodyId::ConstId(it) => db
+ .const_data(it)
+ .name
+ .clone()
+ .unwrap_or_else(Name::missing)
+ .display(db.upcast())
+ .to_string(),
DefWithBodyId::VariantId(it) => {
- db.enum_data(it.parent).variants[it.local_id].name.to_string()
+ db.enum_data(it.parent).variants[it.local_id].name.display(db.upcast()).to_string()
}
+ DefWithBodyId::InTypeConstId(it) => format!("in type const {it:?}"),
});
let body = db.body(def);
let infer = db.infer(def);
- let result = lower_to_mir(db, def, &body, &infer, body.body_expr)?;
+ let mut result = lower_to_mir(db, def, &body, &infer, body.body_expr)?;
+ result.shrink_to_fit();
Ok(Arc::new(result))
}
@@ -1497,85 +1905,40 @@ pub fn lower_to_mir(
if let Some((_, x)) = infer.type_mismatches().next() {
return Err(MirLowerError::TypeMismatch(x.clone()));
}
- let mut basic_blocks = Arena::new();
- let start_block =
- basic_blocks.alloc(BasicBlock { statements: vec![], terminator: None, is_cleanup: false });
- let mut locals = Arena::new();
+ let mut ctx = MirLowerCtx::new(db, owner, body, infer);
// 0 is return local
- locals.alloc(Local { ty: infer[root_expr].clone() });
- let mut binding_locals: ArenaMap<BindingId, LocalId> = ArenaMap::new();
- // 1 to param_len is for params
- let param_locals: Vec<LocalId> = if let DefWithBodyId::FunctionId(fid) = owner {
- let substs = TyBuilder::placeholder_subst(db, fid);
- let callable_sig = db.callable_item_signature(fid.into()).substitute(Interner, &substs);
- body.params
- .iter()
- .zip(callable_sig.params().iter())
- .map(|(&x, ty)| {
- let local_id = locals.alloc(Local { ty: ty.clone() });
- if let Pat::Bind { id, subpat: None } = body[x] {
- if matches!(
- body.bindings[id].mode,
- BindingAnnotation::Unannotated | BindingAnnotation::Mutable
- ) {
- binding_locals.insert(id, local_id);
- }
- }
- local_id
- })
- .collect()
- } else {
- if !body.params.is_empty() {
- return Err(MirLowerError::TypeError("Unexpected parameter for non function body"));
- }
- vec![]
- };
- // and then rest of bindings
- for (id, _) in body.bindings.iter() {
- if !binding_locals.contains_idx(id) {
- binding_locals.insert(id, locals.alloc(Local { ty: infer[id].clone() }));
+ ctx.result.locals.alloc(Local { ty: ctx.expr_ty_after_adjustments(root_expr) });
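+    // Pick only the bindings owned by the body being lowered: for the top-level body
+    // these are the bindings with no recorded owner, otherwise the ones owned by
+    // `root_expr`.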
+ let binding_picker = |b: BindingId| {
+ let owner = ctx.body.binding_owners.get(&b).copied();
+ if root_expr == body.body_expr {
+ owner.is_none()
+ } else {
+ owner == Some(root_expr)
}
- }
- let mir = MirBody {
- basic_blocks,
- locals,
- start_block,
- binding_locals,
- param_locals,
- owner,
- arg_count: body.params.len(),
- };
- let mut ctx = MirLowerCtx {
- result: mir,
- db,
- infer,
- body,
- owner,
- current_loop_blocks: None,
- discr_temp: None,
};
- let mut current = start_block;
- for (&param, local) in body.params.iter().zip(ctx.result.param_locals.clone().into_iter()) {
- if let Pat::Bind { id, .. } = body[param] {
- if local == ctx.result.binding_locals[id] {
- continue;
+ // 1 to param_len is for params
+ // FIXME: replace with let chain once it becomes stable
+ let current = 'b: {
+ if body.body_expr == root_expr {
+ // otherwise it's an inline const, and has no parameter
+ if let DefWithBodyId::FunctionId(fid) = owner {
+ let substs = TyBuilder::placeholder_subst(db, fid);
+ let callable_sig =
+ db.callable_item_signature(fid.into()).substitute(Interner, &substs);
+ break 'b ctx.lower_params_and_bindings(
+ body.params
+ .iter()
+ .zip(callable_sig.params().iter())
+ .map(|(x, y)| (*x, y.clone())),
+ binding_picker,
+ )?;
}
}
- let r = ctx.pattern_match(
- current,
- None,
- local.into(),
- ctx.result.locals[local].ty.clone(),
- param,
- BindingAnnotation::Unannotated,
- )?;
- if let Some(b) = r.1 {
- ctx.set_terminator(b, Terminator::Unreachable);
- }
- current = r.0;
- }
- if let Some(b) = ctx.lower_expr_to_place(root_expr, return_slot().into(), current)? {
- ctx.result.basic_blocks[b].terminator = Some(Terminator::Return);
+ ctx.lower_params_and_bindings([].into_iter(), binding_picker)?
+ };
+ if let Some(current) = ctx.lower_expr_to_place(root_expr, return_slot().into(), current)? {
+ let current = ctx.pop_drop_scope_assert_finished(current)?;
+ ctx.set_terminator(current, TerminatorKind::Return, root_expr.into());
}
Ok(ctx.result)
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs
index fe8147dcd..d2c8d9a08 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs
@@ -1,6 +1,7 @@
//! MIR lowering for places
use super::*;
+use hir_def::{lang_item::lang_attr, FunctionId};
use hir_expand::name;
macro_rules! not_supported {
@@ -15,8 +16,8 @@ impl MirLowerCtx<'_> {
expr_id: ExprId,
prev_block: BasicBlockId,
) -> Result<Option<(Place, BasicBlockId)>> {
- let ty = self.expr_ty(expr_id);
- let place = self.temp(ty)?;
+ let ty = self.expr_ty_without_adjust(expr_id);
+ let place = self.temp(ty, prev_block, expr_id.into())?;
let Some(current) = self.lower_expr_to_place_without_adjust(expr_id, place.into(), prev_block)? else {
return Ok(None);
};
@@ -29,9 +30,11 @@ impl MirLowerCtx<'_> {
prev_block: BasicBlockId,
adjustments: &[Adjustment],
) -> Result<Option<(Place, BasicBlockId)>> {
- let ty =
- adjustments.last().map(|x| x.target.clone()).unwrap_or_else(|| self.expr_ty(expr_id));
- let place = self.temp(ty)?;
+ let ty = adjustments
+ .last()
+ .map(|x| x.target.clone())
+ .unwrap_or_else(|| self.expr_ty_without_adjust(expr_id));
+ let place = self.temp(ty, prev_block, expr_id.into())?;
let Some(current) = self.lower_expr_to_place_with_adjust(expr_id, place.into(), prev_block, adjustments)? else {
return Ok(None);
};
@@ -62,7 +65,7 @@ impl MirLowerCtx<'_> {
)? else {
return Ok(None);
};
- x.0.projection.push(ProjectionElem::Deref);
+ x.0 = x.0.project(ProjectionElem::Deref);
Ok(Some(x))
}
Adjust::Deref(Some(od)) => {
@@ -79,7 +82,7 @@ impl MirLowerCtx<'_> {
r,
rest.last()
.map(|x| x.target.clone())
- .unwrap_or_else(|| self.expr_ty(expr_id)),
+ .unwrap_or_else(|| self.expr_ty_without_adjust(expr_id)),
last.target.clone(),
expr_id.into(),
match od.0 {
@@ -125,35 +128,74 @@ impl MirLowerCtx<'_> {
match &self.body.exprs[expr_id] {
Expr::Path(p) => {
let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id);
- let Some(pr) = resolver.resolve_path_in_value_ns(self.db.upcast(), p.mod_path()) else {
- return Err(MirLowerError::unresolved_path(self.db, p));
- };
- let pr = match pr {
- ResolveValueResult::ValueNs(v) => v,
- ResolveValueResult::Partial(..) => return try_rvalue(self),
+ let Some(pr) = resolver.resolve_path_in_value_ns_fully(self.db.upcast(), p) else {
+ return try_rvalue(self);
};
match pr {
ValueNs::LocalBinding(pat_id) => {
- Ok(Some((self.result.binding_locals[pat_id].into(), current)))
+ Ok(Some((self.binding_local(pat_id)?.into(), current)))
+ }
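+                    // A static used in place position: materialize a `&'static T`
+                    // reference to it in a temporary, then deref that temporary.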
+ ValueNs::StaticId(s) => {
+ let ty = self.expr_ty_without_adjust(expr_id);
+ let ref_ty =
+ TyKind::Ref(Mutability::Not, static_lifetime(), ty).intern(Interner);
+ let temp: Place = self.temp(ref_ty, current, expr_id.into())?.into();
+ self.push_assignment(
+ current,
+ temp.clone(),
+ Operand::Static(s).into(),
+ expr_id.into(),
+ );
+ Ok(Some((temp.project(ProjectionElem::Deref), current)))
}
_ => try_rvalue(self),
}
}
Expr::UnaryOp { expr, op } => match op {
- hir_def::expr::UnaryOp::Deref => {
- if !matches!(
- self.expr_ty(*expr).kind(Interner),
- TyKind::Ref(..) | TyKind::Raw(..)
- ) {
- let Some(_) = self.lower_expr_as_place(current, *expr, true)? else {
+ hir_def::hir::UnaryOp::Deref => {
+ let is_builtin = match self.expr_ty_without_adjust(*expr).kind(Interner) {
+ TyKind::Ref(..) | TyKind::Raw(..) => true,
+ TyKind::Adt(id, _) => {
+ if let Some(lang_item) = lang_attr(self.db.upcast(), id.0) {
+ lang_item == LangItem::OwnedBox
+ } else {
+ false
+ }
+ }
+ _ => false,
+ };
+ if !is_builtin {
+ let Some((p, current)) = self.lower_expr_as_place(current, *expr, true)? else {
return Ok(None);
};
- not_supported!("explicit overloaded deref");
+ return self.lower_overloaded_deref(
+ current,
+ p,
+ self.expr_ty_after_adjustments(*expr),
+ self.expr_ty_without_adjust(expr_id),
+ expr_id.into(),
+ 'b: {
+ if let Some((f, _)) = self.infer.method_resolution(expr_id) {
+ if let Some(deref_trait) =
+ self.resolve_lang_item(LangItem::DerefMut)?.as_trait()
+ {
+ if let Some(deref_fn) = self
+ .db
+ .trait_data(deref_trait)
+ .method_by_name(&name![deref_mut])
+ {
+ break 'b deref_fn == f;
+ }
+ }
+ }
+ false
+ },
+ );
}
let Some((mut r, current)) = self.lower_expr_as_place(current, *expr, true)? else {
return Ok(None);
};
- r.projection.push(ProjectionElem::Deref);
+ r = r.project(ProjectionElem::Deref);
Ok(Some((r, current)))
}
_ => try_rvalue(self),
@@ -169,25 +211,84 @@ impl MirLowerCtx<'_> {
let base_ty = self.expr_ty_after_adjustments(*base);
let index_ty = self.expr_ty_after_adjustments(*index);
if index_ty != TyBuilder::usize()
- || !matches!(base_ty.kind(Interner), TyKind::Array(..) | TyKind::Slice(..))
+ || !matches!(
+ base_ty.strip_reference().kind(Interner),
+ TyKind::Array(..) | TyKind::Slice(..)
+ )
{
- not_supported!("overloaded index");
+ let Some(index_fn) = self.infer.method_resolution(expr_id) else {
+ return Err(MirLowerError::UnresolvedMethod("[overloaded index]".to_string()));
+ };
+ let Some((base_place, current)) = self.lower_expr_as_place(current, *base, true)? else {
+ return Ok(None);
+ };
+ let Some((index_operand, current)) = self.lower_expr_to_some_operand(*index, current)? else {
+ return Ok(None);
+ };
+ return self.lower_overloaded_index(
+ current,
+ base_place,
+ base_ty,
+ self.expr_ty_without_adjust(expr_id),
+ index_operand,
+ expr_id.into(),
+ index_fn,
+ );
}
+ let adjusts = self
+ .infer
+ .expr_adjustments
+ .get(base)
+ .and_then(|x| x.split_last())
+ .map(|x| x.1)
+ .unwrap_or(&[]);
let Some((mut p_base, current)) =
- self.lower_expr_as_place(current, *base, true)? else {
+ self.lower_expr_as_place_with_adjust(current, *base, true, adjusts)?
+ else {
return Ok(None);
};
- let l_index = self.temp(self.expr_ty_after_adjustments(*index))?;
+ let l_index =
+ self.temp(self.expr_ty_after_adjustments(*index), current, expr_id.into())?;
let Some(current) = self.lower_expr_to_place(*index, l_index.into(), current)? else {
return Ok(None);
};
- p_base.projection.push(ProjectionElem::Index(l_index));
+ p_base = p_base.project(ProjectionElem::Index(l_index));
Ok(Some((p_base, current)))
}
_ => try_rvalue(self),
}
}
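+    /// Lowers an overloaded `base[index]` expression to a call to the resolved
+    /// `index`/`index_mut` function (passing the base reference and the index
+    /// operand), followed by a deref of the returned reference.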
+ fn lower_overloaded_index(
+ &mut self,
+ current: BasicBlockId,
+ place: Place,
+ base_ty: Ty,
+ result_ty: Ty,
+ index_operand: Operand,
+ span: MirSpan,
+ index_fn: (FunctionId, Substitution),
+ ) -> Result<Option<(Place, BasicBlockId)>> {
+ let mutability = match base_ty.as_reference() {
+ Some((_, _, mutability)) => mutability,
+ None => Mutability::Not,
+ };
+ let result_ref = TyKind::Ref(mutability, static_lifetime(), result_ty).intern(Interner);
+ let mut result: Place = self.temp(result_ref, current, span)?.into();
+ let index_fn_op = Operand::const_zst(
+ TyKind::FnDef(
+ self.db.intern_callable_def(CallableDefId::FunctionId(index_fn.0)).into(),
+ index_fn.1,
+ )
+ .intern(Interner),
+ );
+ let Some(current) = self.lower_call(index_fn_op, Box::new([Operand::Copy(place), index_operand]), result.clone(), current, false, span)? else {
+ return Ok(None);
+ };
+ result = result.project(ProjectionElem::Deref);
+ Ok(Some((result, current)))
+ }
+
fn lower_overloaded_deref(
&mut self,
current: BasicBlockId,
@@ -209,7 +310,7 @@ impl MirLowerCtx<'_> {
};
let ty_ref = TyKind::Ref(chalk_mut, static_lifetime(), source_ty.clone()).intern(Interner);
let target_ty_ref = TyKind::Ref(chalk_mut, static_lifetime(), target_ty).intern(Interner);
- let ref_place: Place = self.temp(ty_ref)?.into();
+ let ref_place: Place = self.temp(ty_ref, current, span)?.into();
self.push_assignment(current, ref_place.clone(), Rvalue::Ref(borrow_kind, place), span);
let deref_trait = self
.resolve_lang_item(trait_lang_item)?
@@ -227,11 +328,11 @@ impl MirLowerCtx<'_> {
)
.intern(Interner),
);
- let mut result: Place = self.temp(target_ty_ref)?.into();
- let Some(current) = self.lower_call(deref_fn_op, vec![Operand::Copy(ref_place)], result.clone(), current, false)? else {
+ let mut result: Place = self.temp(target_ty_ref, current, span)?.into();
+ let Some(current) = self.lower_call(deref_fn_op, Box::new([Operand::Copy(ref_place)]), result.clone(), current, false, span)? else {
return Ok(None);
};
- result.projection.push(ProjectionElem::Deref);
+ result = result.project(ProjectionElem::Deref);
Ok(Some((result, current)))
}
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs
new file mode 100644
index 000000000..ff43c64a9
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs
@@ -0,0 +1,617 @@
+//! MIR lowering for patterns
+
+use hir_def::{hir::LiteralOrConst, resolver::HasResolver, AssocItemId};
+
+use crate::BindingMode;
+
+use super::*;
+
+macro_rules! not_supported {
+ ($x: expr) => {
+ return Err(MirLowerError::NotSupported(format!($x)))
+ };
+}
+
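+/// The field shape of an ADT pattern: `Foo(a, b)` is `Tuple`, `Foo { f: a }` is
+/// `Record`, and a bare `Foo` is `Unit`.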
+pub(super) enum AdtPatternShape<'a> {
+ Tuple { args: &'a [PatId], ellipsis: Option<usize> },
+ Record { args: &'a [RecordFieldPat] },
+ Unit,
+}
+
+/// We need to do pattern matching in two phases: one to check whether the pattern matches, and one to fill the
+/// pattern's bindings. This is necessary to prevent double moves and similar problems. For example:
+/// ```ignore
+/// struct X;
+/// match (X, 3) {
+/// (b, 2) | (b, 3) => {},
+/// _ => {}
+/// }
+/// ```
+/// If we did everything in one pass, we would move `X` into the first `b`, then discover that the second field of the
+/// tuple doesn't match and `X` should instead be moved into the second `b` (which here is the same binding, but doesn't
+/// need to be); the second pattern might not match either, in which case `X` should not be moved at all.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum MatchingMode {
+    /// Check whether this pattern matches
+ Check,
+    /// Assume that this pattern matches and fill its bindings
+ Bind,
+}
+
+impl MirLowerCtx<'_> {
+    /// Takes a `current` unterminated block, appends statements (and possibly a terminator) to it that check whether
+    /// the pattern matches and write the bindings, and returns two unterminated blocks: one for the matched path
+    /// (which may be the `current` block) and one for the mismatched path. If the input pattern is irrefutable, the
+    /// mismatched path block is `None`.
+ ///
+    /// By default, it creates a new block for the mismatched path. If you already have one, you can pass it via the
+    /// `current_else` argument to save an unnecessary jump. If `current_else` isn't `None`, the resulting mismatched
+    /// path won't be `None` either. Note that this function adds jumps to the beginning of the `current_else` block,
+    /// so it should be an empty block.
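+    ///
+    /// As an illustrative sketch (not the exact MIR), matching `Some(x)` against a
+    /// condition place `cond` lowers roughly to:
+    ///
+    /// ```ignore
+    /// _d = Discriminant(cond);
+    /// switch _d { 1 => 'matched, _ => 'mismatched }  // Check phase
+    /// 'matched: x = (cond as Some).0;                // Bind phase
+    /// ```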
+ pub(super) fn pattern_match(
+ &mut self,
+ current: BasicBlockId,
+ current_else: Option<BasicBlockId>,
+ cond_place: Place,
+ pattern: PatId,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ let (current, current_else) = self.pattern_match_inner(
+ current,
+ current_else,
+ cond_place.clone(),
+ pattern,
+ MatchingMode::Check,
+ )?;
+ let (current, current_else) = self.pattern_match_inner(
+ current,
+ current_else,
+ cond_place,
+ pattern,
+ MatchingMode::Bind,
+ )?;
+ Ok((current, current_else))
+ }
+
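+    /// One phase of `pattern_match`: depending on `mode`, emits either the
+    /// refutability checks (`Check`) or the binding assignments (`Bind`) for
+    /// `pattern` against `cond_place`.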
+ fn pattern_match_inner(
+ &mut self,
+ mut current: BasicBlockId,
+ mut current_else: Option<BasicBlockId>,
+ mut cond_place: Place,
+ pattern: PatId,
+ mode: MatchingMode,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ let cnt = self.infer.pat_adjustments.get(&pattern).map(|x| x.len()).unwrap_or_default();
+ cond_place.projection = cond_place
+ .projection
+ .iter()
+ .cloned()
+ .chain((0..cnt).map(|_| ProjectionElem::Deref))
+ .collect::<Vec<_>>()
+ .into();
+ Ok(match &self.body.pats[pattern] {
+ Pat::Missing => return Err(MirLowerError::IncompletePattern),
+ Pat::Wild => (current, current_else),
+ Pat::Tuple { args, ellipsis } => {
+ let subst = match self.infer[pattern].kind(Interner) {
+ TyKind::Tuple(_, s) => s,
+ _ => {
+ return Err(MirLowerError::TypeError(
+ "non tuple type matched with tuple pattern",
+ ))
+ }
+ };
+ self.pattern_match_tuple_like(
+ current,
+ current_else,
+ args,
+ *ellipsis,
+ (0..subst.len(Interner)).map(|i| PlaceElem::TupleOrClosureField(i)),
+ &(&mut cond_place),
+ mode,
+ )?
+ }
+ Pat::Or(pats) => {
+ let then_target = self.new_basic_block();
+ let mut finished = false;
+ for pat in &**pats {
+ let (mut next, next_else) = self.pattern_match_inner(
+ current,
+ None,
+ (&mut cond_place).clone(),
+ *pat,
+ MatchingMode::Check,
+ )?;
+ if mode == MatchingMode::Bind {
+ (next, _) = self.pattern_match_inner(
+ next,
+ None,
+ (&mut cond_place).clone(),
+ *pat,
+ MatchingMode::Bind,
+ )?;
+ }
+ self.set_goto(next, then_target, pattern.into());
+ match next_else {
+ Some(t) => {
+ current = t;
+ }
+ None => {
+ finished = true;
+ break;
+ }
+ }
+ }
+ if !finished {
+ if mode == MatchingMode::Bind {
+ self.set_terminator(current, TerminatorKind::Unreachable, pattern.into());
+ } else {
+ let ce = *current_else.get_or_insert_with(|| self.new_basic_block());
+ self.set_goto(current, ce, pattern.into());
+ }
+ }
+ (then_target, current_else)
+ }
+ Pat::Record { args, .. } => {
+ let Some(variant) = self.infer.variant_resolution_for_pat(pattern) else {
+ not_supported!("unresolved variant for record");
+ };
+ self.pattern_matching_variant(
+ cond_place,
+ variant,
+ current,
+ pattern.into(),
+ current_else,
+ AdtPatternShape::Record { args: &*args },
+ mode,
+ )?
+ }
+ Pat::Range { start, end } => {
+ let mut add_check = |l: &LiteralOrConst, binop| -> Result<()> {
+ let lv =
+ self.lower_literal_or_const_to_operand(self.infer[pattern].clone(), l)?;
+ let else_target = *current_else.get_or_insert_with(|| self.new_basic_block());
+ let next = self.new_basic_block();
+ let discr: Place =
+ self.temp(TyBuilder::bool(), current, pattern.into())?.into();
+ self.push_assignment(
+ current,
+ discr.clone(),
+ Rvalue::CheckedBinaryOp(
+ binop,
+ lv,
+ Operand::Copy((&mut cond_place).clone()),
+ ),
+ pattern.into(),
+ );
+ let discr = Operand::Copy(discr);
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr,
+ targets: SwitchTargets::static_if(1, next, else_target),
+ },
+ pattern.into(),
+ );
+ current = next;
+ Ok(())
+ };
+ if mode == MatchingMode::Check {
+ if let Some(start) = start {
+ add_check(start, BinOp::Le)?;
+ }
+ if let Some(end) = end {
+ add_check(end, BinOp::Ge)?;
+ }
+ }
+ (current, current_else)
+ }
+ Pat::Slice { prefix, slice, suffix } => {
+ if mode == MatchingMode::Check {
+ // emit runtime length check for slice
+ if let TyKind::Slice(_) = self.infer[pattern].kind(Interner) {
+ let pattern_len = prefix.len() + suffix.len();
+ let place_len: Place =
+ self.temp(TyBuilder::usize(), current, pattern.into())?.into();
+ self.push_assignment(
+ current,
+ place_len.clone(),
+ Rvalue::Len((&mut cond_place).clone()),
+ pattern.into(),
+ );
+ let else_target =
+ *current_else.get_or_insert_with(|| self.new_basic_block());
+ let next = self.new_basic_block();
+ if slice.is_none() {
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr: Operand::Copy(place_len),
+ targets: SwitchTargets::static_if(
+ pattern_len as u128,
+ next,
+ else_target,
+ ),
+ },
+ pattern.into(),
+ );
+ } else {
+ let c = Operand::from_concrete_const(
+ pattern_len.to_le_bytes().to_vec(),
+ MemoryMap::default(),
+ TyBuilder::usize(),
+ );
+ let discr: Place =
+ self.temp(TyBuilder::bool(), current, pattern.into())?.into();
+ self.push_assignment(
+ current,
+ discr.clone(),
+ Rvalue::CheckedBinaryOp(BinOp::Le, c, Operand::Copy(place_len)),
+ pattern.into(),
+ );
+ let discr = Operand::Copy(discr);
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr,
+ targets: SwitchTargets::static_if(1, next, else_target),
+ },
+ pattern.into(),
+ );
+ }
+ current = next;
+ }
+ }
+ for (i, &pat) in prefix.iter().enumerate() {
+ let next_place = (&mut cond_place).project(ProjectionElem::ConstantIndex {
+ offset: i as u64,
+ from_end: false,
+ });
+ (current, current_else) =
+ self.pattern_match_inner(current, current_else, next_place, pat, mode)?;
+ }
+ if let Some(slice) = slice {
+ if mode == MatchingMode::Bind {
+ if let Pat::Bind { id, subpat: _ } = self.body[*slice] {
+ let next_place = (&mut cond_place).project(ProjectionElem::Subslice {
+ from: prefix.len() as u64,
+ to: suffix.len() as u64,
+ });
+ (current, current_else) = self.pattern_match_binding(
+ id,
+ next_place,
+ (*slice).into(),
+ current,
+ current_else,
+ )?;
+ }
+ }
+ }
+ for (i, &pat) in suffix.iter().enumerate() {
+ let next_place = (&mut cond_place).project(ProjectionElem::ConstantIndex {
+ offset: i as u64,
+ from_end: true,
+ });
+ (current, current_else) =
+ self.pattern_match_inner(current, current_else, next_place, pat, mode)?;
+ }
+ (current, current_else)
+ }
+ Pat::Path(p) => match self.infer.variant_resolution_for_pat(pattern) {
+ Some(variant) => self.pattern_matching_variant(
+ cond_place,
+ variant,
+ current,
+ pattern.into(),
+ current_else,
+ AdtPatternShape::Unit,
+ mode,
+ )?,
+ None => {
+ let unresolved_name = || MirLowerError::unresolved_path(self.db, p);
+ let resolver = self.owner.resolver(self.db.upcast());
+ let pr = resolver
+ .resolve_path_in_value_ns(self.db.upcast(), p)
+ .ok_or_else(unresolved_name)?;
+ let (c, subst) = 'b: {
+ if let Some(x) = self.infer.assoc_resolutions_for_pat(pattern) {
+ if let AssocItemId::ConstId(c) = x.0 {
+ break 'b (c, x.1);
+ }
+ }
+ if let ResolveValueResult::ValueNs(v) = pr {
+ if let ValueNs::ConstId(c) = v {
+ break 'b (c, Substitution::empty(Interner));
+ }
+ }
+ not_supported!("path in pattern position that is not const or variant")
+ };
+ let tmp: Place =
+ self.temp(self.infer[pattern].clone(), current, pattern.into())?.into();
+ let span = pattern.into();
+ self.lower_const(
+ c.into(),
+ current,
+ tmp.clone(),
+ subst,
+ span,
+ self.infer[pattern].clone(),
+ )?;
+ let tmp2: Place = self.temp(TyBuilder::bool(), current, pattern.into())?.into();
+ self.push_assignment(
+ current,
+ tmp2.clone(),
+ Rvalue::CheckedBinaryOp(
+ BinOp::Eq,
+ Operand::Copy(tmp),
+ Operand::Copy(cond_place),
+ ),
+ span,
+ );
+ let next = self.new_basic_block();
+ let else_target = current_else.unwrap_or_else(|| self.new_basic_block());
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr: Operand::Copy(tmp2),
+ targets: SwitchTargets::static_if(1, next, else_target),
+ },
+ span,
+ );
+ (next, Some(else_target))
+ }
+ },
+ Pat::Lit(l) => match &self.body.exprs[*l] {
+ Expr::Literal(l) => {
+ let c = self.lower_literal_to_operand(self.infer[pattern].clone(), l)?;
+ if mode == MatchingMode::Check {
+ self.pattern_match_const(current_else, current, c, cond_place, pattern)?
+ } else {
+ (current, current_else)
+ }
+ }
+ _ => not_supported!("expression path literal"),
+ },
+ Pat::Bind { id, subpat } => {
+ if let Some(subpat) = subpat {
+ (current, current_else) = self.pattern_match_inner(
+ current,
+ current_else,
+ (&mut cond_place).clone(),
+ *subpat,
+ mode,
+ )?
+ }
+ if mode == MatchingMode::Bind {
+ self.pattern_match_binding(
+ *id,
+ cond_place,
+ pattern.into(),
+ current,
+ current_else,
+ )?
+ } else {
+ (current, current_else)
+ }
+ }
+ Pat::TupleStruct { path: _, args, ellipsis } => {
+ let Some(variant) = self.infer.variant_resolution_for_pat(pattern) else {
+ not_supported!("unresolved variant");
+ };
+ self.pattern_matching_variant(
+ cond_place,
+ variant,
+ current,
+ pattern.into(),
+ current_else,
+ AdtPatternShape::Tuple { args, ellipsis: *ellipsis },
+ mode,
+ )?
+ }
+ Pat::Ref { pat, mutability: _ } => self.pattern_match_inner(
+ current,
+ current_else,
+ cond_place.project(ProjectionElem::Deref),
+ *pat,
+ mode,
+ )?,
+ Pat::Box { .. } => not_supported!("box pattern"),
+ Pat::ConstBlock(_) => not_supported!("const block pattern"),
+ })
+ }
+
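+    /// Emits the assignment for a single binding: a by-value binding copies or moves
+    /// out of `cond_place`, while `ref`/`ref mut` bindings take a shared or mutable
+    /// borrow of it.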
+ fn pattern_match_binding(
+ &mut self,
+ id: BindingId,
+ cond_place: Place,
+ span: MirSpan,
+ current: BasicBlockId,
+ current_else: Option<BasicBlockId>,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ let target_place = self.binding_local(id)?;
+ let mode = self.infer.binding_modes[id];
+ self.push_storage_live(id, current)?;
+ self.push_assignment(
+ current,
+ target_place.into(),
+ match mode {
+ BindingMode::Move => Operand::Copy(cond_place).into(),
+ BindingMode::Ref(Mutability::Not) => Rvalue::Ref(BorrowKind::Shared, cond_place),
+ BindingMode::Ref(Mutability::Mut) => {
+ Rvalue::Ref(BorrowKind::Mut { allow_two_phase_borrow: false }, cond_place)
+ }
+ },
+ span,
+ );
+ Ok((current, current_else))
+ }
+
+ fn pattern_match_const(
+ &mut self,
+ current_else: Option<BasicBlockId>,
+ current: BasicBlockId,
+ c: Operand,
+ cond_place: Place,
+ pattern: Idx<Pat>,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ let then_target = self.new_basic_block();
+ let else_target = current_else.unwrap_or_else(|| self.new_basic_block());
+ let discr: Place = self.temp(TyBuilder::bool(), current, pattern.into())?.into();
+ self.push_assignment(
+ current,
+ discr.clone(),
+ Rvalue::CheckedBinaryOp(BinOp::Eq, c, Operand::Copy(cond_place)),
+ pattern.into(),
+ );
+ let discr = Operand::Copy(discr);
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr,
+ targets: SwitchTargets::static_if(1, then_target, else_target),
+ },
+ pattern.into(),
+ );
+ Ok((then_target, Some(else_target)))
+ }
+
+ fn pattern_matching_variant(
+ &mut self,
+ cond_place: Place,
+ variant: VariantId,
+ mut current: BasicBlockId,
+ span: MirSpan,
+ mut current_else: Option<BasicBlockId>,
+ shape: AdtPatternShape<'_>,
+ mode: MatchingMode,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ Ok(match variant {
+ VariantId::EnumVariantId(v) => {
+ if mode == MatchingMode::Check {
+ let e = self.const_eval_discriminant(v)? as u128;
+ let tmp = self.discr_temp_place(current);
+ self.push_assignment(
+ current,
+ tmp.clone(),
+ Rvalue::Discriminant(cond_place.clone()),
+ span,
+ );
+ let next = self.new_basic_block();
+ let else_target = current_else.get_or_insert_with(|| self.new_basic_block());
+ self.set_terminator(
+ current,
+ TerminatorKind::SwitchInt {
+ discr: Operand::Copy(tmp),
+ targets: SwitchTargets::static_if(e, next, *else_target),
+ },
+ span,
+ );
+ current = next;
+ }
+ let enum_data = self.db.enum_data(v.parent);
+ self.pattern_matching_variant_fields(
+ shape,
+ &enum_data.variants[v.local_id].variant_data,
+ variant,
+ current,
+ current_else,
+ &cond_place,
+ mode,
+ )?
+ }
+ VariantId::StructId(s) => {
+ let struct_data = self.db.struct_data(s);
+ self.pattern_matching_variant_fields(
+ shape,
+ &struct_data.variant_data,
+ variant,
+ current,
+ current_else,
+ &cond_place,
+ mode,
+ )?
+ }
+ VariantId::UnionId(_) => {
+ return Err(MirLowerError::TypeError("pattern matching on union"))
+ }
+ })
+ }
+
+ fn pattern_matching_variant_fields(
+ &mut self,
+ shape: AdtPatternShape<'_>,
+ variant_data: &VariantData,
+ v: VariantId,
+ current: BasicBlockId,
+ current_else: Option<BasicBlockId>,
+ cond_place: &Place,
+ mode: MatchingMode,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ Ok(match shape {
+ AdtPatternShape::Record { args } => {
+ let it = args
+ .iter()
+ .map(|x| {
+ let field_id =
+ variant_data.field(&x.name).ok_or(MirLowerError::UnresolvedField)?;
+ Ok((
+ PlaceElem::Field(FieldId { parent: v.into(), local_id: field_id }),
+ x.pat,
+ ))
+ })
+ .collect::<Result<Vec<_>>>()?;
+ self.pattern_match_adt(current, current_else, it.into_iter(), cond_place, mode)?
+ }
+ AdtPatternShape::Tuple { args, ellipsis } => {
+ let fields = variant_data
+ .fields()
+ .iter()
+ .map(|(x, _)| PlaceElem::Field(FieldId { parent: v.into(), local_id: x }));
+ self.pattern_match_tuple_like(
+ current,
+ current_else,
+ args,
+ ellipsis,
+ fields,
+ cond_place,
+ mode,
+ )?
+ }
+ AdtPatternShape::Unit => (current, current_else),
+ })
+ }
+
+ fn pattern_match_adt(
+ &mut self,
+ mut current: BasicBlockId,
+ mut current_else: Option<BasicBlockId>,
+ args: impl Iterator<Item = (PlaceElem, PatId)>,
+ cond_place: &Place,
+ mode: MatchingMode,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ for (proj, arg) in args {
+ let cond_place = cond_place.project(proj);
+ (current, current_else) =
+ self.pattern_match_inner(current, current_else, cond_place, arg, mode)?;
+ }
+ Ok((current, current_else))
+ }
+
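+    /// Matches the leading patterns against the leading fields and the trailing
+    /// patterns against the trailing fields, so that an `..` ellipsis skips the
+    /// fields in between; e.g. in `(a, .., z)` only the first and last fields are
+    /// matched.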
+ fn pattern_match_tuple_like(
+ &mut self,
+ current: BasicBlockId,
+ current_else: Option<BasicBlockId>,
+ args: &[PatId],
+ ellipsis: Option<usize>,
+ fields: impl DoubleEndedIterator<Item = PlaceElem> + Clone,
+ cond_place: &Place,
+ mode: MatchingMode,
+ ) -> Result<(BasicBlockId, Option<BasicBlockId>)> {
+ let (al, ar) = args.split_at(ellipsis.unwrap_or(args.len()));
+ let it = al
+ .iter()
+ .zip(fields.clone())
+ .chain(ar.iter().rev().zip(fields.rev()))
+ .map(|(x, y)| (y, *x));
+ self.pattern_match_adt(current, current_else, it, cond_place, mode)
+ }
+}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs
new file mode 100644
index 000000000..ce3f7a8e5
--- /dev/null
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs
@@ -0,0 +1,351 @@
+//! Monomorphization of MIR, which is used by the MIR interpreter and const eval.
+//!
+//! The job of monomorphization is:
+//! * Monomorphization proper. That is, replacing `Option<T>` with `Option<i32>` where the
+//!   substitution `T := i32` is provided
+//! * Normalizing types, for example replacing the RPITs of other functions called in this body.
+//!
+//! So monomorphization should be run even if the substitution is empty.
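+//!
+//! As an illustrative sketch (not actual query output): given
+//!
+//! ```ignore
+//! fn id<T>(x: T) -> T { x }
+//! ```
+//!
+//! monomorphizing the MIR of `id` with the substitution `T := i32` rewrites the
+//! placeholder `T` in every local and operand type to `i32`, so the interpreter
+//! never sees an unsubstituted generic parameter.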
+
+use std::mem;
+
+use chalk_ir::{
+ fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable},
+ ConstData, DebruijnIndex,
+};
+use hir_def::{DefWithBodyId, GeneralConstId};
+use triomphe::Arc;
+
+use crate::{
+ consteval::unknown_const,
+ db::HirDatabase,
+ from_placeholder_idx,
+ infer::normalize,
+ method_resolution::lookup_impl_const,
+ utils::{generics, Generics},
+ ClosureId, Const, Interner, ProjectionTy, Substitution, TraitEnvironment, Ty, TyKind,
+};
+
+use super::{MirBody, MirLowerError, Operand, Rvalue, StatementKind, TerminatorKind};
+
+macro_rules! not_supported {
+ ($x: expr) => {
+ return Err(MirLowerError::NotSupported(format!($x)))
+ };
+}
+
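+/// Type folder that replaces placeholder types/consts with the corresponding entries
+/// of `subst`, evaluates unevaluated constants, and normalizes the results.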
+struct Filler<'a> {
+ db: &'a dyn HirDatabase,
+ trait_env: Arc<TraitEnvironment>,
+ subst: &'a Substitution,
+ generics: Option<Generics>,
+ owner: DefWithBodyId,
+}
+impl FallibleTypeFolder<Interner> for Filler<'_> {
+ type Error = MirLowerError;
+
+ fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder<Interner, Error = Self::Error> {
+ self
+ }
+
+ fn interner(&self) -> Interner {
+ Interner
+ }
+
+ fn try_fold_ty(
+ &mut self,
+ ty: Ty,
+ outer_binder: DebruijnIndex,
+ ) -> std::result::Result<Ty, Self::Error> {
+ match ty.kind(Interner) {
+ TyKind::AssociatedType(id, subst) => {
+ // I don't know exactly if and why this is needed, but it looks like `normalize_ty` likes
+                // this kind of associated type.
+ Ok(TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy {
+ associated_ty_id: *id,
+ substitution: subst.clone().try_fold_with(self, outer_binder)?,
+ }))
+ .intern(Interner))
+ }
+ TyKind::OpaqueType(id, subst) => {
+ let impl_trait_id = self.db.lookup_intern_impl_trait_id((*id).into());
+ let subst = subst.clone().try_fold_with(self.as_dyn(), outer_binder)?;
+ match impl_trait_id {
+ crate::ImplTraitId::ReturnTypeImplTrait(func, idx) => {
+ let infer = self.db.infer(func.into());
+ let filler = &mut Filler {
+ db: self.db,
+ owner: self.owner,
+ trait_env: self.trait_env.clone(),
+ subst: &subst,
+ generics: Some(generics(self.db.upcast(), func.into())),
+ };
+ filler.try_fold_ty(infer.type_of_rpit[idx].clone(), outer_binder)
+ }
+ crate::ImplTraitId::AsyncBlockTypeImplTrait(_, _) => {
+ not_supported!("async block impl trait");
+ }
+ }
+ }
+ _ => ty.try_super_fold_with(self.as_dyn(), outer_binder),
+ }
+ }
+
+ fn try_fold_free_placeholder_const(
+ &mut self,
+ _ty: chalk_ir::Ty<Interner>,
+ idx: chalk_ir::PlaceholderIndex,
+ _outer_binder: DebruijnIndex,
+ ) -> std::result::Result<chalk_ir::Const<Interner>, Self::Error> {
+ let x = from_placeholder_idx(self.db, idx);
+ let Some(idx) = self.generics.as_ref().and_then(|g| g.param_idx(x)) else {
+ not_supported!("missing idx in generics");
+ };
+ Ok(self
+ .subst
+ .as_slice(Interner)
+ .get(idx)
+ .and_then(|x| x.constant(Interner))
+ .ok_or_else(|| MirLowerError::GenericArgNotProvided(x, self.subst.clone()))?
+ .clone())
+ }
+
+ fn try_fold_free_placeholder_ty(
+ &mut self,
+ idx: chalk_ir::PlaceholderIndex,
+ _outer_binder: DebruijnIndex,
+ ) -> std::result::Result<Ty, Self::Error> {
+ let x = from_placeholder_idx(self.db, idx);
+ let Some(idx) = self.generics.as_ref().and_then(|g| g.param_idx(x)) else {
+ not_supported!("missing idx in generics");
+ };
+ Ok(self
+ .subst
+ .as_slice(Interner)
+ .get(idx)
+ .and_then(|x| x.ty(Interner))
+ .ok_or_else(|| MirLowerError::GenericArgNotProvided(x, self.subst.clone()))?
+ .clone())
+ }
+
+ fn try_fold_const(
+ &mut self,
+ constant: chalk_ir::Const<Interner>,
+ outer_binder: DebruijnIndex,
+ ) -> Result<chalk_ir::Const<Interner>, Self::Error> {
+ let next_ty = normalize(
+ self.db,
+ self.trait_env.clone(),
+ constant.data(Interner).ty.clone().try_fold_with(self, outer_binder)?,
+ );
+ ConstData { ty: next_ty, value: constant.data(Interner).value.clone() }
+ .intern(Interner)
+ .try_super_fold_with(self, outer_binder)
+ }
+}
+
+impl Filler<'_> {
+ fn fill_ty(&mut self, ty: &mut Ty) -> Result<(), MirLowerError> {
+ let tmp = mem::replace(ty, TyKind::Error.intern(Interner));
+ *ty = normalize(
+ self.db,
+ self.trait_env.clone(),
+ tmp.try_fold_with(self, DebruijnIndex::INNERMOST)?,
+ );
+ Ok(())
+ }
+
+ fn fill_const(&mut self, c: &mut Const) -> Result<(), MirLowerError> {
+ let tmp = mem::replace(c, unknown_const(c.data(Interner).ty.clone()));
+ *c = tmp.try_fold_with(self, DebruijnIndex::INNERMOST)?;
+ Ok(())
+ }
+
+ fn fill_subst(&mut self, ty: &mut Substitution) -> Result<(), MirLowerError> {
+ let tmp = mem::replace(ty, Substitution::empty(Interner));
+ *ty = tmp.try_fold_with(self, DebruijnIndex::INNERMOST)?;
+ Ok(())
+ }
+
+ fn fill_operand(&mut self, op: &mut Operand) -> Result<(), MirLowerError> {
+ match op {
+ Operand::Constant(c) => {
+ match &c.data(Interner).value {
+ chalk_ir::ConstValue::BoundVar(b) => {
+ let resolved = self
+ .subst
+ .as_slice(Interner)
+ .get(b.index)
+ .ok_or_else(|| {
+ MirLowerError::GenericArgNotProvided(
+ self.generics
+ .as_ref()
+ .and_then(|x| x.iter().nth(b.index))
+ .unwrap()
+ .0,
+ self.subst.clone(),
+ )
+ })?
+ .assert_const_ref(Interner);
+ *c = resolved.clone();
+ }
+ chalk_ir::ConstValue::InferenceVar(_)
+ | chalk_ir::ConstValue::Placeholder(_) => {}
+ chalk_ir::ConstValue::Concrete(cc) => match &cc.interned {
+ crate::ConstScalar::UnevaluatedConst(const_id, subst) => {
+ let mut const_id = *const_id;
+ let mut subst = subst.clone();
+ self.fill_subst(&mut subst)?;
+ if let GeneralConstId::ConstId(c) = const_id {
+ let (c, s) = lookup_impl_const(
+ self.db,
+ self.db.trait_environment_for_body(self.owner),
+ c,
+ subst,
+ );
+ const_id = GeneralConstId::ConstId(c);
+ subst = s;
+ }
+ let result =
+ self.db.const_eval(const_id.into(), subst).map_err(|e| {
+ let name = const_id.name(self.db.upcast());
+ MirLowerError::ConstEvalError(name, Box::new(e))
+ })?;
+ *c = result;
+ }
+ crate::ConstScalar::Bytes(_, _) | crate::ConstScalar::Unknown => (),
+ },
+ }
+ self.fill_const(c)?;
+ }
+ Operand::Copy(_) | Operand::Move(_) | Operand::Static(_) => (),
+ }
+ Ok(())
+ }
+
+ fn fill_body(&mut self, body: &mut MirBody) -> Result<(), MirLowerError> {
+ for (_, l) in body.locals.iter_mut() {
+ self.fill_ty(&mut l.ty)?;
+ }
+ for (_, bb) in body.basic_blocks.iter_mut() {
+ for statement in &mut bb.statements {
+ match &mut statement.kind {
+ StatementKind::Assign(_, r) => match r {
+ Rvalue::Aggregate(ak, ops) => {
+ for op in &mut **ops {
+ self.fill_operand(op)?;
+ }
+ match ak {
+ super::AggregateKind::Array(ty)
+ | super::AggregateKind::Tuple(ty)
+ | super::AggregateKind::Closure(ty) => self.fill_ty(ty)?,
+ super::AggregateKind::Adt(_, subst) => self.fill_subst(subst)?,
+ super::AggregateKind::Union(_, _) => (),
+ }
+ }
+ Rvalue::ShallowInitBox(_, ty) | Rvalue::ShallowInitBoxWithAlloc(ty) => {
+ self.fill_ty(ty)?;
+ }
+ Rvalue::Use(op) => {
+ self.fill_operand(op)?;
+ }
+ Rvalue::Repeat(op, len) => {
+ self.fill_operand(op)?;
+ self.fill_const(len)?;
+ }
+ Rvalue::Ref(_, _)
+ | Rvalue::Len(_)
+ | Rvalue::Cast(_, _, _)
+ | Rvalue::CheckedBinaryOp(_, _, _)
+ | Rvalue::UnaryOp(_, _)
+ | Rvalue::Discriminant(_)
+ | Rvalue::CopyForDeref(_) => (),
+ },
+ StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Nop => (),
+ }
+ }
+ if let Some(terminator) = &mut bb.terminator {
+ match &mut terminator.kind {
+ TerminatorKind::Call { func, args, .. } => {
+ self.fill_operand(func)?;
+ for op in &mut **args {
+ self.fill_operand(op)?;
+ }
+ }
+ TerminatorKind::SwitchInt { discr, .. } => {
+ self.fill_operand(discr)?;
+ }
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => (),
+ }
+ }
+ }
+ Ok(())
+ }
+}
+
+pub fn monomorphized_mir_body_query(
+ db: &dyn HirDatabase,
+ owner: DefWithBodyId,
+ subst: Substitution,
+ trait_env: Arc<crate::TraitEnvironment>,
+) -> Result<Arc<MirBody>, MirLowerError> {
+ let generics = owner.as_generic_def_id().map(|g_def| generics(db.upcast(), g_def));
+ let filler = &mut Filler { db, subst: &subst, trait_env, generics, owner };
+ let body = db.mir_body(owner)?;
+ let mut body = (*body).clone();
+ filler.fill_body(&mut body)?;
+ Ok(Arc::new(body))
+}
+
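+// Salsa cycle recovery for `monomorphized_mir_body_query`: a dependency cycle while
+// monomorphizing a body is reported as `MirLowerError::Loop`.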
+pub fn monomorphized_mir_body_recover(
+ _: &dyn HirDatabase,
+ _: &[String],
+ _: &DefWithBodyId,
+ _: &Substitution,
+ _: &Arc<crate::TraitEnvironment>,
+) -> Result<Arc<MirBody>, MirLowerError> {
+    Err(MirLowerError::Loop)
+}
+
+pub fn monomorphized_mir_body_for_closure_query(
+ db: &dyn HirDatabase,
+ closure: ClosureId,
+ subst: Substitution,
+ trait_env: Arc<crate::TraitEnvironment>,
+) -> Result<Arc<MirBody>, MirLowerError> {
+ let (owner, _) = db.lookup_intern_closure(closure.into());
+ let generics = owner.as_generic_def_id().map(|g_def| generics(db.upcast(), g_def));
+ let filler = &mut Filler { db, subst: &subst, trait_env, generics, owner };
+ let body = db.mir_body_for_closure(closure)?;
+ let mut body = (*body).clone();
+ filler.fill_body(&mut body)?;
+ Ok(Arc::new(body))
+}
+
+// FIXME: remove this function. Monomorphization is a time-consuming job and should always be a query.
+pub fn monomorphize_mir_body_bad(
+ db: &dyn HirDatabase,
+ mut body: MirBody,
+ subst: Substitution,
+ trait_env: Arc<crate::TraitEnvironment>,
+) -> Result<MirBody, MirLowerError> {
+ let owner = body.owner;
+ let generics = owner.as_generic_def_id().map(|g_def| generics(db.upcast(), g_def));
+ let filler = &mut Filler { db, subst: &subst, trait_env, generics, owner };
+ filler.fill_body(&mut body)?;
+ Ok(body)
+}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs
index ffc08b7e3..ac23e77bd 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs
@@ -1,28 +1,83 @@
//! A pretty-printer for MIR.
-use std::fmt::{Display, Write};
+use std::{
+ fmt::{Debug, Display, Write},
+ mem,
+};
-use hir_def::{body::Body, expr::BindingId};
+use hir_def::{body::Body, hir::BindingId};
use hir_expand::name::Name;
use la_arena::ArenaMap;
use crate::{
db::HirDatabase,
- display::HirDisplay,
- mir::{PlaceElem, ProjectionElem, StatementKind, Terminator},
+ display::{ClosureStyle, HirDisplay},
+ mir::{PlaceElem, ProjectionElem, StatementKind, TerminatorKind},
+ ClosureId,
};
use super::{
AggregateKind, BasicBlockId, BorrowKind, LocalId, MirBody, Operand, Place, Rvalue, UnOp,
};
+macro_rules! w {
+ ($dst:expr, $($arg:tt)*) => {
+ { let _ = write!($dst, $($arg)*); }
+ };
+}
+
+macro_rules! wln {
+ ($dst:expr) => {
+ { let _ = writeln!($dst); }
+ };
+ ($dst:expr, $($arg:tt)*) => {
+ { let _ = writeln!($dst, $($arg)*); }
+ };
+}
+
impl MirBody {
pub fn pretty_print(&self, db: &dyn HirDatabase) -> String {
let hir_body = db.body(self.owner);
let mut ctx = MirPrettyCtx::new(self, &hir_body, db);
- ctx.for_body();
+ ctx.for_body(|this| match ctx.body.owner {
+ hir_def::DefWithBodyId::FunctionId(id) => {
+ let data = db.function_data(id);
+ w!(this, "fn {}() ", data.name.display(db.upcast()));
+ }
+ hir_def::DefWithBodyId::StaticId(id) => {
+ let data = db.static_data(id);
+ w!(this, "static {}: _ = ", data.name.display(db.upcast()));
+ }
+ hir_def::DefWithBodyId::ConstId(id) => {
+ let data = db.const_data(id);
+ w!(
+ this,
+ "const {}: _ = ",
+ data.name.as_ref().unwrap_or(&Name::missing()).display(db.upcast())
+ );
+ }
+ hir_def::DefWithBodyId::VariantId(id) => {
+ let data = db.enum_data(id.parent);
+ w!(this, "enum {} = ", data.name.display(db.upcast()));
+ }
+ hir_def::DefWithBodyId::InTypeConstId(id) => {
+ w!(this, "in type const {id:?} = ");
+ }
+ });
ctx.result
}
+
+    // Strings with newlines are rendered poorly by the `dbg!` macro, which I use a lot, so this
+    // function exists to work around that.
+ pub fn dbg(&self, db: &dyn HirDatabase) -> impl Debug {
+ struct StringDbg(String);
+ impl Debug for StringDbg {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.write_str(&self.0)
+ }
+ }
+ StringDbg(self.pretty_print(db))
+ }
}
struct MirPrettyCtx<'a> {
@@ -30,25 +85,10 @@ struct MirPrettyCtx<'a> {
hir_body: &'a Body,
db: &'a dyn HirDatabase,
result: String,
- ident: String,
+ indent: String,
local_to_binding: ArenaMap<LocalId, BindingId>,
}
-macro_rules! w {
- ($dst:expr, $($arg:tt)*) => {
- { let _ = write!($dst, $($arg)*); }
- };
-}
-
-macro_rules! wln {
- ($dst:expr) => {
- { let _ = writeln!($dst); }
- };
- ($dst:expr, $($arg:tt)*) => {
- { let _ = writeln!($dst, $($arg)*); }
- };
-}
-
impl Write for MirPrettyCtx<'_> {
fn write_str(&mut self, s: &str) -> std::fmt::Result {
let mut it = s.split('\n'); // note: `.lines()` is wrong here
@@ -66,31 +106,62 @@ enum LocalName {
Binding(Name, LocalId),
}
-impl Display for LocalName {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl HirDisplay for LocalName {
+ fn hir_fmt(
+ &self,
+ f: &mut crate::display::HirFormatter<'_>,
+ ) -> Result<(), crate::display::HirDisplayError> {
match self {
LocalName::Unknown(l) => write!(f, "_{}", u32::from(l.into_raw())),
- LocalName::Binding(n, l) => write!(f, "{n}_{}", u32::from(l.into_raw())),
+ LocalName::Binding(n, l) => {
+ write!(f, "{}_{}", n.display(f.db.upcast()), u32::from(l.into_raw()))
+ }
}
}
}
impl<'a> MirPrettyCtx<'a> {
- fn for_body(&mut self) {
+ fn for_body(&mut self, name: impl FnOnce(&mut MirPrettyCtx<'_>)) {
+ name(self);
self.with_block(|this| {
this.locals();
wln!(this);
this.blocks();
});
+ for &closure in &self.body.closures {
+ self.for_closure(closure);
+ }
+ }
+
+ fn for_closure(&mut self, closure: ClosureId) {
+ let body = match self.db.mir_body_for_closure(closure) {
+ Ok(x) => x,
+ Err(e) => {
+ wln!(self, "// error in {closure:?}: {e:?}");
+ return;
+ }
+ };
+ let result = mem::take(&mut self.result);
+ let indent = mem::take(&mut self.indent);
+ let mut ctx = MirPrettyCtx {
+ body: &body,
+ local_to_binding: body.binding_locals.iter().map(|(x, y)| (*y, x)).collect(),
+ result,
+ indent,
+ ..*self
+ };
+ ctx.for_body(|this| wln!(this, "// Closure: {:?}", closure));
+ self.result = ctx.result;
+ self.indent = ctx.indent;
}
fn with_block(&mut self, f: impl FnOnce(&mut MirPrettyCtx<'_>)) {
- self.ident += " ";
+ self.indent += " ";
wln!(self, "{{");
f(self);
for _ in 0..4 {
self.result.pop();
- self.ident.pop();
+ self.indent.pop();
}
wln!(self, "}}");
}
@@ -101,7 +172,7 @@ impl<'a> MirPrettyCtx<'a> {
body,
db,
result: String::new(),
- ident: String::new(),
+ indent: String::new(),
local_to_binding,
hir_body,
}
@@ -109,7 +180,7 @@ impl<'a> MirPrettyCtx<'a> {
fn write_line(&mut self) {
self.result.push('\n');
- self.result += &self.ident;
+ self.result += &self.indent;
}
fn write(&mut self, line: &str) {
@@ -118,7 +189,12 @@ impl<'a> MirPrettyCtx<'a> {
fn locals(&mut self) {
for (id, local) in self.body.locals.iter() {
- wln!(self, "let {}: {};", self.local_name(id), local.ty.display(self.db));
+ wln!(
+ self,
+ "let {}: {};",
+ self.local_name(id).display(self.db),
+ self.hir_display(&local.ty)
+ );
}
}
@@ -147,10 +223,10 @@ impl<'a> MirPrettyCtx<'a> {
wln!(this, ";");
}
StatementKind::StorageDead(p) => {
- wln!(this, "StorageDead({})", this.local_name(*p));
+ wln!(this, "StorageDead({})", this.local_name(*p).display(self.db));
}
StatementKind::StorageLive(p) => {
- wln!(this, "StorageLive({})", this.local_name(*p));
+ wln!(this, "StorageLive({})", this.local_name(*p).display(self.db));
}
StatementKind::Deinit(p) => {
w!(this, "Deinit(");
@@ -161,11 +237,11 @@ impl<'a> MirPrettyCtx<'a> {
}
}
match &block.terminator {
- Some(terminator) => match terminator {
- Terminator::Goto { target } => {
+ Some(terminator) => match &terminator.kind {
+ TerminatorKind::Goto { target } => {
wln!(this, "goto 'bb{};", u32::from(target.into_raw()))
}
- Terminator::SwitchInt { discr, targets } => {
+ TerminatorKind::SwitchInt { discr, targets } => {
w!(this, "switch ");
this.operand(discr);
w!(this, " ");
@@ -176,7 +252,7 @@ impl<'a> MirPrettyCtx<'a> {
wln!(this, "_ => {},", this.basic_block_id(targets.otherwise()));
});
}
- Terminator::Call { func, args, destination, target, .. } => {
+ TerminatorKind::Call { func, args, destination, target, .. } => {
w!(this, "Call ");
this.with_block(|this| {
w!(this, "func: ");
@@ -208,7 +284,7 @@ impl<'a> MirPrettyCtx<'a> {
fn f(this: &mut MirPrettyCtx<'_>, local: LocalId, projections: &[PlaceElem]) {
let Some((last, head)) = projections.split_last() else {
// no projection
- w!(this, "{}", this.local_name(local));
+ w!(this, "{}", this.local_name(local).display(this.db));
return;
};
match last {
@@ -226,21 +302,26 @@ impl<'a> MirPrettyCtx<'a> {
f(this, local, head);
let variant_name =
&this.db.enum_data(e.parent).variants[e.local_id].name;
- w!(this, " as {}).{}", variant_name, name);
+ w!(
+ this,
+ " as {}).{}",
+ variant_name.display(this.db.upcast()),
+ name.display(this.db.upcast())
+ );
}
hir_def::VariantId::StructId(_) | hir_def::VariantId::UnionId(_) => {
f(this, local, head);
- w!(this, ".{name}");
+ w!(this, ".{}", name.display(this.db.upcast()));
}
}
}
- ProjectionElem::TupleField(x) => {
+ ProjectionElem::TupleOrClosureField(x) => {
f(this, local, head);
w!(this, ".{}", x);
}
ProjectionElem::Index(l) => {
f(this, local, head);
- w!(this, "[{}]", this.local_name(*l));
+ w!(this, "[{}]", this.local_name(*l).display(this.db));
}
x => {
f(this, local, head);
@@ -258,7 +339,8 @@ impl<'a> MirPrettyCtx<'a> {
// equally. Feel free to change it.
self.place(p);
}
- Operand::Constant(c) => w!(self, "Const({})", c.display(self.db)),
+ Operand::Constant(c) => w!(self, "Const({})", self.hir_display(c)),
+ Operand::Static(s) => w!(self, "Static({:?})", s),
}
}
@@ -284,11 +366,21 @@ impl<'a> MirPrettyCtx<'a> {
self.operand_list(x);
w!(self, "]");
}
+ Rvalue::Repeat(op, len) => {
+ w!(self, "[");
+ self.operand(op);
+ w!(self, "; {}]", len.display(self.db));
+ }
Rvalue::Aggregate(AggregateKind::Adt(_, _), x) => {
w!(self, "Adt(");
self.operand_list(x);
w!(self, ")");
}
+ Rvalue::Aggregate(AggregateKind::Closure(_), x) => {
+ w!(self, "Closure(");
+ self.operand_list(x);
+ w!(self, ")");
+ }
Rvalue::Aggregate(AggregateKind::Union(_, _), x) => {
w!(self, "Union(");
self.operand_list(x);
@@ -300,9 +392,9 @@ impl<'a> MirPrettyCtx<'a> {
w!(self, ")");
}
Rvalue::Cast(ck, op, ty) => {
- w!(self, "Discriminant({ck:?}");
+ w!(self, "Cast({ck:?}, ");
self.operand(op);
- w!(self, "{})", ty.display(self.db));
+ w!(self, ", {})", self.hir_display(ty));
}
Rvalue::CheckedBinaryOp(b, o1, o2) => {
self.operand(o1);
@@ -322,6 +414,7 @@ impl<'a> MirPrettyCtx<'a> {
self.place(p);
w!(self, ")");
}
+ Rvalue::ShallowInitBoxWithAlloc(_) => w!(self, "ShallowInitBoxWithAlloc"),
Rvalue::ShallowInitBox(op, _) => {
w!(self, "ShallowInitBox(");
self.operand(op);
@@ -345,4 +438,8 @@ impl<'a> MirPrettyCtx<'a> {
}
}
}
+
+ fn hir_display<T: HirDisplay>(&self, ty: &'a T) -> impl Display + 'a {
+ ty.display(self.db).with_closure_style(ClosureStyle::ClosureWithSubst)
+ }
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/test_db.rs b/src/tools/rust-analyzer/crates/hir-ty/src/test_db.rs
index 8c48331b9..7d19e0a19 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/test_db.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/test_db.rs
@@ -1,18 +1,18 @@
//! Database used for testing `hir`.
-use std::{
- fmt, panic,
- sync::{Arc, Mutex},
-};
+use std::{fmt, panic, sync::Mutex};
use base_db::{
- salsa, AnchoredPath, CrateId, FileId, FileLoader, FileLoaderDelegate, SourceDatabase, Upcast,
+ salsa::{self, Durability},
+ AnchoredPath, CrateId, FileId, FileLoader, FileLoaderDelegate, SourceDatabase, Upcast,
};
use hir_def::{db::DefDatabase, ModuleId};
use hir_expand::db::ExpandDatabase;
-use stdx::hash::{NoHashHashMap, NoHashHashSet};
+use nohash_hasher::IntMap;
+use rustc_hash::FxHashSet;
use syntax::TextRange;
use test_utils::extract_annotations;
+use triomphe::Arc;
#[salsa::database(
base_db::SourceDatabaseExtStorage,
@@ -30,7 +30,7 @@ pub(crate) struct TestDB {
impl Default for TestDB {
fn default() -> Self {
let mut this = Self { storage: Default::default(), events: Default::default() };
- this.set_enable_proc_attr_macros(true);
+ this.set_expand_proc_attr_macros_with_durability(true, Durability::HIGH);
this
}
}
@@ -74,13 +74,13 @@ impl salsa::ParallelDatabase for TestDB {
impl panic::RefUnwindSafe for TestDB {}
impl FileLoader for TestDB {
- fn file_text(&self, file_id: FileId) -> Arc<String> {
+ fn file_text(&self, file_id: FileId) -> Arc<str> {
FileLoaderDelegate(self).file_text(file_id)
}
fn resolve_path(&self, path: AnchoredPath<'_>) -> Option<FileId> {
FileLoaderDelegate(self).resolve_path(path)
}
- fn relevant_crates(&self, file_id: FileId) -> Arc<NoHashHashSet<CrateId>> {
+ fn relevant_crates(&self, file_id: FileId) -> Arc<FxHashSet<CrateId>> {
FileLoaderDelegate(self).relevant_crates(file_id)
}
}
@@ -102,7 +102,7 @@ impl TestDB {
self.module_for_file_opt(file_id).unwrap()
}
- pub(crate) fn extract_annotations(&self) -> NoHashHashMap<FileId, Vec<(TextRange, String)>> {
+ pub(crate) fn extract_annotations(&self) -> IntMap<FileId, Vec<(TextRange, String)>> {
let mut files = Vec::new();
let crate_graph = self.crate_graph();
for krate in crate_graph.iter() {
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs
index 83d31f002..857141280 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs
@@ -10,14 +10,14 @@ mod display_source_code;
mod incremental;
mod diagnostics;
-use std::{collections::HashMap, env, sync::Arc};
+use std::{collections::HashMap, env};
use base_db::{fixture::WithFixture, FileRange, SourceDatabaseExt};
use expect_test::Expect;
use hir_def::{
body::{Body, BodySourceMap, SyntheticSyntax},
db::{DefDatabase, InternDatabase},
- expr::{ExprId, PatId},
+ hir::{ExprId, Pat, PatId},
item_scope::ItemScope,
nameres::DefMap,
src::HasSource,
@@ -32,6 +32,7 @@ use syntax::{
};
use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry};
use tracing_tree::HierarchicalLayer;
+use triomphe::Arc;
use crate::{
db::HirDatabase,
@@ -145,13 +146,17 @@ fn check_impl(ra_fixture: &str, allow_none: bool, only_types: bool, display_sour
let loc = db.lookup_intern_enum(it.parent);
loc.source(&db).value.syntax().text_range().start()
}
+ DefWithBodyId::InTypeConstId(it) => it.source(&db).syntax().text_range().start(),
});
let mut unexpected_type_mismatches = String::new();
for def in defs {
- let (_body, body_source_map) = db.body_with_source_map(def);
+ let (body, body_source_map) = db.body_with_source_map(def);
let inference_result = db.infer(def);
- for (pat, ty) in inference_result.type_of_pat.iter() {
+ for (pat, mut ty) in inference_result.type_of_pat.iter() {
+ if let Pat::Bind { id, .. } = body.pats[pat] {
+ ty = &inference_result.type_of_binding[id];
+ }
let node = match pat_node(&body_source_map, pat, &db) {
Some(value) => value,
None => continue,
@@ -159,7 +164,7 @@ fn check_impl(ra_fixture: &str, allow_none: bool, only_types: bool, display_sour
let range = node.as_ref().original_file_range(&db);
if let Some(expected) = types.remove(&range) {
let actual = if display_source {
- ty.display_source_code(&db, def.module(&db)).unwrap()
+ ty.display_source_code(&db, def.module(&db), true).unwrap()
} else {
ty.display_test(&db).to_string()
};
@@ -175,7 +180,7 @@ fn check_impl(ra_fixture: &str, allow_none: bool, only_types: bool, display_sour
let range = node.as_ref().original_file_range(&db);
if let Some(expected) = types.remove(&range) {
let actual = if display_source {
- ty.display_source_code(&db, def.module(&db)).unwrap()
+ ty.display_source_code(&db, def.module(&db), true).unwrap()
} else {
ty.display_test(&db).to_string()
};
@@ -198,8 +203,8 @@ fn check_impl(ra_fixture: &str, allow_none: bool, only_types: bool, display_sour
for (expr_or_pat, mismatch) in inference_result.type_mismatches() {
let Some(node) = (match expr_or_pat {
- hir_def::expr::ExprOrPatId::ExprId(expr) => expr_node(&body_source_map, expr, &db),
- hir_def::expr::ExprOrPatId::PatId(pat) => pat_node(&body_source_map, pat, &db),
+ hir_def::hir::ExprOrPatId::ExprId(expr) => expr_node(&body_source_map, expr, &db),
+ hir_def::hir::ExprOrPatId::PatId(pat) => pat_node(&body_source_map, pat, &db),
}) else { continue; };
let range = node.as_ref().original_file_range(&db);
let actual = format!(
@@ -246,7 +251,7 @@ fn expr_node(
) -> Option<InFile<SyntaxNode>> {
Some(match body_source_map.expr_syntax(expr) {
Ok(sp) => {
- let root = db.parse_or_expand(sp.file_id).unwrap();
+ let root = db.parse_or_expand(sp.file_id);
sp.map(|ptr| ptr.to_node(&root).syntax().clone())
}
Err(SyntheticSyntax) => return None,
@@ -260,7 +265,7 @@ fn pat_node(
) -> Option<InFile<SyntaxNode>> {
Some(match body_source_map.pat_syntax(pat) {
Ok(sp) => {
- let root = db.parse_or_expand(sp.file_id).unwrap();
+ let root = db.parse_or_expand(sp.file_id);
sp.map(|ptr| {
ptr.either(
|it| it.to_node(&root).syntax().clone(),
@@ -283,14 +288,18 @@ fn infer_with_mismatches(content: &str, include_mismatches: bool) -> String {
let mut buf = String::new();
let mut infer_def = |inference_result: Arc<InferenceResult>,
+ body: Arc<Body>,
body_source_map: Arc<BodySourceMap>| {
let mut types: Vec<(InFile<SyntaxNode>, &Ty)> = Vec::new();
let mut mismatches: Vec<(InFile<SyntaxNode>, &TypeMismatch)> = Vec::new();
- for (pat, ty) in inference_result.type_of_pat.iter() {
+ for (pat, mut ty) in inference_result.type_of_pat.iter() {
+ if let Pat::Bind { id, .. } = body.pats[pat] {
+ ty = &inference_result.type_of_binding[id];
+ }
let syntax_ptr = match body_source_map.pat_syntax(pat) {
Ok(sp) => {
- let root = db.parse_or_expand(sp.file_id).unwrap();
+ let root = db.parse_or_expand(sp.file_id);
sp.map(|ptr| {
ptr.either(
|it| it.to_node(&root).syntax().clone(),
@@ -309,7 +318,7 @@ fn infer_with_mismatches(content: &str, include_mismatches: bool) -> String {
for (expr, ty) in inference_result.type_of_expr.iter() {
let node = match body_source_map.expr_syntax(expr) {
Ok(sp) => {
- let root = db.parse_or_expand(sp.file_id).unwrap();
+ let root = db.parse_or_expand(sp.file_id);
sp.map(|ptr| ptr.to_node(&root).syntax().clone())
}
Err(SyntheticSyntax) => continue,
@@ -383,11 +392,12 @@ fn infer_with_mismatches(content: &str, include_mismatches: bool) -> String {
let loc = db.lookup_intern_enum(it.parent);
loc.source(&db).value.syntax().text_range().start()
}
+ DefWithBodyId::InTypeConstId(it) => it.source(&db).syntax().text_range().start(),
});
for def in defs {
- let (_body, source_map) = db.body_with_source_map(def);
+ let (body, source_map) = db.body_with_source_map(def);
let infer = db.infer(def);
- infer_def(infer, source_map);
+ infer_def(infer, body, source_map);
}
buf.truncate(buf.trim_end().len());
@@ -572,10 +582,9 @@ fn salsa_bug() {
let x = 1;
x.push(1);
}
- "
- .to_string();
+ ";
- db.set_file_text(pos.file_id, Arc::new(new_text));
+ db.set_file_text(pos.file_id, Arc::from(new_text));
let module = db.module_for_file(pos.file_id);
let crate_def_map = module.def_map(&db);
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/coercion.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/coercion.rs
index b524922b6..16e5ef85d 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/coercion.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/coercion.rs
@@ -258,7 +258,6 @@ fn test() {
#[test]
fn coerce_autoderef_block() {
- // FIXME: We should know mutability in overloaded deref
check_no_mismatches(
r#"
//- minicore: deref
@@ -268,7 +267,7 @@ fn takes_ref_str(x: &str) {}
fn returns_string() -> String { loop {} }
fn test() {
takes_ref_str(&{ returns_string() });
- // ^^^^^^^^^^^^^^^^^^^^^ adjustments: Deref(None), Deref(Some(OverloadedDeref(None))), Borrow(Ref(Not))
+ // ^^^^^^^^^^^^^^^^^^^^^ adjustments: Deref(None), Deref(Some(OverloadedDeref(Some(Not)))), Borrow(Ref(Not))
}
"#,
);
@@ -397,9 +396,39 @@ fn test() {
}
#[test]
+fn coerce_fn_item_to_fn_ptr_in_array() {
+ check_no_mismatches(
+ r"
+fn foo(x: u32) -> isize { 1 }
+fn bar(x: u32) -> isize { 1 }
+fn test() {
+ let f = [foo, bar];
+ // ^^^ adjustments: Pointer(ReifyFnPointer)
+}",
+ );
+}
+
+#[test]
fn coerce_fn_items_in_match_arms() {
cov_mark::check!(coerce_fn_reification);
+ check_no_mismatches(
+ r"
+fn foo1(x: u32) -> isize { 1 }
+fn foo2(x: u32) -> isize { 2 }
+fn foo3(x: u32) -> isize { 3 }
+fn test() {
+ let x = match 1 {
+ 1 => foo1,
+ // ^^^^ adjustments: Pointer(ReifyFnPointer)
+ 2 => foo2,
+ // ^^^^ adjustments: Pointer(ReifyFnPointer)
+ _ => foo3,
+ // ^^^^ adjustments: Pointer(ReifyFnPointer)
+ };
+ x;
+}",
+ );
check_types(
r"
fn foo1(x: u32) -> isize { 1 }
@@ -507,7 +536,6 @@ fn test() {
#[test]
fn coerce_unsize_generic() {
- // FIXME: fix the type mismatches here
check(
r#"
//- minicore: coerce_unsized
@@ -516,9 +544,9 @@ struct Bar<T>(Foo<T>);
fn test() {
let _: &Foo<[usize]> = &Foo { t: [1, 2, 3] };
- //^^^^^^^^^ expected [usize], got [usize; 3]
+ //^^^^^^^^^^^^^^^^^^^^^ expected &Foo<[usize]>, got &Foo<[i32; 3]>
let _: &Bar<[usize]> = &Bar(Foo { t: [1, 2, 3] });
- //^^^^^^^^^ expected [usize], got [usize; 3]
+ //^^^^^^^^^^^^^^^^^^^^^^^^^^ expected &Bar<[usize]>, got &Bar<[i32; 3]>
}
"#,
);
@@ -547,7 +575,7 @@ fn two_closures_lub() {
fn foo(c: i32) {
let add = |a: i32, b: i32| a + b;
let sub = |a, b| a - b;
- //^^^^^^^^^^^^ |i32, i32| -> i32
+ //^^^^^^^^^^^^ impl Fn(i32, i32) -> i32
if c > 42 { add } else { sub };
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ fn(i32, i32) -> i32
}
@@ -842,3 +870,74 @@ fn test() {
}",
);
}
+
+#[test]
+fn adjust_index() {
+ check_no_mismatches(
+ r"
+//- minicore: index, slice, coerce_unsized
+fn test() {
+ let x = [1, 2, 3];
+ x[2] = 6;
+ // ^ adjustments: Borrow(Ref(Mut))
+}
+ ",
+ );
+ check_no_mismatches(
+ r"
+//- minicore: index
+struct Struct;
+impl core::ops::Index<usize> for Struct {
+ type Output = ();
+
+ fn index(&self, index: usize) -> &Self::Output { &() }
+}
+struct StructMut;
+
+impl core::ops::Index<usize> for StructMut {
+ type Output = ();
+
+ fn index(&self, index: usize) -> &Self::Output { &() }
+}
+impl core::ops::IndexMut for StructMut {
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut () }
+}
+fn test() {
+ Struct[0];
+ // ^^^^^^ adjustments: Borrow(Ref(Not))
+ StructMut[0];
+ // ^^^^^^^^^ adjustments: Borrow(Ref(Not))
+ &mut StructMut[0];
+ // ^^^^^^^^^ adjustments: Borrow(Ref(Mut))
+}",
+ );
+}
+
+#[test]
+fn regression_14443_dyn_coercion_block_impls() {
+ check_no_mismatches(
+ r#"
+//- minicore: coerce_unsized
+trait T {}
+
+fn dyn_t(d: &dyn T) {}
+
+fn main() {
+ struct A;
+ impl T for A {}
+
+ let a = A;
+
+ let b = {
+ struct B;
+ impl T for B {}
+
+ B
+ };
+
+ dyn_t(&a);
+ dyn_t(&b);
+}
+"#,
+ )
+}
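
Note on the reify coercions the new tests above pin down. The sketch below is plain stable Rust with made-up function names, not rust-analyzer code: every fn item gets its own zero-sized type, so collecting two of them in one array, or returning them from different match arms, forces the `Pointer(ReifyFnPointer)` coercion to a shared fn pointer type.

fn foo(x: u32) -> isize { x as isize }
fn bar(x: u32) -> isize { -(x as isize) }

fn main() {
    // Each fn item has a distinct zero-sized type; putting two of them in
    // one array reifies both to the common fn pointer type.
    let fns: [fn(u32) -> isize; 2] = [foo, bar];
    assert_eq!(fns[0](2), 2);
    assert_eq!(fns[1](2), -2);

    // The same least-upper-bound coercion applies across match arms.
    let picked: fn(u32) -> isize = match 1 {
        1 => foo,
        _ => bar,
    };
    assert_eq!(picked(3), 3);
}
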
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/incremental.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/incremental.rs
index 073d6d9be..bb15ca8c4 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/incremental.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/incremental.rs
@@ -1,6 +1,5 @@
-use std::sync::Arc;
-
use base_db::{fixture::WithFixture, SourceDatabaseExt};
+use triomphe::Arc;
use crate::{db::HirDatabase, test_db::TestDB};
@@ -33,10 +32,9 @@ fn typing_whitespace_inside_a_function_should_not_invalidate_types() {
+
1
}
- "
- .to_string();
+ ";
- db.set_file_text(pos.file_id, Arc::new(new_text));
+ db.set_file_text(pos.file_id, Arc::from(new_text));
{
let events = db.log_executed(|| {
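
The `Arc::new(String)` to `Arc::from(&str)` switch above is the usual unsized-Arc pattern. A minimal sketch with `std::sync::Arc` (rust-analyzer uses `triomphe::Arc` here, but the `From<&str>` conversion reads the same):

use std::sync::Arc;

fn main() {
    // Arc::new(String) shares the String handle (two indirections to the
    // bytes); Arc::from(&str) copies the bytes once into a shared Arc<str>.
    let owned: Arc<String> = Arc::new("fn f() {}".to_string());
    let shared: Arc<str> = Arc::from("fn f() {}");
    assert_eq!(owned.as_str(), &*shared);
}
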
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs
index 8b75ec842..111ac0b61 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs
@@ -140,6 +140,7 @@ fn infer_path_qualified_macros_expanded() {
fn expr_macro_def_expanded_in_various_places() {
check_infer(
r#"
+ //- minicore: iterator
macro spam() {
1isize
}
@@ -195,10 +196,19 @@ fn expr_macro_def_expanded_in_various_places() {
!0..6 '1isize': isize
39..442 '{ ...!(); }': ()
73..94 'spam!(...am!())': {unknown}
+ 100..119 'for _ ...!() {}': fn into_iter<isize>(isize) -> <isize as IntoIterator>::IntoIter
+ 100..119 'for _ ...!() {}': IntoIterator::IntoIter<isize>
+ 100..119 'for _ ...!() {}': !
+ 100..119 'for _ ...!() {}': IntoIterator::IntoIter<isize>
+ 100..119 'for _ ...!() {}': &mut IntoIterator::IntoIter<isize>
+ 100..119 'for _ ...!() {}': fn next<IntoIterator::IntoIter<isize>>(&mut IntoIterator::IntoIter<isize>) -> Option<<IntoIterator::IntoIter<isize> as Iterator>::Item>
+ 100..119 'for _ ...!() {}': Option<Iterator::Item<IntoIterator::IntoIter<isize>>>
100..119 'for _ ...!() {}': ()
- 104..105 '_': {unknown}
+ 100..119 'for _ ...!() {}': ()
+ 100..119 'for _ ...!() {}': ()
+ 104..105 '_': Iterator::Item<IntoIterator::IntoIter<isize>>
117..119 '{}': ()
- 124..134 '|| spam!()': || -> isize
+ 124..134 '|| spam!()': impl Fn() -> isize
140..156 'while ...!() {}': ()
154..156 '{}': ()
161..174 'break spam!()': !
@@ -221,6 +231,7 @@ fn expr_macro_def_expanded_in_various_places() {
fn expr_macro_rules_expanded_in_various_places() {
check_infer(
r#"
+ //- minicore: iterator
macro_rules! spam {
() => (1isize);
}
@@ -276,10 +287,19 @@ fn expr_macro_rules_expanded_in_various_places() {
!0..6 '1isize': isize
53..456 '{ ...!(); }': ()
87..108 'spam!(...am!())': {unknown}
+ 114..133 'for _ ...!() {}': fn into_iter<isize>(isize) -> <isize as IntoIterator>::IntoIter
+ 114..133 'for _ ...!() {}': IntoIterator::IntoIter<isize>
+ 114..133 'for _ ...!() {}': !
+ 114..133 'for _ ...!() {}': IntoIterator::IntoIter<isize>
+ 114..133 'for _ ...!() {}': &mut IntoIterator::IntoIter<isize>
+ 114..133 'for _ ...!() {}': fn next<IntoIterator::IntoIter<isize>>(&mut IntoIterator::IntoIter<isize>) -> Option<<IntoIterator::IntoIter<isize> as Iterator>::Item>
+ 114..133 'for _ ...!() {}': Option<Iterator::Item<IntoIterator::IntoIter<isize>>>
+ 114..133 'for _ ...!() {}': ()
114..133 'for _ ...!() {}': ()
- 118..119 '_': {unknown}
+ 114..133 'for _ ...!() {}': ()
+ 118..119 '_': Iterator::Item<IntoIterator::IntoIter<isize>>
131..133 '{}': ()
- 138..148 '|| spam!()': || -> isize
+ 138..148 '|| spam!()': impl Fn() -> isize
154..170 'while ...!() {}': ()
168..170 '{}': ()
175..188 'break spam!()': !
@@ -661,8 +681,9 @@ fn infer_builtin_macros_line() {
"#,
expect![[r#"
!0..1 '0': i32
+ !0..6 '0asu32': u32
63..87 '{ ...!(); }': ()
- 73..74 'x': i32
+ 73..74 'x': u32
"#]],
);
}
@@ -699,8 +720,9 @@ fn infer_builtin_macros_column() {
"#,
expect![[r#"
!0..1 '0': i32
+ !0..6 '0asu32': u32
65..91 '{ ...!(); }': ()
- 75..76 'x': i32
+ 75..76 'x': u32
"#]],
);
}
@@ -945,7 +967,7 @@ fn infer_builtin_macros_concat_with_lazy() {
#[test]
fn infer_builtin_macros_env() {
- check_infer(
+ check_types(
r#"
//- /main.rs env:foo=bar
#[rustc_builtin_macro]
@@ -953,13 +975,26 @@ fn infer_builtin_macros_env() {
fn main() {
let x = env!("foo");
+ //^ &str
+ }
+ "#,
+ );
+}
+
+#[test]
+fn infer_builtin_macros_option_env() {
+ check_types(
+ r#"
+ //- minicore: option
+ //- /main.rs env:foo=bar
+ #[rustc_builtin_macro]
+ macro_rules! option_env {() => {}}
+
+ fn main() {
+ let x = option_env!("foo");
+ //^ Option<&str>
}
"#,
- expect![[r#"
- !0..22 '"__RA_...TED__"': &str
- 62..90 '{ ...o"); }': ()
- 72..73 'x': &str
- "#]],
);
}
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs
index 378d47833..1e57a4ae2 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs
@@ -389,6 +389,24 @@ mod bar_test {
}
#[test]
+fn infer_trait_method_multiple_mutable_reference() {
+ check_types(
+ r#"
+trait Trait {
+ fn method(&mut self) -> i32 { 5 }
+}
+struct S;
+impl Trait for &mut &mut S {}
+fn test() {
+ let s = &mut &mut &mut S;
+ s.method();
+ //^^^^^^^^^^ i32
+}
+ "#,
+ );
+}
+
+#[test]
fn infer_trait_method_generic_1() {
// the trait implementation is intentionally incomplete -- it shouldn't matter
check_types(
@@ -1255,7 +1273,6 @@ fn foo<T: Trait>(a: &T) {
#[test]
fn autoderef_visibility_field() {
- // FIXME: We should know mutability in overloaded deref
check(
r#"
//- minicore: deref
@@ -1277,7 +1294,7 @@ mod a {
mod b {
fn foo() {
let x = super::a::Bar::new().0;
- // ^^^^^^^^^^^^^^^^^^^^ adjustments: Deref(Some(OverloadedDeref(None)))
+ // ^^^^^^^^^^^^^^^^^^^^ adjustments: Deref(Some(OverloadedDeref(Some(Not))))
// ^^^^^^^^^^^^^^^^^^^^^^ type: char
}
}
@@ -1723,7 +1740,7 @@ fn test() {
Foo.foo();
//^^^ adjustments: Borrow(Ref(Not))
(&Foo).foo();
- // ^^^^ adjustments: ,
+ // ^^^^ adjustments: Deref(None), Borrow(Ref(Not))
}
"#,
);
@@ -1922,3 +1939,54 @@ fn foo() {
"#,
);
}
+
+#[test]
+fn box_deref_is_builtin() {
+ check(
+ r#"
+//- minicore: deref
+use core::ops::Deref;
+
+#[lang = "owned_box"]
+struct Box<T>(*mut T);
+
+impl<T> Box<T> {
+ fn new(t: T) -> Self {
+ loop {}
+ }
+}
+
+impl<T> Deref for Box<T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target;
+}
+
+struct Foo;
+impl Foo {
+ fn foo(&self) {}
+}
+fn test() {
+ Box::new(Foo).foo();
+ //^^^^^^^^^^^^^ adjustments: Deref(None), Borrow(Ref(Not))
+}
+"#,
+ );
+}
+
+#[test]
+fn manually_drop_deref_is_not_builtin() {
+ check(
+ r#"
+//- minicore: manually_drop, deref
+struct Foo;
+impl Foo {
+ fn foo(&self) {}
+}
+use core::mem::ManuallyDrop;
+fn test() {
+ ManuallyDrop::new(Foo).foo();
+ //^^^^^^^^^^^^^^^^^^^^^^ adjustments: Deref(Some(OverloadedDeref(Some(Not)))), Borrow(Ref(Not))
+}
+"#,
+ );
+}
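
The `infer_trait_method_multiple_mutable_reference` fixture above elides impl lifetimes, which test fixtures allow; written out as compilable Rust, the autoderef-then-autoref steps it exercises look like this sketch:

trait Trait {
    fn method(&mut self) -> i32 { 5 }
}

struct S;

// Lifetimes must be spelled out in real Rust; the fixture elides them.
impl<'a, 'b> Trait for &'a mut &'b mut S {}

fn main() {
    let s = &mut &mut &mut S;
    // Autoderef peels one `&mut` to reach `&mut &mut S`, then autoref
    // takes `&mut *s` so the `&mut self` receiver matches.
    assert_eq!(s.method(), 5);
}
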
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs
index fbdc8209f..59046c043 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs
@@ -327,6 +327,7 @@ fn diverging_expression_2() {
fn diverging_expression_3_break() {
check_infer_with_mismatches(
r"
+ //- minicore: iterator
//- /main.rs
fn test1() {
// should give type mismatch
@@ -360,6 +361,15 @@ fn diverging_expression_3_break() {
97..343 '{ ...; }; }': ()
140..141 'x': u32
149..175 '{ for ...; }; }': u32
+ 151..172 'for a ...eak; }': fn into_iter<{unknown}>({unknown}) -> <{unknown} as IntoIterator>::IntoIter
+ 151..172 'for a ...eak; }': {unknown}
+ 151..172 'for a ...eak; }': !
+ 151..172 'for a ...eak; }': {unknown}
+ 151..172 'for a ...eak; }': &mut {unknown}
+ 151..172 'for a ...eak; }': fn next<{unknown}>(&mut {unknown}) -> Option<<{unknown} as Iterator>::Item>
+ 151..172 'for a ...eak; }': Option<{unknown}>
+ 151..172 'for a ...eak; }': ()
+ 151..172 'for a ...eak; }': ()
151..172 'for a ...eak; }': ()
155..156 'a': {unknown}
160..161 'b': {unknown}
@@ -367,12 +377,30 @@ fn diverging_expression_3_break() {
164..169 'break': !
226..227 'x': u32
235..253 '{ for ... {}; }': u32
+ 237..250 'for a in b {}': fn into_iter<{unknown}>({unknown}) -> <{unknown} as IntoIterator>::IntoIter
+ 237..250 'for a in b {}': {unknown}
+ 237..250 'for a in b {}': !
+ 237..250 'for a in b {}': {unknown}
+ 237..250 'for a in b {}': &mut {unknown}
+ 237..250 'for a in b {}': fn next<{unknown}>(&mut {unknown}) -> Option<<{unknown} as Iterator>::Item>
+ 237..250 'for a in b {}': Option<{unknown}>
+ 237..250 'for a in b {}': ()
+ 237..250 'for a in b {}': ()
237..250 'for a in b {}': ()
241..242 'a': {unknown}
246..247 'b': {unknown}
248..250 '{}': ()
304..305 'x': u32
313..340 '{ for ...; }; }': u32
+ 315..337 'for a ...urn; }': fn into_iter<{unknown}>({unknown}) -> <{unknown} as IntoIterator>::IntoIter
+ 315..337 'for a ...urn; }': {unknown}
+ 315..337 'for a ...urn; }': !
+ 315..337 'for a ...urn; }': {unknown}
+ 315..337 'for a ...urn; }': &mut {unknown}
+ 315..337 'for a ...urn; }': fn next<{unknown}>(&mut {unknown}) -> Option<<{unknown} as Iterator>::Item>
+ 315..337 'for a ...urn; }': Option<{unknown}>
+ 315..337 'for a ...urn; }': ()
+ 315..337 'for a ...urn; }': ()
315..337 'for a ...urn; }': ()
319..320 'a': {unknown}
324..325 'b': {unknown}
@@ -483,3 +511,22 @@ fn example() -> bool {
"#,
);
}
+
+#[test]
+fn reservation_impl_should_be_ignored() {
+ // See rust-lang/rust#64631.
+ check_types(
+ r#"
+//- minicore: from
+struct S;
+#[rustc_reservation_impl]
+impl<T> From<!> for T {}
+fn foo<T, U: From<T>>(_: U) -> T { loop {} }
+
+fn test() {
+ let s = foo(S);
+ //^ S
+}
+"#,
+ );
+}
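
The repeated `for _ ...` expectation lines introduced throughout this diff track the standard `for` loop desugaring, roughly the following (a sketch of the surface-level desugar, not rust-analyzer's internal representation):

fn main() {
    let xs = vec![1, 2, 3];
    let mut sum = 0;

    // `for x in xs { sum += x; }` desugars roughly to:
    let mut iter = IntoIterator::into_iter(xs);
    loop {
        match Iterator::next(&mut iter) {
            Some(x) => sum += x,
            None => break,
        }
    }
    assert_eq!(sum, 6);
}
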
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/patterns.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/patterns.rs
index 74bcab6ca..0f5a3e175 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/patterns.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/patterns.rs
@@ -1,11 +1,12 @@
use expect_test::expect;
-use super::{check, check_infer, check_infer_with_mismatches, check_types};
+use super::{check, check_infer, check_infer_with_mismatches, check_no_mismatches, check_types};
#[test]
fn infer_pattern() {
check_infer(
r#"
+ //- minicore: iterator
fn test(x: &i32) {
let y = x;
let &z = x;
@@ -46,6 +47,15 @@ fn infer_pattern() {
82..94 '(1, "hello")': (i32, &str)
83..84 '1': i32
86..93 '"hello"': &str
+ 101..151 'for (e... }': fn into_iter<{unknown}>({unknown}) -> <{unknown} as IntoIterator>::IntoIter
+ 101..151 'for (e... }': {unknown}
+ 101..151 'for (e... }': !
+ 101..151 'for (e... }': {unknown}
+ 101..151 'for (e... }': &mut {unknown}
+ 101..151 'for (e... }': fn next<{unknown}>(&mut {unknown}) -> Option<<{unknown} as Iterator>::Item>
+ 101..151 'for (e... }': Option<({unknown}, {unknown})>
+ 101..151 'for (e... }': ()
+ 101..151 'for (e... }': ()
101..151 'for (e... }': ()
105..111 '(e, f)': ({unknown}, {unknown})
106..107 'e': {unknown}
@@ -70,8 +80,8 @@ fn infer_pattern() {
228..233 '&true': &bool
229..233 'true': bool
234..236 '{}': ()
- 246..252 'lambda': |u64, u64, i32| -> i32
- 255..287 '|a: u6...b; c }': |u64, u64, i32| -> i32
+ 246..252 'lambda': impl Fn(u64, u64, i32) -> i32
+ 255..287 '|a: u6...b; c }': impl Fn(u64, u64, i32) -> i32
256..257 'a': u64
264..265 'b': u64
267..268 'c': i32
@@ -241,6 +251,21 @@ fn infer_pattern_match_ergonomics_ref() {
}
#[test]
+fn ref_pat_with_inference_variable() {
+ check_no_mismatches(
+ r#"
+enum E { A }
+fn test() {
+ let f = |e| match e {
+ &E::A => {}
+ };
+ f(&E::A);
+}
+"#,
+ );
+}
+
+#[test]
fn infer_pattern_match_slice() {
check_infer(
r#"
@@ -476,7 +501,7 @@ fn infer_adt_pattern() {
183..184 'x': usize
190..191 'x': usize
201..205 'E::B': E
- 209..212 'foo': {unknown}
+ 209..212 'foo': bool
216..217 '1': usize
227..231 'E::B': E
235..237 '10': usize
@@ -677,25 +702,25 @@ fn test() {
51..58 'loop {}': !
56..58 '{}': ()
72..171 '{ ... x); }': ()
- 78..81 'foo': fn foo<&(i32, &str), i32, |&(i32, &str)| -> i32>(&(i32, &str), |&(i32, &str)| -> i32) -> i32
+ 78..81 'foo': fn foo<&(i32, &str), i32, impl Fn(&(i32, &str)) -> i32>(&(i32, &str), impl Fn(&(i32, &str)) -> i32) -> i32
78..105 'foo(&(...y)| x)': i32
82..91 '&(1, "a")': &(i32, &str)
83..91 '(1, "a")': (i32, &str)
84..85 '1': i32
87..90 '"a"': &str
- 93..104 '|&(x, y)| x': |&(i32, &str)| -> i32
+ 93..104 '|&(x, y)| x': impl Fn(&(i32, &str)) -> i32
94..101 '&(x, y)': &(i32, &str)
95..101 '(x, y)': (i32, &str)
96..97 'x': i32
99..100 'y': &str
103..104 'x': i32
- 142..145 'foo': fn foo<&(i32, &str), &i32, |&(i32, &str)| -> &i32>(&(i32, &str), |&(i32, &str)| -> &i32) -> &i32
+ 142..145 'foo': fn foo<&(i32, &str), &i32, impl Fn(&(i32, &str)) -> &i32>(&(i32, &str), impl Fn(&(i32, &str)) -> &i32) -> &i32
142..168 'foo(&(...y)| x)': &i32
146..155 '&(1, "a")': &(i32, &str)
147..155 '(1, "a")': (i32, &str)
148..149 '1': i32
151..154 '"a"': &str
- 157..167 '|(x, y)| x': |&(i32, &str)| -> &i32
+ 157..167 '|(x, y)| x': impl Fn(&(i32, &str)) -> &i32
158..164 '(x, y)': (i32, &str)
159..160 'x': &i32
162..163 'y': &&str
@@ -1084,7 +1109,7 @@ fn var_args() {
#[lang = "va_list"]
pub struct VaListImpl<'f>;
fn my_fn(foo: ...) {}
- //^^^ VaListImpl
+ //^^^ VaListImpl<'_>
"#,
);
}
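
As background for the binding-mode tests above, a minimal sketch of match ergonomics in plain Rust: matching a reference against a non-reference pattern flips the default binding mode to by-reference, and an explicit `&` pattern flips it back.

fn main() {
    let pair = &(1, "hi");
    // Match ergonomics: a non-reference pattern against `&(i32, &str)`
    // binds by reference, so x: &i32 and y: &&str.
    let (x, y) = pair;
    let _: &i32 = x;
    let _: &&str = y;
    // An explicit `&` pattern resets the binding mode to by-value.
    let &(a, _) = pair;
    let _: i32 = a;
}
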
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs
index 689f0da44..047900a32 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs
@@ -246,6 +246,7 @@ fn infer_std_crash_5() {
// taken from rustc
check_infer(
r#"
+ //- minicore: iterator
fn extra_compiler_flags() {
for content in doesnt_matter {
let name = if doesnt_matter {
@@ -264,13 +265,22 @@ fn infer_std_crash_5() {
"#,
expect![[r#"
26..322 '{ ... } }': ()
+ 32..320 'for co... }': fn into_iter<{unknown}>({unknown}) -> <{unknown} as IntoIterator>::IntoIter
+ 32..320 'for co... }': {unknown}
+ 32..320 'for co... }': !
+ 32..320 'for co... }': {unknown}
+ 32..320 'for co... }': &mut {unknown}
+ 32..320 'for co... }': fn next<{unknown}>(&mut {unknown}) -> Option<<{unknown} as Iterator>::Item>
+ 32..320 'for co... }': Option<{unknown}>
+ 32..320 'for co... }': ()
+ 32..320 'for co... }': ()
32..320 'for co... }': ()
36..43 'content': {unknown}
47..60 'doesnt_matter': {unknown}
61..320 '{ ... }': ()
75..79 'name': &{unknown}
82..166 'if doe... }': &{unknown}
- 85..98 'doesnt_matter': {unknown}
+ 85..98 'doesnt_matter': bool
99..128 '{ ... }': &{unknown}
113..118 'first': &{unknown}
134..166 '{ ... }': &{unknown}
@@ -279,7 +289,7 @@ fn infer_std_crash_5() {
181..188 'content': &{unknown}
191..313 'if ICE... }': &{unknown}
194..231 'ICE_RE..._VALUE': {unknown}
- 194..247 'ICE_RE...&name)': {unknown}
+ 194..247 'ICE_RE...&name)': bool
241..246 '&name': &&{unknown}
242..246 'name': &{unknown}
248..276 '{ ... }': &{unknown}
@@ -805,19 +815,19 @@ fn issue_4966() {
225..229 'iter': T
244..246 '{}': Vec<A>
258..402 '{ ...r(); }': ()
- 268..273 'inner': Map<|&f64| -> f64>
- 276..300 'Map { ... 0.0 }': Map<|&f64| -> f64>
- 285..298 '|_: &f64| 0.0': |&f64| -> f64
+ 268..273 'inner': Map<impl Fn(&f64) -> f64>
+ 276..300 'Map { ... 0.0 }': Map<impl Fn(&f64) -> f64>
+ 285..298 '|_: &f64| 0.0': impl Fn(&f64) -> f64
286..287 '_': &f64
295..298 '0.0': f64
- 311..317 'repeat': Repeat<Map<|&f64| -> f64>>
- 320..345 'Repeat...nner }': Repeat<Map<|&f64| -> f64>>
- 338..343 'inner': Map<|&f64| -> f64>
- 356..359 'vec': Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>>
- 362..371 'from_iter': fn from_iter<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>, Repeat<Map<|&f64| -> f64>>>(Repeat<Map<|&f64| -> f64>>) -> Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>>
- 362..379 'from_i...epeat)': Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>>
- 372..378 'repeat': Repeat<Map<|&f64| -> f64>>
- 386..389 'vec': Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>>
+ 311..317 'repeat': Repeat<Map<impl Fn(&f64) -> f64>>
+ 320..345 'Repeat...nner }': Repeat<Map<impl Fn(&f64) -> f64>>
+ 338..343 'inner': Map<impl Fn(&f64) -> f64>
+ 356..359 'vec': Vec<IntoIterator::Item<Repeat<Map<impl Fn(&f64) -> f64>>>>
+ 362..371 'from_iter': fn from_iter<IntoIterator::Item<Repeat<Map<impl Fn(&f64) -> f64>>>, Repeat<Map<impl Fn(&f64) -> f64>>>(Repeat<Map<impl Fn(&f64) -> f64>>) -> Vec<IntoIterator::Item<Repeat<Map<impl Fn(&f64) -> f64>>>>
+ 362..379 'from_i...epeat)': Vec<IntoIterator::Item<Repeat<Map<impl Fn(&f64) -> f64>>>>
+ 372..378 'repeat': Repeat<Map<impl Fn(&f64) -> f64>>
+ 386..389 'vec': Vec<IntoIterator::Item<Repeat<Map<impl Fn(&f64) -> f64>>>>
386..399 'vec.foo_bar()': {unknown}
"#]],
);
@@ -852,7 +862,7 @@ fn main() {
123..126 'S()': S<i32>
132..133 's': S<i32>
132..144 's.g(|_x| {})': ()
- 136..143 '|_x| {}': |&i32| -> ()
+ 136..143 '|_x| {}': impl Fn(&i32)
137..139 '_x': &i32
141..143 '{}': ()
150..151 's': S<i32>
@@ -886,13 +896,13 @@ fn flush(&self) {
"#,
expect![[r#"
123..127 'self': &Mutex<T>
- 150..152 '{}': MutexGuard<T>
+ 150..152 '{}': MutexGuard<'_, T>
234..238 'self': &{unknown}
240..290 '{ ...()); }': ()
250..251 'w': &Mutex<BufWriter>
276..287 '*(w.lock())': BufWriter
278..279 'w': &Mutex<BufWriter>
- 278..286 'w.lock()': MutexGuard<BufWriter>
+ 278..286 'w.lock()': MutexGuard<'_, BufWriter>
"#]],
);
}
@@ -1060,7 +1070,7 @@ fn infix_parse<T, S>(_state: S, _level_code: &Fn(S)) -> T {
loop {}
}
-fn parse_arule() {
+fn parse_a_rule() {
infix_parse((), &(|_recurse| ()))
}
"#,
@@ -1068,6 +1078,23 @@ fn parse_arule() {
}
#[test]
+fn nested_closure() {
+ check_types(
+ r#"
+//- minicore: fn, option
+
+fn map<T, U>(o: Option<T>, f: impl FnOnce(T) -> U) -> Option<U> { loop {} }
+
+fn test() {
+ let o = Some(Some(2));
+ map(o, |s| map(s, |x| x));
+ // ^ i32
+}
+ "#,
+ );
+}
+
+#[test]
fn call_expected_type_closure() {
check_types(
r#"
@@ -1198,6 +1225,7 @@ fn mamba(a: U32!(), p: u32) -> u32 {
fn for_loop_block_expr_iterable() {
check_infer(
r#"
+//- minicore: iterator
fn test() {
for _ in { let x = 0; } {
let y = 0;
@@ -1206,8 +1234,17 @@ fn test() {
"#,
expect![[r#"
10..68 '{ ... } }': ()
+ 16..66 'for _ ... }': fn into_iter<()>(()) -> <() as IntoIterator>::IntoIter
+ 16..66 'for _ ... }': IntoIterator::IntoIter<()>
+ 16..66 'for _ ... }': !
+ 16..66 'for _ ... }': IntoIterator::IntoIter<()>
+ 16..66 'for _ ... }': &mut IntoIterator::IntoIter<()>
+ 16..66 'for _ ... }': fn next<IntoIterator::IntoIter<()>>(&mut IntoIterator::IntoIter<()>) -> Option<<IntoIterator::IntoIter<()> as Iterator>::Item>
+ 16..66 'for _ ... }': Option<Iterator::Item<IntoIterator::IntoIter<()>>>
+ 16..66 'for _ ... }': ()
16..66 'for _ ... }': ()
- 20..21 '_': {unknown}
+ 16..66 'for _ ... }': ()
+ 20..21 '_': Iterator::Item<IntoIterator::IntoIter<()>>
25..39 '{ let x = 0; }': ()
31..32 'x': i32
35..36 '0': i32
@@ -1458,13 +1495,12 @@ fn regression_11688_3() {
struct Ar<T, const N: u8>(T);
fn f<const LEN: usize, T, const BASE: u8>(
num_zeros: usize,
- ) -> dyn Iterator<Item = [Ar<T, BASE>; LEN]> {
+ ) -> &dyn Iterator<Item = [Ar<T, BASE>; LEN]> {
loop {}
}
fn dynamic_programming() {
- for board in f::<9, u8, 7>(1) {
- //^^^^^ [Ar<u8, 7>; 9]
- }
+ let board = f::<9, u8, 7>(1).next();
+ //^^^^^ Option<[Ar<u8, 7>; 9]>
}
"#,
);
@@ -1758,6 +1794,21 @@ const C: usize = 2 + 2;
}
#[test]
+fn regression_14456() {
+ check_types(
+ r#"
+//- minicore: future
+async fn x() {}
+fn f() {
+ let fut = x();
+ let t = [0u8; { let a = 2 + 2; a }];
+ //^ [u8; 4]
+}
+"#,
+ );
+}
+
+#[test]
fn regression_14164() {
check_types(
r#"
@@ -1788,3 +1839,142 @@ where
"#,
);
}
+
+#[test]
+fn match_ergonomics_with_binding_modes_interaction() {
+ check_types(
+ r"
+enum E { A }
+fn foo() {
+ match &E::A {
+ b @ (x @ E::A | x) => {
+ b;
+ //^ &E
+ x;
+ //^ &E
+ }
+ }
+}",
+ );
+}
+
+#[test]
+fn regression_14844() {
+ check_no_mismatches(
+ r#"
+pub type Ty = Unknown;
+
+pub struct Inner<T>();
+
+pub struct Outer {
+ pub inner: Inner<Ty>,
+}
+
+fn main() {
+ _ = Outer {
+ inner: Inner::<i32>(),
+ };
+}
+ "#,
+ );
+ check_no_mismatches(
+ r#"
+pub const ONE: usize = 1;
+
+pub struct Inner<const P: usize>();
+
+pub struct Outer {
+ pub inner: Inner<ONE>,
+}
+
+fn main() {
+ _ = Outer {
+ inner: Inner::<1>(),
+ };
+}
+ "#,
+ );
+ check_no_mismatches(
+ r#"
+pub const ONE: usize = unknown();
+
+pub struct Inner<const P: usize>();
+
+pub struct Outer {
+ pub inner: Inner<ONE>,
+}
+
+fn main() {
+ _ = Outer {
+ inner: Inner::<1>(),
+ };
+}
+ "#,
+ );
+ check_no_mismatches(
+ r#"
+pub const N: usize = 2 + 2;
+
+fn f(t: [u8; N]) {}
+
+fn main() {
+ let a = [1, 2, 3, 4];
+ f(a);
+ let b = [1; 4];
+ let c: [u8; N] = b;
+ let d = [1; N];
+ let e: [u8; N] = d;
+ let f = [1; N];
+ let g = match f {
+ [a, b, c, d] => a + b + c + d,
+ };
+}
+ "#,
+ );
+}
+
+#[test]
+fn regression_14844_2() {
+ check_no_mismatches(
+ r#"
+//- minicore: fn
+pub const ONE: usize = 1;
+
+pub type MyInner = Inner<ONE>;
+
+pub struct Inner<const P: usize>();
+
+impl Inner<1> {
+ fn map<F>(&self, func: F) -> bool
+ where
+ F: Fn(&MyInner) -> bool,
+ {
+ func(self)
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn dont_crash_on_slice_unsizing() {
+ check_no_mismatches(
+ r#"
+//- minicore: slice, unsize, coerce_unsized
+trait Tr {
+ fn f(self);
+}
+
+impl Tr for [i32] {
+ fn f(self) {
+ let t;
+ x(t);
+ }
+}
+
+fn x(a: [i32; 4]) {
+ let b = a.f();
+}
+ "#,
+ );
+}
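
The `regression_14456` fixture relies on array lengths being const-evaluated even when written as block expressions; the same shape compiles on stable Rust, sketched here with a hypothetical `four` helper:

const fn four() -> usize { 4 }

fn f() -> [u8; four()] {
    [0; four()]
}

fn main() {
    // Block expressions in array-length position are const-evaluated too.
    let t = [0u8; { let a = 2 + 2; a }];
    assert_eq!(t.len(), f().len());
}
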
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs
index 13cc3fea5..a0ff62843 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs
@@ -854,9 +854,9 @@ fn test2(a1: *const A, a2: *mut A) {
237..239 'a2': *mut A
249..272 '{ ...2.b; }': ()
255..257 'a1': *const A
- 255..259 'a1.b': B
+ 255..259 'a1.b': {unknown}
265..267 'a2': *mut A
- 265..269 'a2.b': B
+ 265..269 'a2.b': {unknown}
"#]],
);
}
@@ -1812,6 +1812,52 @@ fn main() {
//^ [(); 7]
}"#,
);
+ check_types(
+ r#"
+trait Foo {
+ fn x(self);
+}
+
+impl Foo for u8 {
+ fn x(self) {
+ let t = [0; 4 + 2];
+ //^ [i32; 6]
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn const_eval_in_function_signature() {
+ check_types(
+ r#"
+const fn foo() -> usize {
+ 5
+}
+
+fn f() -> [u8; foo()] {
+ loop {}
+}
+
+fn main() {
+ let t = f();
+ //^ [u8; 5]
+}"#,
+ );
+ check_types(
+ r#"
+//- minicore: default, builtin_impls
+fn f() -> [u8; Default::default()] {
+ loop {}
+}
+
+fn main() {
+ let t = f();
+ //^ [u8; 0]
+}
+ "#,
+ );
}
#[test]
@@ -1906,8 +1952,8 @@ fn closure_return() {
"#,
expect![[r#"
16..58 '{ ...; }; }': u32
- 26..27 'x': || -> usize
- 30..55 '|| -> ...n 1; }': || -> usize
+ 26..27 'x': impl Fn() -> usize
+ 30..55 '|| -> ...n 1; }': impl Fn() -> usize
42..55 '{ return 1; }': usize
44..52 'return 1': !
51..52 '1': usize
@@ -1925,8 +1971,8 @@ fn closure_return_unit() {
"#,
expect![[r#"
16..47 '{ ...; }; }': u32
- 26..27 'x': || -> ()
- 30..44 '|| { return; }': || -> ()
+ 26..27 'x': impl Fn()
+ 30..44 '|| { return; }': impl Fn()
33..44 '{ return; }': ()
35..41 'return': !
"#]],
@@ -1943,8 +1989,8 @@ fn closure_return_inferred() {
"#,
expect![[r#"
16..46 '{ ..." }; }': u32
- 26..27 'x': || -> &str
- 30..43 '|| { "test" }': || -> &str
+ 26..27 'x': impl Fn() -> &str
+ 30..43 '|| { "test" }': impl Fn() -> &str
33..43 '{ "test" }': &str
35..41 '"test"': &str
"#]],
@@ -2034,6 +2080,56 @@ fn test() {
}
#[test]
+fn tuple_pattern_nested_match_ergonomics() {
+ check_no_mismatches(
+ r#"
+fn f(x: (&i32, &i32)) -> i32 {
+ match x {
+ (3, 4) => 5,
+ _ => 12,
+ }
+}
+ "#,
+ );
+ check_types(
+ r#"
+fn f(x: (&&&&i32, &&&i32)) {
+ let f = match x {
+ t @ (3, 4) => t,
+ _ => loop {},
+ };
+ f;
+ //^ (&&&&i32, &&&i32)
+}
+ "#,
+ );
+ check_types(
+ r#"
+fn f() {
+ let x = &&&(&&&2, &&&&&3);
+ let (y, z) = x;
+ //^ &&&&i32
+ let t @ (y, z) = x;
+ t;
+ //^ &&&(&&&i32, &&&&&i32)
+}
+ "#,
+ );
+ check_types(
+ r#"
+fn f() {
+ let x = &&&(&&&2, &&&&&3);
+ let (y, z) = x;
+ //^ &&&&i32
+ let t @ (y, z) = x;
+ t;
+ //^ &&&(&&&i32, &&&&&i32)
+}
+ "#,
+ );
+}
+
+#[test]
fn fn_pointer_return() {
check_infer(
r#"
@@ -2050,7 +2146,7 @@ fn fn_pointer_return() {
47..120 '{ ...hod; }': ()
57..63 'vtable': Vtable
66..90 'Vtable...| {} }': Vtable
- 83..88 '|| {}': || -> ()
+ 83..88 '|| {}': impl Fn()
86..88 '{}': ()
100..101 'm': fn()
104..110 'vtable': Vtable
@@ -2087,6 +2183,7 @@ async fn main() {
136..138 '()': ()
150..151 'w': i32
154..166 'const { 92 }': i32
+ 154..166 'const { 92 }': i32
162..164 '92': i32
176..177 't': i32
180..190 ''a: { 92 }': i32
@@ -2094,6 +2191,24 @@ async fn main() {
"#]],
)
}
+
+#[test]
+fn async_fn_and_try_operator() {
+ check_no_mismatches(
+ r#"
+//- minicore: future, result, fn, try, from
+async fn foo() -> Result<(), ()> {
+ Ok(())
+}
+
+async fn bar() -> Result<(), ()> {
+ let x = foo().await?;
+ Ok(x)
+}
+ "#,
+ )
+}
+
#[test]
fn async_block_early_return() {
check_infer(
@@ -2124,9 +2239,9 @@ fn main() {
149..151 'Ok': Ok<(), ()>(()) -> Result<(), ()>
149..155 'Ok(())': Result<(), ()>
152..154 '()': ()
- 167..171 'test': fn test<(), (), || -> impl Future<Output = Result<(), ()>>, impl Future<Output = Result<(), ()>>>(|| -> impl Future<Output = Result<(), ()>>)
+ 167..171 'test': fn test<(), (), impl Fn() -> impl Future<Output = Result<(), ()>>, impl Future<Output = Result<(), ()>>>(impl Fn() -> impl Future<Output = Result<(), ()>>)
167..228 'test(|... })': ()
- 172..227 '|| asy... }': || -> impl Future<Output = Result<(), ()>>
+ 172..227 '|| asy... }': impl Fn() -> impl Future<Output = Result<(), ()>>
175..227 'async ... }': impl Future<Output = Result<(), ()>>
191..205 'return Err(())': !
198..201 'Err': Err<(), ()>(()) -> Result<(), ()>
@@ -2252,8 +2367,8 @@ fn infer_labelled_break_with_val() {
"#,
expect![[r#"
9..335 '{ ... }; }': ()
- 19..21 '_x': || -> bool
- 24..332 '|| 'ou... }': || -> bool
+ 19..21 '_x': impl Fn() -> bool
+ 24..332 '|| 'ou... }': impl Fn() -> bool
27..332 ''outer... }': bool
40..332 '{ ... }': ()
54..59 'inner': i8
@@ -2678,6 +2793,179 @@ impl B for Astruct {}
}
#[test]
+fn capture_kinds_simple() {
+ check_types(
+ r#"
+struct S;
+
+impl S {
+ fn read(&self) -> &S { self }
+ fn write(&mut self) -> &mut S { self }
+ fn consume(self) -> S { self }
+}
+
+fn f() {
+ let x = S;
+ let c1 = || x.read();
+ //^^ impl Fn() -> &S
+ let c2 = || x.write();
+ //^^ impl FnMut() -> &mut S
+ let c3 = || x.consume();
+ //^^ impl FnOnce() -> S
+ let c3 = || x.consume().consume().consume();
+ //^^ impl FnOnce() -> S
+ let c3 = || x.consume().write().read();
+ //^^ impl FnOnce() -> &S
+ let x = &mut x;
+ let c1 = || x.write();
+ //^^ impl FnMut() -> &mut S
+ let x = S;
+ let c1 = || { let ref t = x; t };
+ //^^ impl Fn() -> &S
+ let c2 = || { let ref mut t = x; t };
+ //^^ impl FnMut() -> &mut S
+ let c3 = || { let t = x; t };
+ //^^ impl FnOnce() -> S
+}
+ "#,
+ )
+}
+
+#[test]
+fn capture_kinds_closure() {
+ check_types(
+ r#"
+//- minicore: copy, fn
+fn f() {
+ let mut x = 2;
+ x = 5;
+ let mut c1 = || { x = 3; x };
+ //^^^^^^ impl FnMut() -> i32
+ let mut c2 = || { c1() };
+ //^^^^^^ impl FnMut() -> i32
+ let mut c1 = || { x };
+ //^^^^^^ impl Fn() -> i32
+ let mut c2 = || { c1() };
+ //^^^^^^ impl Fn() -> i32
+ struct X;
+ let x = X;
+ let mut c1 = || { x };
+ //^^^^^^ impl FnOnce() -> X
+ let mut c2 = || { c1() };
+ //^^^^^^ impl FnOnce() -> X
+}
+ "#,
+ );
+}
+
+#[test]
+fn capture_kinds_overloaded_deref() {
+ check_types(
+ r#"
+//- minicore: fn, deref_mut
+use core::ops::{Deref, DerefMut};
+
+struct Foo;
+impl Deref for Foo {
+ type Target = (i32, u8);
+ fn deref(&self) -> &(i32, u8) {
+ &(5, 2)
+ }
+}
+impl DerefMut for Foo {
+ fn deref_mut(&mut self) -> &mut (i32, u8) {
+ &mut (5, 2)
+ }
+}
+fn test() {
+ let mut x = Foo;
+ let c1 = || *x;
+ //^^ impl Fn() -> (i32, u8)
+ let c2 = || { *x = (2, 5); };
+ //^^ impl FnMut()
+ let c3 = || { x.1 };
+ //^^ impl Fn() -> u8
+ let c4 = || { x.1 = 6; };
+ //^^ impl FnMut()
+}
+ "#,
+ );
+}
+
+#[test]
+fn capture_kinds_with_copy_types() {
+ check_types(
+ r#"
+//- minicore: copy, clone, derive
+#[derive(Clone, Copy)]
+struct Copy;
+struct NotCopy;
+#[derive(Clone, Copy)]
+struct Generic<T>(T);
+
+trait Tr {
+ type Assoc;
+}
+
+impl Tr for Copy {
+ type Assoc = NotCopy;
+}
+
+#[derive(Clone, Copy)]
+struct AssocGeneric<T: Tr>(T::Assoc);
+
+fn f() {
+ let a = Copy;
+ let b = NotCopy;
+ let c = Generic(Copy);
+ let d = Generic(NotCopy);
+ let e: AssocGeneric<Copy> = AssocGeneric(NotCopy);
+ let c1 = || a;
+ //^^ impl Fn() -> Copy
+ let c2 = || b;
+ //^^ impl FnOnce() -> NotCopy
+ let c3 = || c;
+ //^^ impl Fn() -> Generic<Copy>
+ let c3 = || d;
+ //^^ impl FnOnce() -> Generic<NotCopy>
+ let c3 = || e;
+ //^^ impl FnOnce() -> AssocGeneric<Copy>
+}
+ "#,
+ )
+}
+
+#[test]
+fn derive_macro_should_work_for_associated_type() {
+ check_types(
+ r#"
+//- minicore: copy, clone, derive
+#[derive(Clone)]
+struct X;
+#[derive(Clone)]
+struct Y;
+
+trait Tr {
+ type Assoc;
+}
+
+impl Tr for X {
+ type Assoc = Y;
+}
+
+#[derive(Clone)]
+struct AssocGeneric<T: Tr>(T::Assoc);
+
+fn f() {
+ let e: AssocGeneric<X> = AssocGeneric(Y);
+ let e_clone = e.clone();
+ //^^^^^^^ AssocGeneric<X>
+}
+ "#,
+ )
+}
+
+#[test]
fn cfgd_out_assoc_items() {
check_types(
r#"
@@ -2697,6 +2985,21 @@ fn f() {
}
#[test]
+fn infer_ref_to_raw_cast() {
+ check_types(
+ r#"
+struct S;
+
+fn f() {
+ let s = &mut S;
+ let s = s as *mut _;
+ //^ *mut S
+}
+ "#,
+ );
+}
+
+#[test]
fn infer_missing_type() {
check_types(
r#"
@@ -3194,6 +3497,22 @@ fn func() {
);
}
+#[test]
+fn pointee_trait() {
+ check_types(
+ r#"
+//- minicore: pointee
+use core::ptr::Pointee;
+fn func() {
+ let x: <u8 as Pointee>::Metadata;
+ //^ ()
+ let x: <[u8] as Pointee>::Metadata;
+ //^ usize
+}
+ "#,
+ );
+}
+
// FIXME
#[test]
fn castable_to() {
@@ -3258,35 +3577,60 @@ fn f<T>(t: Ark<T>) {
);
}
-// FIXME
#[test]
-fn castable_to2() {
- check_infer(
+fn const_dependent_on_local() {
+ check_types(
r#"
-fn func() {
- let x = &0u32 as *const _;
+fn main() {
+ let s = 5;
+ let t = [2; s];
+ //^ [i32; _]
}
"#,
- expect![[r#"
- 10..44 '{ ...t _; }': ()
- 20..21 'x': *const {unknown}
- 24..29 '&0u32': &u32
- 24..41 '&0u32 ...onst _': *const {unknown}
- 25..29 '0u32': u32
- "#]],
);
}
#[test]
fn issue_14275() {
- // FIXME: evaluate const generic
check_types(
r#"
struct Foo<const T: bool>;
fn main() {
const B: bool = false;
let foo = Foo::<B>;
- //^^^ Foo<_>
+ //^^^ Foo<false>
+}
+"#,
+ );
+ check_types(
+ r#"
+struct Foo<const T: bool>;
+impl Foo<true> {
+ fn foo(self) -> u8 { 2 }
+}
+impl Foo<false> {
+ fn foo(self) -> u16 { 5 }
+}
+fn main() {
+ const B: bool = false;
+ let foo: Foo<B> = Foo;
+ let x = foo.foo();
+ //^ u16
+}
+"#,
+ );
+}
+
+#[test]
+fn cstring_literals() {
+ check_types(
+ r#"
+#[lang = "CStr"]
+pub struct CStr;
+
+fn main() {
+ c"ello";
+ //^^^^^^^ &CStr
}
"#,
);
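
The `capture_kinds_*` tests above check that the inferred closure trait follows how captures are used; the rule is observable in plain Rust through trait bounds, as in this sketch (the `take_*` helper names are made up):

fn take_fn<F: Fn() -> i32>(f: F) -> i32 { f() }
fn take_fn_mut<F: FnMut()>(mut f: F) { f() }
fn take_fn_once<F: FnOnce() -> String>(f: F) -> String { f() }

fn main() {
    let x = 1;
    // Only reads a Copy capture: the closure is Fn.
    assert_eq!(take_fn(|| x + 1), 2);

    let mut y = 0;
    // Mutates a capture: FnMut, not Fn.
    take_fn_mut(|| y += 1);
    assert_eq!(y, 1);

    let s = String::from("hi");
    // Moves a non-Copy capture out: FnOnce only.
    assert_eq!(take_fn_once(move || s), "hi");
}
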
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs
index da76d7fd8..97ae732a9 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs
@@ -90,7 +90,7 @@ fn infer_async_closure() {
async fn test() {
let f = async move |x: i32| x + 42;
f;
-// ^ |i32| -> impl Future<Output = i32>
+// ^ impl Fn(i32) -> impl Future<Output = i32>
let a = f(4);
a;
// ^ impl Future<Output = i32>
@@ -99,7 +99,7 @@ async fn test() {
// ^ i32
let f = async move || 42;
f;
-// ^ || -> impl Future<Output = i32>
+// ^ impl Fn() -> impl Future<Output = i32>
let a = f();
a;
// ^ impl Future<Output = i32>
@@ -116,7 +116,7 @@ async fn test() {
};
let _: Option<u64> = c().await;
c;
-// ^ || -> impl Future<Output = Option<u64>>
+// ^ impl Fn() -> impl Future<Output = Option<u64>>
}
"#,
);
@@ -206,19 +206,27 @@ fn test() {
fn infer_try_trait() {
check_types(
r#"
-//- minicore: try, result
+//- minicore: try, result, from
fn test() {
let r: Result<i32, u64> = Result::Ok(1);
let v = r?;
v;
} //^ i32
-
-impl<O, E> core::ops::Try for Result<O, E> {
- type Output = O;
- type Error = Result<core::convert::Infallible, E>;
+"#,
+ );
}
-impl<T, E, F: From<E>> core::ops::FromResidual<Result<core::convert::Infallible, E>> for Result<T, F> {}
+#[test]
+fn infer_try_block() {
+ // FIXME: We should test more cases, but it currently doesn't work, since
+ // our labeled block type inference is broken.
+ check_types(
+ r#"
+//- minicore: try, option
+fn test() {
+ let x: Option<_> = try { Some(2)?; };
+ //^ Option<()>
+}
"#,
);
}
@@ -542,7 +550,7 @@ fn test() -> u64 {
53..54 'a': S
57..58 'S': S(fn(u32) -> u64) -> S
57..74 'S(|i| ...s u64)': S
- 59..73 '|i| 2*i as u64': |u32| -> u64
+ 59..73 '|i| 2*i as u64': impl Fn(u32) -> u64
60..61 'i': u32
63..64 '2': u64
63..73 '2*i as u64': u64
@@ -1325,9 +1333,9 @@ fn foo<const C: u8, T>() -> (impl FnOnce(&str, T), impl Trait<u8>) {
}
"#,
expect![[r#"
- 134..165 '{ ...(C)) }': (|&str, T| -> (), Bar<u8>)
- 140..163 '(|inpu...ar(C))': (|&str, T| -> (), Bar<u8>)
- 141..154 '|input, t| {}': |&str, T| -> ()
+ 134..165 '{ ...(C)) }': (impl Fn(&str, T), Bar<u8>)
+ 140..163 '(|inpu...ar(C))': (impl Fn(&str, T), Bar<u8>)
+ 141..154 '|input, t| {}': impl Fn(&str, T)
142..147 'input': &str
149..150 't': T
152..154 '{}': ()
@@ -1498,8 +1506,8 @@ fn main() {
71..105 '{ ...()); }': ()
77..78 'f': fn f(&dyn Fn(S))
77..102 'f(&|nu...foo())': ()
- 79..101 '&|numb....foo()': &|S| -> ()
- 80..101 '|numbe....foo()': |S| -> ()
+ 79..101 '&|numb....foo()': &impl Fn(S)
+ 80..101 '|numbe....foo()': impl Fn(S)
81..87 'number': S
89..95 'number': S
89..101 'number.foo()': ()
@@ -1904,13 +1912,13 @@ fn test() {
131..132 'f': F
151..153 '{}': Lazy<T, F>
251..497 '{ ...o(); }': ()
- 261..266 'lazy1': Lazy<Foo, || -> Foo>
- 283..292 'Lazy::new': fn new<Foo, || -> Foo>(|| -> Foo) -> Lazy<Foo, || -> Foo>
- 283..300 'Lazy::...| Foo)': Lazy<Foo, || -> Foo>
- 293..299 '|| Foo': || -> Foo
+ 261..266 'lazy1': Lazy<Foo, impl Fn() -> Foo>
+ 283..292 'Lazy::new': fn new<Foo, impl Fn() -> Foo>(impl Fn() -> Foo) -> Lazy<Foo, impl Fn() -> Foo>
+ 283..300 'Lazy::...| Foo)': Lazy<Foo, impl Fn() -> Foo>
+ 293..299 '|| Foo': impl Fn() -> Foo
296..299 'Foo': Foo
310..312 'r1': usize
- 315..320 'lazy1': Lazy<Foo, || -> Foo>
+ 315..320 'lazy1': Lazy<Foo, impl Fn() -> Foo>
315..326 'lazy1.foo()': usize
368..383 'make_foo_fn_ptr': fn() -> Foo
399..410 'make_foo_fn': fn make_foo_fn() -> Foo
@@ -1955,20 +1963,20 @@ fn test() {
163..167 '1u32': u32
174..175 'x': Option<u32>
174..190 'x.map(...v + 1)': Option<u32>
- 180..189 '|v| v + 1': |u32| -> u32
+ 180..189 '|v| v + 1': impl Fn(u32) -> u32
181..182 'v': u32
184..185 'v': u32
184..189 'v + 1': u32
188..189 '1': u32
196..197 'x': Option<u32>
196..212 'x.map(... 1u64)': Option<u64>
- 202..211 '|_v| 1u64': |u32| -> u64
+ 202..211 '|_v| 1u64': impl Fn(u32) -> u64
203..205 '_v': u32
207..211 '1u64': u64
222..223 'y': Option<i64>
239..240 'x': Option<u32>
239..252 'x.map(|_v| 1)': Option<i64>
- 245..251 '|_v| 1': |u32| -> i64
+ 245..251 '|_v| 1': impl Fn(u32) -> i64
246..248 '_v': u32
250..251 '1': i64
"#]],
@@ -1997,11 +2005,11 @@ fn test<F: FnOnce(u32) -> u64>(f: F) {
//^^^^ u64
let g = |v| v + 1;
//^^^^^ u64
- //^^^^^^^^^ |u64| -> u64
+ //^^^^^^^^^ impl Fn(u64) -> u64
g(1u64);
//^^^^^^^ u64
let h = |v| 1u128 + v;
- //^^^^^^^^^^^^^ |u128| -> u128
+ //^^^^^^^^^^^^^ impl Fn(u128) -> u128
}"#,
);
}
@@ -2054,17 +2062,17 @@ fn test() {
312..314 '{}': ()
330..489 '{ ... S); }': ()
340..342 'x1': u64
- 345..349 'foo1': fn foo1<S, u64, |S| -> u64>(S, |S| -> u64) -> u64
+ 345..349 'foo1': fn foo1<S, u64, impl Fn(S) -> u64>(S, impl Fn(S) -> u64) -> u64
345..368 'foo1(S...hod())': u64
350..351 'S': S
- 353..367 '|s| s.method()': |S| -> u64
+ 353..367 '|s| s.method()': impl Fn(S) -> u64
354..355 's': S
357..358 's': S
357..367 's.method()': u64
378..380 'x2': u64
- 383..387 'foo2': fn foo2<S, u64, |S| -> u64>(|S| -> u64, S) -> u64
+ 383..387 'foo2': fn foo2<S, u64, impl Fn(S) -> u64>(impl Fn(S) -> u64, S) -> u64
383..406 'foo2(|...(), S)': u64
- 388..402 '|s| s.method()': |S| -> u64
+ 388..402 '|s| s.method()': impl Fn(S) -> u64
389..390 's': S
392..393 's': S
392..402 's.method()': u64
@@ -2073,14 +2081,14 @@ fn test() {
421..422 'S': S
421..446 'S.foo1...hod())': u64
428..429 'S': S
- 431..445 '|s| s.method()': |S| -> u64
+ 431..445 '|s| s.method()': impl Fn(S) -> u64
432..433 's': S
435..436 's': S
435..445 's.method()': u64
456..458 'x4': u64
461..462 'S': S
461..486 'S.foo2...(), S)': u64
- 468..482 '|s| s.method()': |S| -> u64
+ 468..482 '|s| s.method()': impl Fn(S) -> u64
469..470 's': S
472..473 's': S
472..482 's.method()': u64
@@ -2554,9 +2562,9 @@ fn main() {
72..74 '_v': F
117..120 '{ }': ()
132..163 '{ ... }); }': ()
- 138..148 'f::<(), _>': fn f<(), |&()| -> ()>(|&()| -> ())
+ 138..148 'f::<(), _>': fn f<(), impl Fn(&())>(impl Fn(&()))
138..160 'f::<()... z; })': ()
- 149..159 '|z| { z; }': |&()| -> ()
+ 149..159 '|z| { z; }': impl Fn(&())
150..151 'z': &()
153..159 '{ z; }': ()
155..156 'z': &()
@@ -2713,9 +2721,9 @@ fn main() {
983..998 'Vec::<i32>::new': fn new<i32>() -> Vec<i32>
983..1000 'Vec::<...:new()': Vec<i32>
983..1012 'Vec::<...iter()': IntoIter<i32>
- 983..1075 'Vec::<...one })': FilterMap<IntoIter<i32>, |i32| -> Option<u32>>
+ 983..1075 'Vec::<...one })': FilterMap<IntoIter<i32>, impl Fn(i32) -> Option<u32>>
983..1101 'Vec::<... y; })': ()
- 1029..1074 '|x| if...None }': |i32| -> Option<u32>
+ 1029..1074 '|x| if...None }': impl Fn(i32) -> Option<u32>
1030..1031 'x': i32
1033..1074 'if x >...None }': Option<u32>
1036..1037 'x': i32
@@ -2728,7 +2736,7 @@ fn main() {
1049..1057 'x as u32': u32
1066..1074 '{ None }': Option<u32>
1068..1072 'None': Option<u32>
- 1090..1100 '|y| { y; }': |u32| -> ()
+ 1090..1100 '|y| { y; }': impl Fn(u32)
1091..1092 'y': u32
1094..1100 '{ y; }': ()
1096..1097 'y': u32
@@ -2971,13 +2979,13 @@ fn foo() {
52..126 '{ ...)(s) }': ()
62..63 's': Option<i32>
66..78 'Option::None': Option<i32>
- 88..89 'f': |Option<i32>| -> ()
- 92..111 '|x: Op...2>| {}': |Option<i32>| -> ()
+ 88..89 'f': impl Fn(Option<i32>)
+ 92..111 '|x: Op...2>| {}': impl Fn(Option<i32>)
93..94 'x': Option<i32>
109..111 '{}': ()
117..124 '(&f)(s)': ()
- 118..120 '&f': &|Option<i32>| -> ()
- 119..120 'f': |Option<i32>| -> ()
+ 118..120 '&f': &impl Fn(Option<i32>)
+ 119..120 'f': impl Fn(Option<i32>)
122..123 's': Option<i32>
"#]],
);
@@ -3043,7 +3051,7 @@ impl<T: ?Sized> core::ops::Deref for Box<T> {
type Target = T;
fn deref(&self) -> &T {
- &self.inner
+ unsafe { &*self.inner }
}
}
@@ -3054,23 +3062,25 @@ fn foo() {
}"#,
expect![[r#"
154..158 'self': &Box<T>
- 166..193 '{ ... }': &T
- 176..187 '&self.inner': &*mut T
- 177..181 'self': &Box<T>
- 177..187 'self.inner': *mut T
- 206..296 '{ ...&s); }': ()
- 216..217 's': Option<i32>
- 220..224 'None': Option<i32>
- 234..235 'f': Box<dyn FnOnce(&Option<i32>)>
- 269..282 'box (|ps| {})': Box<|&Option<i32>| -> ()>
- 274..281 '|ps| {}': |&Option<i32>| -> ()
- 275..277 'ps': &Option<i32>
- 279..281 '{}': ()
- 288..289 'f': Box<dyn FnOnce(&Option<i32>)>
- 288..293 'f(&s)': ()
- 290..292 '&s': &Option<i32>
- 291..292 's': Option<i32>
- 269..282: expected Box<dyn FnOnce(&Option<i32>)>, got Box<|&Option<i32>| -> ()>
+ 166..205 '{ ... }': &T
+ 176..199 'unsafe...nner }': &T
+ 185..197 '&*self.inner': &T
+ 186..197 '*self.inner': T
+ 187..191 'self': &Box<T>
+ 187..197 'self.inner': *mut T
+ 218..308 '{ ...&s); }': ()
+ 228..229 's': Option<i32>
+ 232..236 'None': Option<i32>
+ 246..247 'f': Box<dyn FnOnce(&Option<i32>)>
+ 281..294 'box (|ps| {})': Box<impl Fn(&Option<i32>)>
+ 286..293 '|ps| {}': impl Fn(&Option<i32>)
+ 287..289 'ps': &Option<i32>
+ 291..293 '{}': ()
+ 300..301 'f': Box<dyn FnOnce(&Option<i32>)>
+ 300..305 'f(&s)': ()
+ 302..304 '&s': &Option<i32>
+ 303..304 's': Option<i32>
+ 281..294: expected Box<dyn FnOnce(&Option<i32>)>, got Box<impl Fn(&Option<i32>)>
"#]],
);
}
@@ -3709,7 +3719,6 @@ async fn get_accounts() -> Result<u32, ()> {
#[test]
fn local_impl_1() {
- check!(block_local_impls);
check_types(
r#"
trait Trait<T> {
@@ -3731,7 +3740,6 @@ fn test() {
#[test]
fn local_impl_2() {
- check!(block_local_impls);
check_types(
r#"
struct S;
@@ -3753,7 +3761,6 @@ fn test() {
#[test]
fn local_impl_3() {
- check!(block_local_impls);
check_types(
r#"
trait Trait<T> {
@@ -3778,6 +3785,62 @@ fn test() {
}
#[test]
+fn foreign_trait_with_local_trait_impl() {
+ check!(block_local_impls);
+ check(
+ r#"
+mod module {
+ pub trait T {
+ const C: usize;
+ fn f(&self);
+ }
+}
+
+fn f() {
+ use module::T;
+ impl T for usize {
+ const C: usize = 0;
+ fn f(&self) {}
+ }
+ 0usize.f();
+ //^^^^^^^^^^ type: ()
+ usize::C;
+ //^^^^^^^^type: usize
+}
+"#,
+ );
+}
+
+#[test]
+fn regression_14443_trait_solve() {
+ check_no_mismatches(
+ r#"
+trait T {
+ fn f(&self) {}
+}
+
+
+fn main() {
+ struct A;
+ impl T for A {}
+
+ let a = A;
+
+ let b = {
+ struct B;
+ impl T for B {}
+
+ B
+ };
+
+ a.f();
+ b.f();
+}
+"#,
+ )
+}
+
+#[test]
fn associated_type_sized_bounds() {
check_infer(
r#"
@@ -4149,3 +4212,201 @@ fn test() {
"#,
);
}
+
+#[test]
+fn associated_type_in_struct_expr_path() {
+ // FIXME: All annotations should be resolvable.
+ // For lines marked as unstable, see rust-lang/rust#86935.
+ // FIXME: Remove the comments once stabilized.
+ check_types(
+ r#"
+trait Trait {
+ type Assoc;
+ fn f();
+}
+
+struct S { x: u32 }
+
+impl Trait for () {
+ type Assoc = S;
+
+ fn f() {
+ let x = 42;
+ let a = Self::Assoc { x };
+ // ^ S
+ let a = <Self>::Assoc { x }; // unstable
+ // ^ {unknown}
+
+ // should be `Copy` but we don't track ownership anyway.
+ let value = S { x };
+ if let Self::Assoc { x } = value {}
+ // ^ u32
+ if let <Self>::Assoc { x } = value {} // unstable
+ // ^ {unknown}
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn associated_type_in_struct_expr_path_enum() {
+ // FIXME: All annotations should be resolvable.
+ // For lines marked as unstable, see rust-lang/rust#86935.
+ // FIXME: Remove the comments once stabilized.
+ check_types(
+ r#"
+trait Trait {
+ type Assoc;
+ fn f();
+}
+
+enum E {
+ Unit,
+ Struct { x: u32 },
+}
+
+impl Trait for () {
+ type Assoc = E;
+
+ fn f() {
+ let a = Self::Assoc::Unit;
+ // ^ E
+ let a = <Self>::Assoc::Unit;
+ // ^ E
+ let a = <Self::Assoc>::Unit;
+ // ^ E
+ let a = <<Self>::Assoc>::Unit;
+ // ^ E
+
+ // should be `Copy` but we don't track ownership anyway.
+ let value = E::Unit;
+ if let Self::Assoc::Unit = value {}
+ // ^^^^^^^^^^^^^^^^^ E
+ if let <Self>::Assoc::Unit = value {}
+ // ^^^^^^^^^^^^^^^^^^^ E
+ if let <Self::Assoc>::Unit = value {}
+ // ^^^^^^^^^^^^^^^^^^^ E
+ if let <<Self>::Assoc>::Unit = value {}
+ // ^^^^^^^^^^^^^^^^^^^^^ E
+
+ let x = 42;
+ let a = Self::Assoc::Struct { x };
+ // ^ E
+ let a = <Self>::Assoc::Struct { x }; // unstable
+ // ^ {unknown}
+ let a = <Self::Assoc>::Struct { x }; // unstable
+ // ^ {unknown}
+ let a = <<Self>::Assoc>::Struct { x }; // unstable
+ // ^ {unknown}
+
+ // should be `Copy` but we don't track ownership anyway.
+ let value = E::Struct { x: 42 };
+ if let Self::Assoc::Struct { x } = value {}
+ // ^ u32
+ if let <Self>::Assoc::Struct { x } = value {} // unstable
+ // ^ {unknown}
+ if let <Self::Assoc>::Struct { x } = value {} // unstable
+ // ^ {unknown}
+ if let <<Self>::Assoc>::Struct { x } = value {} // unstable
+ // ^ {unknown}
+ }
+}
+ "#,
+ );
+}
+
+#[test]
+fn derive_macro_bounds() {
+ check_types(
+ r#"
+ //- minicore: clone, derive
+ #[derive(Clone)]
+ struct Copy;
+ struct NotCopy;
+ #[derive(Clone)]
+ struct Generic<T>(T);
+ trait Tr {
+ type Assoc;
+ }
+ impl Tr for Copy {
+ type Assoc = NotCopy;
+ }
+ #[derive(Clone)]
+ struct AssocGeneric<T: Tr>(T::Assoc);
+
+ // Currently rustc does not accept this.
+ // #[derive(Clone)]
+ // struct AssocGeneric2<T: Tr>(<T as Tr>::Assoc);
+
+ #[derive(Clone)]
+ struct AssocGeneric3<T: Tr>(Generic<T::Assoc>);
+
+ #[derive(Clone)]
+ struct Vec<T>();
+
+ #[derive(Clone)]
+ struct R1(Vec<R2>);
+ #[derive(Clone)]
+ struct R2(R1);
+
+ fn f() {
+ let x = (&Copy).clone();
+ //^ Copy
+ let x = (&NotCopy).clone();
+ //^ &NotCopy
+ let x = (&Generic(Copy)).clone();
+ //^ Generic<Copy>
+ let x = (&Generic(NotCopy)).clone();
+ //^ &Generic<NotCopy>
+ let x: &AssocGeneric<Copy> = &AssocGeneric(NotCopy);
+ let x = x.clone();
+ //^ &AssocGeneric<Copy>
+ // let x: &AssocGeneric2<Copy> = &AssocGeneric2(NotCopy);
+ // let x = x.clone();
+ let x: &AssocGeneric3<Copy> = &AssocGeneric3(Generic(NotCopy));
+ let x = x.clone();
+ //^ &AssocGeneric3<Copy>
+ let x = (&R1(Vec())).clone();
+ //^ R1
+ let x = (&R2(R1(Vec()))).clone();
+ //^ R2
+ }
+ "#,
+ );
+}
+
+#[test]
+fn trait_obligations_should_be_registered_during_path_inference() {
+ check_types(
+ r#"
+//- minicore: fn, from
+struct S<T>(T);
+fn map<T, U, F: FnOnce(T) -> S<U>>(_: T, _: F) -> U { loop {} }
+
+fn test(v: S<i32>) {
+ let res = map(v, Into::into);
+ //^^^ i32
+}
+"#,
+ );
+}
+
+#[test]
+fn fn_obligation_should_be_registered_during_path_inference() {
+ check_types(
+ r#"
+//- minicore: fn, from
+struct S<T>(T);
+impl<T> S<T> {
+ fn foo<U: Into<S<T>>>(_: U) -> Self { loop {} }
+}
+fn map<T, U, F: FnOnce(T) -> U>(_: T, _: F) -> U { loop {} }
+
+fn test(v: S<i32>) {
+ let res = map(v, S::foo);
+ //^^^ S<i32>
+}
+"#,
+ );
+}
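
The `trait_obligations_should_be_registered_during_path_inference` fixture has a real-Rust analogue: passing `Into::into` as a bare path makes the `From` impl drive inference. A self-contained sketch, with `S` and `map` mirroring the fixture:

struct S<T>(T);

impl<T> From<T> for S<T> {
    fn from(t: T) -> Self { S(t) }
}

fn map<T, U, F: FnOnce(T) -> S<U>>(v: T, f: F) -> U {
    let S(u) = f(v);
    u
}

fn main() {
    // The only applicable `From` impl pins U = i32, the same resolution
    // the test above asserts.
    let res: i32 = map(3i32, Into::into);
    assert_eq!(res, 3);
}
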
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tls.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tls.rs
index b7e6ee674..83814ed0e 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/tls.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/tls.rs
@@ -24,7 +24,8 @@ impl DebugContext<'_> {
AdtId::UnionId(it) => self.0.union_data(it).name.clone(),
AdtId::EnumId(it) => self.0.enum_data(it).name.clone(),
};
- name.fmt(f)
+ name.display(self.0.upcast()).fmt(f)?;
+ Ok(())
}
pub(crate) fn debug_trait_id(
@@ -34,7 +35,8 @@ impl DebugContext<'_> {
) -> Result<(), fmt::Error> {
let trait_: hir_def::TraitId = from_chalk_trait_id(id);
let trait_data = self.0.trait_data(trait_);
- trait_data.name.fmt(f)
+ trait_data.name.display(self.0.upcast()).fmt(f)?;
+ Ok(())
}
pub(crate) fn debug_assoc_type_id(
@@ -49,7 +51,13 @@ impl DebugContext<'_> {
_ => panic!("associated type not in trait"),
};
let trait_data = self.0.trait_data(trait_);
- write!(fmt, "{}::{}", trait_data.name, type_alias_data.name)
+ write!(
+ fmt,
+ "{}::{}",
+ trait_data.name.display(self.0.upcast()),
+ type_alias_data.name.display(self.0.upcast())
+ )?;
+ Ok(())
}
pub(crate) fn debug_projection_ty(
@@ -67,7 +75,7 @@ impl DebugContext<'_> {
let trait_ref = projection_ty.trait_ref(self.0);
let trait_params = trait_ref.substitution.as_slice(Interner);
let self_ty = trait_ref.self_type_parameter(Interner);
- write!(fmt, "<{self_ty:?} as {trait_name}")?;
+ write!(fmt, "<{self_ty:?} as {}", trait_name.display(self.0.upcast()))?;
if trait_params.len() > 1 {
write!(
fmt,
@@ -75,7 +83,7 @@ impl DebugContext<'_> {
trait_params[1..].iter().format_with(", ", |x, f| f(&format_args!("{x:?}"))),
)?;
}
- write!(fmt, ">::{}", type_alias_data.name)?;
+ write!(fmt, ">::{}", type_alias_data.name.display(self.0.upcast()))?;
let proj_params_count = projection_ty.substitution.len(Interner) - trait_params.len();
let proj_params = &projection_ty.substitution.as_slice(Interner)[..proj_params_count];
@@ -105,9 +113,9 @@ impl DebugContext<'_> {
}
};
match def {
- CallableDefId::FunctionId(_) => write!(fmt, "{{fn {name}}}"),
+ CallableDefId::FunctionId(_) => write!(fmt, "{{fn {}}}", name.display(self.0.upcast())),
CallableDefId::StructId(_) | CallableDefId::EnumVariantId(_) => {
- write!(fmt, "{{ctor {name}}}")
+ write!(fmt, "{{ctor {}}}", name.display(self.0.upcast()))
}
}
}
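
The `name.fmt(f)` to `name.display(...).fmt(f)` rewrites above follow the display-wrapper pattern: a value that cannot render itself without extra context returns a wrapper that carries the context and implements `Display`. A generic sketch with hypothetical stand-in types, not rust-analyzer's actual `Name` API:

use std::fmt;

// Stand-ins: an interned name that cannot render itself without a lookup
// table, and a wrapper that carries the table along.
struct Name(u32);

struct NameDisplay<'a> {
    name: &'a Name,
    table: &'a [&'a str],
}

impl Name {
    fn display<'a>(&'a self, table: &'a [&'a str]) -> NameDisplay<'a> {
        NameDisplay { name: self, table }
    }
}

impl fmt::Display for NameDisplay<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.table[self.name.0 as usize])
    }
}

fn main() {
    let table = ["Foo", "Bar"];
    let name = Name(1);
    // `name` alone has no Display impl; rendering needs the context,
    // mirroring `name.display(db)` in the diff above.
    assert_eq!(format!("{}", name.display(&table)), "Bar");
}
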
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs b/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs
index 3ab85c68f..f40b7db3a 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs
@@ -1,22 +1,24 @@
//! Trait solving using Chalk.
-use std::{env::var, sync::Arc};
+use std::env::var;
-use chalk_ir::GoalData;
+use chalk_ir::{fold::TypeFoldable, DebruijnIndex, GoalData};
use chalk_recursive::Cache;
-use chalk_solve::{logging_db::LoggingRustIrDatabase, Solver};
+use chalk_solve::{logging_db::LoggingRustIrDatabase, rust_ir, Solver};
use base_db::CrateId;
use hir_def::{
lang_item::{LangItem, LangItemTarget},
- TraitId,
+ BlockId, TraitId,
};
+use hir_expand::name::{name, Name};
use stdx::panic_context;
+use triomphe::Arc;
use crate::{
- db::HirDatabase, infer::unify::InferenceTable, AliasEq, AliasTy, Canonical, DomainGoal, Goal,
- Guidance, InEnvironment, Interner, ProjectionTy, ProjectionTyExt, Solution, TraitRefExt, Ty,
- TyKind, WhereClause,
+ db::HirDatabase, infer::unify::InferenceTable, utils::UnevaluatedConstEvaluatorFolder, AliasEq,
+ AliasTy, Canonical, DomainGoal, Goal, Guidance, InEnvironment, Interner, ProjectionTy,
+ ProjectionTyExt, Solution, TraitRefExt, Ty, TyKind, WhereClause,
};
/// This controls how much 'time' we give the Chalk solver before giving up.
@@ -26,6 +28,7 @@ const CHALK_SOLVER_FUEL: i32 = 1000;
pub(crate) struct ChalkContext<'a> {
pub(crate) db: &'a dyn HirDatabase,
pub(crate) krate: CrateId,
+ pub(crate) block: Option<BlockId>,
}
fn create_chalk_solver() -> chalk_recursive::RecursiveSolver<Interner> {
@@ -43,6 +46,7 @@ fn create_chalk_solver() -> chalk_recursive::RecursiveSolver<Interner> {
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct TraitEnvironment {
pub krate: CrateId,
+ pub block: Option<BlockId>,
// FIXME make this a BTreeMap
pub(crate) traits_from_clauses: Vec<(Ty, TraitId)>,
pub env: chalk_ir::Environment<Interner>,
@@ -52,6 +56,7 @@ impl TraitEnvironment {
pub fn empty(krate: CrateId) -> Self {
TraitEnvironment {
krate,
+ block: None,
traits_from_clauses: Vec::new(),
env: chalk_ir::Environment::new(Interner),
}
@@ -78,11 +83,12 @@ pub(crate) fn normalize_projection_query(
pub(crate) fn trait_solve_query(
db: &dyn HirDatabase,
krate: CrateId,
+ block: Option<BlockId>,
goal: Canonical<InEnvironment<Goal>>,
) -> Option<Solution> {
let _p = profile::span("trait_solve_query").detail(|| match &goal.value.goal.data(Interner) {
GoalData::DomainGoal(DomainGoal::Holds(WhereClause::Implemented(it))) => {
- db.trait_data(it.hir_trait_id()).name.to_string()
+ db.trait_data(it.hir_trait_id()).name.display(db.upcast()).to_string()
}
GoalData::DomainGoal(DomainGoal::Holds(WhereClause::AliasEq(_))) => "alias_eq".to_string(),
_ => "??".to_string(),
@@ -100,18 +106,25 @@ pub(crate) fn trait_solve_query(
}
}
+ // Chalk sees `UnevaluatedConst` as a unique concrete value, but we see it as an alias for another const. So
+ // we should get rid of it when talking to chalk.
+ let goal = goal
+ .try_fold_with(&mut UnevaluatedConstEvaluatorFolder { db }, DebruijnIndex::INNERMOST)
+ .unwrap();
+
// We currently don't deal with universes (I think / hope they're not yet
// relevant for our use cases?)
let u_canonical = chalk_ir::UCanonical { canonical: goal, universes: 1 };
- solve(db, krate, &u_canonical)
+ solve(db, krate, block, &u_canonical)
}
fn solve(
db: &dyn HirDatabase,
krate: CrateId,
+ block: Option<BlockId>,
goal: &chalk_ir::UCanonical<chalk_ir::InEnvironment<chalk_ir::Goal<Interner>>>,
) -> Option<chalk_solve::Solution<Interner>> {
- let context = ChalkContext { db, krate };
+ let context = ChalkContext { db, krate, block };
tracing::debug!("solve goal: {:?}", goal);
let mut solver = create_chalk_solver();
@@ -171,8 +184,10 @@ fn is_chalk_print() -> bool {
std::env::var("CHALK_PRINT").is_ok()
}
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum FnTrait {
+ // Warning: Order is important. If something implements `x` it should also implement
+ // `y` if `y <= x`.
FnOnce,
FnMut,
Fn,
@@ -187,7 +202,23 @@ impl FnTrait {
}
}
- pub fn get_id(&self, db: &dyn HirDatabase, krate: CrateId) -> Option<TraitId> {
+ pub const fn to_chalk_ir(self) -> rust_ir::ClosureKind {
+ match self {
+ FnTrait::FnOnce => rust_ir::ClosureKind::FnOnce,
+ FnTrait::FnMut => rust_ir::ClosureKind::FnMut,
+ FnTrait::Fn => rust_ir::ClosureKind::Fn,
+ }
+ }
+
+ pub fn method_name(self) -> Name {
+ match self {
+ FnTrait::FnOnce => name!(call_once),
+ FnTrait::FnMut => name!(call_mut),
+ FnTrait::Fn => name!(call),
+ }
+ }
+
+ pub fn get_id(self, db: &dyn HirDatabase, krate: CrateId) -> Option<TraitId> {
let target = db.lang_item(krate, self.lang_item())?;
match target {
LangItemTarget::Trait(t) => Some(t),
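
On the new `PartialOrd, Ord` derive for `FnTrait`: derived ordering follows declaration order, which is what the added warning comment relies on. A sketch of the invariant in isolation (the `implied_traits` helper is made up):

// Declaration order encodes "everything callable is at least FnOnce":
// FnOnce < FnMut < Fn under the derived Ord.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum FnTrait {
    FnOnce,
    FnMut,
    Fn,
}

fn implied_traits(k: FnTrait) -> Vec<FnTrait> {
    // If something implements `x`, it also implements every `y <= x`.
    [FnTrait::FnOnce, FnTrait::FnMut, FnTrait::Fn]
        .into_iter()
        .filter(|y| *y <= k)
        .collect()
}

fn main() {
    assert_eq!(implied_traits(FnTrait::Fn).len(), 3);
    assert_eq!(implied_traits(FnTrait::FnMut), vec![FnTrait::FnOnce, FnTrait::FnMut]);
}
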
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs b/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs
index 34d957e26..363658063 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs
@@ -1,10 +1,14 @@
//! Helper functions for working with def, which don't need to be a separate
//! query, but can't be computed directly from `*Data` (ie, which need a `db`).
-use std::iter;
+use std::{hash::Hash, iter};
use base_db::CrateId;
-use chalk_ir::{cast::Cast, fold::Shift, BoundVar, DebruijnIndex};
+use chalk_ir::{
+ cast::Cast,
+ fold::{FallibleTypeFolder, Shift},
+ BoundVar, DebruijnIndex,
+};
use either::Either;
use hir_def::{
db::DefDatabase,
@@ -15,16 +19,23 @@ use hir_def::{
lang_item::LangItem,
resolver::{HasResolver, TypeNs},
type_ref::{TraitBoundModifier, TypeRef},
- ConstParamId, FunctionId, GenericDefId, ItemContainerId, Lookup, TraitId, TypeAliasId,
- TypeOrConstParamId, TypeParamId,
+ ConstParamId, EnumId, EnumVariantId, FunctionId, GenericDefId, ItemContainerId,
+ LocalEnumVariantId, Lookup, OpaqueInternableThing, TraitId, TypeAliasId, TypeOrConstParamId,
+ TypeParamId,
};
use hir_expand::name::Name;
use intern::Interned;
use rustc_hash::FxHashSet;
use smallvec::{smallvec, SmallVec};
+use stdx::never;
use crate::{
- db::HirDatabase, ChalkTraitId, Interner, Substitution, TraitRef, TraitRefExt, WhereClause,
+ consteval::unknown_const,
+ db::HirDatabase,
+ layout::{Layout, TagEncoding},
+ mir::pad16,
+ ChalkTraitId, Const, ConstScalar, GenericArg, Interner, Substitution, TraitRef, TraitRefExt,
+ Ty, WhereClause,
};
pub(crate) fn fn_traits(
@@ -69,9 +80,7 @@ pub(super) fn all_super_trait_refs<T>(
cb: impl FnMut(TraitRef) -> Option<T>,
) -> Option<T> {
let seen = iter::once(trait_ref.trait_id).collect();
- let mut stack = Vec::new();
- stack.push(trait_ref);
- SuperTraits { db, seen, stack }.find_map(cb)
+ SuperTraits { db, seen, stack: vec![trait_ref] }.find_map(cb)
}
struct SuperTraits<'a> {
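
The `seen` set plus stack drive a depth-first walk over the supertrait graph that terminates even on cyclic inputs. A standalone sketch of the same pattern (editorial; trait ids as plain `u32`, `direct` standing in for `direct_super_traits`):

use std::collections::HashSet;

// Depth-first walk over a supertrait graph; `seen` guarantees each
// trait is yielded once even if the input graph is cyclic.
fn all_super_traits(direct: impl Fn(u32) -> Vec<u32>, root: u32) -> Vec<u32> {
    let mut seen = HashSet::from([root]);
    let mut stack = vec![root];
    let mut out = Vec::new();
    while let Some(t) = stack.pop() {
        out.push(t);
        for s in direct(t) {
            if seen.insert(s) {
                stack.push(s);
            }
        }
    }
    out
}

fn main() {
    // 0: A, 1: B, 2: C, with `A: B + C` and `B: C`; C is reachable twice
    // but visited once.
    let direct = |t: u32| match t {
        0 => vec![1, 2],
        1 => vec![2],
        _ => vec![],
    };
    assert_eq!(all_super_traits(direct, 0).len(), 3);
}
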
@@ -130,7 +139,7 @@ fn direct_super_traits(db: &dyn DefDatabase, trait_: TraitId, cb: impl FnMut(Tra
WherePredicate::Lifetime { .. } => None,
})
.filter(|(_, bound_modifier)| matches!(bound_modifier, TraitBoundModifier::None))
- .filter_map(|(path, _)| match resolver.resolve_path_in_type_ns_fully(db, path.mod_path()) {
+ .filter_map(|(path, _)| match resolver.resolve_path_in_type_ns_fully(db, path) {
Some(TypeNs::TraitId(t)) => Some(t),
_ => None,
})
@@ -176,6 +185,37 @@ pub(crate) fn generics(db: &dyn DefDatabase, def: GenericDefId) -> Generics {
Generics { def, params: db.generic_params(def), parent_generics }
}
+/// A closure's substitution. It differs a bit from the rustc equivalent. Currently it stores:
+/// - 0: the function signature, encoded as a function pointer type
+/// - 1..n: the generics of the parent
+///
+/// and it doesn't store the closure's types and fields.
+///
+/// Code should not assume this ordering; always use the methods on this struct
+/// for retrieval, and `TyBuilder::substs_for_closure` for construction.
+pub(crate) struct ClosureSubst<'a>(pub(crate) &'a Substitution);
+
+impl<'a> ClosureSubst<'a> {
+ pub(crate) fn parent_subst(&self) -> &'a [GenericArg] {
+ match self.0.as_slice(Interner) {
+ [_, x @ ..] => x,
+ _ => {
+ never!("Closure missing parameter");
+ &[]
+ }
+ }
+ }
+
+ pub(crate) fn sig_ty(&self) -> &'a Ty {
+ match self.0.as_slice(Interner) {
+ [x, ..] => x.assert_ty_ref(Interner),
+ _ => {
+ unreachable!("Closure missing sig_ty parameter");
+ }
+ }
+ }
+}
+
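
A toy standalone model of the two accessors above, with a plain slice standing in for chalk's `Substitution` (editorial sketch, not rust-analyzer API):

// Toy model of the accessors above: index 0 is the closure's signature,
// the remainder is the parent's generic arguments.
struct ClosureSubst<'a>(&'a [&'static str]);

impl<'a> ClosureSubst<'a> {
    fn sig_ty(&self) -> &'static str {
        match self.0 {
            [x, ..] => *x,
            _ => unreachable!("closure substitution missing sig_ty"),
        }
    }

    fn parent_subst(&self) -> &'a [&'static str] {
        match self.0 {
            [_, rest @ ..] => rest,
            _ => &[],
        }
    }
}

fn main() {
    let subst = ["fn(i32) -> i32", "T", "U"];
    let cs = ClosureSubst(&subst);
    assert_eq!(cs.sig_ty(), "fn(i32) -> i32");
    assert_eq!(cs.parent_subst(), &["T", "U"][..]);
}
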
#[derive(Debug)]
pub(crate) struct Generics {
def: GenericDefId,
@@ -354,3 +394,99 @@ pub fn is_fn_unsafe_to_call(db: &dyn HirDatabase, func: FunctionId) -> bool {
_ => false,
}
}
+
+pub(crate) struct UnevaluatedConstEvaluatorFolder<'a> {
+ pub(crate) db: &'a dyn HirDatabase,
+}
+
+impl FallibleTypeFolder<Interner> for UnevaluatedConstEvaluatorFolder<'_> {
+ type Error = ();
+
+ fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder<Interner, Error = ()> {
+ self
+ }
+
+ fn interner(&self) -> Interner {
+ Interner
+ }
+
+ fn try_fold_const(
+ &mut self,
+ constant: Const,
+ _outer_binder: DebruijnIndex,
+ ) -> Result<Const, Self::Error> {
+ if let chalk_ir::ConstValue::Concrete(c) = &constant.data(Interner).value {
+ if let ConstScalar::UnevaluatedConst(id, subst) = &c.interned {
+ let eval = self.db.const_eval(*id, subst.clone());
+ return Ok(eval.unwrap_or_else(|_| unknown_const(constant.data(Interner).ty.clone())));
+ }
+ }
+ Ok(constant)
+ }
+}
+
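
The folder above replaces every `UnevaluatedConst` it encounters with its evaluated value, degrading to an unknown const when evaluation fails. The same replace-or-fallback shape as a standalone toy (editorial; a plain enum instead of chalk's `Const`):

// Toy version of the replace-or-fallback fold: evaluate `Unevaluated`
// leaves, and degrade to `Unknown` when evaluation fails.
#[derive(Debug, Clone, PartialEq)]
enum Const {
    Concrete(i64),
    Unevaluated(&'static str), // stands in for `ConstScalar::UnevaluatedConst`
    Unknown,                   // stands in for `unknown_const(ty)`
}

fn try_fold_const(c: Const, eval: impl Fn(&str) -> Result<i64, ()>) -> Const {
    match c {
        Const::Unevaluated(name) => match eval(name) {
            Ok(v) => Const::Concrete(v),
            Err(()) => Const::Unknown,
        },
        other => other,
    }
}

fn main() {
    let eval = |name: &str| if name == "N" { Ok(4) } else { Err(()) };
    assert_eq!(try_fold_const(Const::Unevaluated("N"), &eval), Const::Concrete(4));
    assert_eq!(try_fold_const(Const::Unevaluated("M"), &eval), Const::Unknown);
}
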
+pub(crate) fn detect_variant_from_bytes<'a>(
+ layout: &'a Layout,
+ db: &dyn HirDatabase,
+ krate: CrateId,
+ b: &[u8],
+ e: EnumId,
+) -> Option<(LocalEnumVariantId, &'a Layout)> {
+ let (var_id, var_layout) = match &layout.variants {
+ hir_def::layout::Variants::Single { index } => (index.0, &*layout),
+ hir_def::layout::Variants::Multiple { tag, tag_encoding, variants, .. } => {
+ let target_data_layout = db.target_data_layout(krate)?;
+ let size = tag.size(&*target_data_layout).bytes_usize();
+ let offset = layout.fields.offset(0).bytes_usize(); // In a multi-variant enum layout, the only field is the tag.
+ let tag = i128::from_le_bytes(pad16(&b[offset..offset + size], false));
+ match tag_encoding {
+ TagEncoding::Direct => {
+ let x = variants.iter_enumerated().find(|x| {
+ db.const_eval_discriminant(EnumVariantId { parent: e, local_id: x.0 .0 })
+ == Ok(tag)
+ })?;
+ (x.0 .0, x.1)
+ }
+ TagEncoding::Niche { untagged_variant, niche_start, .. } => {
+ let candidate_tag = tag.wrapping_sub(*niche_start as i128) as usize;
+ let variant = variants
+ .iter_enumerated()
+ .map(|(x, _)| x)
+ .filter(|x| x != untagged_variant)
+ .nth(candidate_tag)
+ .unwrap_or(*untagged_variant);
+ (variant.0, &variants[variant])
+ }
+ }
+ }
+ };
+ Some((var_id, var_layout))
+}
+
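
In the `Direct` case the tag read from memory is the discriminant itself; the `Niche` case above offsets the tag by `niche_start` and treats out-of-range values as the untagged variant. A standalone sketch of that decoding arithmetic (editorial; variant indices as plain `usize`):

// Toy decoder for `TagEncoding::Niche`: variants other than `untagged`
// are numbered in declaration order; tag `niche_start + k` selects the
// k-th of them, and anything out of range is the untagged variant.
fn decode_niche(tag: i128, niche_start: u128, untagged: usize, n_variants: usize) -> usize {
    let candidate = tag.wrapping_sub(niche_start as i128) as usize;
    (0..n_variants)
        .filter(|&v| v != untagged)
        .nth(candidate)
        .unwrap_or(untagged)
}

fn main() {
    // Option<&T>-style layout: variant 0 (`None`) sits in the niche at tag 0,
    // variant 1 (`Some`) is the untagged one.
    assert_eq!(decode_niche(0, 0, 1, 2), 0); // the niched `None`
    assert_eq!(decode_niche(7, 0, 1, 2), 1); // out of range -> untagged `Some`
}
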
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct InTypeConstIdMetadata(pub(crate) Ty);
+
+impl OpaqueInternableThing for InTypeConstIdMetadata {
+ fn dyn_hash(&self, mut state: &mut dyn std::hash::Hasher) {
+ self.hash(&mut state);
+ }
+
+ fn dyn_eq(&self, other: &dyn OpaqueInternableThing) -> bool {
+ other.as_any().downcast_ref::<Self>().map_or(false, |x| self == x)
+ }
+
+ fn dyn_clone(&self) -> Box<dyn OpaqueInternableThing> {
+ Box::new(self.clone())
+ }
+
+ fn as_any(&self) -> &dyn std::any::Any {
+ self
+ }
+
+ fn box_any(&self) -> Box<dyn std::any::Any> {
+ Box::new(self.clone())
+ }
+}
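
`dyn_hash`, `dyn_eq`, and `dyn_clone` above are the usual trick for exposing `Hash`/`Eq`/`Clone` behind a trait object: route hashing through `&mut dyn Hasher` and comparison through an `Any` downcast. A self-contained sketch of the equality half (editorial; `DynEq` is a made-up trait name):

use std::any::Any;

// Object-safe equality, as in `dyn_eq` above: go through `Any` and treat
// a failed downcast (different concrete types) as "not equal".
trait DynEq: Any {
    fn as_any(&self) -> &dyn Any;
    fn dyn_eq(&self, other: &dyn DynEq) -> bool;
}

impl<T: Any + PartialEq> DynEq for T {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn dyn_eq(&self, other: &dyn DynEq) -> bool {
        other.as_any().downcast_ref::<T>().map_or(false, |o| self == o)
    }
}

fn main() {
    let a: Box<dyn DynEq> = Box::new(1u32);
    let b: Box<dyn DynEq> = Box::new(1u32);
    let c: Box<dyn DynEq> = Box::new("one");
    assert!(a.dyn_eq(&*b));
    assert!(!a.dyn_eq(&*c)); // u32 vs &str: downcast fails, so unequal
}
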