author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:24 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:24 +0000
commit     023939b627b7dc93b01471f7d41fb8553ddb4ffa (patch)
tree       60fc59477c605c72b0a1051409062ddecc43f877 /src/tools/rust-analyzer/crates/hir-ty
parent     Adding debian version 1.72.1+dfsg1-1. (diff)
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/tools/rust-analyzer/crates/hir-ty')
46 files changed, 4067 insertions, 1421 deletions
diff --git a/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml b/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml index c8bea3450..abc19d63a 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml +++ b/src/tools/rust-analyzer/crates/hir-ty/Cargo.toml @@ -19,14 +19,15 @@ bitflags = "2.1.0" smallvec.workspace = true ena = "0.14.0" either = "1.7.0" +oorandom = "11.1.3" tracing = "0.1.35" rustc-hash = "1.1.0" scoped-tls = "1.0.0" -chalk-solve = { version = "0.91.0", default-features = false } -chalk-ir = "0.91.0" -chalk-recursive = { version = "0.91.0", default-features = false } -chalk-derive = "0.91.0" -la-arena = { version = "0.3.0", path = "../../lib/la-arena" } +chalk-solve = { version = "0.92.0", default-features = false } +chalk-ir = "0.92.0" +chalk-recursive = { version = "0.92.0", default-features = false } +chalk-derive = "0.92.0" +la-arena.workspace = true once_cell = "1.17.0" triomphe.workspace = true nohash-hasher.workspace = true @@ -47,7 +48,6 @@ limit.workspace = true expect-test = "1.4.0" tracing = "0.1.35" tracing-subscriber = { version = "0.3.16", default-features = false, features = [ - "env-filter", "registry", ] } tracing-tree = "0.2.1" diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs b/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs index 3860bccec..4625a3b01 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/autoderef.rs @@ -36,7 +36,7 @@ pub fn autoderef( ) -> impl Iterator<Item = Ty> { let mut table = InferenceTable::new(db, env); let ty = table.instantiate_canonical(ty); - let mut autoderef = Autoderef::new(&mut table, ty); + let mut autoderef = Autoderef::new(&mut table, ty, false); let mut v = Vec::new(); while let Some((ty, _steps)) = autoderef.next() { // `ty` may contain unresolved inference variables. Since there's no chance they would be @@ -63,12 +63,13 @@ pub(crate) struct Autoderef<'a, 'db> { ty: Ty, at_start: bool, steps: Vec<(AutoderefKind, Ty)>, + explicit: bool, } impl<'a, 'db> Autoderef<'a, 'db> { - pub(crate) fn new(table: &'a mut InferenceTable<'db>, ty: Ty) -> Self { + pub(crate) fn new(table: &'a mut InferenceTable<'db>, ty: Ty, explicit: bool) -> Self { let ty = table.resolve_ty_shallow(&ty); - Autoderef { table, ty, at_start: true, steps: Vec::new() } + Autoderef { table, ty, at_start: true, steps: Vec::new(), explicit } } pub(crate) fn step_count(&self) -> usize { @@ -97,7 +98,7 @@ impl Iterator for Autoderef<'_, '_> { return None; } - let (kind, new_ty) = autoderef_step(self.table, self.ty.clone())?; + let (kind, new_ty) = autoderef_step(self.table, self.ty.clone(), self.explicit)?; self.steps.push((kind, self.ty.clone())); self.ty = new_ty; @@ -109,8 +110,9 @@ impl Iterator for Autoderef<'_, '_> { pub(crate) fn autoderef_step( table: &mut InferenceTable<'_>, ty: Ty, + explicit: bool, ) -> Option<(AutoderefKind, Ty)> { - if let Some(derefed) = builtin_deref(table, &ty, false) { + if let Some(derefed) = builtin_deref(table, &ty, explicit) { Some((AutoderefKind::Builtin, table.resolve_ty_shallow(derefed))) } else { Some((AutoderefKind::Overloaded, deref_by_trait(table, ty)?)) @@ -124,7 +126,6 @@ pub(crate) fn builtin_deref<'ty>( ) -> Option<&'ty Ty> { match ty.kind(Interner) { TyKind::Ref(.., ty) => Some(ty), - // FIXME: Maybe accept this but diagnose if its not explicit? 
TyKind::Raw(.., ty) if explicit => Some(ty), &TyKind::Adt(chalk_ir::AdtId(adt), ref substs) => { if crate::lang_items::is_box(table.db, adt) { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs index 5dd8e2719..f4fbace19 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_db.rs @@ -5,13 +5,13 @@ use std::{iter, sync::Arc}; use tracing::debug; -use chalk_ir::{cast::Cast, fold::shift::Shift, CanonicalVarKinds}; +use chalk_ir::{cast::Caster, fold::shift::Shift, CanonicalVarKinds}; use chalk_solve::rust_ir::{self, OpaqueTyDatumBound, WellKnownTrait}; use base_db::CrateId; use hir_def::{ hir::Movability, - lang_item::{lang_attr, LangItem, LangItemTarget}, + lang_item::{LangItem, LangItemTarget}, AssocItemId, BlockId, GenericDefId, HasModule, ItemContainerId, Lookup, TypeAliasId, }; use hir_expand::name::name; @@ -46,7 +46,7 @@ pub(crate) type AssociatedTyValue = chalk_solve::rust_ir::AssociatedTyValue<Inte pub(crate) type FnDefDatum = chalk_solve::rust_ir::FnDefDatum<Interner>; pub(crate) type Variances = chalk_ir::Variances<Interner>; -impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> { +impl chalk_solve::RustIrDatabase<Interner> for ChalkContext<'_> { fn associated_ty_data(&self, id: AssocTypeId) -> Arc<AssociatedTyDatum> { self.db.associated_ty_data(id) } @@ -60,9 +60,37 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> { // FIXME: keep track of these Arc::new(rust_ir::AdtRepr { c: false, packed: false, int: None }) } - fn discriminant_type(&self, _ty: chalk_ir::Ty<Interner>) -> chalk_ir::Ty<Interner> { - // FIXME: keep track of this - chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(chalk_ir::UintTy::U32)).intern(Interner) + fn discriminant_type(&self, ty: chalk_ir::Ty<Interner>) -> chalk_ir::Ty<Interner> { + if let chalk_ir::TyKind::Adt(id, _) = ty.kind(Interner) { + if let hir_def::AdtId::EnumId(e) = id.0 { + let enum_data = self.db.enum_data(e); + let ty = enum_data.repr.unwrap_or_default().discr_type(); + return chalk_ir::TyKind::Scalar(match ty { + hir_def::layout::IntegerType::Pointer(is_signed) => match is_signed { + true => chalk_ir::Scalar::Int(chalk_ir::IntTy::Isize), + false => chalk_ir::Scalar::Uint(chalk_ir::UintTy::Usize), + }, + hir_def::layout::IntegerType::Fixed(size, is_signed) => match is_signed { + true => chalk_ir::Scalar::Int(match size { + hir_def::layout::Integer::I8 => chalk_ir::IntTy::I8, + hir_def::layout::Integer::I16 => chalk_ir::IntTy::I16, + hir_def::layout::Integer::I32 => chalk_ir::IntTy::I32, + hir_def::layout::Integer::I64 => chalk_ir::IntTy::I64, + hir_def::layout::Integer::I128 => chalk_ir::IntTy::I128, + }), + false => chalk_ir::Scalar::Uint(match size { + hir_def::layout::Integer::I8 => chalk_ir::UintTy::U8, + hir_def::layout::Integer::I16 => chalk_ir::UintTy::U16, + hir_def::layout::Integer::I32 => chalk_ir::UintTy::U32, + hir_def::layout::Integer::I64 => chalk_ir::UintTy::U64, + hir_def::layout::Integer::I128 => chalk_ir::UintTy::U128, + }), + }, + }) + .intern(Interner); + } + } + chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(chalk_ir::UintTy::U8)).intern(Interner) } fn impl_datum(&self, impl_id: ImplId) -> Arc<ImplDatum> { self.db.impl_datum(self.krate, impl_id) @@ -565,7 +593,7 @@ pub(crate) fn trait_datum_query( let where_clauses = convert_where_clauses(db, trait_.into(), &bound_vars); let associated_ty_ids = 
trait_data.associated_types().map(to_assoc_type_id).collect(); let trait_datum_bound = rust_ir::TraitDatumBound { where_clauses }; - let well_known = lang_attr(db.upcast(), trait_).and_then(well_known_trait_from_lang_item); + let well_known = db.lang_attr(trait_.into()).and_then(well_known_trait_from_lang_item); let trait_datum = TraitDatum { id: trait_id, binders: make_binders(db, &generic_params, trait_datum_bound), @@ -593,6 +621,7 @@ fn well_known_trait_from_lang_item(item: LangItem) -> Option<WellKnownTrait> { LangItem::Unsize => WellKnownTrait::Unsize, LangItem::Tuple => WellKnownTrait::Tuple, LangItem::PointeeTrait => WellKnownTrait::Pointee, + LangItem::FnPtrTrait => WellKnownTrait::FnPtr, _ => return None, }) } @@ -614,6 +643,7 @@ fn lang_item_from_well_known_trait(trait_: WellKnownTrait) -> LangItem { WellKnownTrait::Unpin => LangItem::Unpin, WellKnownTrait::Unsize => LangItem::Unsize, WellKnownTrait::Pointee => LangItem::PointeeTrait, + WellKnownTrait::FnPtr => LangItem::FnPtrTrait, } } @@ -844,28 +874,34 @@ pub(super) fn generic_predicate_to_inline_bound( } let args_no_self = trait_ref.substitution.as_slice(Interner)[1..] .iter() - .map(|ty| ty.clone().cast(Interner)) + .cloned() + .casted(Interner) .collect(); let trait_bound = rust_ir::TraitBound { trait_id: trait_ref.trait_id, args_no_self }; Some(chalk_ir::Binders::new(binders, rust_ir::InlineBound::TraitBound(trait_bound))) } WhereClause::AliasEq(AliasEq { alias: AliasTy::Projection(projection_ty), ty }) => { - let trait_ = projection_ty.trait_(db); - if projection_ty.self_type_parameter(db) != self_ty_shifted_in { + let generics = + generics(db.upcast(), from_assoc_type_id(projection_ty.associated_ty_id).into()); + let (assoc_args, trait_args) = + projection_ty.substitution.as_slice(Interner).split_at(generics.len_self()); + let (self_ty, args_no_self) = + trait_args.split_first().expect("projection without trait self type"); + if self_ty.assert_ty_ref(Interner) != &self_ty_shifted_in { return None; } - let args_no_self = projection_ty.substitution.as_slice(Interner)[1..] 
- .iter() - .map(|ty| ty.clone().cast(Interner)) - .collect(); + + let args_no_self = args_no_self.iter().cloned().casted(Interner).collect(); + let parameters = assoc_args.to_vec(); + let alias_eq_bound = rust_ir::AliasEqBound { value: ty.clone(), trait_bound: rust_ir::TraitBound { - trait_id: to_chalk_trait_id(trait_), + trait_id: to_chalk_trait_id(projection_ty.trait_(db)), args_no_self, }, associated_ty_id: projection_ty.associated_ty_id, - parameters: Vec::new(), // FIXME we don't support generic associated types yet + parameters, }; Some(chalk_ir::Binders::new( binders, diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs index a8071591a..c0b243ea2 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/chalk_ext.rs @@ -343,7 +343,8 @@ impl TyExt for Ty { fn is_copy(self, db: &dyn HirDatabase, owner: DefWithBodyId) -> bool { let crate_id = owner.module(db.upcast()).krate(); - let Some(copy_trait) = db.lang_item(crate_id, LangItem::Copy).and_then(|x| x.as_trait()) else { + let Some(copy_trait) = db.lang_item(crate_id, LangItem::Copy).and_then(|it| it.as_trait()) + else { return false; }; let trait_ref = TyBuilder::trait_ref(db, copy_trait).push(self).build(); diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs b/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs index 262341c6e..1c0f7b08d 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/consteval.rs @@ -16,7 +16,8 @@ use triomphe::Arc; use crate::{ db::HirDatabase, infer::InferenceContext, lower::ParamLoweringMode, mir::monomorphize_mir_body_bad, to_placeholder_idx, utils::Generics, Const, ConstData, - ConstScalar, ConstValue, GenericArg, Interner, MemoryMap, Substitution, Ty, TyBuilder, + ConstScalar, ConstValue, GenericArg, Interner, MemoryMap, Substitution, TraitEnvironment, Ty, + TyBuilder, }; use super::mir::{interpret_mir, lower_to_mir, pad16, MirEvalError, MirLowerError}; @@ -88,7 +89,7 @@ pub(crate) fn path_to_const( ConstValue::Placeholder(to_placeholder_idx(db, p.into())) } ParamLoweringMode::Variable => match args.param_idx(p.into()) { - Some(x) => ConstValue::BoundVar(BoundVar::new(debruijn, x)), + Some(it) => ConstValue::BoundVar(BoundVar::new(debruijn, it)), None => { never!( "Generic list doesn't contain this param: {:?}, {:?}, {:?}", @@ -135,15 +136,15 @@ pub fn intern_const_ref( ty: Ty, krate: CrateId, ) -> Const { - let layout = db.layout_of_ty(ty.clone(), krate); + let layout = db.layout_of_ty(ty.clone(), Arc::new(TraitEnvironment::empty(krate))); let bytes = match value { LiteralConstRef::Int(i) => { // FIXME: We should handle failure of layout better. 
- let size = layout.map(|x| x.size.bytes_usize()).unwrap_or(16); + let size = layout.map(|it| it.size.bytes_usize()).unwrap_or(16); ConstScalar::Bytes(i.to_le_bytes()[0..size].to_vec(), MemoryMap::default()) } LiteralConstRef::UInt(i) => { - let size = layout.map(|x| x.size.bytes_usize()).unwrap_or(16); + let size = layout.map(|it| it.size.bytes_usize()).unwrap_or(16); ConstScalar::Bytes(i.to_le_bytes()[0..size].to_vec(), MemoryMap::default()) } LiteralConstRef::Bool(b) => ConstScalar::Bytes(vec![*b as u8], MemoryMap::default()), @@ -171,9 +172,9 @@ pub fn try_const_usize(db: &dyn HirDatabase, c: &Const) -> Option<u128> { chalk_ir::ConstValue::InferenceVar(_) => None, chalk_ir::ConstValue::Placeholder(_) => None, chalk_ir::ConstValue::Concrete(c) => match &c.interned { - ConstScalar::Bytes(x, _) => Some(u128::from_le_bytes(pad16(&x, false))), + ConstScalar::Bytes(it, _) => Some(u128::from_le_bytes(pad16(&it, false))), ConstScalar::UnevaluatedConst(c, subst) => { - let ec = db.const_eval(*c, subst.clone()).ok()?; + let ec = db.const_eval(*c, subst.clone(), None).ok()?; try_const_usize(db, &ec) } _ => None, @@ -186,6 +187,7 @@ pub(crate) fn const_eval_recover( _: &[String], _: &GeneralConstId, _: &Substitution, + _: &Option<Arc<TraitEnvironment>>, ) -> Result<Const, ConstEvalError> { Err(ConstEvalError::MirLowerError(MirLowerError::Loop)) } @@ -210,6 +212,7 @@ pub(crate) fn const_eval_query( db: &dyn HirDatabase, def: GeneralConstId, subst: Substitution, + trait_env: Option<Arc<TraitEnvironment>>, ) -> Result<Const, ConstEvalError> { let body = match def { GeneralConstId::ConstId(c) => { @@ -228,7 +231,7 @@ pub(crate) fn const_eval_query( } GeneralConstId::InTypeConstId(c) => db.mir_body(c.into())?, }; - let c = interpret_mir(db, &body, false).0?; + let c = interpret_mir(db, body, false, trait_env).0?; Ok(c) } @@ -241,7 +244,7 @@ pub(crate) fn const_eval_static_query( Substitution::empty(Interner), db.trait_environment_for_body(def.into()), )?; - let c = interpret_mir(db, &body, false).0?; + let c = interpret_mir(db, body, false, None).0?; Ok(c) } @@ -268,7 +271,7 @@ pub(crate) fn const_eval_discriminant_variant( Substitution::empty(Interner), db.trait_environment_for_body(def), )?; - let c = interpret_mir(db, &mir_body, false).0?; + let c = interpret_mir(db, mir_body, false, None).0?; let c = try_const_usize(db, &c).unwrap() as i128; Ok(c) } @@ -293,7 +296,7 @@ pub(crate) fn eval_to_const( } let infer = ctx.clone().resolve_all(); if let Ok(mir_body) = lower_to_mir(ctx.db, ctx.owner, &ctx.body, &infer, expr) { - if let Ok(result) = interpret_mir(db, &mir_body, true).0 { + if let Ok(result) = interpret_mir(db, Arc::new(mir_body), true, None).0 { return result; } } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs index 0db1fefbf..666955fa1 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests.rs @@ -1,10 +1,11 @@ use base_db::{fixture::WithFixture, FileId}; use chalk_ir::Substitution; use hir_def::db::DefDatabase; +use test_utils::skip_slow_tests; use crate::{ consteval::try_const_usize, db::HirDatabase, mir::pad16, test_db::TestDB, Const, ConstScalar, - Interner, + Interner, MemoryMap, }; use super::{ @@ -16,7 +17,7 @@ mod intrinsics; fn simplify(e: ConstEvalError) -> ConstEvalError { match e { - ConstEvalError::MirEvalError(MirEvalError::InFunction(_, e, _, _)) => { + 
ConstEvalError::MirEvalError(MirEvalError::InFunction(e, _)) => { simplify(ConstEvalError::MirEvalError(*e)) } _ => e, @@ -36,7 +37,37 @@ fn check_fail(ra_fixture: &str, error: impl FnOnce(ConstEvalError) -> bool) { #[track_caller] fn check_number(ra_fixture: &str, answer: i128) { - let (db, file_id) = TestDB::with_single_file(ra_fixture); + check_answer(ra_fixture, |b, _| { + assert_eq!( + b, + &answer.to_le_bytes()[0..b.len()], + "Bytes differ. In decimal form: actual = {}, expected = {answer}", + i128::from_le_bytes(pad16(b, true)) + ); + }); +} + +#[track_caller] +fn check_str(ra_fixture: &str, answer: &str) { + check_answer(ra_fixture, |b, mm| { + let addr = usize::from_le_bytes(b[0..b.len() / 2].try_into().unwrap()); + let size = usize::from_le_bytes(b[b.len() / 2..].try_into().unwrap()); + let Some(bytes) = mm.get(addr, size) else { + panic!("string data missed in the memory map"); + }; + assert_eq!( + bytes, + answer.as_bytes(), + "Bytes differ. In string form: actual = {}, expected = {answer}", + String::from_utf8_lossy(bytes) + ); + }); +} + +#[track_caller] +fn check_answer(ra_fixture: &str, check: impl FnOnce(&[u8], &MemoryMap)) { + let (db, file_ids) = TestDB::with_many_files(ra_fixture); + let file_id = *file_ids.last().unwrap(); let r = match eval_goal(&db, file_id) { Ok(t) => t, Err(e) => { @@ -46,13 +77,8 @@ fn check_number(ra_fixture: &str, answer: i128) { }; match &r.data(Interner).value { chalk_ir::ConstValue::Concrete(c) => match &c.interned { - ConstScalar::Bytes(b, _) => { - assert_eq!( - b, - &answer.to_le_bytes()[0..b.len()], - "Bytes differ. In decimal form: actual = {}, expected = {answer}", - i128::from_le_bytes(pad16(b, true)) - ); + ConstScalar::Bytes(b, mm) => { + check(b, mm); } x => panic!("Expected number but found {:?}", x), }, @@ -87,8 +113,8 @@ fn eval_goal(db: &TestDB, file_id: FileId) -> Result<Const, ConstEvalError> { } _ => None, }) - .unwrap(); - db.const_eval(const_id.into(), Substitution::empty(Interner)) + .expect("No const named GOAL found in the test"); + db.const_eval(const_id.into(), Substitution::empty(Interner), None) } #[test] @@ -108,6 +134,7 @@ fn bit_op() { check_fail(r#"const GOAL: i8 = 1 << 8"#, |e| { e == ConstEvalError::MirEvalError(MirEvalError::Panic("Overflow in Shl".to_string())) }); + check_number(r#"const GOAL: i32 = 100000000i32 << 11"#, (100000000i32 << 11) as i128); } #[test] @@ -166,14 +193,21 @@ fn casts() { check_number( r#" //- minicore: coerce_unsized, index, slice + struct X { + unsize_field: [u8], + } + const GOAL: usize = { let a = [10, 20, 3, 15]; let x: &[i32] = &a; - let y: *const [i32] = x; - let z = y as *const [u8]; // slice fat pointer cast don't touch metadata - let q = z as *const str; - let p = q as *const [u8]; - let w = unsafe { &*z }; + let x: *const [i32] = x; + let x = x as *const [u8]; // slice fat pointer cast don't touch metadata + let x = x as *const str; + let x = x as *const X; + let x = x as *const [i16]; + let x = x as *const X; + let x = x as *const [u8]; + let w = unsafe { &*x }; w.len() }; "#, @@ -199,6 +233,30 @@ fn raw_pointer_equality() { } #[test] +fn alignment() { + check_answer( + r#" +//- minicore: transmute +use core::mem::transmute; +const GOAL: usize = { + let x: i64 = 2; + transmute(&x) +} + "#, + |b, _| assert_eq!(b[0] % 8, 0), + ); + check_answer( + r#" +//- minicore: transmute +use core::mem::transmute; +static X: i64 = 12; +const GOAL: usize = transmute(&X); + "#, + |b, _| assert_eq!(b[0] % 8, 0), + ); +} + +#[test] fn locals() { check_number( r#" @@ -1129,6 +1187,25 @@ fn 
pattern_matching_ergonomics() { } #[test] +fn destructing_assignment() { + check_number( + r#" + //- minicore: add + const fn f(i: &mut u8) -> &mut u8 { + *i += 1; + i + } + const GOAL: u8 = { + let mut i = 4; + _ = f(&mut i); + i + }; + "#, + 5, + ); +} + +#[test] fn let_else() { check_number( r#" @@ -1370,14 +1447,14 @@ fn builtin_derive_macro() { #[derive(Clone)] struct Y { field1: i32, - field2: u8, + field2: ((i32, u8), i64), } const GOAL: u8 = { - let x = X(2, Z::Foo(Y { field1: 4, field2: 5 }), 8); + let x = X(2, Z::Foo(Y { field1: 4, field2: ((32, 5), 12) }), 8); let x = x.clone(); let Z::Foo(t) = x.1; - t.field2 + t.field2.0 .1 }; "#, 5, @@ -1551,6 +1628,58 @@ fn closures() { } #[test] +fn manual_fn_trait_impl() { + check_number( + r#" +//- minicore: fn, copy +struct S(i32); + +impl FnOnce<(i32, i32)> for S { + type Output = i32; + + extern "rust-call" fn call_once(self, arg: (i32, i32)) -> i32 { + arg.0 + arg.1 + self.0 + } +} + +const GOAL: i32 = { + let s = S(1); + s(2, 3) +}; +"#, + 6, + ); +} + +#[test] +fn closure_capture_unsized_type() { + check_number( + r#" + //- minicore: fn, copy, slice, index, coerce_unsized + fn f<T: A>(x: &<T as A>::Ty) -> &<T as A>::Ty { + let c = || &*x; + c() + } + + trait A { + type Ty; + } + + impl A for i32 { + type Ty = [u8]; + } + + const GOAL: u8 = { + let k: &[u8] = &[1, 2, 3]; + let k = f::<i32>(k); + k[0] + k[1] + k[2] + } + "#, + 6, + ); +} + +#[test] fn closure_and_impl_fn() { check_number( r#" @@ -1636,6 +1765,24 @@ fn function_pointer_in_constants() { } #[test] +fn function_pointer_and_niche_optimization() { + check_number( + r#" + //- minicore: option + const GOAL: i32 = { + let f: fn(i32) -> i32 = |x| x + 2; + let init = Some(f); + match init { + Some(t) => t(3), + None => 222, + } + }; + "#, + 5, + ); +} + +#[test] fn function_pointer() { check_number( r#" @@ -1663,6 +1810,18 @@ fn function_pointer() { ); check_number( r#" + fn add2(x: u8) -> u8 { + x + 2 + } + const GOAL: u8 = { + let plus2 = add2 as fn(u8) -> u8; + plus2(3) + }; + "#, + 5, + ); + check_number( + r#" //- minicore: coerce_unsized, index, slice fn add2(x: u8) -> u8 { x + 2 @@ -1847,6 +2006,65 @@ fn dyn_trait() { "#, 900, ); + check_number( + r#" + //- minicore: coerce_unsized, index, slice + trait A { + fn x(&self) -> i32; + } + + trait B: A {} + + impl A for i32 { + fn x(&self) -> i32 { + 5 + } + } + + impl B for i32 { + + } + + const fn f(x: &dyn B) -> i32 { + x.x() + } + + const GOAL: i32 = f(&2i32); + "#, + 5, + ); +} + +#[test] +fn coerce_unsized() { + check_number( + r#" +//- minicore: coerce_unsized, deref_mut, slice, index, transmute, non_null +use core::ops::{Deref, DerefMut, CoerceUnsized}; +use core::{marker::Unsize, mem::transmute, ptr::NonNull}; + +struct ArcInner<T: ?Sized> { + strong: usize, + weak: usize, + data: T, +} + +pub struct Arc<T: ?Sized> { + inner: NonNull<ArcInner<T>>, +} + +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {} + +const GOAL: usize = { + let x = transmute::<usize, Arc<[i32; 3]>>(12); + let y: Arc<[i32]> = x; + let z = transmute::<Arc<[i32]>, (usize, usize)>(y); + z.1 +}; + + "#, + 3, + ); } #[test] @@ -1961,6 +2179,17 @@ fn array_and_index() { } #[test] +fn string() { + check_str( + r#" + //- minicore: coerce_unsized, index, slice + const GOAL: &str = "hello"; + "#, + "hello", + ); +} + +#[test] fn byte_string() { check_number( r#" @@ -2018,6 +2247,57 @@ fn consts() { "#, 6, ); + + check_number( + r#" + const F1: i32 = 2147483647; + const F2: i32 = F1 - 25; + const GOAL: i32 = F2; + "#, + 2147483622, + 
); + + check_number( + r#" + const F1: i32 = -2147483648; + const F2: i32 = F1 + 18; + const GOAL: i32 = F2; + "#, + -2147483630, + ); + + check_number( + r#" + const F1: i32 = 10; + const F2: i32 = F1 - 20; + const GOAL: i32 = F2; + "#, + -10, + ); + + check_number( + r#" + const F1: i32 = 25; + const F2: i32 = F1 - 25; + const GOAL: i32 = F2; + "#, + 0, + ); + + check_number( + r#" + const A: i32 = -2147483648; + const GOAL: bool = A > 0; + "#, + 0, + ); + + check_number( + r#" + const GOAL: i64 = (-2147483648_i32) as i64; + "#, + -2147483648, + ); } #[test] @@ -2116,11 +2396,14 @@ fn const_loop() { fn const_transfer_memory() { check_number( r#" - const A1: &i32 = &2; - const A2: &i32 = &5; - const GOAL: i32 = *A1 + *A2; + //- minicore: slice, index, coerce_unsized + const A1: &i32 = &1; + const A2: &i32 = &10; + const A3: [&i32; 3] = [&1, &2, &100]; + const A4: (i32, &i32) = (1, &1000); + const GOAL: i32 = *A1 + *A2 + *A3[2] + *A4.1; "#, - 7, + 1111, ); } @@ -2287,6 +2570,51 @@ fn const_trait_assoc() { ); check_number( r#" + //- /a/lib.rs crate:a + pub trait ToConst { + const VAL: usize; + } + pub const fn to_const<T: ToConst>() -> usize { + T::VAL + } + //- /main.rs crate:main deps:a + use a::{ToConst, to_const}; + struct U0; + impl ToConst for U0 { + const VAL: usize = 5; + } + const GOAL: usize = to_const::<U0>(); + "#, + 5, + ); + check_number( + r#" + //- minicore: size_of, fn + //- /a/lib.rs crate:a + use core::mem::size_of; + pub struct S<T>(T); + impl<T> S<T> { + pub const X: usize = { + let k: T; + let f = || core::mem::size_of::<T>(); + f() + }; + } + //- /main.rs crate:main deps:a + use a::{S}; + trait Tr { + type Ty; + } + impl Tr for i32 { + type Ty = u64; + } + struct K<T: Tr>(<T as Tr>::Ty); + const GOAL: usize = S::<K<i32>>::X; + "#, + 8, + ); + check_number( + r#" struct S<T>(*mut T); trait MySized: Sized { @@ -2311,21 +2639,11 @@ fn const_trait_assoc() { } #[test] -fn panic_messages() { - check_fail( - r#" - //- minicore: panic - const GOAL: u8 = { - let x: u16 = 2; - panic!("hello"); - }; - "#, - |e| e == ConstEvalError::MirEvalError(MirEvalError::Panic("hello".to_string())), - ); -} - -#[test] fn exec_limits() { + if skip_slow_tests() { + return; + } + check_fail( r#" const GOAL: usize = loop {}; @@ -2339,7 +2657,7 @@ fn exec_limits() { } const GOAL: i32 = f(0); "#, - |e| e == ConstEvalError::MirEvalError(MirEvalError::StackOverflow), + |e| e == ConstEvalError::MirEvalError(MirEvalError::ExecutionLimitExceeded), ); // Reasonable code should still work check_number( @@ -2356,9 +2674,31 @@ fn exec_limits() { } sum } - const GOAL: i32 = f(10000); + const GOAL: i32 = f(1000); "#, - 10000 * 10000, + 1000 * 1000, + ); +} + +#[test] +fn memory_limit() { + check_fail( + r#" + extern "Rust" { + #[rustc_allocator] + fn __rust_alloc(size: usize, align: usize) -> *mut u8; + } + + const GOAL: u8 = unsafe { + __rust_alloc(30_000_000_000, 1); // 30GB + 2 + }; + "#, + |e| { + e == ConstEvalError::MirEvalError(MirEvalError::Panic( + "Memory allocation of 30000000000 bytes failed".to_string(), + )) + }, ); } @@ -2377,6 +2717,37 @@ fn type_error() { } #[test] +fn unsized_field() { + check_number( + r#" + //- minicore: coerce_unsized, index, slice, transmute + use core::mem::transmute; + + struct Slice([usize]); + struct Slice2(Slice); + + impl Slice2 { + fn as_inner(&self) -> &Slice { + &self.0 + } + + fn as_bytes(&self) -> &[usize] { + &self.as_inner().0 + } + } + + const GOAL: usize = unsafe { + let x: &[usize] = &[1, 2, 3]; + let x: &Slice2 = transmute(x); + let x = x.as_bytes(); + 
x[0] + x[1] + x[2] + x.len() * 100 + }; + "#, + 306, + ); +} + +#[test] fn unsized_local() { check_fail( r#" diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs index e05d824db..2855f7890 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/consteval/tests/intrinsics.rs @@ -15,6 +15,171 @@ fn size_of() { } #[test] +fn size_of_val() { + check_number( + r#" + //- minicore: coerce_unsized + extern "rust-intrinsic" { + pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; + } + + struct X(i32, u8); + + const GOAL: usize = size_of_val(&X(1, 2)); + "#, + 8, + ); + check_number( + r#" + //- minicore: coerce_unsized + extern "rust-intrinsic" { + pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; + } + + const GOAL: usize = { + let it: &[i32] = &[1, 2, 3]; + size_of_val(it) + }; + "#, + 12, + ); + check_number( + r#" + //- minicore: coerce_unsized, transmute + use core::mem::transmute; + + extern "rust-intrinsic" { + pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; + } + + struct X { + x: i64, + y: u8, + t: [i32], + } + + const GOAL: usize = unsafe { + let y: &X = transmute([0usize, 3]); + size_of_val(y) + }; + "#, + 24, + ); + check_number( + r#" + //- minicore: coerce_unsized, transmute + use core::mem::transmute; + + extern "rust-intrinsic" { + pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; + } + + struct X { + x: i32, + y: i64, + t: [u8], + } + + const GOAL: usize = unsafe { + let y: &X = transmute([0usize, 15]); + size_of_val(y) + }; + "#, + 32, + ); + check_number( + r#" + //- minicore: coerce_unsized, fmt, builtin_impls + extern "rust-intrinsic" { + pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; + } + + const GOAL: usize = { + let x: &i16 = &5; + let y: &dyn core::fmt::Debug = x; + let z: &dyn core::fmt::Debug = &y; + size_of_val(x) + size_of_val(y) * 10 + size_of_val(z) * 100 + }; + "#, + 1622, + ); + check_number( + r#" + //- minicore: coerce_unsized + extern "rust-intrinsic" { + pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; + } + + const GOAL: usize = { + size_of_val("salam") + }; + "#, + 5, + ); +} + +#[test] +fn min_align_of_val() { + check_number( + r#" + //- minicore: coerce_unsized + extern "rust-intrinsic" { + pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize; + } + + struct X(i32, u8); + + const GOAL: usize = min_align_of_val(&X(1, 2)); + "#, + 4, + ); + check_number( + r#" + //- minicore: coerce_unsized + extern "rust-intrinsic" { + pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize; + } + + const GOAL: usize = { + let x: &[i32] = &[1, 2, 3]; + min_align_of_val(x) + }; + "#, + 4, + ); +} + +#[test] +fn type_name() { + check_str( + r#" + extern "rust-intrinsic" { + pub fn type_name<T: ?Sized>() -> &'static str; + } + + const GOAL: &str = type_name::<i32>(); + "#, + "i32", + ); + check_str( + r#" + extern "rust-intrinsic" { + pub fn type_name<T: ?Sized>() -> &'static str; + } + + mod mod1 { + pub mod mod2 { + pub struct Ty; + } + } + + const GOAL: &str = type_name::<mod1::mod2::Ty>(); + "#, + "mod1::mod2::Ty", + ); +} + +#[test] fn transmute() { check_number( r#" @@ -29,9 +194,28 @@ fn transmute() { } #[test] +fn read_via_copy() { + check_number( + r#" + extern "rust-intrinsic" { + pub fn read_via_copy<T>(e: *const T) -> T; + pub fn volatile_load<T>(e: *const T) -> T; + } + + const GOAL: i32 = { + let x = 2; + read_via_copy(&x) + volatile_load(&x) + }; + "#, + 4, 
+ ); +} + +#[test] fn const_eval_select() { check_number( r#" + //- minicore: fn extern "rust-intrinsic" { pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET where @@ -68,7 +252,29 @@ fn wrapping_add() { } #[test] -fn saturating_add() { +fn ptr_offset_from() { + check_number( + r#" + //- minicore: index, slice, coerce_unsized + extern "rust-intrinsic" { + pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize; + pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize; + } + + const GOAL: isize = { + let x = [1, 2, 3, 4, 5i32]; + let r1 = -ptr_offset_from(&x[0], &x[4]); + let r2 = ptr_offset_from(&x[3], &x[1]); + let r3 = ptr_offset_from_unsigned(&x[3], &x[0]) as isize; + r3 * 100 + r2 * 10 + r1 + }; + "#, + 324, + ); +} + +#[test] +fn saturating() { check_number( r#" extern "rust-intrinsic" { @@ -82,6 +288,16 @@ fn saturating_add() { check_number( r#" extern "rust-intrinsic" { + pub fn saturating_sub<T>(a: T, b: T) -> T; + } + + const GOAL: bool = saturating_sub(5u8, 7) == 0 && saturating_sub(8u8, 4) == 4; + "#, + 1, + ); + check_number( + r#" + extern "rust-intrinsic" { pub fn saturating_add<T>(a: T, b: T) -> T; } @@ -112,6 +328,7 @@ fn allocator() { *ptr = 23; *ptr2 = 32; let ptr = __rust_realloc(ptr, 4, 1, 8); + let ptr = __rust_realloc(ptr, 8, 1, 3); let ptr2 = ((ptr as usize) + 1) as *mut u8; *ptr + *ptr2 }; @@ -160,6 +377,24 @@ fn needs_drop() { } #[test] +fn discriminant_value() { + check_number( + r#" + //- minicore: discriminant, option + use core::marker::DiscriminantKind; + extern "rust-intrinsic" { + pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant; + } + const GOAL: bool = { + discriminant_value(&Some(2i32)) == discriminant_value(&Some(5i32)) + && discriminant_value(&Some(2i32)) != discriminant_value(&None::<i32>) + }; + "#, + 1, + ); +} + +#[test] fn likely() { check_number( r#" @@ -225,6 +460,8 @@ fn atomic() { pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T; pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T; pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T; + pub fn atomic_fence_seqcst(); + pub fn atomic_singlethreadfence_acqrel(); } fn should_not_reach() { @@ -239,6 +476,7 @@ fn atomic() { if (30, true) != atomic_cxchg_release_seqcst(&mut y, 30, 40) { should_not_reach(); } + atomic_fence_seqcst(); if (40, false) != atomic_cxchg_release_seqcst(&mut y, 30, 50) { should_not_reach(); } @@ -246,6 +484,7 @@ fn atomic() { should_not_reach(); } let mut z = atomic_xsub_seqcst(&mut x, -200); + atomic_singlethreadfence_acqrel(); atomic_xor_seqcst(&mut x, 1024); atomic_load_seqcst(&x) + z * 3 + atomic_load_seqcst(&y) * 2 }; @@ -328,6 +567,24 @@ fn copy_nonoverlapping() { } #[test] +fn write_bytes() { + check_number( + r#" + extern "rust-intrinsic" { + fn write_bytes<T>(dst: *mut T, val: u8, count: usize); + } + + const GOAL: i32 = unsafe { + let mut x = 2; + write_bytes(&mut x, 5, 1); + x + }; + "#, + 0x05050505, + ); +} + +#[test] fn copy() { check_number( r#" @@ -363,6 +620,20 @@ fn ctpop() { } #[test] +fn ctlz() { + check_number( + r#" + extern "rust-intrinsic" { + pub fn ctlz<T: Copy>(x: T) -> T; + } + + const GOAL: u8 = ctlz(0b0001_1100_u8); + "#, + 3, + ); +} + +#[test] fn cttz() { check_number( r#" @@ -375,3 +646,85 @@ fn cttz() { 3, ); } + +#[test] +fn rotate() { + check_number( + r#" + extern "rust-intrinsic" { + pub fn rotate_left<T: Copy>(x: T, y: T) -> T; + } + + const GOAL: i64 = rotate_left(0xaa00000000006e1i64, 12); + "#, + 0x6e10aa, + 
); + check_number( + r#" + extern "rust-intrinsic" { + pub fn rotate_right<T: Copy>(x: T, y: T) -> T; + } + + const GOAL: i64 = rotate_right(0x6e10aa, 12); + "#, + 0xaa00000000006e1, + ); + check_number( + r#" + extern "rust-intrinsic" { + pub fn rotate_left<T: Copy>(x: T, y: T) -> T; + } + + const GOAL: i8 = rotate_left(129, 2); + "#, + 6, + ); + check_number( + r#" + extern "rust-intrinsic" { + pub fn rotate_right<T: Copy>(x: T, y: T) -> T; + } + + const GOAL: i32 = rotate_right(10006016, 1020315); + "#, + 320192512, + ); +} + +#[test] +fn simd() { + check_number( + r#" + pub struct i8x16( + i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8, + ); + extern "platform-intrinsic" { + pub fn simd_bitmask<T, U>(x: T) -> U; + } + const GOAL: u16 = simd_bitmask(i8x16( + 0, 1, 0, 0, 2, 255, 100, 0, 50, 0, 1, 1, 0, 0, 0, 0 + )); + "#, + 0b0000110101110010, + ); + check_number( + r#" + pub struct i8x16( + i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8,i8, + ); + extern "platform-intrinsic" { + pub fn simd_lt<T, U>(x: T, y: T) -> U; + pub fn simd_bitmask<T, U>(x: T) -> U; + } + const GOAL: u16 = simd_bitmask(simd_lt::<i8x16, i8x16>( + i8x16( + -105, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 + ), + i8x16( + -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 + ), + )); + "#, + 0xFFFF, + ); +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/db.rs b/src/tools/rust-analyzer/crates/hir-ty/src/db.rs index 9dd810f84..9c96b5ab8 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/db.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/db.rs @@ -77,8 +77,12 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> { #[salsa::invoke(crate::consteval::const_eval_query)] #[salsa::cycle(crate::consteval::const_eval_recover)] - fn const_eval(&self, def: GeneralConstId, subst: Substitution) - -> Result<Const, ConstEvalError>; + fn const_eval( + &self, + def: GeneralConstId, + subst: Substitution, + trait_env: Option<Arc<crate::TraitEnvironment>>, + ) -> Result<Const, ConstEvalError>; #[salsa::invoke(crate::consteval::const_eval_static_query)] #[salsa::cycle(crate::consteval::const_eval_static_recover)] @@ -100,16 +104,28 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> { &self, def: AdtId, subst: Substitution, - krate: CrateId, + env: Arc<crate::TraitEnvironment>, ) -> Result<Arc<Layout>, LayoutError>; #[salsa::invoke(crate::layout::layout_of_ty_query)] #[salsa::cycle(crate::layout::layout_of_ty_recover)] - fn layout_of_ty(&self, ty: Ty, krate: CrateId) -> Result<Arc<Layout>, LayoutError>; + fn layout_of_ty( + &self, + ty: Ty, + env: Arc<crate::TraitEnvironment>, + ) -> Result<Arc<Layout>, LayoutError>; #[salsa::invoke(crate::layout::target_data_layout_query)] fn target_data_layout(&self, krate: CrateId) -> Option<Arc<TargetDataLayout>>; + #[salsa::invoke(crate::method_resolution::lookup_impl_method_query)] + fn lookup_impl_method( + &self, + env: Arc<crate::TraitEnvironment>, + func: FunctionId, + fn_subst: Substitution, + ) -> (FunctionId, Substitution); + #[salsa::invoke(crate::lower::callable_item_sig)] fn callable_item_signature(&self, def: CallableDefId) -> PolyFnSig; diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics.rs index 4b147b997..ef43ed5c4 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics.rs @@ -5,7 +5,7 @@ mod unsafe_check; mod decl_check; pub use crate::diagnostics::{ - decl_check::{incorrect_case, IncorrectCase}, 
+ decl_check::{incorrect_case, CaseType, IncorrectCase}, expr::{ record_literal_missing_fields, record_pattern_missing_fields, BodyValidationDiagnostic, }, diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs index 1233469b9..a94a962c1 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/diagnostics/decl_check.rs @@ -14,13 +14,12 @@ mod case_conv; use std::fmt; -use base_db::CrateId; use hir_def::{ data::adt::VariantData, hir::{Pat, PatId}, src::HasSource, - AdtId, AttrDefId, ConstId, EnumId, FunctionId, ItemContainerId, Lookup, ModuleDefId, StaticId, - StructId, + AdtId, AttrDefId, ConstId, DefWithBodyId, EnumId, EnumVariantId, FunctionId, ItemContainerId, + Lookup, ModuleDefId, StaticId, StructId, }; use hir_expand::{ name::{AsName, Name}, @@ -44,24 +43,20 @@ mod allow { pub(super) const NON_CAMEL_CASE_TYPES: &str = "non_camel_case_types"; } -pub fn incorrect_case( - db: &dyn HirDatabase, - krate: CrateId, - owner: ModuleDefId, -) -> Vec<IncorrectCase> { +pub fn incorrect_case(db: &dyn HirDatabase, owner: ModuleDefId) -> Vec<IncorrectCase> { let _p = profile::span("validate_module_item"); - let mut validator = DeclValidator::new(db, krate); + let mut validator = DeclValidator::new(db); validator.validate_item(owner); validator.sink } #[derive(Debug)] pub enum CaseType { - // `some_var` + /// `some_var` LowerSnakeCase, - // `SOME_CONST` + /// `SOME_CONST` UpperSnakeCase, - // `SomeStruct` + /// `SomeStruct` UpperCamelCase, } @@ -120,7 +115,6 @@ pub struct IncorrectCase { pub(super) struct DeclValidator<'a> { db: &'a dyn HirDatabase, - krate: CrateId, pub(super) sink: Vec<IncorrectCase>, } @@ -132,8 +126,8 @@ struct Replacement { } impl<'a> DeclValidator<'a> { - pub(super) fn new(db: &'a dyn HirDatabase, krate: CrateId) -> DeclValidator<'a> { - DeclValidator { db, krate, sink: Vec::new() } + pub(super) fn new(db: &'a dyn HirDatabase) -> DeclValidator<'a> { + DeclValidator { db, sink: Vec::new() } } pub(super) fn validate_item(&mut self, item: ModuleDefId) { @@ -181,6 +175,8 @@ impl<'a> DeclValidator<'a> { AttrDefId::TraitAliasId(taid) => Some(taid.lookup(self.db.upcast()).container.into()), AttrDefId::ImplId(iid) => Some(iid.lookup(self.db.upcast()).container.into()), AttrDefId::ExternBlockId(id) => Some(id.lookup(self.db.upcast()).container.into()), + AttrDefId::ExternCrateId(id) => Some(id.lookup(self.db.upcast()).container.into()), + AttrDefId::UseId(id) => Some(id.lookup(self.db.upcast()).container.into()), // These warnings should not explore macro definitions at all AttrDefId::MacroId(_) => None, AttrDefId::AdtId(aid) => match aid { @@ -194,8 +190,7 @@ impl<'a> DeclValidator<'a> { AttrDefId::TypeAliasId(_) => None, AttrDefId::GenericParamId(_) => None, } - .map(|mid| self.allowed(mid, allow_name, true)) - .unwrap_or(false) + .is_some_and(|mid| self.allowed(mid, allow_name, true)) } fn validate_func(&mut self, func: FunctionId) { @@ -205,17 +200,7 @@ impl<'a> DeclValidator<'a> { return; } - let body = self.db.body(func.into()); - - // Recursively validate inner scope items, such as static variables and constants. 
- for (_, block_def_map) in body.blocks(self.db.upcast()) { - for (_, module) in block_def_map.modules() { - for def_id in module.scope.declarations() { - let mut validator = DeclValidator::new(self.db, self.krate); - validator.validate_item(def_id); - } - } - } + self.validate_body_inner_items(func.into()); // Check whether non-snake case identifiers are allowed for this function. if self.allowed(func.into(), allow::NON_SNAKE_CASE, false) { @@ -230,6 +215,8 @@ impl<'a> DeclValidator<'a> { expected_case: CaseType::LowerSnakeCase, }); + let body = self.db.body(func.into()); + // Check the patterns inside the function body. // This includes function parameters. let pats_replacements = body @@ -495,6 +482,11 @@ impl<'a> DeclValidator<'a> { fn validate_enum(&mut self, enum_id: EnumId) { let data = self.db.enum_data(enum_id); + for (local_id, _) in data.variants.iter() { + let variant_id = EnumVariantId { parent: enum_id, local_id }; + self.validate_body_inner_items(variant_id.into()); + } + // Check whether non-camel case names are allowed for this enum. if self.allowed(enum_id.into(), allow::NON_CAMEL_CASE_TYPES, false) { return; @@ -511,13 +503,11 @@ impl<'a> DeclValidator<'a> { // Check the field names. let enum_fields_replacements = data .variants - .iter() - .filter_map(|(_, variant)| { + .values() + .filter_map(|variant| { Some(Replacement { current_name: variant.name.clone(), - suggested_text: to_camel_case( - &variant.name.display(self.db.upcast()).to_string(), - )?, + suggested_text: to_camel_case(&variant.name.to_smol_str())?, expected_case: CaseType::UpperCamelCase, }) }) @@ -621,6 +611,8 @@ impl<'a> DeclValidator<'a> { fn validate_const(&mut self, const_id: ConstId) { let data = self.db.const_data(const_id); + self.validate_body_inner_items(const_id.into()); + if self.allowed(const_id.into(), allow::NON_UPPER_CASE_GLOBAL, false) { return; } @@ -630,7 +622,7 @@ impl<'a> DeclValidator<'a> { None => return, }; - let const_name = name.display(self.db.upcast()).to_string(); + let const_name = name.to_smol_str(); let replacement = if let Some(new_name) = to_upper_snake_case(&const_name) { Replacement { current_name: name.clone(), @@ -669,13 +661,15 @@ impl<'a> DeclValidator<'a> { return; } + self.validate_body_inner_items(static_id.into()); + if self.allowed(static_id.into(), allow::NON_UPPER_CASE_GLOBAL, false) { return; } let name = &data.name; - let static_name = name.display(self.db.upcast()).to_string(); + let static_name = name.to_smol_str(); let replacement = if let Some(new_name) = to_upper_snake_case(&static_name) { Replacement { current_name: name.clone(), @@ -706,4 +700,17 @@ impl<'a> DeclValidator<'a> { self.sink.push(diagnostic); } + + // FIXME: We don't currently validate names within `DefWithBodyId::InTypeConstId`. + /// Recursively validates inner scope items, such as static variables and constants. 
+ fn validate_body_inner_items(&mut self, body_id: DefWithBodyId) { + let body = self.db.body(body_id); + for (_, block_def_map) in body.blocks(self.db.upcast()) { + for (_, module) in block_def_map.modules() { + for def_id in module.scope.declarations() { + self.validate_item(def_id); + } + } + } + } } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/display.rs b/src/tools/rust-analyzer/crates/hir-ty/src/display.rs index c1df24d17..1b4ee4613 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/display.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/display.rs @@ -29,6 +29,7 @@ use itertools::Itertools; use la_arena::ArenaMap; use smallvec::SmallVec; use stdx::never; +use triomphe::Arc; use crate::{ consteval::try_const_usize, @@ -43,26 +44,19 @@ use crate::{ AdtId, AliasEq, AliasTy, Binders, CallableDefId, CallableSig, Const, ConstScalar, ConstValue, DomainGoal, GenericArg, ImplTraitId, Interner, Lifetime, LifetimeData, LifetimeOutlives, MemoryMap, Mutability, OpaqueTy, ProjectionTy, ProjectionTyExt, QuantifiedWhereClause, Scalar, - Substitution, TraitRef, TraitRefExt, Ty, TyExt, WhereClause, + Substitution, TraitEnvironment, TraitRef, TraitRefExt, Ty, TyExt, WhereClause, }; pub trait HirWrite: fmt::Write { - fn start_location_link(&mut self, location: ModuleDefId); - fn end_location_link(&mut self); + fn start_location_link(&mut self, _location: ModuleDefId) {} + fn end_location_link(&mut self) {} } // String will ignore link metadata -impl HirWrite for String { - fn start_location_link(&mut self, _: ModuleDefId) {} - - fn end_location_link(&mut self) {} -} +impl HirWrite for String {} // `core::Formatter` will ignore metadata -impl HirWrite for fmt::Formatter<'_> { - fn start_location_link(&mut self, _: ModuleDefId) {} - fn end_location_link(&mut self) {} -} +impl HirWrite for fmt::Formatter<'_> {} pub struct HirFormatter<'a> { pub db: &'a dyn HirDatabase, @@ -192,7 +186,7 @@ pub trait HirDisplay { } } -impl<'a> HirFormatter<'a> { +impl HirFormatter<'_> { pub fn write_joined<T: HirDisplay>( &mut self, iter: impl IntoIterator<Item = T>, @@ -342,7 +336,7 @@ impl<T: HirDisplay> HirDisplayWrapper<'_, T> { } } -impl<'a, T> fmt::Display for HirDisplayWrapper<'a, T> +impl<T> fmt::Display for HirDisplayWrapper<'_, T> where T: HirDisplay, { @@ -360,7 +354,7 @@ where const TYPE_HINT_TRUNCATION: &str = "…"; -impl<T: HirDisplay> HirDisplay for &'_ T { +impl<T: HirDisplay> HirDisplay for &T { fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> { HirDisplay::hir_fmt(*self, f) } @@ -446,28 +440,6 @@ impl HirDisplay for Const { } } -pub struct HexifiedConst(pub Const); - -impl HirDisplay for HexifiedConst { - fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> { - let data = &self.0.data(Interner); - if let TyKind::Scalar(s) = data.ty.kind(Interner) { - if matches!(s, Scalar::Int(_) | Scalar::Uint(_)) { - if let ConstValue::Concrete(c) = &data.value { - if let ConstScalar::Bytes(b, m) = &c.interned { - let value = u128::from_le_bytes(pad16(b, false)); - if value >= 10 { - render_const_scalar(f, &b, m, &data.ty)?; - return write!(f, " ({:#X})", value); - } - } - } - } - } - self.0.hir_fmt(f) - } -} - fn render_const_scalar( f: &mut HirFormatter<'_>, b: &[u8], @@ -476,33 +448,35 @@ fn render_const_scalar( ) -> Result<(), HirDisplayError> { // FIXME: We need to get krate from the final callers of the hir display // infrastructure and have it here as a field on `f`. 
- let krate = *f.db.crate_graph().crates_in_topological_order().last().unwrap(); + let trait_env = Arc::new(TraitEnvironment::empty( + *f.db.crate_graph().crates_in_topological_order().last().unwrap(), + )); match ty.kind(Interner) { TyKind::Scalar(s) => match s { Scalar::Bool => write!(f, "{}", if b[0] == 0 { false } else { true }), Scalar::Char => { - let x = u128::from_le_bytes(pad16(b, false)) as u32; - let Ok(c) = char::try_from(x) else { + let it = u128::from_le_bytes(pad16(b, false)) as u32; + let Ok(c) = char::try_from(it) else { return f.write_str("<unicode-error>"); }; write!(f, "{c:?}") } Scalar::Int(_) => { - let x = i128::from_le_bytes(pad16(b, true)); - write!(f, "{x}") + let it = i128::from_le_bytes(pad16(b, true)); + write!(f, "{it}") } Scalar::Uint(_) => { - let x = u128::from_le_bytes(pad16(b, false)); - write!(f, "{x}") + let it = u128::from_le_bytes(pad16(b, false)); + write!(f, "{it}") } Scalar::Float(fl) => match fl { chalk_ir::FloatTy::F32 => { - let x = f32::from_le_bytes(b.try_into().unwrap()); - write!(f, "{x:?}") + let it = f32::from_le_bytes(b.try_into().unwrap()); + write!(f, "{it:?}") } chalk_ir::FloatTy::F64 => { - let x = f64::from_le_bytes(b.try_into().unwrap()); - write!(f, "{x:?}") + let it = f64::from_le_bytes(b.try_into().unwrap()); + write!(f, "{it:?}") } }, }, @@ -519,7 +493,7 @@ fn render_const_scalar( TyKind::Slice(ty) => { let addr = usize::from_le_bytes(b[0..b.len() / 2].try_into().unwrap()); let count = usize::from_le_bytes(b[b.len() / 2..].try_into().unwrap()); - let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(ty.clone(), trait_env) else { return f.write_str("<layout-error>"); }; let size_one = layout.size.bytes_usize(); @@ -545,7 +519,7 @@ fn render_const_scalar( let Ok(t) = memory_map.vtable.ty(ty_id) else { return f.write_str("<ty-missing-in-vtable-map>"); }; - let Ok(layout) = f.db.layout_of_ty(t.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(t.clone(), trait_env) else { return f.write_str("<layout-error>"); }; let size = layout.size.bytes_usize(); @@ -577,7 +551,7 @@ fn render_const_scalar( return f.write_str("<layout-error>"); } }); - let Ok(layout) = f.db.layout_of_ty(t.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(t.clone(), trait_env) else { return f.write_str("<layout-error>"); }; let size = layout.size.bytes_usize(); @@ -589,7 +563,7 @@ fn render_const_scalar( } }, TyKind::Tuple(_, subst) => { - let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(ty.clone(), trait_env.clone()) else { return f.write_str("<layout-error>"); }; f.write_str("(")?; @@ -602,7 +576,7 @@ fn render_const_scalar( } let ty = ty.assert_ty_ref(Interner); // Tuple only has type argument let offset = layout.fields.offset(id).bytes_usize(); - let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(ty.clone(), trait_env.clone()) else { f.write_str("<layout-error>")?; continue; }; @@ -612,7 +586,7 @@ fn render_const_scalar( f.write_str(")") } TyKind::Adt(adt, subst) => { - let Ok(layout) = f.db.layout_of_adt(adt.0, subst.clone(), krate) else { + let Ok(layout) = f.db.layout_of_adt(adt.0, subst.clone(), trait_env.clone()) else { return f.write_str("<layout-error>"); }; match adt.0 { @@ -624,7 +598,7 @@ fn render_const_scalar( &data.variant_data, f, &field_types, - adt.0.module(f.db.upcast()).krate(), + f.db.trait_environment(adt.0.into()), &layout, subst, b, @@ -636,7 +610,8 @@ fn render_const_scalar( } 
hir_def::AdtId::EnumId(e) => { let Some((var_id, var_layout)) = - detect_variant_from_bytes(&layout, f.db, krate, b, e) else { + detect_variant_from_bytes(&layout, f.db, trait_env.clone(), b, e) + else { return f.write_str("<failed-to-detect-variant>"); }; let data = &f.db.enum_data(e).variants[var_id]; @@ -647,7 +622,7 @@ fn render_const_scalar( &data.variant_data, f, &field_types, - adt.0.module(f.db.upcast()).krate(), + f.db.trait_environment(adt.0.into()), &var_layout, subst, b, @@ -658,15 +633,15 @@ fn render_const_scalar( } TyKind::FnDef(..) => ty.hir_fmt(f), TyKind::Function(_) | TyKind::Raw(_, _) => { - let x = u128::from_le_bytes(pad16(b, false)); - write!(f, "{:#X} as ", x)?; + let it = u128::from_le_bytes(pad16(b, false)); + write!(f, "{:#X} as ", it)?; ty.hir_fmt(f) } TyKind::Array(ty, len) => { let Some(len) = try_const_usize(f.db, len) else { return f.write_str("<unknown-array-len>"); }; - let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(ty.clone(), trait_env) else { return f.write_str("<layout-error>"); }; let size_one = layout.size.bytes_usize(); @@ -705,7 +680,7 @@ fn render_variant_after_name( data: &VariantData, f: &mut HirFormatter<'_>, field_types: &ArenaMap<LocalFieldId, Binders<Ty>>, - krate: CrateId, + trait_env: Arc<TraitEnvironment>, layout: &Layout, subst: &Substitution, b: &[u8], @@ -716,7 +691,7 @@ fn render_variant_after_name( let render_field = |f: &mut HirFormatter<'_>, id: LocalFieldId| { let offset = layout.fields.offset(u32::from(id.into_raw()) as usize).bytes_usize(); let ty = field_types[id].clone().substitute(Interner, subst); - let Ok(layout) = f.db.layout_of_ty(ty.clone(), krate) else { + let Ok(layout) = f.db.layout_of_ty(ty.clone(), trait_env.clone()) else { return f.write_str("<layout-error>"); }; let size = layout.size.bytes_usize(); @@ -735,7 +710,7 @@ fn render_variant_after_name( } write!(f, " }}")?; } else { - let mut it = it.map(|x| x.0); + let mut it = it.map(|it| it.0); write!(f, "(")?; if let Some(id) = it.next() { render_field(f, id)?; @@ -903,6 +878,13 @@ impl HirDisplay for Ty { TyKind::FnDef(def, parameters) => { let def = from_chalk(db, *def); let sig = db.callable_item_signature(def).substitute(Interner, parameters); + + if f.display_target.is_source_code() { + // `FnDef` is anonymous and there's no surface syntax for it. Show it as a + // function pointer type. + return sig.hir_fmt(f); + } + f.start_location_link(def.into()); match def { CallableDefId::FunctionId(ff) => { @@ -1277,19 +1259,20 @@ fn hir_fmt_generics( i: usize, parameters: &Substitution, ) -> bool { - if parameter.ty(Interner).map(|x| x.kind(Interner)) == Some(&TyKind::Error) + if parameter.ty(Interner).map(|it| it.kind(Interner)) + == Some(&TyKind::Error) { return true; } if let Some(ConstValue::Concrete(c)) = - parameter.constant(Interner).map(|x| &x.data(Interner).value) + parameter.constant(Interner).map(|it| &it.data(Interner).value) { if c.interned == ConstScalar::Unknown { return true; } } let default_parameter = match default_parameters.get(i) { - Some(x) => x, + Some(it) => it, None => return true, }; let actual_default = diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs index 1ac0837b5..b4915dbf0 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer.rs @@ -13,6 +13,15 @@ //! to certain types. To record this, we use the union-find implementation from //! 
the `ena` crate, which is extracted from rustc. +mod cast; +pub(crate) mod closure; +mod coerce; +mod expr; +mod mutability; +mod pat; +mod path; +pub(crate) mod unify; + use std::{convert::identity, ops::Index}; use chalk_ir::{ @@ -60,15 +69,8 @@ pub use coerce::could_coerce; #[allow(unreachable_pub)] pub use unify::could_unify; -pub(crate) use self::closure::{CaptureKind, CapturedItem, CapturedItemWithoutTy}; - -pub(crate) mod unify; -mod path; -mod expr; -mod pat; -mod coerce; -pub(crate) mod closure; -mod mutability; +use cast::CastCheck; +pub(crate) use closure::{CaptureKind, CapturedItem, CapturedItemWithoutTy}; /// The entry point of type inference. pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<InferenceResult> { @@ -290,7 +292,7 @@ impl Default for InternedStandardTypes { /// ``` /// /// Note that for a struct, the 'deep' unsizing of the struct is not recorded. -/// E.g., `struct Foo<T> { x: T }` we can coerce &Foo<[i32; 4]> to &Foo<[i32]> +/// E.g., `struct Foo<T> { it: T }` we can coerce &Foo<[i32; 4]> to &Foo<[i32]> /// The autoderef and -ref are the same as in the above example, but the type /// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about /// the underlying conversions from `[i32; 4]` to `[i32]`. @@ -508,6 +510,8 @@ pub(crate) struct InferenceContext<'a> { diverges: Diverges, breakables: Vec<BreakableContext>, + deferred_cast_checks: Vec<CastCheck>, + // fields related to closure capture current_captures: Vec<CapturedItemWithoutTy>, current_closure: Option<ClosureId>, @@ -582,7 +586,8 @@ impl<'a> InferenceContext<'a> { resolver, diverges: Diverges::Maybe, breakables: Vec::new(), - current_captures: vec![], + deferred_cast_checks: Vec::new(), + current_captures: Vec::new(), current_closure: None, deferred_closures: FxHashMap::default(), closure_dependencies: FxHashMap::default(), @@ -594,7 +599,7 @@ impl<'a> InferenceContext<'a> { // used this function for another workaround, mention it here. If you really need this function and believe that // there is no problem in it being `pub(crate)`, remove this comment. pub(crate) fn resolve_all(self) -> InferenceResult { - let InferenceContext { mut table, mut result, .. } = self; + let InferenceContext { mut table, mut result, deferred_cast_checks, .. } = self; // Destructure every single field so whenever new fields are added to `InferenceResult` we // don't forget to handle them here. let InferenceResult { @@ -622,6 +627,13 @@ impl<'a> InferenceContext<'a> { table.fallback_if_possible(); + // Comment from rustc: + // Even though coercion casts provide type hints, we check casts after fallback for + // backwards compatibility. This makes fallback a stronger type hint than a cast coercion. + for cast in deferred_cast_checks { + cast.check(&mut table); + } + // FIXME resolve obligations as well (use Guidance if necessary) table.resolve_obligations_as_possible(); @@ -1172,7 +1184,7 @@ impl<'a> InferenceContext<'a> { unresolved: Option<usize>, path: &ModPath, ) -> (Ty, Option<VariantId>) { - let remaining = unresolved.map(|x| path.segments()[x..].len()).filter(|x| x > &0); + let remaining = unresolved.map(|it| path.segments()[it..].len()).filter(|it| it > &0); match remaining { None => { let variant = ty.as_adt().and_then(|(adt_id, _)| match adt_id { @@ -1232,7 +1244,9 @@ impl<'a> InferenceContext<'a> { .as_function()? 
.lookup(self.db.upcast()) .container - else { return None }; + else { + return None; + }; self.resolve_output_on(trait_) } @@ -1322,7 +1336,7 @@ impl Expectation { /// The primary use case is where the expected type is a fat pointer, /// like `&[isize]`. For example, consider the following statement: /// - /// let x: &[isize] = &[1, 2, 3]; + /// let it: &[isize] = &[1, 2, 3]; /// /// In this case, the expected type for the `&[1, 2, 3]` expression is /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/cast.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/cast.rs new file mode 100644 index 000000000..9e1c74b16 --- /dev/null +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/cast.rs @@ -0,0 +1,46 @@ +//! Type cast logic. Basically coercion + additional casts. + +use crate::{infer::unify::InferenceTable, Interner, Ty, TyExt, TyKind}; + +#[derive(Clone, Debug)] +pub(super) struct CastCheck { + expr_ty: Ty, + cast_ty: Ty, +} + +impl CastCheck { + pub(super) fn new(expr_ty: Ty, cast_ty: Ty) -> Self { + Self { expr_ty, cast_ty } + } + + pub(super) fn check(self, table: &mut InferenceTable<'_>) { + // FIXME: This function currently only implements the bits that influence the type + // inference. We should return the adjustments on success and report diagnostics on error. + let expr_ty = table.resolve_ty_shallow(&self.expr_ty); + let cast_ty = table.resolve_ty_shallow(&self.cast_ty); + + if expr_ty.contains_unknown() || cast_ty.contains_unknown() { + return; + } + + if table.coerce(&expr_ty, &cast_ty).is_ok() { + return; + } + + if check_ref_to_ptr_cast(expr_ty, cast_ty, table) { + // Note that this type of cast is actually split into a coercion to a + // pointer type and a cast: + // &[T; N] -> *[T; N] -> *T + return; + } + + // FIXME: Check other kinds of non-coercion casts and report error if any? + } +} + +fn check_ref_to_ptr_cast(expr_ty: Ty, cast_ty: Ty, table: &mut InferenceTable<'_>) -> bool { + let Some((expr_inner_ty, _, _)) = expr_ty.as_reference() else { return false; }; + let Some((cast_inner_ty, _)) = cast_ty.as_raw_ptr() else { return false; }; + let TyKind::Array(expr_elt_ty, _) = expr_inner_ty.kind(Interner) else { return false; }; + table.coerce(expr_elt_ty, cast_inner_ty).is_ok() +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs index ff64ae252..1781f6c58 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/closure.rs @@ -139,7 +139,7 @@ impl HirPlace { ) -> CaptureKind { match current_capture { CaptureKind::ByRef(BorrowKind::Mut { .. }) => { - if self.projections[len..].iter().any(|x| *x == ProjectionElem::Deref) { + if self.projections[len..].iter().any(|it| *it == ProjectionElem::Deref) { current_capture = CaptureKind::ByRef(BorrowKind::Unique); } } @@ -199,7 +199,7 @@ impl CapturedItem { .to_string(), VariantData::Tuple(fields) => fields .iter() - .position(|x| x.0 == f.local_id) + .position(|it| it.0 == f.local_id) .unwrap_or_default() .to_string(), VariantData::Unit => "[missing field]".to_string(), @@ -439,10 +439,10 @@ impl InferenceContext<'_> { } fn walk_expr(&mut self, tgt_expr: ExprId) { - if let Some(x) = self.result.expr_adjustments.get_mut(&tgt_expr) { + if let Some(it) = self.result.expr_adjustments.get_mut(&tgt_expr) { // FIXME: this take is completely unneeded, and just is here to make borrow checker // happy. 
Remove it if you can. - let x_taken = mem::take(x); + let x_taken = mem::take(it); self.walk_expr_with_adjust(tgt_expr, &x_taken); *self.result.expr_adjustments.get_mut(&tgt_expr).unwrap() = x_taken; } else { @@ -488,10 +488,6 @@ impl InferenceContext<'_> { self.consume_expr(*tail); } } - Expr::While { condition, body, label: _ } => { - self.consume_expr(*condition); - self.consume_expr(*body); - } Expr::Call { callee, args, is_assignee_expr: _ } => { self.consume_expr(*callee); self.consume_exprs(args.iter().copied()); @@ -536,7 +532,7 @@ impl InferenceContext<'_> { if let &Some(expr) = spread { self.consume_expr(expr); } - self.consume_exprs(fields.iter().map(|x| x.expr)); + self.consume_exprs(fields.iter().map(|it| it.expr)); } Expr::Field { expr, name: _ } => self.select_from_expr(*expr), Expr::UnaryOp { expr, op: UnaryOp::Deref } => { @@ -548,7 +544,7 @@ impl InferenceContext<'_> { } else if let Some((f, _)) = self.result.method_resolution(tgt_expr) { let mutability = 'b: { if let Some(deref_trait) = - self.resolve_lang_item(LangItem::DerefMut).and_then(|x| x.as_trait()) + self.resolve_lang_item(LangItem::DerefMut).and_then(|it| it.as_trait()) { if let Some(deref_fn) = self.db.trait_data(deref_trait).method_by_name(&name![deref_mut]) @@ -615,8 +611,8 @@ impl InferenceContext<'_> { "We sort closures, so we should always have data for inner closures", ); let mut cc = mem::take(&mut self.current_captures); - cc.extend(captures.iter().filter(|x| self.is_upvar(&x.place)).map(|x| { - CapturedItemWithoutTy { place: x.place.clone(), kind: x.kind, span: x.span } + cc.extend(captures.iter().filter(|it| self.is_upvar(&it.place)).map(|it| { + CapturedItemWithoutTy { place: it.place.clone(), kind: it.kind, span: it.span } })); self.current_captures = cc; } @@ -694,7 +690,7 @@ impl InferenceContext<'_> { }, }, } - if self.result.pat_adjustments.get(&p).map_or(false, |x| !x.is_empty()) { + if self.result.pat_adjustments.get(&p).map_or(false, |it| !it.is_empty()) { for_mut = BorrowKind::Unique; } self.body.walk_pats_shallow(p, |p| self.walk_pat_inner(p, update_result, for_mut)); @@ -706,9 +702,9 @@ impl InferenceContext<'_> { fn expr_ty_after_adjustments(&self, e: ExprId) -> Ty { let mut ty = None; - if let Some(x) = self.result.expr_adjustments.get(&e) { - if let Some(x) = x.last() { - ty = Some(x.target.clone()); + if let Some(it) = self.result.expr_adjustments.get(&e) { + if let Some(it) = it.last() { + ty = Some(it.target.clone()); } } ty.unwrap_or_else(|| self.expr_ty(e)) @@ -727,7 +723,7 @@ impl InferenceContext<'_> { // FIXME: We handle closure as a special case, since chalk consider every closure as copy. We // should probably let chalk know which closures are copy, but I don't know how doing it // without creating query cycles. 
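As context for the `is_ty_copy` check above: whether a consumed value is `Copy` decides whether the capture moves its source. A minimal stand-alone illustration of the distinction it guards (not part of this patch):

// Copy types are captured by value without invalidating the original
// binding; non-Copy types are moved into the closure.
fn copy_vs_move_capture() {
    let n: i32 = 1;                // i32: Copy
    let s = String::from("owned"); // String: not Copy
    let f = move || (n, s.len());  // `n` is copied in, `s` is moved in
    let _ = f();
    let _still_ok = n;             // fine: the closure only took a copy
    // let _ = s;                  // error: `s` was moved into `f`
}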
- return self.result.closure_info.get(id).map(|x| x.1 == FnTrait::Fn).unwrap_or(true); + return self.result.closure_info.get(id).map(|it| it.1 == FnTrait::Fn).unwrap_or(true); } self.table.resolve_completely(ty).is_copy(self.db, self.owner) } @@ -748,7 +744,7 @@ impl InferenceContext<'_> { } fn minimize_captures(&mut self) { - self.current_captures.sort_by_key(|x| x.place.projections.len()); + self.current_captures.sort_by_key(|it| it.place.projections.len()); let mut hash_map = HashMap::<HirPlace, usize>::new(); let result = mem::take(&mut self.current_captures); for item in result { @@ -759,7 +755,7 @@ impl InferenceContext<'_> { break Some(*k); } match it.next() { - Some(x) => lookup_place.projections.push(x.clone()), + Some(it) => lookup_place.projections.push(it.clone()), None => break None, } }; @@ -780,7 +776,7 @@ impl InferenceContext<'_> { } fn consume_with_pat(&mut self, mut place: HirPlace, pat: PatId) { - let cnt = self.result.pat_adjustments.get(&pat).map(|x| x.len()).unwrap_or_default(); + let cnt = self.result.pat_adjustments.get(&pat).map(|it| it.len()).unwrap_or_default(); place.projections = place .projections .iter() @@ -894,10 +890,10 @@ impl InferenceContext<'_> { fn closure_kind(&self) -> FnTrait { let mut r = FnTrait::Fn; - for x in &self.current_captures { + for it in &self.current_captures { r = cmp::min( r, - match &x.kind { + match &it.kind { CaptureKind::ByRef(BorrowKind::Unique | BorrowKind::Mut { .. }) => { FnTrait::FnMut } @@ -933,7 +929,7 @@ impl InferenceContext<'_> { } self.minimize_captures(); let result = mem::take(&mut self.current_captures); - let captures = result.into_iter().map(|x| x.with_ty(self)).collect::<Vec<_>>(); + let captures = result.into_iter().map(|it| it.with_ty(self)).collect::<Vec<_>>(); self.result.closure_info.insert(closure, (captures, closure_kind)); closure_kind } @@ -973,20 +969,20 @@ impl InferenceContext<'_> { fn sort_closures(&mut self) -> Vec<(ClosureId, Vec<(Ty, Ty, Vec<Ty>, ExprId)>)> { let mut deferred_closures = mem::take(&mut self.deferred_closures); let mut dependents_count: FxHashMap<ClosureId, usize> = - deferred_closures.keys().map(|x| (*x, 0)).collect(); + deferred_closures.keys().map(|it| (*it, 0)).collect(); for (_, deps) in &self.closure_dependencies { for dep in deps { *dependents_count.entry(*dep).or_default() += 1; } } let mut queue: Vec<_> = - deferred_closures.keys().copied().filter(|x| dependents_count[x] == 0).collect(); + deferred_closures.keys().copied().filter(|it| dependents_count[it] == 0).collect(); let mut result = vec![]; - while let Some(x) = queue.pop() { - if let Some(d) = deferred_closures.remove(&x) { - result.push((x, d)); + while let Some(it) = queue.pop() { + if let Some(d) = deferred_closures.remove(&it) { + result.push((it, d)); } - for dep in self.closure_dependencies.get(&x).into_iter().flat_map(|x| x.iter()) { + for dep in self.closure_dependencies.get(&it).into_iter().flat_map(|it| it.iter()) { let cnt = dependents_count.get_mut(dep).unwrap(); *cnt -= 1; if *cnt == 0 { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs index 05a476f63..8e7e62c49 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/coerce.rs @@ -220,7 +220,7 @@ pub(crate) fn coerce( Ok((adjustments, table.resolve_with_fallback(ty, &fallback))) } -impl<'a> InferenceContext<'a> { +impl InferenceContext<'_> { /// Unify two types, but may coerce the first one to the second 
one /// using "implicit coercion rules" if needed. pub(super) fn coerce( @@ -239,7 +239,7 @@ impl<'a> InferenceContext<'a> { } } -impl<'a> InferenceTable<'a> { +impl InferenceTable<'_> { /// Unify two types, but may coerce the first one to the second one /// using "implicit coercion rules" if needed. pub(crate) fn coerce( @@ -377,7 +377,7 @@ impl<'a> InferenceTable<'a> { let snapshot = self.snapshot(); - let mut autoderef = Autoderef::new(self, from_ty.clone()); + let mut autoderef = Autoderef::new(self, from_ty.clone(), false); let mut first_error = None; let mut found = None; diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs index 194471f00..8cbdae625 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/expr.rs @@ -46,11 +46,11 @@ use crate::{ }; use super::{ - coerce::auto_deref_adjust_steps, find_breakable, BreakableContext, Diverges, Expectation, - InferenceContext, InferenceDiagnostic, TypeMismatch, + cast::CastCheck, coerce::auto_deref_adjust_steps, find_breakable, BreakableContext, Diverges, + Expectation, InferenceContext, InferenceDiagnostic, TypeMismatch, }; -impl<'a> InferenceContext<'a> { +impl InferenceContext<'_> { pub(crate) fn infer_expr(&mut self, tgt_expr: ExprId, expected: &Expectation) -> Ty { let ty = self.infer_expr_inner(tgt_expr, expected); if let Some(expected_ty) = expected.only_has_type(&mut self.table) { @@ -198,19 +198,6 @@ impl<'a> InferenceContext<'a> { None => self.result.standard_types.never.clone(), } } - &Expr::While { condition, body, label } => { - self.with_breakable_ctx(BreakableKind::Loop, None, label, |this| { - this.infer_expr( - condition, - &Expectation::HasType(this.result.standard_types.bool_.clone()), - ); - this.infer_expr(body, &Expectation::HasType(TyBuilder::unit())); - }); - - // the body may not run, so it diverging doesn't mean we diverge - self.diverges = Diverges::Maybe; - TyBuilder::unit() - } Expr::Closure { body, args, ret_type, arg_types, closure_kind, capture_by: _ } => { assert_eq!(args.len(), arg_types.len()); @@ -316,7 +303,7 @@ impl<'a> InferenceContext<'a> { } Expr::Call { callee, args, .. } => { let callee_ty = self.infer_expr(*callee, &Expectation::none()); - let mut derefs = Autoderef::new(&mut self.table, callee_ty.clone()); + let mut derefs = Autoderef::new(&mut self.table, callee_ty.clone(), false); let (res, derefed_callee) = 'b: { // manual loop to be able to access `derefs.table` while let Some((callee_deref_ty, _)) = derefs.next() { @@ -574,16 +561,8 @@ impl<'a> InferenceContext<'a> { } Expr::Cast { expr, type_ref } => { let cast_ty = self.make_ty(type_ref); - // FIXME: propagate the "castable to" expectation - let inner_ty = self.infer_expr_no_expect(*expr); - match (inner_ty.kind(Interner), cast_ty.kind(Interner)) { - (TyKind::Ref(_, _, inner), TyKind::Raw(_, cast)) => { - // FIXME: record invalid cast diagnostic in case of mismatch - self.unify(inner, cast); - } - // FIXME check the other kinds of cast... 
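The eager `Ref`/`Raw` special case being deleted here is replaced, just below, by a `CastCheck` recorded against `Expectation::Castable` and run after type fallback. The one cast shape its `check_ref_to_ptr_cast` helper (added in the new `infer/cast.rs` earlier in this diff) models specially is `&[T; N]` to `*T`, which is a coercion to a pointer type followed by an array-to-element cast. In surface Rust (illustrative, not from the patch):

// &[u8; 4] first coerces to *const [u8; 4], then casts to *const u8.
fn ref_to_elem_ptr(a: &[u8; 4]) -> *const u8 {
    a as *const u8
}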
- _ => (), - } + let expr_ty = self.infer_expr(*expr, &Expectation::Castable(cast_ty.clone())); + self.deferred_cast_checks.push(CastCheck::new(expr_ty, cast_ty.clone())); cast_ty } Expr::Ref { expr, rawness, mutability } => { @@ -928,7 +907,7 @@ impl<'a> InferenceContext<'a> { if let TyKind::Ref(Mutability::Mut, _, inner) = derefed_callee.kind(Interner) { if adjustments .last() - .map(|x| matches!(x.kind, Adjust::Borrow(_))) + .map(|it| matches!(it.kind, Adjust::Borrow(_))) .unwrap_or(true) { // prefer reborrow to move @@ -1385,7 +1364,7 @@ impl<'a> InferenceContext<'a> { receiver_ty: &Ty, name: &Name, ) -> Option<(Ty, Option<FieldId>, Vec<Adjustment>, bool)> { - let mut autoderef = Autoderef::new(&mut self.table, receiver_ty.clone()); + let mut autoderef = Autoderef::new(&mut self.table, receiver_ty.clone(), false); let mut private_field = None; let res = autoderef.by_ref().find_map(|(derefed_ty, _)| { let (field_id, parameters) = match derefed_ty.kind(Interner) { @@ -1449,6 +1428,13 @@ impl<'a> InferenceContext<'a> { fn infer_field_access(&mut self, tgt_expr: ExprId, receiver: ExprId, name: &Name) -> Ty { let receiver_ty = self.infer_expr_inner(receiver, &Expectation::none()); + + if name.is_missing() { + // Bail out early, don't even try to look up field. Also, we don't issue an unresolved + // field diagnostic because this is a syntax error rather than a semantic error. + return self.err_ty(); + } + match self.lookup_field(&receiver_ty, name) { Some((ty, field_id, adjustments, is_public)) => { self.write_expr_adj(receiver, adjustments); @@ -1585,7 +1571,7 @@ impl<'a> InferenceContext<'a> { output: Ty, inputs: Vec<Ty>, ) -> Vec<Ty> { - if let Some(expected_ty) = expected_output.to_option(&mut self.table) { + if let Some(expected_ty) = expected_output.only_has_type(&mut self.table) { self.table.fudge_inference(|table| { if table.try_unify(&expected_ty, &output).is_ok() { table.resolve_with_fallback(inputs, &|var, kind, _, _| match kind { @@ -1658,6 +1644,7 @@ impl<'a> InferenceContext<'a> { // the parameter to coerce to the expected type (for example in // `coerce_unsize_expected_type_4`). let param_ty = self.normalize_associated_types_in(param_ty); + let expected_ty = self.normalize_associated_types_in(expected_ty); let expected = Expectation::rvalue_hint(self, expected_ty); // infer with the expected type we have... 
let ty = self.infer_expr_inner(arg, &expected); diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs index 46f2e1d7d..396ca0044 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/mutability.rs @@ -12,7 +12,7 @@ use crate::{lower::lower_to_chalk_mutability, Adjust, Adjustment, AutoBorrow, Ov use super::InferenceContext; -impl<'a> InferenceContext<'a> { +impl InferenceContext<'_> { pub(crate) fn infer_mut_body(&mut self) { self.infer_mut_expr(self.body.body_expr, Mutability::Not); } @@ -69,16 +69,12 @@ impl<'a> InferenceContext<'a> { self.infer_mut_expr(*tail, Mutability::Not); } } - &Expr::While { condition: c, body, label: _ } => { - self.infer_mut_expr(c, Mutability::Not); - self.infer_mut_expr(body, Mutability::Not); - } - Expr::MethodCall { receiver: x, method_name: _, args, generic_args: _ } - | Expr::Call { callee: x, args, is_assignee_expr: _ } => { - self.infer_mut_not_expr_iter(args.iter().copied().chain(Some(*x))); + Expr::MethodCall { receiver: it, method_name: _, args, generic_args: _ } + | Expr::Call { callee: it, args, is_assignee_expr: _ } => { + self.infer_mut_not_expr_iter(args.iter().copied().chain(Some(*it))); } Expr::Match { expr, arms } => { - let m = self.pat_iter_bound_mutability(arms.iter().map(|x| x.pat)); + let m = self.pat_iter_bound_mutability(arms.iter().map(|it| it.pat)); self.infer_mut_expr(*expr, m); for arm in arms.iter() { self.infer_mut_expr(arm.expr, Mutability::Not); @@ -96,7 +92,7 @@ impl<'a> InferenceContext<'a> { } } Expr::RecordLit { path: _, fields, spread, ellipsis: _, is_assignee_expr: _ } => { - self.infer_mut_not_expr_iter(fields.iter().map(|x| x.expr).chain(*spread)) + self.infer_mut_not_expr_iter(fields.iter().map(|it| it.expr).chain(*spread)) } &Expr::Index { base, index } => { if mutability == Mutability::Mut { @@ -204,8 +200,8 @@ impl<'a> InferenceContext<'a> { } /// Checks if the pat contains a `ref mut` binding. Such paths makes the context of bounded expressions - /// mutable. For example in `let (ref mut x0, ref x1) = *x;` we need to use `DerefMut` for `*x` but in - /// `let (ref x0, ref x1) = *x;` we should use `Deref`. + /// mutable. For example in `let (ref mut x0, ref x1) = *it;` we need to use `DerefMut` for `*it` but in + /// `let (ref x0, ref x1) = *it;` we should use `Deref`. fn pat_bound_mutability(&self, pat: PatId) -> Mutability { let mut r = Mutability::Not; self.body.walk_bindings_in_pat(pat, |b| { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs index 2480f8bab..5da0ab76b 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/pat.rs @@ -56,7 +56,7 @@ impl PatLike for PatId { } } -impl<'a> InferenceContext<'a> { +impl InferenceContext<'_> { /// Infers type for tuple struct pattern or its corresponding assignee expression. /// /// Ellipses found in the original pattern or expression must be filtered out. 
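The `pat_bound_mutability` doc comment updated earlier captures the rule that a `ref mut` binding anywhere in a pattern forces the scrutinee place to be produced via `DerefMut` rather than `Deref`. A compilable sketch of that rule (example types assumed, not from the patch):

fn deref_mut_for_ref_mut_binding(b: &mut Box<(i32, i32)>) {
    // `ref mut x0` makes this a mutable use of the place `**b`, so the
    // second deref lowers to `DerefMut::deref_mut`; with only `ref`
    // bindings, `Deref::deref` would suffice.
    let (ref mut x0, ref x1) = **b;
    *x0 += *x1;
}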
@@ -306,7 +306,7 @@ impl<'a> InferenceContext<'a> { self.result .pat_adjustments .get(&pat) - .and_then(|x| x.first()) + .and_then(|it| it.first()) .unwrap_or(&self.result.type_of_pat[pat]) .clone() } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs b/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs index e33d8f179..0fb71135b 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/infer/unify.rs @@ -22,7 +22,7 @@ use crate::{ TraitEnvironment, Ty, TyBuilder, TyExt, TyKind, VariableKind, }; -impl<'a> InferenceContext<'a> { +impl InferenceContext<'_> { pub(super) fn canonicalize<T: TypeFoldable<Interner> + HasInterner<Interner = Interner>>( &mut self, t: T, @@ -91,7 +91,7 @@ pub(crate) fn unify( let mut table = InferenceTable::new(db, env); let vars = Substitution::from_iter( Interner, - tys.binders.iter(Interner).map(|x| match &x.kind { + tys.binders.iter(Interner).map(|it| match &it.kind { chalk_ir::VariableKind::Ty(_) => { GenericArgData::Ty(table.new_type_var()).intern(Interner) } @@ -252,7 +252,8 @@ impl<'a> InferenceTable<'a> { // and registering an obligation. But it needs chalk support, so we handle the most basic // case (a non associated const without generic parameters) manually. if subst.len(Interner) == 0 { - if let Ok(eval) = self.db.const_eval((*c_id).into(), subst.clone()) + if let Ok(eval) = + self.db.const_eval((*c_id).into(), subst.clone(), None) { eval } else { @@ -547,7 +548,7 @@ impl<'a> InferenceTable<'a> { table: &'a mut InferenceTable<'b>, highest_known_var: InferenceVar, } - impl<'a, 'b> TypeFolder<Interner> for VarFudger<'a, 'b> { + impl TypeFolder<Interner> for VarFudger<'_, '_> { fn as_dyn(&mut self) -> &mut dyn TypeFolder<Interner, Error = Self::Error> { self } @@ -686,8 +687,8 @@ impl<'a> InferenceTable<'a> { let mut arg_tys = vec![]; let arg_ty = TyBuilder::tuple(num_args) - .fill(|x| { - let arg = match x { + .fill(|it| { + let arg = match it { ParamKind::Type => self.new_type_var(), ParamKind::Const(ty) => { never!("Tuple with const parameter"); @@ -753,7 +754,7 @@ impl<'a> InferenceTable<'a> { { fold_tys_and_consts( ty, - |x, _| match x { + |it, _| match it { Either::Left(ty) => Either::Left(self.insert_type_vars_shallow(ty)), Either::Right(c) => Either::Right(self.insert_const_vars_shallow(c)), }, @@ -785,7 +786,7 @@ impl<'a> InferenceTable<'a> { crate::ConstScalar::Unknown => self.new_const_var(data.ty.clone()), // try to evaluate unevaluated const. Replace with new var if const eval failed. 
crate::ConstScalar::UnevaluatedConst(id, subst) => { - if let Ok(eval) = self.db.const_eval(*id, subst.clone()) { + if let Ok(eval) = self.db.const_eval(*id, subst.clone(), None) { eval } else { self.new_const_var(data.ty.clone()) @@ -798,7 +799,7 @@ impl<'a> InferenceTable<'a> { } } -impl<'a> fmt::Debug for InferenceTable<'a> { +impl fmt::Debug for InferenceTable<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("InferenceTable").field("num_vars", &self.type_variable_table.len()).finish() } @@ -826,7 +827,7 @@ mod resolve { pub(super) var_stack: &'a mut Vec<InferenceVar>, pub(super) fallback: F, } - impl<'a, 'b, F> TypeFolder<Interner> for Resolver<'a, 'b, F> + impl<F> TypeFolder<Interner> for Resolver<'_, '_, F> where F: Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg, { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs index 35d3407c1..b15339d44 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs @@ -1,13 +1,12 @@ //! Compute the binary representation of a type -use base_db::CrateId; use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy}; use hir_def::{ layout::{ Abi, FieldsShape, Integer, LayoutCalculator, LayoutS, Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout, WrappingRange, }, - LocalEnumVariantId, LocalFieldId, + LocalEnumVariantId, LocalFieldId, StructId, }; use la_arena::{Idx, RawIdx}; use stdx::never; @@ -15,7 +14,7 @@ use triomphe::Arc; use crate::{ consteval::try_const_usize, db::HirDatabase, infer::normalize, layout::adt::struct_variant_idx, - utils::ClosureSubst, Interner, Substitution, TraitEnvironment, Ty, + utils::ClosureSubst, Interner, ProjectionTy, Substitution, TraitEnvironment, Ty, }; pub use self::{ @@ -24,8 +23,8 @@ pub use self::{ }; macro_rules! user_error { - ($x: expr) => { - return Err(LayoutError::UserError(format!($x))) + ($it: expr) => { + return Err(LayoutError::UserError(format!($it))) }; } @@ -61,7 +60,6 @@ pub enum LayoutError { } struct LayoutCx<'a> { - krate: CrateId, target: &'a TargetDataLayout, } @@ -77,18 +75,101 @@ impl<'a> LayoutCalculator for LayoutCx<'a> { } } +// FIXME: move this to the `rustc_abi`. +fn layout_of_simd_ty( + db: &dyn HirDatabase, + id: StructId, + subst: &Substitution, + env: Arc<TraitEnvironment>, + dl: &TargetDataLayout, +) -> Result<Arc<Layout>, LayoutError> { + let fields = db.field_types(id.into()); + + // Supported SIMD vectors are homogeneous ADTs with at least one field: + // + // * #[repr(simd)] struct S(T, T, T, T); + // * #[repr(simd)] struct S { it: T, y: T, z: T, w: T } + // * #[repr(simd)] struct S([T; 4]) + // + // where T is a primitive scalar (integer/float/pointer). + + let f0_ty = match fields.iter().next() { + Some(it) => it.1.clone().substitute(Interner, subst), + None => { + user_error!("simd type with zero fields"); + } + }; + + // The element type and number of elements of the SIMD vector + // are obtained from: + // + // * the element type and length of the single array field, if + // the first field is of array type, or + // + // * the homogeneous field type and the number of fields. + let (e_ty, e_len, is_array) = if let TyKind::Array(e_ty, _) = f0_ty.kind(Interner) { + // Extract the number of elements from the layout of the array field: + let FieldsShape::Array { count, .. 
} = db.layout_of_ty(f0_ty.clone(), env.clone())?.fields else { + user_error!("Array with non array layout"); + }; + + (e_ty.clone(), count, true) + } else { + // First ADT field is not an array: + (f0_ty, fields.iter().count() as u64, false) + }; + + // Compute the ABI of the element type: + let e_ly = db.layout_of_ty(e_ty, env.clone())?; + let Abi::Scalar(e_abi) = e_ly.abi else { + user_error!("simd type with inner non scalar type"); + }; + + // Compute the size and alignment of the vector: + let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow)?; + let align = dl.vector_align(size); + let size = size.align_to(align.abi); + + // Compute the placement of the vector fields: + let fields = if is_array { + FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() } + } else { + FieldsShape::Array { stride: e_ly.size, count: e_len } + }; + + Ok(Arc::new(Layout { + variants: Variants::Single { index: struct_variant_idx() }, + fields, + abi: Abi::Vector { element: e_abi, count: e_len }, + largest_niche: e_ly.largest_niche, + size, + align, + })) +} + pub fn layout_of_ty_query( db: &dyn HirDatabase, ty: Ty, - krate: CrateId, + trait_env: Arc<TraitEnvironment>, ) -> Result<Arc<Layout>, LayoutError> { - let Some(target) = db.target_data_layout(krate) else { return Err(LayoutError::TargetLayoutNotAvailable) }; - let cx = LayoutCx { krate, target: &target }; + let krate = trait_env.krate; + let Some(target) = db.target_data_layout(krate) else { + return Err(LayoutError::TargetLayoutNotAvailable); + }; + let cx = LayoutCx { target: &target }; let dl = &*cx.current_data_layout(); - let trait_env = Arc::new(TraitEnvironment::empty(krate)); - let ty = normalize(db, trait_env, ty.clone()); + let ty = normalize(db, trait_env.clone(), ty.clone()); let result = match ty.kind(Interner) { - TyKind::Adt(AdtId(def), subst) => return db.layout_of_adt(*def, subst.clone(), krate), + TyKind::Adt(AdtId(def), subst) => { + if let hir_def::AdtId::StructId(s) = def { + let data = db.struct_data(*s); + let repr = data.repr.unwrap_or_default(); + if repr.simd() { + return layout_of_simd_ty(db, *s, subst, trait_env.clone(), &target); + } + }; + return db.layout_of_adt(*def, subst.clone(), trait_env.clone()); + } TyKind::Scalar(s) => match s { chalk_ir::Scalar::Bool => Layout::scalar( dl, @@ -145,9 +226,9 @@ pub fn layout_of_ty_query( let fields = tys .iter(Interner) - .map(|k| db.layout_of_ty(k.assert_ty_ref(Interner).clone(), krate)) + .map(|k| db.layout_of_ty(k.assert_ty_ref(Interner).clone(), trait_env.clone())) .collect::<Result<Vec<_>, _>>()?; - let fields = fields.iter().map(|x| &**x).collect::<Vec<_>>(); + let fields = fields.iter().map(|it| &**it).collect::<Vec<_>>(); let fields = fields.iter().collect::<Vec<_>>(); cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)? } @@ -155,7 +236,7 @@ pub fn layout_of_ty_query( let count = try_const_usize(db, &count).ok_or(LayoutError::UserError( "unevaluated or mistyped const generic parameter".to_string(), ))? 
as u64; - let element = db.layout_of_ty(element.clone(), krate)?; + let element = db.layout_of_ty(element.clone(), trait_env.clone())?; let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow)?; let abi = if count != 0 && matches!(element.abi, Abi::Uninhabited) { @@ -176,7 +257,7 @@ pub fn layout_of_ty_query( } } TyKind::Slice(element) => { - let element = db.layout_of_ty(element.clone(), krate)?; + let element = db.layout_of_ty(element.clone(), trait_env.clone())?; Layout { variants: Variants::Single { index: struct_variant_idx() }, fields: FieldsShape::Array { stride: element.size, count: 0 }, @@ -198,7 +279,15 @@ pub fn layout_of_ty_query( // return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr))); // } - let unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone()); + let mut unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone()); + if let TyKind::AssociatedType(id, subst) = unsized_part.kind(Interner) { + unsized_part = TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy { + associated_ty_id: *id, + substitution: subst.clone(), + })) + .intern(Interner); + } + unsized_part = normalize(db, trait_env.clone(), unsized_part); let metadata = match unsized_part.kind(Interner) { TyKind::Slice(_) | TyKind::Str => { scalar_unit(dl, Primitive::Int(dl.ptr_sized_integer(), false)) @@ -252,7 +341,7 @@ pub fn layout_of_ty_query( match impl_trait_id { crate::ImplTraitId::ReturnTypeImplTrait(func, idx) => { let infer = db.infer(func.into()); - return db.layout_of_ty(infer.type_of_rpit[idx].clone(), krate); + return db.layout_of_ty(infer.type_of_rpit[idx].clone(), trait_env.clone()); } crate::ImplTraitId::AsyncBlockTypeImplTrait(_, _) => { return Err(LayoutError::NotImplemented) @@ -265,14 +354,14 @@ pub fn layout_of_ty_query( let (captures, _) = infer.closure_info(c); let fields = captures .iter() - .map(|x| { + .map(|it| { db.layout_of_ty( - x.ty.clone().substitute(Interner, ClosureSubst(subst).parent_subst()), - krate, + it.ty.clone().substitute(Interner, ClosureSubst(subst).parent_subst()), + trait_env.clone(), ) }) .collect::<Result<Vec<_>, _>>()?; - let fields = fields.iter().map(|x| &**x).collect::<Vec<_>>(); + let fields = fields.iter().map(|it| &**it).collect::<Vec<_>>(); let fields = fields.iter().collect::<Vec<_>>(); cx.univariant(dl, &fields, &ReprOptions::default(), StructKind::AlwaysSized) .ok_or(LayoutError::Unknown)? @@ -281,8 +370,16 @@ pub fn layout_of_ty_query( return Err(LayoutError::NotImplemented) } TyKind::Error => return Err(LayoutError::HasErrorType), - TyKind::AssociatedType(_, _) - | TyKind::Alias(_) + TyKind::AssociatedType(id, subst) => { + // Try again with `TyKind::Alias` to normalize the associated type. 
+ let ty = TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy { + associated_ty_id: *id, + substitution: subst.clone(), + })) + .intern(Interner); + return db.layout_of_ty(ty, trait_env); + } + TyKind::Alias(_) | TyKind::Placeholder(_) | TyKind::BoundVar(_) | TyKind::InferenceVar(_, _) => return Err(LayoutError::HasPlaceholder), @@ -294,7 +391,7 @@ pub fn layout_of_ty_recover( _: &dyn HirDatabase, _: &[String], _: &Ty, - _: &CrateId, + _: &Arc<TraitEnvironment>, ) -> Result<Arc<Layout>, LayoutError> { user_error!("infinite sized recursive type"); } @@ -315,7 +412,10 @@ fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty { let data = db.struct_data(*i); let mut it = data.variant_data.fields().iter().rev(); match it.next() { - Some((f, _)) => field_ty(db, (*i).into(), f, subst), + Some((f, _)) => { + let last_field_ty = field_ty(db, (*i).into(), f, subst); + struct_tail_erasing_lifetimes(db, last_field_ty) + } None => pointee, } } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs index bd2752a71..1c92e80f3 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs @@ -2,7 +2,6 @@ use std::{cmp, ops::Bound}; -use base_db::CrateId; use hir_def::{ data::adt::VariantData, layout::{Integer, LayoutCalculator, ReprOptions, TargetDataLayout}, @@ -16,7 +15,7 @@ use crate::{ db::HirDatabase, lang_items::is_unsafe_cell, layout::{field_ty, Layout, LayoutError, RustcEnumVariantIdx}, - Substitution, + Substitution, TraitEnvironment, }; use super::LayoutCx; @@ -29,15 +28,18 @@ pub fn layout_of_adt_query( db: &dyn HirDatabase, def: AdtId, subst: Substitution, - krate: CrateId, + trait_env: Arc<TraitEnvironment>, ) -> Result<Arc<Layout>, LayoutError> { - let Some(target) = db.target_data_layout(krate) else { return Err(LayoutError::TargetLayoutNotAvailable) }; - let cx = LayoutCx { krate, target: &target }; + let krate = trait_env.krate; + let Some(target) = db.target_data_layout(krate) else { + return Err(LayoutError::TargetLayoutNotAvailable); + }; + let cx = LayoutCx { target: &target }; let dl = cx.current_data_layout(); let handle_variant = |def: VariantId, var: &VariantData| { var.fields() .iter() - .map(|(fd, _)| db.layout_of_ty(field_ty(db, def, fd, &subst), cx.krate)) + .map(|(fd, _)| db.layout_of_ty(field_ty(db, def, fd, &subst), trait_env.clone())) .collect::<Result<Vec<_>, _>>() }; let (variants, repr) = match def { @@ -70,9 +72,9 @@ pub fn layout_of_adt_query( }; let variants = variants .iter() - .map(|x| x.iter().map(|x| &**x).collect::<Vec<_>>()) + .map(|it| it.iter().map(|it| &**it).collect::<Vec<_>>()) .collect::<SmallVec<[_; 1]>>(); - let variants = variants.iter().map(|x| x.iter().collect()).collect(); + let variants = variants.iter().map(|it| it.iter().collect()).collect(); let result = if matches!(def, AdtId::UnionId(..)) { cx.layout_of_union(&repr, &variants).ok_or(LayoutError::Unknown)? } else { @@ -103,7 +105,7 @@ pub fn layout_of_adt_query( && variants .iter() .next() - .and_then(|x| x.last().map(|x| x.is_unsized())) + .and_then(|it| it.last().map(|it| !it.is_unsized())) .unwrap_or(true), ) .ok_or(LayoutError::SizeOverflow)? 
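The `struct_tail_erasing_lifetimes` fix above recurses into the last field, so the unsized tail of a nested struct chain is found correctly; the `unsized_ref` layout test added further down exercises exactly this. A minimal illustration of the rule (assumed example types, not from the patch):

// The dynamically-sized tail of a struct is the tail of its last field,
// recursively; fat-pointer metadata must look through the nesting.
struct Tail(i32, str);   // tail: str
struct Outer(u64, Tail); // tail: tail(Tail) = str

fn dynamic_size(x: &Outer) -> usize {
    // `&Outer` carries the `str` length as metadata, so the full
    // dynamic size is computable at runtime.
    std::mem::size_of_val(x)
}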
@@ -116,9 +118,9 @@ fn layout_scalar_valid_range(db: &dyn HirDatabase, def: AdtId) -> (Bound<u128>, let get = |name| { let attr = attrs.by_key(name).tt_values(); for tree in attr { - if let Some(x) = tree.token_trees.first() { - if let Ok(x) = x.to_string().parse() { - return Bound::Included(x); + if let Some(it) = tree.token_trees.first() { + if let Ok(it) = it.to_string().parse() { + return Bound::Included(it); } } } @@ -132,7 +134,7 @@ pub fn layout_of_adt_recover( _: &[String], _: &AdtId, _: &Substitution, - _: &CrateId, + _: &Arc<TraitEnvironment>, ) -> Result<Arc<Layout>, LayoutError> { user_error!("infinite sized recursive type"); } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs index 0ff8c532d..333ad473a 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs @@ -26,7 +26,7 @@ fn eval_goal(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutErro ); let (db, file_ids) = TestDB::with_many_files(&ra_fixture); - let (adt_or_type_alias_id, module_id) = file_ids + let adt_or_type_alias_id = file_ids .into_iter() .find_map(|file_id| { let module_id = db.module_for_file(file_id); @@ -47,7 +47,7 @@ fn eval_goal(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutErro } _ => None, })?; - Some((adt_or_type_alias_id, module_id)) + Some(adt_or_type_alias_id) }) .unwrap(); let goal_ty = match adt_or_type_alias_id { @@ -58,7 +58,13 @@ fn eval_goal(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutErro db.ty(ty_id.into()).substitute(Interner, &Substitution::empty(Interner)) } }; - db.layout_of_ty(goal_ty, module_id.krate()) + db.layout_of_ty( + goal_ty, + db.trait_environment(match adt_or_type_alias_id { + Either::Left(adt) => hir_def::GenericDefId::AdtId(adt), + Either::Right(ty) => hir_def::GenericDefId::TypeAliasId(ty), + }), + ) } /// A version of `eval_goal` for types that can not be expressed in ADTs, like closures and `impl Trait` @@ -72,7 +78,7 @@ fn eval_expr(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutErro let module_id = db.module_for_file(file_id); let def_map = module_id.def_map(&db); let scope = &def_map[module_id.local_id].scope; - let adt_id = scope + let function_id = scope .declarations() .find_map(|x| match x { hir_def::ModuleDefId::FunctionId(x) => { @@ -82,11 +88,11 @@ fn eval_expr(ra_fixture: &str, minicore: &str) -> Result<Arc<Layout>, LayoutErro _ => None, }) .unwrap(); - let hir_body = db.body(adt_id.into()); + let hir_body = db.body(function_id.into()); let b = hir_body.bindings.iter().find(|x| x.1.name.to_smol_str() == "goal").unwrap().0; - let infer = db.infer(adt_id.into()); + let infer = db.infer(function_id.into()); let goal_ty = infer.type_of_binding[b].clone(); - db.layout_of_ty(goal_ty, module_id.krate()) + db.layout_of_ty(goal_ty, db.trait_environment(function_id.into())) } #[track_caller] @@ -271,6 +277,20 @@ struct Goal(Foo<S>); } #[test] +fn simd_types() { + check_size_and_align( + r#" + #[repr(simd)] + struct SimdType(i64, i64); + struct Goal(SimdType); + "#, + "", + 16, + 16, + ); +} + +#[test] fn return_position_impl_trait() { size_and_align_expr! { trait T {} @@ -344,6 +364,24 @@ fn return_position_impl_trait() { } #[test] +fn unsized_ref() { + size_and_align! 
{ + struct S1([u8]); + struct S2(S1); + struct S3(i32, str); + struct S4(u64, S3); + #[allow(dead_code)] + struct S5 { + field1: u8, + field2: i16, + field_last: S4, + } + + struct Goal(&'static S1, &'static S2, &'static S3, &'static S4, &'static S5); + } +} + +#[test] fn enums() { size_and_align! { enum Goal { @@ -369,11 +407,11 @@ fn tuple() { } #[test] -fn non_zero() { +fn non_zero_and_non_null() { size_and_align! { - minicore: non_zero, option; - use core::num::NonZeroU8; - struct Goal(Option<NonZeroU8>); + minicore: non_zero, non_null, option; + use core::{num::NonZeroU8, ptr::NonNull}; + struct Goal(Option<NonZeroU8>, Option<NonNull<i32>>); } } @@ -432,3 +470,41 @@ fn enums_with_discriminants() { } } } + +#[test] +fn core_mem_discriminant() { + size_and_align! { + minicore: discriminant; + struct S(i32, u64); + struct Goal(core::mem::Discriminant<S>); + } + size_and_align! { + minicore: discriminant; + #[repr(u32)] + enum S { + A, + B, + C, + } + struct Goal(core::mem::Discriminant<S>); + } + size_and_align! { + minicore: discriminant; + enum S { + A(i32), + B(i64), + C(u8), + } + struct Goal(core::mem::Discriminant<S>); + } + size_and_align! { + minicore: discriminant; + #[repr(C, u16)] + enum S { + A(i32), + B(i64) = 200, + C = 1000, + } + struct Goal(core::mem::Discriminant<S>); + } +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs b/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs index 1a4d003bf..b3ca2a222 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/lib.rs @@ -180,9 +180,16 @@ impl MemoryMap { /// allocator function as `f` and it will return a mapping of old addresses to new addresses. fn transform_addresses( &self, - mut f: impl FnMut(&[u8]) -> Result<usize, MirEvalError>, + mut f: impl FnMut(&[u8], usize) -> Result<usize, MirEvalError>, ) -> Result<HashMap<usize, usize>, MirEvalError> { - self.memory.iter().map(|x| Ok((*x.0, f(x.1)?))).collect() + self.memory + .iter() + .map(|x| { + let addr = *x.0; + let align = if addr == 0 { 64 } else { (addr - (addr & (addr - 1))).min(64) }; + Ok((addr, f(x.1, align)?)) + }) + .collect() } fn get<'a>(&'a self, addr: usize, size: usize) -> Option<&'a [u8]> { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs b/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs index 9951a1c75..2837f400b 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/lower.rs @@ -23,7 +23,7 @@ use hir_def::{ generics::{ TypeOrConstParamData, TypeParamProvenance, WherePredicate, WherePredicateTypeTarget, }, - lang_item::{lang_attr, LangItem}, + lang_item::LangItem, nameres::MacroSubNs, path::{GenericArg, GenericArgs, ModPath, Path, PathKind, PathSegment, PathSegments}, resolver::{HasResolver, Resolver, TypeNs}, @@ -959,10 +959,10 @@ impl<'a> TyLoweringContext<'a> { } pub(crate) fn lower_where_predicate( - &'a self, - where_predicate: &'a WherePredicate, + &self, + where_predicate: &WherePredicate, ignore_bindings: bool, - ) -> impl Iterator<Item = QuantifiedWhereClause> + 'a { + ) -> impl Iterator<Item = QuantifiedWhereClause> { match where_predicate { WherePredicate::ForLifetime { target, bound, .. } | WherePredicate::TypeBound { target, bound } => { @@ -1012,7 +1012,7 @@ impl<'a> TyLoweringContext<'a> { // (So ideally, we'd only ignore `~const Drop` here) // - `Destruct` impls are built-in in 1.62 (current nightly as of 08-04-2022), so until // the builtin impls are supported by Chalk, we ignore them here. 
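Stepping back to the `MemoryMap::transform_addresses` change above: the alignment passed to the allocator callback is recovered as the lowest set bit of the address (the largest power of two dividing it), capped at 64. A self-contained restatement of that computation:

// `addr & (addr - 1)` clears the lowest set bit, so the difference
// is exactly that bit: the largest power-of-two divisor of `addr`.
fn align_of_address(addr: usize) -> usize {
    if addr == 0 { 64 } else { (addr - (addr & (addr - 1))).min(64) }
}

#[test]
fn align_of_address_examples() {
    assert_eq!(align_of_address(24), 8);  // 24 = 0b11000
    assert_eq!(align_of_address(64), 64);
    assert_eq!(align_of_address(96), 32); // 96 = 0b1100000
}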
- if let Some(lang) = lang_attr(self.db.upcast(), tr.hir_trait_id()) { + if let Some(lang) = self.db.lang_attr(tr.hir_trait_id().into()) { if matches!(lang, LangItem::Drop | LangItem::Destruct) { return false; } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs b/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs index ab6430e8f..f3a5f69b2 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/method_resolution.rs @@ -534,7 +534,7 @@ impl ReceiverAdjustments { let mut ty = table.resolve_ty_shallow(&ty); let mut adjust = Vec::new(); for _ in 0..self.autoderefs { - match autoderef::autoderef_step(table, ty.clone()) { + match autoderef::autoderef_step(table, ty.clone(), true) { None => { never!("autoderef not possible for {:?}", ty); ty = TyKind::Error.intern(Interner); @@ -559,10 +559,10 @@ impl ReceiverAdjustments { adjust.push(a); } if self.unsize_array { - ty = 'x: { + ty = 'it: { if let TyKind::Ref(m, l, inner) = ty.kind(Interner) { if let TyKind::Array(inner, _) = inner.kind(Interner) { - break 'x TyKind::Ref( + break 'it TyKind::Ref( m.clone(), l.clone(), TyKind::Slice(inner.clone()).intern(Interner), @@ -665,13 +665,21 @@ pub fn is_dyn_method( }; let self_ty = trait_ref.self_type_parameter(Interner); if let TyKind::Dyn(d) = self_ty.kind(Interner) { - let is_my_trait_in_bounds = - d.bounds.skip_binders().as_slice(Interner).iter().any(|x| match x.skip_binders() { - // rustc doesn't accept `impl Foo<2> for dyn Foo<5>`, so if the trait id is equal, no matter - // what the generics are, we are sure that the method is come from the vtable. - WhereClause::Implemented(tr) => tr.trait_id == trait_ref.trait_id, - _ => false, - }); + let is_my_trait_in_bounds = d + .bounds + .skip_binders() + .as_slice(Interner) + .iter() + .map(|it| it.skip_binders()) + .flat_map(|it| match it { + WhereClause::Implemented(tr) => { + all_super_traits(db.upcast(), from_chalk_trait_id(tr.trait_id)) + } + _ => smallvec![], + }) + // rustc doesn't accept `impl Foo<2> for dyn Foo<5>`, so if the trait id is equal, no matter + // what the generics are, we are sure that the method is come from the vtable. + .any(|x| x == trait_id); if is_my_trait_in_bounds { return Some(fn_params); } @@ -682,14 +690,14 @@ pub fn is_dyn_method( /// Looks up the impl method that actually runs for the trait method `func`. /// /// Returns `func` if it's not a method defined in a trait or the lookup failed. 
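The `is_dyn_method` change just above widens the vtable membership check from the exact trait to `all_super_traits`, since a dyn-dispatched call may resolve to a method declared on a supertrait. Illustrative surface code (not from the patch):

trait Base {
    fn base_method(&self) {}
}
trait Sub: Base {}

fn call_via_dyn(d: &dyn Sub) {
    // Dispatched through the vtable even though `base_method` is
    // declared on the supertrait, not on `Sub` itself.
    d.base_method();
}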
-pub fn lookup_impl_method( +pub(crate) fn lookup_impl_method_query( db: &dyn HirDatabase, env: Arc<TraitEnvironment>, func: FunctionId, fn_subst: Substitution, ) -> (FunctionId, Substitution) { let ItemContainerId::TraitId(trait_id) = func.lookup(db.upcast()).container else { - return (func, fn_subst) + return (func, fn_subst); }; let trait_params = db.generic_params(trait_id.into()).type_or_consts.len(); let fn_params = fn_subst.len(Interner) - trait_params; @@ -699,8 +707,8 @@ pub fn lookup_impl_method( }; let name = &db.function_data(func).name; - let Some((impl_fn, impl_subst)) = lookup_impl_assoc_item_for_trait_ref(trait_ref, db, env, name) - .and_then(|assoc| { + let Some((impl_fn, impl_subst)) = + lookup_impl_assoc_item_for_trait_ref(trait_ref, db, env, name).and_then(|assoc| { if let (AssocItemId::FunctionId(id), subst) = assoc { Some((id, subst)) } else { @@ -731,7 +739,7 @@ fn lookup_impl_assoc_item_for_trait_ref( let impls = db.trait_impls_in_deps(env.krate); let self_impls = match self_ty.kind(Interner) { TyKind::Adt(id, _) => { - id.0.module(db.upcast()).containing_block().map(|x| db.trait_impls_in_block(x)) + id.0.module(db.upcast()).containing_block().map(|it| db.trait_impls_in_block(it)) } _ => None, }; @@ -895,8 +903,8 @@ pub fn iterate_method_candidates_dyn( // (just as rustc does an autoderef and then autoref again). // We have to be careful about the order we're looking at candidates - // in here. Consider the case where we're resolving `x.clone()` - // where `x: &Vec<_>`. This resolves to the clone method with self + // in here. Consider the case where we're resolving `it.clone()` + // where `it: &Vec<_>`. This resolves to the clone method with self // type `Vec<_>`, *not* `&_`. I.e. we need to consider methods where // the receiver type exactly matches before cases where we have to // do autoref. But in the autoderef steps, the `&_` self type comes @@ -1012,8 +1020,8 @@ fn iterate_method_candidates_by_receiver( let snapshot = table.snapshot(); // We're looking for methods with *receiver* type receiver_ty. These could // be found in any of the derefs of receiver_ty, so we have to go through - // that. - let mut autoderef = autoderef::Autoderef::new(&mut table, receiver_ty.clone()); + // that, including raw derefs. 
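The `true` passed to `Autoderef::new` below enables the new explicit mode, which also steps through raw pointers when collecting receiver candidates; implicit autoderef (the `false` call sites elsewhere in this diff) still refuses to. At the language level the distinction looks like this (illustrative, not from the patch):

struct S;
impl S {
    fn method(&self) {}
}

fn raw_receiver(p: *const S) {
    // `p.method()` would not compile: implicit autoderef never goes
    // through a raw pointer. An explicit, unsafe deref does:
    unsafe { (*p).method() }
}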
+ let mut autoderef = autoderef::Autoderef::new(&mut table, receiver_ty.clone(), true); while let Some((self_ty, _)) = autoderef.next() { iterate_inherent_methods( &self_ty, @@ -1028,7 +1036,7 @@ fn iterate_method_candidates_by_receiver( table.rollback_to(snapshot); - let mut autoderef = autoderef::Autoderef::new(&mut table, receiver_ty.clone()); + let mut autoderef = autoderef::Autoderef::new(&mut table, receiver_ty.clone(), true); while let Some((self_ty, _)) = autoderef.next() { iterate_trait_method_candidates( &self_ty, @@ -1480,8 +1488,8 @@ fn generic_implements_goal( .push(self_ty.value.clone()) .fill_with_bound_vars(DebruijnIndex::INNERMOST, kinds.len()) .build(); - kinds.extend(trait_ref.substitution.iter(Interner).skip(1).map(|x| { - let vk = match x.data(Interner) { + kinds.extend(trait_ref.substitution.iter(Interner).skip(1).map(|it| { + let vk = match it.data(Interner) { chalk_ir::GenericArgData::Ty(_) => { chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General) } @@ -1504,7 +1512,7 @@ fn autoderef_method_receiver( ty: Ty, ) -> Vec<(Canonical<Ty>, ReceiverAdjustments)> { let mut deref_chain: Vec<_> = Vec::new(); - let mut autoderef = autoderef::Autoderef::new(table, ty); + let mut autoderef = autoderef::Autoderef::new(table, ty, false); while let Some((ty, derefs)) = autoderef.next() { deref_chain.push(( autoderef.table.canonicalize(ty).value, diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs index 2345bab0b..4723c25ed 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir.rs @@ -3,9 +3,14 @@ use std::{fmt::Display, iter}; use crate::{ - consteval::usize_const, db::HirDatabase, display::HirDisplay, infer::PointerCast, - lang_items::is_box, mapping::ToChalk, CallableDefId, ClosureId, Const, ConstScalar, - InferenceResult, Interner, MemoryMap, Substitution, Ty, TyKind, + consteval::usize_const, + db::HirDatabase, + display::HirDisplay, + infer::{normalize, PointerCast}, + lang_items::is_box, + mapping::ToChalk, + CallableDefId, ClosureId, Const, ConstScalar, InferenceResult, Interner, MemoryMap, + Substitution, TraitEnvironment, Ty, TyKind, }; use base_db::CrateId; use chalk_ir::Mutability; @@ -22,7 +27,9 @@ mod pretty; mod monomorphization; pub use borrowck::{borrowck_query, BorrowckResult, MutabilityReason}; -pub use eval::{interpret_mir, pad16, Evaluator, MirEvalError, VTableMap}; +pub use eval::{ + interpret_mir, pad16, render_const_using_debug_impl, Evaluator, MirEvalError, VTableMap, +}; pub use lower::{ lower_to_mir, mir_body_for_closure_query, mir_body_query, mir_body_recover, MirLowerError, }; @@ -32,6 +39,7 @@ pub use monomorphization::{ }; use smallvec::{smallvec, SmallVec}; use stdx::{impl_from, never}; +use triomphe::Arc; use super::consteval::{intern_const_scalar, try_const_usize}; @@ -129,13 +137,21 @@ pub enum ProjectionElem<V, T> { impl<V, T> ProjectionElem<V, T> { pub fn projected_ty( &self, - base: Ty, + mut base: Ty, db: &dyn HirDatabase, closure_field: impl FnOnce(ClosureId, &Substitution, usize) -> Ty, krate: CrateId, ) -> Ty { + if matches!(base.kind(Interner), TyKind::Alias(_) | TyKind::AssociatedType(..)) { + base = normalize( + db, + // FIXME: we should get this from caller + Arc::new(TraitEnvironment::empty(krate)), + base, + ); + } match self { - ProjectionElem::Deref => match &base.data(Interner).kind { + ProjectionElem::Deref => match &base.kind(Interner) { TyKind::Raw(_, inner) | TyKind::Ref(_, _, inner) => inner.clone(), 
TyKind::Adt(adt, subst) if is_box(db, adt.0) => { subst.at(Interner, 0).assert_ty_ref(Interner).clone() @@ -145,7 +161,7 @@ impl<V, T> ProjectionElem<V, T> { return TyKind::Error.intern(Interner); } }, - ProjectionElem::Field(f) => match &base.data(Interner).kind { + ProjectionElem::Field(f) => match &base.kind(Interner) { TyKind::Adt(_, subst) => { db.field_types(f.parent)[f.local_id].clone().substitute(Interner, subst) } @@ -154,7 +170,7 @@ impl<V, T> ProjectionElem<V, T> { return TyKind::Error.intern(Interner); } }, - ProjectionElem::TupleOrClosureField(f) => match &base.data(Interner).kind { + ProjectionElem::TupleOrClosureField(f) => match &base.kind(Interner) { TyKind::Tuple(_, subst) => subst .as_slice(Interner) .get(*f) @@ -171,7 +187,7 @@ impl<V, T> ProjectionElem<V, T> { } }, ProjectionElem::ConstantIndex { .. } | ProjectionElem::Index(_) => { - match &base.data(Interner).kind { + match &base.kind(Interner) { TyKind::Array(inner, _) | TyKind::Slice(inner) => inner.clone(), _ => { never!("Overloaded index is not a projection"); @@ -179,7 +195,7 @@ impl<V, T> ProjectionElem<V, T> { } } } - &ProjectionElem::Subslice { from, to } => match &base.data(Interner).kind { + &ProjectionElem::Subslice { from, to } => match &base.kind(Interner) { TyKind::Array(inner, c) => { let next_c = usize_const( db, @@ -218,6 +234,7 @@ impl Place { self.local == child.local && child.projection.starts_with(&self.projection) } + /// The place itself is not included fn iterate_over_parents(&self) -> impl Iterator<Item = Place> + '_ { (0..self.projection.len()) .map(|x| &self.projection[0..x]) @@ -321,8 +338,8 @@ impl SwitchTargets { #[derive(Debug, PartialEq, Eq, Clone)] pub struct Terminator { - span: MirSpan, - kind: TerminatorKind, + pub span: MirSpan, + pub kind: TerminatorKind, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs index a5dd0182e..ad98e8fa1 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/borrowck.rs @@ -52,7 +52,7 @@ fn all_mir_bodies( let closures = body.closures.clone(); Box::new( iter::once(Ok(body)) - .chain(closures.into_iter().flat_map(|x| for_closure(db, x))), + .chain(closures.into_iter().flat_map(|it| for_closure(db, it))), ) } Err(e) => Box::new(iter::once(Err(e))), @@ -62,7 +62,7 @@ fn all_mir_bodies( Ok(body) => { let closures = body.closures.clone(); Box::new( - iter::once(Ok(body)).chain(closures.into_iter().flat_map(|x| for_closure(db, x))), + iter::once(Ok(body)).chain(closures.into_iter().flat_map(|it| for_closure(db, it))), ) } Err(e) => Box::new(iter::once(Err(e))), @@ -171,7 +171,7 @@ fn moved_out_of_ref(db: &dyn HirDatabase, body: &MirBody) -> Vec<MovedOutOfRef> } TerminatorKind::Call { func, args, .. } => { for_operand(func, terminator.span); - args.iter().for_each(|x| for_operand(x, terminator.span)); + args.iter().for_each(|it| for_operand(it, terminator.span)); } TerminatorKind::Assert { cond, .. 
} => { for_operand(cond, terminator.span); @@ -245,7 +245,7 @@ fn ever_initialized_map( body: &MirBody, ) -> ArenaMap<BasicBlockId, ArenaMap<LocalId, bool>> { let mut result: ArenaMap<BasicBlockId, ArenaMap<LocalId, bool>> = - body.basic_blocks.iter().map(|x| (x.0, ArenaMap::default())).collect(); + body.basic_blocks.iter().map(|it| (it.0, ArenaMap::default())).collect(); fn dfs( db: &dyn HirDatabase, body: &MirBody, @@ -271,7 +271,10 @@ fn ever_initialized_map( } } let Some(terminator) = &block.terminator else { - never!("Terminator should be none only in construction.\nThe body:\n{}", body.pretty_print(db)); + never!( + "Terminator should be none only in construction.\nThe body:\n{}", + body.pretty_print(db) + ); return; }; let targets = match &terminator.kind { @@ -311,7 +314,7 @@ fn ever_initialized_map( result[body.start_block].insert(l, true); dfs(db, body, body.start_block, l, &mut result); } - for l in body.locals.iter().map(|x| x.0) { + for l in body.locals.iter().map(|it| it.0) { if !result[body.start_block].contains_idx(l) { result[body.start_block].insert(l, false); dfs(db, body, body.start_block, l, &mut result); @@ -325,10 +328,10 @@ fn mutability_of_locals( body: &MirBody, ) -> ArenaMap<LocalId, MutabilityReason> { let mut result: ArenaMap<LocalId, MutabilityReason> = - body.locals.iter().map(|x| (x.0, MutabilityReason::Not)).collect(); + body.locals.iter().map(|it| (it.0, MutabilityReason::Not)).collect(); let mut push_mut_span = |local, span| match &mut result[local] { MutabilityReason::Mut { spans } => spans.push(span), - x @ MutabilityReason::Not => *x = MutabilityReason::Mut { spans: vec![span] }, + it @ MutabilityReason::Not => *it = MutabilityReason::Mut { spans: vec![span] }, }; let ever_init_maps = ever_initialized_map(db, body); for (block_id, mut ever_init_map) in ever_init_maps.into_iter() { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs index 9acf9d39e..9e30eed56 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs @@ -1,6 +1,13 @@ //! This module provides a MIR interpreter, which is used in const eval. 
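The evaluator changes below replace the old `usize::MAX`-based partitioning of the address space with fixed power-of-two offsets, so that 0 (null) and small integers remain invalid addresses; the new `VTableMap::OFFSET` applies the same idea to vtable ids. A condensed, runnable sketch of the scheme using the 64-bit constants from the patch:

const STACK_OFFSET: usize = 1 << 60;
const HEAP_OFFSET: usize = 1 << 59;

#[derive(Debug, PartialEq)]
enum Address {
    Stack(usize),
    Heap(usize),
    Invalid(usize),
}

fn from_usize(it: usize) -> Address {
    if it > STACK_OFFSET {
        Address::Stack(it - STACK_OFFSET)
    } else if it > HEAP_OFFSET {
        Address::Heap(it - HEAP_OFFSET)
    } else {
        Address::Invalid(it) // 0 and small values stay invalid
    }
}

#[test]
fn address_tagging() {
    assert_eq!(from_usize(0), Address::Invalid(0));
    assert_eq!(from_usize(HEAP_OFFSET + 8), Address::Heap(8));
    assert_eq!(from_usize(STACK_OFFSET + 8), Address::Stack(8));
}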
-use std::{borrow::Cow, collections::HashMap, fmt::Write, iter, ops::Range}; +use std::{ + borrow::Cow, + cell::RefCell, + collections::{HashMap, HashSet}, + fmt::Write, + iter, mem, + ops::Range, +}; use base_db::{CrateId, FileId}; use chalk_ir::Mutability; @@ -8,12 +15,13 @@ use either::Either; use hir_def::{ builtin_type::BuiltinType, data::adt::{StructFlags, VariantData}, - lang_item::{lang_attr, LangItem}, + lang_item::LangItem, layout::{TagEncoding, Variants}, - AdtId, DefWithBodyId, EnumVariantId, FunctionId, HasModule, ItemContainerId, Lookup, StaticId, - VariantId, + resolver::{HasResolver, TypeNs, ValueNs}, + AdtId, ConstId, DefWithBodyId, EnumVariantId, FunctionId, HasModule, ItemContainerId, Lookup, + StaticId, VariantId, }; -use hir_expand::InFile; +use hir_expand::{mod_path::ModPath, InFile}; use intern::Interned; use la_arena::ArenaMap; use rustc_hash::{FxHashMap, FxHashSet}; @@ -28,7 +36,7 @@ use crate::{ infer::PointerCast, layout::{Layout, LayoutError, RustcEnumVariantIdx}, mapping::from_chalk, - method_resolution::{is_dyn_method, lookup_impl_method}, + method_resolution::{is_dyn_method, lookup_impl_const}, name, static_lifetime, traits::FnTrait, utils::{detect_variant_from_bytes, ClosureSubst}, @@ -37,8 +45,9 @@ use crate::{ }; use super::{ - return_slot, AggregateKind, BinOp, CastKind, LocalId, MirBody, MirLowerError, MirSpan, Operand, - Place, ProjectionElem, Rvalue, StatementKind, TerminatorKind, UnOp, + return_slot, AggregateKind, BasicBlockId, BinOp, CastKind, LocalId, MirBody, MirLowerError, + MirSpan, Operand, Place, PlaceElem, ProjectionElem, Rvalue, StatementKind, TerminatorKind, + UnOp, }; mod shim; @@ -48,15 +57,15 @@ mod tests; macro_rules! from_bytes { ($ty:tt, $value:expr) => { ($ty::from_le_bytes(match ($value).try_into() { - Ok(x) => x, + Ok(it) => it, Err(_) => return Err(MirEvalError::TypeError(stringify!(mismatched size in constructing $ty))), })) }; } macro_rules! not_supported { - ($x: expr) => { - return Err(MirEvalError::NotSupported(format!($x))) + ($it: expr) => { + return Err(MirEvalError::NotSupported(format!($it))) }; } @@ -67,18 +76,22 @@ pub struct VTableMap { } impl VTableMap { + const OFFSET: usize = 1000; // We should add some offset to ids to make 0 (null) an invalid id. + fn id(&mut self, ty: Ty) -> usize { - if let Some(x) = self.ty_to_id.get(&ty) { - return *x; + if let Some(it) = self.ty_to_id.get(&ty) { + return *it; } - let id = self.id_to_ty.len(); + let id = self.id_to_ty.len() + VTableMap::OFFSET; self.id_to_ty.push(ty.clone()); self.ty_to_id.insert(ty, id); id } pub(crate) fn ty(&self, id: usize) -> Result<&Ty> { - self.id_to_ty.get(id).ok_or(MirEvalError::InvalidVTableId(id)) + id.checked_sub(VTableMap::OFFSET) + .and_then(|id| self.id_to_ty.get(id)) + .ok_or(MirEvalError::InvalidVTableId(id)) } fn ty_of_bytes(&self, bytes: &[u8]) -> Result<&Ty> { @@ -114,11 +127,25 @@ impl TlsData { } } +struct StackFrame { + locals: Locals, + destination: Option<BasicBlockId>, + prev_stack_ptr: usize, + span: (MirSpan, DefWithBodyId), +} + +#[derive(Clone)] +enum MirOrDynIndex { + Mir(Arc<MirBody>), + Dyn(usize), +} + pub struct Evaluator<'a> { db: &'a dyn HirDatabase, trait_env: Arc<TraitEnvironment>, stack: Vec<u8>, heap: Vec<u8>, + code_stack: Vec<StackFrame>, /// Stores the global location of the statics. We const evaluate every static first time we need it /// and see it's missing, then we add it to this to reuse. static_locations: FxHashMap<StaticId, Address>, @@ -127,8 +154,21 @@ pub struct Evaluator<'a> { /// time of use. 
vtable_map: VTableMap, thread_local_storage: TlsData, + random_state: oorandom::Rand64, stdout: Vec<u8>, stderr: Vec<u8>, + layout_cache: RefCell<FxHashMap<Ty, Arc<Layout>>>, + projected_ty_cache: RefCell<FxHashMap<(Ty, PlaceElem), Ty>>, + not_special_fn_cache: RefCell<FxHashSet<FunctionId>>, + mir_or_dyn_index_cache: RefCell<FxHashMap<(FunctionId, Substitution), MirOrDynIndex>>, + /// Constantly dropping and creating `Locals` is very costly. We store + /// old locals that we normally want to drop here, to reuse their allocations + /// later. + unused_locals_store: RefCell<FxHashMap<DefWithBodyId, Vec<Locals>>>, + cached_ptr_size: usize, + cached_fn_trait_func: Option<FunctionId>, + cached_fn_mut_trait_func: Option<FunctionId>, + cached_fn_once_trait_func: Option<FunctionId>, crate_id: CrateId, // FIXME: This is a workaround, see the comment on `interpret_mir` assert_placeholder_ty_is_unused: bool, @@ -136,6 +176,8 @@ pub struct Evaluator<'a> { execution_limit: usize, /// An additional limit on stack depth, to prevent stack overflow stack_depth_limit: usize, + /// Maximum count of bytes that heap and stack can grow + memory_limit: usize, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -192,7 +234,7 @@ impl IntervalAndTy { addr: Address, ty: Ty, evaluator: &Evaluator<'_>, - locals: &Locals<'_>, + locals: &Locals, ) -> Result<IntervalAndTy> { let size = evaluator.size_of_sized(&ty, locals, "type of interval")?; Ok(IntervalAndTy { interval: Interval { addr, size }, ty }) @@ -226,18 +268,28 @@ impl IntervalOrOwned { } } +#[cfg(target_pointer_width = "64")] +const STACK_OFFSET: usize = 1 << 60; +#[cfg(target_pointer_width = "64")] +const HEAP_OFFSET: usize = 1 << 59; + +#[cfg(target_pointer_width = "32")] +const STACK_OFFSET: usize = 1 << 30; +#[cfg(target_pointer_width = "32")] +const HEAP_OFFSET: usize = 1 << 29; + impl Address { - fn from_bytes(x: &[u8]) -> Result<Self> { - Ok(Address::from_usize(from_bytes!(usize, x))) + fn from_bytes(it: &[u8]) -> Result<Self> { + Ok(Address::from_usize(from_bytes!(usize, it))) } - fn from_usize(x: usize) -> Self { - if x > usize::MAX / 2 { - Stack(x - usize::MAX / 2) - } else if x > usize::MAX / 4 { - Heap(x - usize::MAX / 4) + fn from_usize(it: usize) -> Self { + if it > STACK_OFFSET { + Stack(it - STACK_OFFSET) + } else if it > HEAP_OFFSET { + Heap(it - HEAP_OFFSET) } else { - Invalid(x) + Invalid(it) } } @@ -247,23 +299,23 @@ impl Address { fn to_usize(&self) -> usize { let as_num = match self { - Stack(x) => *x + usize::MAX / 2, - Heap(x) => *x + usize::MAX / 4, - Invalid(x) => *x, + Stack(it) => *it + STACK_OFFSET, + Heap(it) => *it + HEAP_OFFSET, + Invalid(it) => *it, }; as_num } fn map(&self, f: impl FnOnce(usize) -> usize) -> Address { match self { - Stack(x) => Stack(f(*x)), - Heap(x) => Heap(f(*x)), - Invalid(x) => Invalid(f(*x)), + Stack(it) => Stack(f(*it)), + Heap(it) => Heap(f(*it)), + Invalid(it) => Invalid(f(*it)), } } fn offset(&self, offset: usize) -> Address { - self.map(|x| x + offset) + self.map(|it| it + offset) } } @@ -282,13 +334,14 @@ pub enum MirEvalError { TypeIsUnsized(Ty, &'static str), NotSupported(String), InvalidConst(Const), - InFunction(Either<FunctionId, ClosureId>, Box<MirEvalError>, MirSpan, DefWithBodyId), + InFunction(Box<MirEvalError>, Vec<(Either<FunctionId, ClosureId>, MirSpan, DefWithBodyId)>), ExecutionLimitExceeded, StackOverflow, TargetDataLayoutNotAvailable, InvalidVTableId(usize), CoerceUnsizedError(Ty), LangItemNotFound(LangItem), + BrokenLayout(Layout), } impl MirEvalError { @@ -300,40 +353,42 @@
impl MirEvalError { ) -> std::result::Result<(), std::fmt::Error> { writeln!(f, "Mir eval error:")?; let mut err = self; - while let MirEvalError::InFunction(func, e, span, def) = err { + while let MirEvalError::InFunction(e, stack) = err { err = e; - match func { - Either::Left(func) => { - let function_name = db.function_data(*func); - writeln!( - f, - "In function {} ({:?})", - function_name.name.display(db.upcast()), - func - )?; - } - Either::Right(clos) => { - writeln!(f, "In {:?}", clos)?; + for (func, span, def) in stack.iter().take(30).rev() { + match func { + Either::Left(func) => { + let function_name = db.function_data(*func); + writeln!( + f, + "In function {} ({:?})", + function_name.name.display(db.upcast()), + func + )?; + } + Either::Right(clos) => { + writeln!(f, "In {:?}", clos)?; + } } + let source_map = db.body_with_source_map(*def).1; + let span: InFile<SyntaxNodePtr> = match span { + MirSpan::ExprId(e) => match source_map.expr_syntax(*e) { + Ok(s) => s.map(|it| it.into()), + Err(_) => continue, + }, + MirSpan::PatId(p) => match source_map.pat_syntax(*p) { + Ok(s) => s.map(|it| match it { + Either::Left(e) => e.into(), + Either::Right(e) => e.into(), + }), + Err(_) => continue, + }, + MirSpan::Unknown => continue, + }; + let file_id = span.file_id.original_file(db.upcast()); + let text_range = span.value.text_range(); + writeln!(f, "{}", span_formatter(file_id, text_range))?; } - let source_map = db.body_with_source_map(*def).1; - let span: InFile<SyntaxNodePtr> = match span { - MirSpan::ExprId(e) => match source_map.expr_syntax(*e) { - Ok(s) => s.map(|x| x.into()), - Err(_) => continue, - }, - MirSpan::PatId(p) => match source_map.pat_syntax(*p) { - Ok(s) => s.map(|x| match x { - Either::Left(e) => e.into(), - Either::Right(e) => e.into(), - }), - Err(_) => continue, - }, - MirSpan::Unknown => continue, - }; - let file_id = span.file_id.original_file(db.upcast()); - let text_range = span.value.text_range(); - writeln!(f, "{}", span_formatter(file_id, text_range))?; } match err { MirEvalError::InFunction(..) 
=> unreachable!(), @@ -373,6 +428,7 @@ impl MirEvalError { | MirEvalError::TargetDataLayoutNotAvailable | MirEvalError::CoerceUnsizedError(_) | MirEvalError::LangItemNotFound(_) + | MirEvalError::BrokenLayout(_) | MirEvalError::InvalidVTableId(_) => writeln!(f, "{:?}", err)?, } Ok(()) @@ -407,19 +463,14 @@ impl std::fmt::Debug for MirEvalError { Self::CoerceUnsizedError(arg0) => { f.debug_tuple("CoerceUnsizedError").field(arg0).finish() } + Self::BrokenLayout(arg0) => f.debug_tuple("BrokenLayout").field(arg0).finish(), Self::InvalidVTableId(arg0) => f.debug_tuple("InvalidVTableId").field(arg0).finish(), Self::NotSupported(arg0) => f.debug_tuple("NotSupported").field(arg0).finish(), Self::InvalidConst(arg0) => { let data = &arg0.data(Interner); f.debug_struct("InvalidConst").field("ty", &data.ty).field("value", &arg0).finish() } - Self::InFunction(func, e, span, _) => { - let mut e = &**e; - let mut stack = vec![(*func, *span)]; - while let Self::InFunction(f, next_e, span, _) = e { - e = &next_e; - stack.push((*f, *span)); - } + Self::InFunction(e, stack) => { f.debug_struct("WithStack").field("error", e).field("stack", &stack).finish() } } @@ -435,85 +486,126 @@ struct DropFlags { impl DropFlags { fn add_place(&mut self, p: Place) { - if p.iterate_over_parents().any(|x| self.need_drop.contains(&x)) { + if p.iterate_over_parents().any(|it| self.need_drop.contains(&it)) { return; } - self.need_drop.retain(|x| !p.is_parent(x)); + self.need_drop.retain(|it| !p.is_parent(it)); self.need_drop.insert(p); } fn remove_place(&mut self, p: &Place) -> bool { // FIXME: replace parents with parts + if let Some(parent) = p.iterate_over_parents().find(|it| self.need_drop.contains(&it)) { + self.need_drop.remove(&parent); + return true; + } self.need_drop.remove(p) } + + fn clear(&mut self) { + self.need_drop.clear(); + } } #[derive(Debug)] -struct Locals<'a> { - ptr: &'a ArenaMap<LocalId, Interval>, - body: &'a MirBody, +struct Locals { + ptr: ArenaMap<LocalId, Interval>, + body: Arc<MirBody>, drop_flags: DropFlags, } pub fn interpret_mir( db: &dyn HirDatabase, - body: &MirBody, + body: Arc<MirBody>, // FIXME: This is a workaround. Ideally, const generics should have a separate body (issue #7434), but now // they share their body with their parent, so in MIR lowering we have locals of the parent body, which // might have placeholders. With this argument, we (wrongly) assume that every placeholder type has // a zero size, hoping that they are all outside of our current body. Even without a fix for #7434, we can // (and probably should) do better here, for example by excluding bindings outside of the target expression.
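`Locals` now owns its `ArenaMap` and an `Arc<MirBody>` instead of borrowing them, which is what lets the `unused_locals_store` field added to `Evaluator` above keep finished frames around for reuse. A minimal sketch of that pooling idea, with a plain byte buffer standing in for a `Locals` allocation (names are illustrative, not the crate's API):

    use std::collections::HashMap;

    struct Pool {
        free: HashMap<&'static str, Vec<Vec<u8>>>, // per-owner free list
    }

    impl Pool {
        fn take(&mut self, owner: &'static str) -> Vec<u8> {
            match self.free.entry(owner).or_default().pop() {
                Some(mut buf) => {
                    buf.clear(); // reuse: drop contents, keep the allocation
                    buf
                }
                None => Vec::new(),
            }
        }
        fn give_back(&mut self, owner: &'static str, buf: Vec<u8>) {
            self.free.entry(owner).or_default().push(buf);
        }
    }

    fn main() {
        let mut pool = Pool { free: HashMap::new() };
        let mut b = pool.take("body");
        b.extend_from_slice(b"locals");
        pool.give_back("body", b);
        let b2 = pool.take("body");
        assert!(b2.is_empty() && b2.capacity() >= 6); // allocation reused
    }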
assert_placeholder_ty_is_unused: bool, + trait_env: Option<Arc<TraitEnvironment>>, ) -> (Result<Const>, String, String) { let ty = body.locals[return_slot()].ty.clone(); - let mut evaluator = Evaluator::new(db, body, assert_placeholder_ty_is_unused); - let x: Result<Const> = (|| { - let bytes = evaluator.interpret_mir(&body, None.into_iter())?; + let mut evaluator = Evaluator::new(db, body.owner, assert_placeholder_ty_is_unused, trait_env); + let it: Result<Const> = (|| { + if evaluator.ptr_size() != std::mem::size_of::<usize>() { + not_supported!("targets with different pointer size from host"); + } + let bytes = evaluator.interpret_mir(body.clone(), None.into_iter())?; let mut memory_map = evaluator.create_memory_map( &bytes, &ty, - &Locals { ptr: &ArenaMap::new(), body: &body, drop_flags: DropFlags::default() }, + &Locals { ptr: ArenaMap::new(), body, drop_flags: DropFlags::default() }, )?; memory_map.vtable = evaluator.vtable_map.clone(); return Ok(intern_const_scalar(ConstScalar::Bytes(bytes, memory_map), ty)); })(); ( - x, + it, String::from_utf8_lossy(&evaluator.stdout).into_owned(), String::from_utf8_lossy(&evaluator.stderr).into_owned(), ) } +#[cfg(test)] +const EXECUTION_LIMIT: usize = 100_000; +#[cfg(not(test))] +const EXECUTION_LIMIT: usize = 10_000_000; + impl Evaluator<'_> { pub fn new<'a>( db: &'a dyn HirDatabase, - body: &MirBody, + owner: DefWithBodyId, assert_placeholder_ty_is_unused: bool, + trait_env: Option<Arc<TraitEnvironment>>, ) -> Evaluator<'a> { - let crate_id = body.owner.module(db.upcast()).krate(); - let trait_env = db.trait_environment_for_body(body.owner); + let crate_id = owner.module(db.upcast()).krate(); Evaluator { stack: vec![0], heap: vec![0], + code_stack: vec![], vtable_map: VTableMap::default(), thread_local_storage: TlsData::default(), static_locations: HashMap::default(), db, - trait_env, + random_state: oorandom::Rand64::new(0), + trait_env: trait_env.unwrap_or_else(|| db.trait_environment_for_body(owner)), crate_id, stdout: vec![], stderr: vec![], assert_placeholder_ty_is_unused, stack_depth_limit: 100, - execution_limit: 1000_000, + execution_limit: EXECUTION_LIMIT, + memory_limit: 1000_000_000, // 2GB, 1GB for stack and 1GB for heap + layout_cache: RefCell::new(HashMap::default()), + projected_ty_cache: RefCell::new(HashMap::default()), + not_special_fn_cache: RefCell::new(HashSet::default()), + mir_or_dyn_index_cache: RefCell::new(HashMap::default()), + unused_locals_store: RefCell::new(HashMap::default()), + cached_ptr_size: match db.target_data_layout(crate_id) { + Some(it) => it.pointer_size.bytes_usize(), + None => 8, + }, + cached_fn_trait_func: db + .lang_item(crate_id, LangItem::Fn) + .and_then(|x| x.as_trait()) + .and_then(|x| db.trait_data(x).method_by_name(&name![call])), + cached_fn_mut_trait_func: db + .lang_item(crate_id, LangItem::FnMut) + .and_then(|x| x.as_trait()) + .and_then(|x| db.trait_data(x).method_by_name(&name![call_mut])), + cached_fn_once_trait_func: db + .lang_item(crate_id, LangItem::FnOnce) + .and_then(|x| x.as_trait()) + .and_then(|x| db.trait_data(x).method_by_name(&name![call_once])), } } - fn place_addr(&self, p: &Place, locals: &Locals<'_>) -> Result<Address> { + fn place_addr(&self, p: &Place, locals: &Locals) -> Result<Address> { Ok(self.place_addr_and_ty_and_metadata(p, locals)?.0) } - fn place_interval(&self, p: &Place, locals: &Locals<'_>) -> Result<Interval> { + fn place_interval(&self, p: &Place, locals: &Locals) -> Result<Interval> { let place_addr_and_ty = self.place_addr_and_ty_and_metadata(p, 
locals)?; Ok(Interval { addr: place_addr_and_ty.0, @@ -526,39 +618,47 @@ impl Evaluator<'_> { } fn ptr_size(&self) -> usize { - match self.db.target_data_layout(self.crate_id) { - Some(x) => x.pointer_size.bytes_usize(), - None => 8, + self.cached_ptr_size + } + + fn projected_ty(&self, ty: Ty, proj: PlaceElem) -> Ty { + let pair = (ty, proj); + if let Some(r) = self.projected_ty_cache.borrow().get(&pair) { + return r.clone(); } + let (ty, proj) = pair; + let r = proj.projected_ty( + ty.clone(), + self.db, + |c, subst, f| { + let (def, _) = self.db.lookup_intern_closure(c.into()); + let infer = self.db.infer(def); + let (captures, _) = infer.closure_info(&c); + let parent_subst = ClosureSubst(subst).parent_subst(); + captures + .get(f) + .expect("broken closure field") + .ty + .clone() + .substitute(Interner, parent_subst) + }, + self.crate_id, + ); + self.projected_ty_cache.borrow_mut().insert((ty, proj), r.clone()); + r } fn place_addr_and_ty_and_metadata<'a>( &'a self, p: &Place, - locals: &'a Locals<'a>, + locals: &'a Locals, ) -> Result<(Address, Ty, Option<IntervalOrOwned>)> { let mut addr = locals.ptr[p.local].addr; let mut ty: Ty = locals.body.locals[p.local].ty.clone(); let mut metadata: Option<IntervalOrOwned> = None; // locals are always sized for proj in &*p.projection { let prev_ty = ty.clone(); - ty = proj.projected_ty( - ty, - self.db, - |c, subst, f| { - let (def, _) = self.db.lookup_intern_closure(c.into()); - let infer = self.db.infer(def); - let (captures, _) = infer.closure_info(&c); - let parent_subst = ClosureSubst(subst).parent_subst(); - captures - .get(f) - .expect("broken closure field") - .ty - .clone() - .substitute(Interner, parent_subst) - }, - self.crate_id, - ); + ty = self.projected_ty(ty, proj.clone()); match proj { ProjectionElem::Deref => { metadata = if self.size_align_of(&ty, locals)?.is_none() { @@ -569,8 +669,8 @@ impl Evaluator<'_> { } else { None }; - let x = from_bytes!(usize, self.read_memory(addr, self.ptr_size())?); - addr = Address::from_usize(x); + let it = from_bytes!(usize, self.read_memory(addr, self.ptr_size())?); + addr = Address::from_usize(it); } ProjectionElem::Index(op) => { let offset = from_bytes!( @@ -586,13 +686,13 @@ impl Evaluator<'_> { let offset = if from_end { let len = match prev_ty.kind(Interner) { TyKind::Array(_, c) => match try_const_usize(self.db, c) { - Some(x) => x as u64, + Some(it) => it as u64, None => { not_supported!("indexing array with unknown const from end") } }, TyKind::Slice(_) => match metadata { - Some(x) => from_bytes!(u64, x.get(self)?), + Some(it) => from_bytes!(u64, it.get(self)?), None => not_supported!("slice place without metadata"), }, _ => not_supported!("bad type for const index"), @@ -607,13 +707,13 @@ impl Evaluator<'_> { addr = addr.offset(ty_size * offset); } &ProjectionElem::Subslice { from, to } => { - let inner_ty = match &ty.data(Interner).kind { + let inner_ty = match &ty.kind(Interner) { TyKind::Array(inner, _) | TyKind::Slice(inner) => inner.clone(), _ => TyKind::Error.intern(Interner), }; metadata = match metadata { - Some(x) => { - let prev_len = from_bytes!(u64, x.get(self)?); + Some(it) => { + let prev_len = from_bytes!(u64, it.get(self)?); Some(IntervalOrOwned::Owned( (prev_len - from - to).to_le_bytes().to_vec(), )) @@ -636,8 +736,8 @@ impl Evaluator<'_> { Variants::Single { .. } => &layout, Variants::Multiple { variants, .. 
} => { &variants[match f.parent { - hir_def::VariantId::EnumVariantId(x) => { - RustcEnumVariantIdx(x.local_id) + hir_def::VariantId::EnumVariantId(it) => { + RustcEnumVariantIdx(it.local_id) } _ => { return Err(MirEvalError::TypeError( @@ -652,8 +752,10 @@ impl Evaluator<'_> { .offset(u32::from(f.local_id.into_raw()) as usize) .bytes_usize(); addr = addr.offset(offset); - // FIXME: support structs with unsized fields - metadata = None; + // Unsized field metadata is equal to the metadata of the struct + if self.size_align_of(&ty, locals)?.is_some() { + metadata = None; + } } ProjectionElem::OpaqueCast(_) => not_supported!("opaque cast"), } @@ -662,22 +764,26 @@ impl Evaluator<'_> { } fn layout(&self, ty: &Ty) -> Result<Arc<Layout>> { - self.db - .layout_of_ty(ty.clone(), self.crate_id) - .map_err(|e| MirEvalError::LayoutError(e, ty.clone())) + if let Some(x) = self.layout_cache.borrow().get(ty) { + return Ok(x.clone()); + } + let r = self + .db + .layout_of_ty(ty.clone(), self.trait_env.clone()) + .map_err(|e| MirEvalError::LayoutError(e, ty.clone()))?; + self.layout_cache.borrow_mut().insert(ty.clone(), r.clone()); + Ok(r) } fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Arc<Layout>> { - self.db.layout_of_adt(adt, subst.clone(), self.crate_id).map_err(|e| { - MirEvalError::LayoutError(e, TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner)) - }) + self.layout(&TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner)) } - fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<Ty> { + fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals) -> Result<Ty> { Ok(self.place_addr_and_ty_and_metadata(p, locals)?.1) } - fn operand_ty(&self, o: &Operand, locals: &Locals<'_>) -> Result<Ty> { + fn operand_ty(&self, o: &Operand, locals: &Locals) -> Result<Ty> { Ok(match o { Operand::Copy(p) | Operand::Move(p) => self.place_ty(p, locals)?, Operand::Constant(c) => c.data(Interner).ty.clone(), @@ -688,11 +794,7 @@ impl Evaluator<'_> { }) } - fn operand_ty_and_eval( - &mut self, - o: &Operand, - locals: &mut Locals<'_>, - ) -> Result<IntervalAndTy> { + fn operand_ty_and_eval(&mut self, o: &Operand, locals: &mut Locals) -> Result<IntervalAndTy> { Ok(IntervalAndTy { interval: self.eval_operand(o, locals)?, ty: self.operand_ty(o, locals)?, @@ -701,39 +803,178 @@ impl Evaluator<'_> { fn interpret_mir( &mut self, - body: &MirBody, - args: impl Iterator<Item = Vec<u8>>, + body: Arc<MirBody>, + args: impl Iterator<Item = IntervalOrOwned>, ) -> Result<Vec<u8>> { - if let Some(x) = self.stack_depth_limit.checked_sub(1) { - self.stack_depth_limit = x; + if let Some(it) = self.stack_depth_limit.checked_sub(1) { + self.stack_depth_limit = it; } else { return Err(MirEvalError::StackOverflow); } let mut current_block_idx = body.start_block; - let mut locals = - Locals { ptr: &ArenaMap::new(), body: &body, drop_flags: DropFlags::default() }; - let (locals_ptr, stack_size) = { - let mut stack_ptr = self.stack.len(); - let addr = body - .locals - .iter() - .map(|(id, x)| { - let size = - self.size_of_sized(&x.ty, &locals, "no unsized local in extending stack")?; - let my_ptr = stack_ptr; - stack_ptr += size; - Ok((id, Interval { addr: Stack(my_ptr), size })) - }) - .collect::<Result<ArenaMap<LocalId, _>>>()?; - let stack_size = stack_ptr - self.stack.len(); - (addr, stack_size) - }; - locals.ptr = &locals_ptr; - self.stack.extend(iter::repeat(0).take(stack_size)); + let (mut locals, prev_stack_ptr) = self.create_locals_for_body(&body, None)?; + self.fill_locals_for_body(&body, &mut 
locals, args)?; + let prev_code_stack = mem::take(&mut self.code_stack); + let span = (MirSpan::Unknown, body.owner); + self.code_stack.push(StackFrame { locals, destination: None, prev_stack_ptr, span }); + 'stack: loop { + let Some(mut my_stack_frame) = self.code_stack.pop() else { + not_supported!("missing stack frame"); + }; + let e = (|| { + let mut locals = &mut my_stack_frame.locals; + let body = locals.body.clone(); + loop { + let current_block = &body.basic_blocks[current_block_idx]; + if let Some(it) = self.execution_limit.checked_sub(1) { + self.execution_limit = it; + } else { + return Err(MirEvalError::ExecutionLimitExceeded); + } + for statement in &current_block.statements { + match &statement.kind { + StatementKind::Assign(l, r) => { + let addr = self.place_addr(l, &locals)?; + let result = self.eval_rvalue(r, &mut locals)?.to_vec(&self)?; + self.write_memory(addr, &result)?; + locals.drop_flags.add_place(l.clone()); + } + StatementKind::Deinit(_) => not_supported!("de-init statement"), + StatementKind::StorageLive(_) + | StatementKind::StorageDead(_) + | StatementKind::Nop => (), + } + } + let Some(terminator) = current_block.terminator.as_ref() else { + not_supported!("block without terminator"); + }; + match &terminator.kind { + TerminatorKind::Goto { target } => { + current_block_idx = *target; + } + TerminatorKind::Call { + func, + args, + destination, + target, + cleanup: _, + from_hir_call: _, + } => { + let destination_interval = self.place_interval(destination, &locals)?; + let fn_ty = self.operand_ty(func, &locals)?; + let args = args + .iter() + .map(|it| self.operand_ty_and_eval(it, &mut locals)) + .collect::<Result<Vec<_>>>()?; + let stack_frame = match &fn_ty.kind(Interner) { + TyKind::Function(_) => { + let bytes = self.eval_operand(func, &mut locals)?; + self.exec_fn_pointer( + bytes, + destination_interval, + &args, + &locals, + *target, + terminator.span, + )?
+ } + TyKind::FnDef(def, generic_args) => self.exec_fn_def( + *def, + generic_args, + destination_interval, + &args, + &locals, + *target, + terminator.span, + )?, + it => not_supported!("unknown function type {it:?}"), + }; + locals.drop_flags.add_place(destination.clone()); + if let Some(stack_frame) = stack_frame { + self.code_stack.push(my_stack_frame); + current_block_idx = stack_frame.locals.body.start_block; + self.code_stack.push(stack_frame); + return Ok(None); + } else { + current_block_idx = + target.ok_or(MirEvalError::UndefinedBehavior( + "Diverging function returned".to_owned(), + ))?; + } + } + TerminatorKind::SwitchInt { discr, targets } => { + let val = u128::from_le_bytes(pad16( + self.eval_operand(discr, &mut locals)?.get(&self)?, + false, + )); + current_block_idx = targets.target_for_value(val); + } + TerminatorKind::Return => { + break; + } + TerminatorKind::Unreachable => { + return Err(MirEvalError::UndefinedBehavior( + "unreachable executed".to_owned(), + )); + } + TerminatorKind::Drop { place, target, unwind: _ } => { + self.drop_place(place, &mut locals, terminator.span)?; + current_block_idx = *target; + } + _ => not_supported!("unknown terminator"), + } + } + Ok(Some(my_stack_frame)) + })(); + let my_stack_frame = match e { + Ok(None) => continue 'stack, + Ok(Some(x)) => x, + Err(e) => { + let my_code_stack = mem::replace(&mut self.code_stack, prev_code_stack); + let mut error_stack = vec![]; + for frame in my_code_stack.into_iter().rev() { + if let DefWithBodyId::FunctionId(f) = frame.locals.body.owner { + error_stack.push((Either::Left(f), frame.span.0, frame.span.1)); + } + } + return Err(MirEvalError::InFunction(Box::new(e), error_stack)); + } + }; + let return_interval = my_stack_frame.locals.ptr[return_slot()]; + self.unused_locals_store + .borrow_mut() + .entry(my_stack_frame.locals.body.owner) + .or_default() + .push(my_stack_frame.locals); + match my_stack_frame.destination { + None => { + self.code_stack = prev_code_stack; + self.stack_depth_limit += 1; + return Ok(return_interval.get(self)?.to_vec()); + } + Some(bb) => { + // We don't support const promotion, so we can't truncate the stack yet. 
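The rewritten `interpret_mir` above replaces native recursion with an explicit `code_stack`: a `Call` terminator pushes the suspended caller back onto the stack followed by the callee's frame, and a `Return` pops control back to the caller, so deep call chains no longer risk overflowing the host stack. The same trampoline shape in miniature, assuming nothing beyond std (a factorial stands in for MIR bodies; this sketches the control pattern, not the crate's code):

    // Iterative factorial with an explicit frame stack, mirroring how the
    // interpreter suspends a caller (push it back) and enters the callee.
    enum Frame {
        Eval(u64),    // "call": compute factorial(k)
        Combine(u64), // caller's continuation after the callee returns
    }

    fn factorial(n: u64) -> u64 {
        let mut stack = vec![Frame::Eval(n)];
        let mut acc = 1u64;
        while let Some(frame) = stack.pop() {
            match frame {
                Frame::Eval(0) => {}               // base case: "return 1"
                Frame::Eval(k) => {
                    stack.push(Frame::Combine(k)); // caller resumes here later
                    stack.push(Frame::Eval(k - 1)); // enter the callee first
                }
                Frame::Combine(k) => acc *= k,     // caller picks up the result
            }
        }
        acc
    }

    fn main() {
        assert_eq!(factorial(10), 3_628_800);
    }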
+ let _ = my_stack_frame.prev_stack_ptr; + // self.stack.truncate(my_stack_frame.prev_stack_ptr); + current_block_idx = bb; + } + } + } + + fn fill_locals_for_body( + &mut self, + body: &MirBody, + locals: &mut Locals, + args: impl Iterator<Item = IntervalOrOwned>, + ) -> Result<()> { let mut remain_args = body.param_locals.len(); - for ((l, interval), value) in locals_ptr.iter().skip(1).zip(args) { + for ((l, interval), value) in locals.ptr.iter().skip(1).zip(args) { locals.drop_flags.add_place(l.into()); - interval.write_from_bytes(self, &value)?; + match value { + IntervalOrOwned::Owned(value) => interval.write_from_bytes(self, &value)?, + IntervalOrOwned::Borrowed(value) => interval.write_from_interval(self, value)?, + } if remain_args == 0 { return Err(MirEvalError::TypeError("more arguments provided")); } @@ -742,101 +983,64 @@ impl Evaluator<'_> { if remain_args > 0 { return Err(MirEvalError::TypeError("not enough arguments provided")); } - loop { - let current_block = &body.basic_blocks[current_block_idx]; - if let Some(x) = self.execution_limit.checked_sub(1) { - self.execution_limit = x; - } else { - return Err(MirEvalError::ExecutionLimitExceeded); - } - for statement in &current_block.statements { - match &statement.kind { - StatementKind::Assign(l, r) => { - let addr = self.place_addr(l, &locals)?; - let result = self.eval_rvalue(r, &mut locals)?.to_vec(&self)?; - self.write_memory(addr, &result)?; - locals.drop_flags.add_place(l.clone()); - } - StatementKind::Deinit(_) => not_supported!("de-init statement"), - StatementKind::StorageLive(_) - | StatementKind::StorageDead(_) - | StatementKind::Nop => (), + Ok(()) + } + + fn create_locals_for_body( + &mut self, + body: &Arc<MirBody>, + destination: Option<Interval>, + ) -> Result<(Locals, usize)> { + let mut locals = + match self.unused_locals_store.borrow_mut().entry(body.owner).or_default().pop() { + None => Locals { + ptr: ArenaMap::new(), + body: body.clone(), + drop_flags: DropFlags::default(), + }, + Some(mut l) => { + l.drop_flags.clear(); + l.body = body.clone(); + l } - } - let Some(terminator) = current_block.terminator.as_ref() else { - not_supported!("block without terminator"); }; - match &terminator.kind { - TerminatorKind::Goto { target } => { - current_block_idx = *target; - } - TerminatorKind::Call { - func, - args, - destination, - target, - cleanup: _, - from_hir_call: _, - } => { - let destination_interval = self.place_interval(destination, &locals)?; - let fn_ty = self.operand_ty(func, &locals)?; - let args = args - .iter() - .map(|x| self.operand_ty_and_eval(x, &mut locals)) - .collect::<Result<Vec<_>>>()?; - match &fn_ty.data(Interner).kind { - TyKind::Function(_) => { - let bytes = self.eval_operand(func, &mut locals)?; - self.exec_fn_pointer( - bytes, - destination_interval, - &args, - &locals, - terminator.span, - )?; - } - TyKind::FnDef(def, generic_args) => { - self.exec_fn_def( - *def, - generic_args, - destination_interval, - &args, - &locals, - terminator.span, - )?; - } - x => not_supported!("unknown function type {x:?}"), + let stack_size = { + let mut stack_ptr = self.stack.len(); + for (id, it) in body.locals.iter() { + if id == return_slot() { + if let Some(destination) = destination { + locals.ptr.insert(id, destination); + continue; } - locals.drop_flags.add_place(destination.clone()); - current_block_idx = target.expect("broken mir, function without target"); - } - TerminatorKind::SwitchInt { discr, targets } => { - let val = u128::from_le_bytes(pad16( - self.eval_operand(discr, &mut
locals)?.get(&self)?, - false, - )); - current_block_idx = targets.target_for_value(val); - } - TerminatorKind::Return => { - self.stack_depth_limit += 1; - return Ok(locals.ptr[return_slot()].get(self)?.to_vec()); } - TerminatorKind::Unreachable => { - return Err(MirEvalError::UndefinedBehavior("unreachable executed".to_owned())); - } - TerminatorKind::Drop { place, target, unwind: _ } => { - self.drop_place(place, &mut locals, terminator.span)?; - current_block_idx = *target; + let (size, align) = self.size_align_of_sized( + &it.ty, + &locals, + "no unsized local in extending stack", + )?; + while stack_ptr % align != 0 { + stack_ptr += 1; } - _ => not_supported!("unknown terminator"), + let my_ptr = stack_ptr; + stack_ptr += size; + locals.ptr.insert(id, Interval { addr: Stack(my_ptr), size }); } + stack_ptr - self.stack.len() + }; + let prev_stack_pointer = self.stack.len(); + if stack_size > self.memory_limit { + return Err(MirEvalError::Panic(format!( + "Stack overflow. Tried to grow stack to {stack_size} bytes" + ))); } + self.stack.extend(iter::repeat(0).take(stack_size)); + Ok((locals, prev_stack_pointer)) } - fn eval_rvalue(&mut self, r: &Rvalue, locals: &mut Locals<'_>) -> Result<IntervalOrOwned> { + fn eval_rvalue(&mut self, r: &Rvalue, locals: &mut Locals) -> Result<IntervalOrOwned> { use IntervalOrOwned::*; Ok(match r { - Rvalue::Use(x) => Borrowed(self.eval_operand(x, locals)?), + Rvalue::Use(it) => Borrowed(self.eval_operand(it, locals)?), Rvalue::Ref(_, p) => { let (addr, _, metadata) = self.place_addr_and_ty_and_metadata(p, locals)?; let mut r = addr.to_bytes(); @@ -881,9 +1085,9 @@ impl Evaluator<'_> { c[0] = 1 - c[0]; } else { match op { - UnOp::Not => c.iter_mut().for_each(|x| *x = !*x), + UnOp::Not => c.iter_mut().for_each(|it| *it = !*it), UnOp::Neg => { - c.iter_mut().for_each(|x| *x = !*x); + c.iter_mut().for_each(|it| *it = !*it); for k in c.iter_mut() { let o; (*k, o) = k.overflowing_add(1); @@ -948,8 +1152,8 @@ impl Evaluator<'_> { }; Owned(r.to_le_bytes().into()) } - x => not_supported!( - "invalid binop {x:?} on floating point operators" + it => not_supported!( + "invalid binop {it:?} on floating point operators" ), } } @@ -976,8 +1180,8 @@ impl Evaluator<'_> { }; Owned(r.to_le_bytes().into()) } - x => not_supported!( - "invalid binop {x:?} on floating point operators" + it => not_supported!( + "invalid binop {it:?} on floating point operators" ), } } @@ -1034,13 +1238,18 @@ impl Evaluator<'_> { BinOp::Shr => l128.checked_shr(shift_amount), _ => unreachable!(), }; + if shift_amount as usize >= lc.len() * 8 { + return Err(MirEvalError::Panic(format!( + "Overflow in {op:?}" + ))); + } if let Some(r) = r { break 'b r; } }; return Err(MirEvalError::Panic(format!("Overflow in {op:?}"))); }; - check_overflow(r)? + Owned(r.to_le_bytes()[..lc.len()].to_vec()) } BinOp::Offset => not_supported!("offset binop"), } @@ -1049,64 +1258,15 @@ impl Evaluator<'_> { Rvalue::Discriminant(p) => { let ty = self.place_ty(p, locals)?; let bytes = self.eval_place(p, locals)?.get(&self)?; - let layout = self.layout(&ty)?; - let enum_id = 'b: { - match ty.kind(Interner) { - TyKind::Adt(e, _) => match e.0 { - AdtId::EnumId(e) => break 'b e, - _ => (), - }, - _ => (), - } - return Ok(Owned(0u128.to_le_bytes().to_vec())); - }; - match &layout.variants { - Variants::Single { index } => { - let r = self.const_eval_discriminant(EnumVariantId { - parent: enum_id, - local_id: index.0, - })?; - Owned(r.to_le_bytes().to_vec()) - } - Variants::Multiple { tag, tag_encoding, variants, .. 
} => { - let Some(target_data_layout) = self.db.target_data_layout(self.crate_id) else { - not_supported!("missing target data layout"); - }; - let size = tag.size(&*target_data_layout).bytes_usize(); - let offset = layout.fields.offset(0).bytes_usize(); // The only field on enum variants is the tag field - match tag_encoding { - TagEncoding::Direct => { - let tag = &bytes[offset..offset + size]; - Owned(pad16(tag, false).to_vec()) - } - TagEncoding::Niche { untagged_variant, niche_start, .. } => { - let tag = &bytes[offset..offset + size]; - let candidate_tag = i128::from_le_bytes(pad16(tag, false)) - .wrapping_sub(*niche_start as i128) - as usize; - let variant = variants - .iter_enumerated() - .map(|(x, _)| x) - .filter(|x| x != untagged_variant) - .nth(candidate_tag) - .unwrap_or(*untagged_variant) - .0; - let result = self.const_eval_discriminant(EnumVariantId { - parent: enum_id, - local_id: variant, - })?; - Owned(result.to_le_bytes().to_vec()) - } - } - } - } + let result = self.compute_discriminant(ty, bytes)?; + Owned(result.to_le_bytes().to_vec()) } - Rvalue::Repeat(x, len) => { + Rvalue::Repeat(it, len) => { let len = match try_const_usize(self.db, &len) { - Some(x) => x as usize, + Some(it) => it as usize, None => not_supported!("non evaluatable array len in repeat Rvalue"), }; - let val = self.eval_operand(x, locals)?.get(self)?; + let val = self.eval_operand(it, locals)?.get(self)?; let size = len * val.len(); Owned(val.iter().copied().cycle().take(size).collect()) } @@ -1115,20 +1275,20 @@ impl Evaluator<'_> { let Some((size, align)) = self.size_align_of(ty, locals)? else { not_supported!("unsized box initialization"); }; - let addr = self.heap_allocate(size, align); + let addr = self.heap_allocate(size, align)?; Owned(addr.to_bytes()) } Rvalue::CopyForDeref(_) => not_supported!("copy for deref"), Rvalue::Aggregate(kind, values) => { let values = values .iter() - .map(|x| self.eval_operand(x, locals)) + .map(|it| self.eval_operand(it, locals)) .collect::<Result<Vec<_>>>()?; match kind { AggregateKind::Array(_) => { let mut r = vec![]; - for x in values { - let value = x.get(&self)?; + for it in values { + let value = it.get(&self)?; r.extend(value); } Owned(r) @@ -1139,11 +1299,12 @@ impl Evaluator<'_> { layout.size.bytes_usize(), &layout, None, - values.iter().map(|&x| x.into()), + values.iter().map(|&it| it.into()), )?) } - AggregateKind::Union(x, f) => { - let layout = self.layout_adt((*x).into(), Substitution::empty(Interner))?; + AggregateKind::Union(it, f) => { + let layout = + self.layout_adt((*it).into(), Substitution::empty(Interner))?; let offset = layout .fields .offset(u32::from(f.local_id.into_raw()) as usize) @@ -1153,14 +1314,14 @@ impl Evaluator<'_> { result[offset..offset + op.len()].copy_from_slice(op); Owned(result) } - AggregateKind::Adt(x, subst) => { + AggregateKind::Adt(it, subst) => { let (size, variant_layout, tag) = - self.layout_of_variant(*x, subst.clone(), locals)?; + self.layout_of_variant(*it, subst.clone(), locals)?; Owned(self.make_by_layout( size, &variant_layout, tag, - values.iter().map(|&x| x.into()), + values.iter().map(|&it| it.into()), )?) } AggregateKind::Closure(ty) => { @@ -1169,7 +1330,7 @@ impl Evaluator<'_> { layout.size.bytes_usize(), &layout, None, - values.iter().map(|&x| x.into()), + values.iter().map(|&it| it.into()), )?) 
} } @@ -1179,7 +1340,7 @@ PointerCast::ReifyFnPointer | PointerCast::ClosureFnPointer(_) => { let current_ty = self.operand_ty(operand, locals)?; if let TyKind::FnDef(_, _) | TyKind::Closure(_, _) = - &current_ty.data(Interner).kind + &current_ty.kind(Interner) { let id = self.vtable_map.id(current_ty); let ptr_size = self.ptr_size(); @@ -1229,21 +1390,75 @@ }) } + fn compute_discriminant(&self, ty: Ty, bytes: &[u8]) -> Result<i128> { + let layout = self.layout(&ty)?; + let enum_id = 'b: { + match ty.kind(Interner) { + TyKind::Adt(e, _) => match e.0 { + AdtId::EnumId(e) => break 'b e, + _ => (), + }, + _ => (), + } + return Ok(0); + }; + match &layout.variants { + Variants::Single { index } => { + let r = self.const_eval_discriminant(EnumVariantId { + parent: enum_id, + local_id: index.0, + })?; + Ok(r) + } + Variants::Multiple { tag, tag_encoding, variants, .. } => { + let Some(target_data_layout) = self.db.target_data_layout(self.crate_id) else { + not_supported!("missing target data layout"); + }; + let size = tag.size(&*target_data_layout).bytes_usize(); + let offset = layout.fields.offset(0).bytes_usize(); // The only field on enum variants is the tag field + match tag_encoding { + TagEncoding::Direct => { + let tag = &bytes[offset..offset + size]; + Ok(i128::from_le_bytes(pad16(tag, false))) + } + TagEncoding::Niche { untagged_variant, niche_start, .. } => { + let tag = &bytes[offset..offset + size]; + let candidate_tag = i128::from_le_bytes(pad16(tag, false)) + .wrapping_sub(*niche_start as i128) + as usize; + let variant = variants + .iter_enumerated() + .map(|(it, _)| it) + .filter(|it| it != untagged_variant) + .nth(candidate_tag) + .unwrap_or(*untagged_variant) + .0; + let result = self.const_eval_discriminant(EnumVariantId { + parent: enum_id, + local_id: variant, + })?; + Ok(result) + } + } + } + } + } + fn coerce_unsized_look_through_fields<T>( &self, ty: &Ty, goal: impl Fn(&TyKind) -> Option<T>, ) -> Result<T> { let kind = ty.kind(Interner); - if let Some(x) = goal(kind) { - return Ok(x); + if let Some(it) = goal(kind) { + return Ok(it); } if let TyKind::Adt(id, subst) = kind { if let AdtId::StructId(struct_id) = id.0 { let field_types = self.db.field_types(struct_id.into()); let mut field_types = field_types.iter(); if let Some(ty) = - field_types.next().map(|x| x.1.clone().substitute(Interner, subst)) + field_types.next().map(|it| it.1.clone().substitute(Interner, subst)) { return self.coerce_unsized_look_through_fields(&ty, goal); } @@ -1258,66 +1473,99 @@ impl Evaluator<'_> { current_ty: &Ty, target_ty: &Ty, ) -> Result<IntervalOrOwned> { - use IntervalOrOwned::*; - fn for_ptr(x: &TyKind) -> Option<Ty> { - match x { + fn for_ptr(it: &TyKind) -> Option<Ty> { + match it { TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => Some(ty.clone()), _ => None, } } - Ok(match self.coerce_unsized_look_through_fields(target_ty, for_ptr)? { - ty => match &ty.data(Interner).kind { - TyKind::Slice(_) => { - match self.coerce_unsized_look_through_fields(current_ty, for_ptr)?
{ - ty => match &ty.data(Interner).kind { - TyKind::Array(_, size) => { - let len = match try_const_usize(self.db, size) { - None => not_supported!( - "unevaluatable len of array in coerce unsized" - ), - Some(x) => x as usize, - }; - let mut r = Vec::with_capacity(16); - let addr = addr.get(self)?; - r.extend(addr.iter().copied()); - r.extend(len.to_le_bytes().into_iter()); - Owned(r) - } - t => { - not_supported!("slice unsizing from non array type {t:?}") - } - }, - } + let target_ty = self.coerce_unsized_look_through_fields(target_ty, for_ptr)?; + let current_ty = self.coerce_unsized_look_through_fields(current_ty, for_ptr)?; + + self.unsizing_ptr_from_addr(target_ty, current_ty, addr) + } + + /// Adds metadata to the address and create the fat pointer result of the unsizing operation. + fn unsizing_ptr_from_addr( + &mut self, + target_ty: Ty, + current_ty: Ty, + addr: Interval, + ) -> Result<IntervalOrOwned> { + use IntervalOrOwned::*; + Ok(match &target_ty.kind(Interner) { + TyKind::Slice(_) => match &current_ty.kind(Interner) { + TyKind::Array(_, size) => { + let len = match try_const_usize(self.db, size) { + None => { + not_supported!("unevaluatable len of array in coerce unsized") + } + Some(it) => it as usize, + }; + let mut r = Vec::with_capacity(16); + let addr = addr.get(self)?; + r.extend(addr.iter().copied()); + r.extend(len.to_le_bytes().into_iter()); + Owned(r) } t => { not_supported!("slice unsizing from non array type {t:?}") } }, - TyKind::Dyn(_) => match &current_ty.data(Interner).kind { - TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => { - let vtable = self.vtable_map.id(ty.clone()); - let mut r = Vec::with_capacity(16); - let addr = addr.get(self)?; - r.extend(addr.iter().copied()); - r.extend(vtable.to_le_bytes().into_iter()); - Owned(r) + TyKind::Dyn(_) => { + let vtable = self.vtable_map.id(current_ty.clone()); + let mut r = Vec::with_capacity(16); + let addr = addr.get(self)?; + r.extend(addr.iter().copied()); + r.extend(vtable.to_le_bytes().into_iter()); + Owned(r) + } + TyKind::Adt(id, target_subst) => match &current_ty.kind(Interner) { + TyKind::Adt(current_id, current_subst) => { + if id != current_id { + not_supported!("unsizing struct with different type"); } - _ => not_supported!("dyn unsizing from non pointers"), - }, - _ => not_supported!("unknown unsized cast"), + let id = match id.0 { + AdtId::StructId(s) => s, + AdtId::UnionId(_) => not_supported!("unsizing unions"), + AdtId::EnumId(_) => not_supported!("unsizing enums"), + }; + let Some((last_field, _)) = + self.db.struct_data(id).variant_data.fields().iter().rev().next() + else { + not_supported!("unsizing struct without field"); + }; + let target_last_field = self.db.field_types(id.into())[last_field] + .clone() + .substitute(Interner, target_subst); + let current_last_field = self.db.field_types(id.into())[last_field] + .clone() + .substitute(Interner, current_subst); + return self.unsizing_ptr_from_addr( + target_last_field, + current_last_field, + addr, + ); + } + _ => not_supported!("unsizing struct with non adt type"), }, + _ => not_supported!("unknown unsized cast"), }) } fn layout_of_variant( &mut self, - x: VariantId, + it: VariantId, subst: Substitution, - locals: &Locals<'_>, + locals: &Locals, ) -> Result<(usize, Arc<Layout>, Option<(usize, usize, i128)>)> { - let adt = x.adt_id(); + let adt = it.adt_id(); if let DefWithBodyId::VariantId(f) = locals.body.owner { - if let VariantId::EnumVariantId(x) = x { + if let VariantId::EnumVariantId(it) = it { if AdtId::from(f.parent) == adt { // Computing the
exact size of enums requires resolving the enum discriminants. In order to prevent loops (and // infinite sized type errors) we use a dummy layout - let i = self.const_eval_discriminant(x)?; + let i = self.const_eval_discriminant(it)?; return Ok((16, self.layout(&TyBuilder::unit())?, Some((0, 16, i)))); } } @@ -1330,8 +1578,8 @@ impl Evaluator<'_> { .db .target_data_layout(self.crate_id) .ok_or(MirEvalError::TargetDataLayoutNotAvailable)?; - let enum_variant_id = match x { - VariantId::EnumVariantId(x) => x, + let enum_variant_id = match it { + VariantId::EnumVariantId(it) => it, _ => not_supported!("multi variant layout for non-enums"), }; let rustc_enum_variant_idx = RustcEnumVariantIdx(enum_variant_id.local_id); @@ -1345,8 +1593,8 @@ impl Evaluator<'_> { } else { discriminant = (variants .iter_enumerated() - .filter(|(x, _)| x != untagged_variant) - .position(|(x, _)| x == rustc_enum_variant_idx) + .filter(|(it, _)| it != untagged_variant) + .position(|(it, _)| it == rustc_enum_variant_idx) .unwrap() as i128) .wrapping_add(*niche_start as i128); true @@ -1379,18 +1627,24 @@ impl Evaluator<'_> { ) -> Result<Vec<u8>> { let mut result = vec![0; size]; if let Some((offset, size, value)) = tag { - result[offset..offset + size].copy_from_slice(&value.to_le_bytes()[0..size]); + match result.get_mut(offset..offset + size) { + Some(it) => it.copy_from_slice(&value.to_le_bytes()[0..size]), + None => return Err(MirEvalError::BrokenLayout(variant_layout.clone())), + } } for (i, op) in values.enumerate() { let offset = variant_layout.fields.offset(i).bytes_usize(); let op = op.get(&self)?; - result[offset..offset + op.len()].copy_from_slice(op); + match result.get_mut(offset..offset + op.len()) { + Some(it) => it.copy_from_slice(op), + None => return Err(MirEvalError::BrokenLayout(variant_layout.clone())), + } } Ok(result) } - fn eval_operand(&mut self, x: &Operand, locals: &mut Locals<'_>) -> Result<Interval> { - Ok(match x { + fn eval_operand(&mut self, it: &Operand, locals: &mut Locals) -> Result<Interval> { + Ok(match it { Operand::Copy(p) | Operand::Move(p) => { locals.drop_flags.remove_place(p); self.eval_place(p, locals)? @@ -1399,61 +1653,66 @@ impl Evaluator<'_> { let addr = self.eval_static(*st, locals)?; Interval::new(addr, self.ptr_size()) } - Operand::Constant(konst) => { - let data = &konst.data(Interner); - match &data.value { - chalk_ir::ConstValue::BoundVar(_) => not_supported!("bound var constant"), - chalk_ir::ConstValue::InferenceVar(_) => { - not_supported!("inference var constant") - } - chalk_ir::ConstValue::Placeholder(_) => not_supported!("placeholder constant"), - chalk_ir::ConstValue::Concrete(c) => { - self.allocate_const_in_heap(c, &data.ty, locals, konst)?
- } - } - } + Operand::Constant(konst) => self.allocate_const_in_heap(locals, konst)?, }) } - fn allocate_const_in_heap( - &mut self, - c: &chalk_ir::ConcreteConst<Interner>, - ty: &Ty, - locals: &Locals<'_>, - konst: &chalk_ir::Const<Interner>, - ) -> Result<Interval> { - Ok(match &c.interned { - ConstScalar::Bytes(v, memory_map) => { - let mut v: Cow<'_, [u8]> = Cow::Borrowed(v); - let patch_map = memory_map.transform_addresses(|b| { - let addr = self.heap_allocate(b.len(), 1); // FIXME: align is wrong - self.write_memory(addr, b)?; - Ok(addr.to_usize()) - })?; - let (size, align) = self.size_align_of(ty, locals)?.unwrap_or((v.len(), 1)); - if size != v.len() { - // Handle self enum - if size == 16 && v.len() < 16 { - v = Cow::Owned(pad16(&v, false).to_vec()); - } else if size < 16 && v.len() == 16 { - v = Cow::Owned(v[0..size].to_vec()); - } else { - return Err(MirEvalError::InvalidConst(konst.clone())); + fn allocate_const_in_heap(&mut self, locals: &Locals, konst: &Const) -> Result<Interval> { + let ty = &konst.data(Interner).ty; + let chalk_ir::ConstValue::Concrete(c) = &konst.data(Interner).value else { + not_supported!("evaluating non concrete constant"); + }; + let result_owner; + let (v, memory_map) = match &c.interned { + ConstScalar::Bytes(v, mm) => (v, mm), + ConstScalar::UnevaluatedConst(const_id, subst) => 'b: { + let mut const_id = *const_id; + let mut subst = subst.clone(); + if let hir_def::GeneralConstId::ConstId(c) = const_id { + let (c, s) = lookup_impl_const(self.db, self.trait_env.clone(), c, subst); + const_id = hir_def::GeneralConstId::ConstId(c); + subst = s; + } + result_owner = self + .db + .const_eval(const_id.into(), subst, Some(self.trait_env.clone())) + .map_err(|e| { + let name = const_id.name(self.db.upcast()); + MirEvalError::ConstEvalError(name, Box::new(e)) + })?; + if let chalk_ir::ConstValue::Concrete(c) = &result_owner.data(Interner).value { + if let ConstScalar::Bytes(v, mm) = &c.interned { + break 'b (v, mm); } } - let addr = self.heap_allocate(size, align); - self.write_memory(addr, &v)?; - self.patch_addresses(&patch_map, &memory_map.vtable, addr, ty, locals)?; - Interval::new(addr, size) - } - ConstScalar::UnevaluatedConst(..) 
=> { - not_supported!("unevaluated const present in monomorphized mir"); + not_supported!("unevaluatable constant"); } ConstScalar::Unknown => not_supported!("evaluating unknown const"), - }) + }; + let mut v: Cow<'_, [u8]> = Cow::Borrowed(v); + let patch_map = memory_map.transform_addresses(|b, align| { + let addr = self.heap_allocate(b.len(), align)?; + self.write_memory(addr, b)?; + Ok(addr.to_usize()) + })?; + let (size, align) = self.size_align_of(ty, locals)?.unwrap_or((v.len(), 1)); + if size != v.len() { + // Handle self enum + if size == 16 && v.len() < 16 { + v = Cow::Owned(pad16(&v, false).to_vec()); + } else if size < 16 && v.len() == 16 { + v = Cow::Owned(v[0..size].to_vec()); + } else { + return Err(MirEvalError::InvalidConst(konst.clone())); + } + } + let addr = self.heap_allocate(size, align)?; + self.write_memory(addr, &v)?; + self.patch_addresses(&patch_map, &memory_map.vtable, addr, ty, locals)?; + Ok(Interval::new(addr, size)) } - fn eval_place(&mut self, p: &Place, locals: &Locals<'_>) -> Result<Interval> { + fn eval_place(&mut self, p: &Place, locals: &Locals) -> Result<Interval> { let addr = self.place_addr(p, locals)?; Ok(Interval::new( addr, @@ -1466,11 +1725,11 @@ impl Evaluator<'_> { return Ok(&[]); } let (mem, pos) = match addr { - Stack(x) => (&self.stack, x), - Heap(x) => (&self.heap, x), - Invalid(x) => { + Stack(it) => (&self.stack, it), + Heap(it) => (&self.heap, it), + Invalid(it) => { return Err(MirEvalError::UndefinedBehavior(format!( - "read invalid memory address {x} with size {size}" + "read invalid memory address {it} with size {size}" ))); } }; @@ -1478,28 +1737,35 @@ impl Evaluator<'_> { .ok_or_else(|| MirEvalError::UndefinedBehavior("out of bound memory read".to_string())) } - fn write_memory(&mut self, addr: Address, r: &[u8]) -> Result<()> { - if r.is_empty() { - return Ok(()); - } + fn write_memory_using_ref(&mut self, addr: Address, size: usize) -> Result<&mut [u8]> { let (mem, pos) = match addr { - Stack(x) => (&mut self.stack, x), - Heap(x) => (&mut self.heap, x), - Invalid(x) => { + Stack(it) => (&mut self.stack, it), + Heap(it) => (&mut self.heap, it), + Invalid(it) => { return Err(MirEvalError::UndefinedBehavior(format!( - "write invalid memory address {x} with content {r:?}" + "write invalid memory address {it} with size {size}" ))); } }; - mem.get_mut(pos..pos + r.len()) - .ok_or_else(|| { - MirEvalError::UndefinedBehavior("out of bound memory write".to_string()) - })? - .copy_from_slice(r); + Ok(mem.get_mut(pos..pos + size).ok_or_else(|| { + MirEvalError::UndefinedBehavior("out of bound memory write".to_string()) + })?) + } + + fn write_memory(&mut self, addr: Address, r: &[u8]) -> Result<()> { + if r.is_empty() { + return Ok(()); + } + self.write_memory_using_ref(addr, r.len())?.copy_from_slice(r); Ok(()) } - fn size_align_of(&self, ty: &Ty, locals: &Locals<'_>) -> Result<Option<(usize, usize)>> { + fn size_align_of(&self, ty: &Ty, locals: &Locals) -> Result<Option<(usize, usize)>> { + if let Some(layout) = self.layout_cache.borrow().get(ty) { + return Ok(layout + .is_sized() + .then(|| (layout.size.bytes_usize(), layout.align.abi.bytes() as usize))); + } if let DefWithBodyId::VariantId(f) = locals.body.owner { if let Some((adt, _)) = ty.as_adt() { if AdtId::from(f.parent) == adt { @@ -1523,39 +1789,61 @@ impl Evaluator<'_> { /// A version of `self.size_of` which returns an error if the type is unsized. `what` argument should /// be something that completes this: `error: type {ty} was unsized. {what} should be sized` - fn size_of_sized(&self, ty: &Ty, locals: &Locals<'_>, what: &'static str) -> Result<usize> { + fn size_of_sized(&self, ty: &Ty, locals: &Locals, what: &'static str) -> Result<usize> { match self.size_align_of(ty, locals)? { - Some(x) => Ok(x.0), + Some(it) => Ok(it.0), None => Err(MirEvalError::TypeIsUnsized(ty.clone(), what)), } } + /// A version of `self.size_align_of` which returns an error if the type is unsized. `what` argument should + /// be something that completes this: `error: type {ty} was unsized. {what} should be sized` + fn size_align_of_sized( + &self, + ty: &Ty, + locals: &Locals, + what: &'static str, + ) -> Result<(usize, usize)> { + match self.size_align_of(ty, locals)? { + Some(it) => Ok(it), + None => Err(MirEvalError::TypeIsUnsized(ty.clone(), what)), + } + } + + fn heap_allocate(&mut self, size: usize, align: usize) -> Result<Address> { + if !align.is_power_of_two() || align > 10000 { + return Err(MirEvalError::UndefinedBehavior(format!("Alignment {align} is invalid"))); + } + while self.heap.len() % align != 0 { + self.heap.push(0); + } + if size.checked_add(self.heap.len()).map_or(true, |x| x > self.memory_limit) { + return Err(MirEvalError::Panic(format!("Memory allocation of {size} bytes failed"))); + } let pos = self.heap.len(); self.heap.extend(iter::repeat(0).take(size)); - Address::Heap(pos) + Ok(Address::Heap(pos)) } fn detect_fn_trait(&self, def: FunctionId) -> Option<FnTrait> { - use LangItem::*; - let ItemContainerId::TraitId(parent) = self.db.lookup_intern_function(def).container else { - return None; - }; - let l = lang_attr(self.db.upcast(), parent)?; - match l { - FnOnce => Some(FnTrait::FnOnce), - FnMut => Some(FnTrait::FnMut), - Fn => Some(FnTrait::Fn), - _ => None, + let def = Some(def); + if def == self.cached_fn_trait_func { + Some(FnTrait::Fn) + } else if def == self.cached_fn_mut_trait_func { + Some(FnTrait::FnMut) + } else if def == self.cached_fn_once_trait_func { + Some(FnTrait::FnOnce) + } else { + None } } - fn create_memory_map(&self, bytes: &[u8], ty: &Ty, locals: &Locals<'_>) -> Result<MemoryMap> { + fn create_memory_map(&self, bytes: &[u8], ty: &Ty, locals: &Locals) -> Result<MemoryMap> { fn rec( this: &Evaluator<'_>, bytes: &[u8], ty: &Ty, - locals: &Locals<'_>, + locals: &Locals, mm: &mut MemoryMap, ) -> Result<()> { match ty.kind(Interner) { @@ -1602,6 +1890,17 @@ impl Evaluator<'_> { } } } + chalk_ir::TyKind::Array(inner, len) => { + let len = match try_const_usize(this.db, &len) { + Some(it) => it as usize, + None => not_supported!("non evaluatable array len in patching addresses"), + }; + let size = this.size_of_sized(inner, locals, "inner of array")?; + for i in 0..len { + let offset = i * size; + rec(this, &bytes[offset..offset + size], inner, locals, mm)?; + } + } chalk_ir::TyKind::Tuple(_, subst) => { let layout = this.layout(ty)?; for (id, ty) in subst.iter(Interner).enumerate() { @@ -1628,9 +1927,13 @@ impl Evaluator<'_> { } AdtId::EnumId(e) => { let layout = this.layout(ty)?; - if let Some((v, l)) = - detect_variant_from_bytes(&layout, this.db, this.crate_id, bytes, e) - { + if let Some((v, l)) = detect_variant_from_bytes( + &layout, + this.db, + this.trait_env.clone(), + bytes, + e, + ) { let data = &this.db.enum_data(e).variants[v].variant_data; let field_types = this .db @@ -1661,7 +1964,7 @@ impl Evaluator<'_> { old_vtable: &VTableMap, addr: Address, ty: &Ty, - locals: &Locals<'_>, + locals: &Locals, ) -> Result<()> { // FIXME:
support indirect references let layout = self.layout(ty)?; @@ -1672,14 +1975,14 @@ impl Evaluator<'_> { match size { Some(_) => { let current = from_bytes!(usize, self.read_memory(addr, my_size)?); - if let Some(x) = patch_map.get(&current) { - self.write_memory(addr, &x.to_le_bytes())?; + if let Some(it) = patch_map.get(&current) { + self.write_memory(addr, &it.to_le_bytes())?; } } None => { let current = from_bytes!(usize, self.read_memory(addr, my_size / 2)?); - if let Some(x) = patch_map.get(&current) { - self.write_memory(addr, &x.to_le_bytes())?; + if let Some(it) = patch_map.get(&current) { + self.write_memory(addr, &it.to_le_bytes())?; } } } @@ -1706,10 +2009,31 @@ impl Evaluator<'_> { AdtId::UnionId(_) => (), AdtId::EnumId(_) => (), }, + TyKind::Tuple(_, subst) => { + for (id, ty) in subst.iter(Interner).enumerate() { + let ty = ty.assert_ty_ref(Interner); // Tuple only has type argument + let offset = layout.fields.offset(id).bytes_usize(); + self.patch_addresses(patch_map, old_vtable, addr.offset(offset), ty, locals)?; + } + } + TyKind::Array(inner, len) => { + let len = match try_const_usize(self.db, &len) { + Some(it) => it as usize, + None => not_supported!("non evaluatable array len in patching addresses"), + }; + let size = self.size_of_sized(inner, locals, "inner of array")?; + for i in 0..len { + self.patch_addresses( + patch_map, + old_vtable, + addr.offset(i * size), + inner, + locals, + )?; + } + } TyKind::AssociatedType(_, _) | TyKind::Scalar(_) - | TyKind::Tuple(_, _) - | TyKind::Array(_, _) | TyKind::Slice(_) | TyKind::Raw(_, _) | TyKind::OpaqueType(_, _) @@ -1735,21 +2059,21 @@ impl Evaluator<'_> { bytes: Interval, destination: Interval, args: &[IntervalAndTy], - locals: &Locals<'_>, + locals: &Locals, + target_bb: Option<BasicBlockId>, span: MirSpan, - ) -> Result<()> { + ) -> Result<Option<StackFrame>> { let id = from_bytes!(usize, bytes.get(self)?); let next_ty = self.vtable_map.ty(id)?.clone(); - match &next_ty.data(Interner).kind { + match &next_ty.kind(Interner) { TyKind::FnDef(def, generic_args) => { - self.exec_fn_def(*def, generic_args, destination, args, &locals, span)?; + self.exec_fn_def(*def, generic_args, destination, args, &locals, target_bb, span) } TyKind::Closure(id, subst) => { - self.exec_closure(*id, bytes.slice(0..0), subst, destination, args, locals, span)?; + self.exec_closure(*id, bytes.slice(0..0), subst, destination, args, locals, span) } - _ => return Err(MirEvalError::TypeError("function pointer to non function")), + _ => Err(MirEvalError::TypeError("function pointer to non function")), } - Ok(()) } fn exec_closure( @@ -1759,9 +2083,9 @@ impl Evaluator<'_> { generic_args: &Substitution, destination: Interval, args: &[IntervalAndTy], - locals: &Locals<'_>, + locals: &Locals, span: MirSpan, - ) -> Result<()> { + ) -> Result<Option<StackFrame>> { let mir_body = self .db .monomorphized_mir_body_for_closure( @@ -1769,7 +2093,7 @@ impl Evaluator<'_> { generic_args.clone(), self.trait_env.clone(), ) - .map_err(|x| MirEvalError::MirLowerErrorForClosure(closure, x))?; + .map_err(|it| MirEvalError::MirLowerErrorForClosure(closure, it))?; let closure_data = if mir_body.locals[mir_body.param_locals[0]].ty.as_reference().is_some() { closure_data.addr.to_bytes() } else { closure_data.get(self)?.to_owned() }; let arg_bytes = iter::once(Ok(closure_data)) - .chain(args.iter().map(|x| Ok(x.get(&self)?.to_owned()))) + .chain(args.iter().map(|it| Ok(it.get(&self)?.to_owned()))) .collect::<Result<Vec<_>>>()?; - let bytes =
self.interpret_mir(&mir_body, arg_bytes.into_iter()).map_err(|e| { - MirEvalError::InFunction(Either::Right(closure), Box::new(e), span, locals.body.owner) - })?; - destination.write_from_bytes(self, &bytes) + let bytes = self + .interpret_mir(mir_body, arg_bytes.into_iter().map(IntervalOrOwned::Owned)) + .map_err(|e| { + MirEvalError::InFunction( + Box::new(e), + vec![(Either::Right(closure), span, locals.body.owner)], + ) + })?; + destination.write_from_bytes(self, &bytes)?; + Ok(None) } fn exec_fn_def( @@ -1791,18 +2121,34 @@ impl Evaluator<'_> { generic_args: &Substitution, destination: Interval, args: &[IntervalAndTy], - locals: &Locals<'_>, + locals: &Locals, + target_bb: Option<BasicBlockId>, span: MirSpan, - ) -> Result<()> { + ) -> Result<Option<StackFrame>> { let def: CallableDefId = from_chalk(self.db, def); let generic_args = generic_args.clone(); match def { CallableDefId::FunctionId(def) => { if let Some(_) = self.detect_fn_trait(def) { - self.exec_fn_trait(&args, destination, locals, span)?; - return Ok(()); + return self.exec_fn_trait( + def, + args, + generic_args, + locals, + destination, + target_bb, + span, + ); } - self.exec_fn_with_args(def, args, generic_args, locals, destination, span)?; + self.exec_fn_with_args( + def, + args, + generic_args, + locals, + destination, + target_bb, + span, + ) } CallableDefId::StructId(id) => { let (size, variant_layout, tag) = @@ -1811,9 +2157,10 @@ impl Evaluator<'_> { size, &variant_layout, tag, - args.iter().map(|x| x.interval.into()), + args.iter().map(|it| it.interval.into()), )?; destination.write_from_bytes(self, &result)?; + Ok(None) } CallableDefId::EnumVariantId(id) => { let (size, variant_layout, tag) = @@ -1822,12 +2169,46 @@ impl Evaluator<'_> { size, &variant_layout, tag, - args.iter().map(|x| x.interval.into()), + args.iter().map(|it| it.interval.into()), )?; destination.write_from_bytes(self, &result)?; + Ok(None) } } - Ok(()) + } + + fn get_mir_or_dyn_index( + &self, + def: FunctionId, + generic_args: Substitution, + locals: &Locals, + span: MirSpan, + ) -> Result<MirOrDynIndex> { + let pair = (def, generic_args); + if let Some(r) = self.mir_or_dyn_index_cache.borrow().get(&pair) { + return Ok(r.clone()); + } + let (def, generic_args) = pair; + let r = if let Some(self_ty_idx) = + is_dyn_method(self.db, self.trait_env.clone(), def, generic_args.clone()) + { + MirOrDynIndex::Dyn(self_ty_idx) + } else { + let (imp, generic_args) = + self.db.lookup_impl_method(self.trait_env.clone(), def, generic_args.clone()); + let mir_body = self + .db + .monomorphized_mir_body(imp.into(), generic_args, self.trait_env.clone()) + .map_err(|e| { + MirEvalError::InFunction( + Box::new(MirEvalError::MirLowerError(imp, e)), + vec![(Either::Left(imp), span, locals.body.owner)], + ) + })?; + MirOrDynIndex::Mir(mir_body) + }; + self.mir_or_dyn_index_cache.borrow_mut().insert((def, generic_args), r.clone()); + Ok(r) } fn exec_fn_with_args( @@ -1835,10 +2216,11 @@ impl Evaluator<'_> { def: FunctionId, args: &[IntervalAndTy], generic_args: Substitution, - locals: &Locals<'_>, + locals: &Locals, destination: Interval, + target_bb: Option<BasicBlockId>, span: MirSpan, - ) -> Result<()> { + ) -> Result<Option<StackFrame>> { if self.detect_and_exec_special_function( def, args, @@ -1847,85 +2229,96 @@ impl Evaluator<'_> { destination, span, )? 
{ - return Ok(()); + return Ok(None); } - let arg_bytes = - args.iter().map(|x| Ok(x.get(&self)?.to_owned())).collect::<Result<Vec<_>>>()?; - if let Some(self_ty_idx) = - is_dyn_method(self.db, self.trait_env.clone(), def, generic_args.clone()) - { - // In the layout of current possible receiver, which at the moment of writing this code is one of - // `&T`, `&mut T`, `Box<T>`, `Rc<T>`, `Arc<T>`, and `Pin<P>` where `P` is one of possible recievers, - // the vtable is exactly in the `[ptr_size..2*ptr_size]` bytes. So we can use it without branching on - // the type. - let ty = - self.vtable_map.ty_of_bytes(&arg_bytes[0][self.ptr_size()..self.ptr_size() * 2])?; - let mut args_for_target = args.to_vec(); - args_for_target[0] = IntervalAndTy { - interval: args_for_target[0].interval.slice(0..self.ptr_size()), - ty: ty.clone(), - }; - let ty = GenericArgData::Ty(ty.clone()).intern(Interner); - let generics_for_target = - Substitution::from_iter( + let arg_bytes = args.iter().map(|it| IntervalOrOwned::Borrowed(it.interval)); + match self.get_mir_or_dyn_index(def, generic_args.clone(), locals, span)? { + MirOrDynIndex::Dyn(self_ty_idx) => { + // In the layout of current possible receiver, which at the moment of writing this code is one of + // `&T`, `&mut T`, `Box<T>`, `Rc<T>`, `Arc<T>`, and `Pin<P>` where `P` is one of possible recievers, + // the vtable is exactly in the `[ptr_size..2*ptr_size]` bytes. So we can use it without branching on + // the type. + let first_arg = arg_bytes.clone().next().unwrap(); + let first_arg = first_arg.get(self)?; + let ty = self + .vtable_map + .ty_of_bytes(&first_arg[self.ptr_size()..self.ptr_size() * 2])?; + let mut args_for_target = args.to_vec(); + args_for_target[0] = IntervalAndTy { + interval: args_for_target[0].interval.slice(0..self.ptr_size()), + ty: ty.clone(), + }; + let ty = GenericArgData::Ty(ty.clone()).intern(Interner); + let generics_for_target = Substitution::from_iter( Interner, - generic_args.iter(Interner).enumerate().map(|(i, x)| { + generic_args.iter(Interner).enumerate().map(|(i, it)| { if i == self_ty_idx { &ty } else { - x + it } }), ); - return self.exec_fn_with_args( - def, - &args_for_target, - generics_for_target, + return self.exec_fn_with_args( + def, + &args_for_target, + generics_for_target, + locals, + destination, + target_bb, + span, + ); + } + MirOrDynIndex::Mir(body) => self.exec_looked_up_function( + body, locals, - destination, + def, + arg_bytes, span, - ); + destination, + target_bb, + ), } - let (imp, generic_args) = - lookup_impl_method(self.db, self.trait_env.clone(), def, generic_args); - self.exec_looked_up_function(generic_args, locals, imp, arg_bytes, span, destination) } fn exec_looked_up_function( &mut self, - generic_args: Substitution, - locals: &Locals<'_>, - imp: FunctionId, - arg_bytes: Vec<Vec<u8>>, + mir_body: Arc<MirBody>, + locals: &Locals, + def: FunctionId, + arg_bytes: impl Iterator<Item = IntervalOrOwned>, span: MirSpan, destination: Interval, - ) -> Result<()> { - let def = imp.into(); - let mir_body = self - .db - .monomorphized_mir_body(def, generic_args, self.trait_env.clone()) - .map_err(|e| { + target_bb: Option<BasicBlockId>, + ) -> Result<Option<StackFrame>> { + Ok(if let Some(target_bb) = target_bb { + let (mut locals, prev_stack_ptr) = + self.create_locals_for_body(&mir_body, Some(destination))?; + self.fill_locals_for_body(&mir_body, &mut locals, arg_bytes.into_iter())?; + let span = (span, locals.body.owner); + Some(StackFrame { locals, destination: Some(target_bb), prev_stack_ptr, span 
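// When the caller supplies a `target_bb`, no recursive interpretation
// happens: the callee's locals are allocated and filled right here, and
// the returned `StackFrame` tells the interpreter loop to push the frame,
// with `target_bb` recording where the caller resumes once the callee
// finishes. This is what turns nested calls into an explicit stack rather
// than host-side recursion.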
}) + } else { + let result = self.interpret_mir(mir_body, arg_bytes).map_err(|e| { MirEvalError::InFunction( - Either::Left(imp), - Box::new(MirEvalError::MirLowerError(imp, e)), - span, - locals.body.owner, + Box::new(e), + vec![(Either::Left(def), span, locals.body.owner)], ) })?; - let result = self.interpret_mir(&mir_body, arg_bytes.iter().cloned()).map_err(|e| { - MirEvalError::InFunction(Either::Left(imp), Box::new(e), span, locals.body.owner) - })?; - destination.write_from_bytes(self, &result)?; - Ok(()) + destination.write_from_bytes(self, &result)?; + None + }) } fn exec_fn_trait( &mut self, + def: FunctionId, args: &[IntervalAndTy], + generic_args: Substitution, + locals: &Locals, destination: Interval, - locals: &Locals<'_>, + target_bb: Option<BasicBlockId>, span: MirSpan, - ) -> Result<()> { + ) -> Result<Option<StackFrame>> { let func = args.get(0).ok_or(MirEvalError::TypeError("fn trait with no arg"))?; let mut func_ty = func.ty.clone(); let mut func_data = func.interval; @@ -1940,15 +2333,30 @@ impl Evaluator<'_> { let size = self.size_of_sized(&func_ty, locals, "self type of fn trait")?; func_data = Interval { addr: Address::from_bytes(func_data.get(self)?)?, size }; } - match &func_ty.data(Interner).kind { + match &func_ty.kind(Interner) { TyKind::FnDef(def, subst) => { - self.exec_fn_def(*def, subst, destination, &args[1..], locals, span)?; + return self.exec_fn_def( + *def, + subst, + destination, + &args[1..], + locals, + target_bb, + span, + ); } TyKind::Function(_) => { - self.exec_fn_pointer(func_data, destination, &args[1..], locals, span)?; + return self.exec_fn_pointer( + func_data, + destination, + &args[1..], + locals, + target_bb, + span, + ); } TyKind::Closure(closure, subst) => { - self.exec_closure( + return self.exec_closure( *closure, func_data, &Substitution::from_iter(Interner, ClosureSubst(subst).parent_subst()), @@ -1956,14 +2364,45 @@ impl Evaluator<'_> { &args[1..], locals, span, - )?; + ); + } + _ => { + // try to execute the manual impl of `FnTrait` for structs (nightly feature used in std) + let arg0 = func; + let args = &args[1..]; + let arg1 = { + let ty = TyKind::Tuple( + args.len(), + Substitution::from_iter(Interner, args.iter().map(|it| it.ty.clone())), + ) + .intern(Interner); + let layout = self.layout(&ty)?; + let result = self.make_by_layout( + layout.size.bytes_usize(), + &layout, + None, + args.iter().map(|it| IntervalOrOwned::Borrowed(it.interval)), + )?; + // FIXME: there is some leak here + let size = layout.size.bytes_usize(); + let addr = self.heap_allocate(size, layout.align.abi.bytes() as usize)?; + self.write_memory(addr, &result)?; + IntervalAndTy { interval: Interval { addr, size }, ty } + }; + return self.exec_fn_with_args( + def, + &[arg0.clone(), arg1], + generic_args, + locals, + destination, + target_bb, + span, + ); } - x => not_supported!("Call FnTrait methods with type {x:?}"), } - Ok(()) } - fn eval_static(&mut self, st: StaticId, locals: &Locals<'_>) -> Result<Address> { + fn eval_static(&mut self, st: StaticId, locals: &Locals) -> Result<Address> { if let Some(o) = self.static_locations.get(&st) { return Ok(*o); }; @@ -1975,21 +2414,16 @@ impl Evaluator<'_> { Box::new(e), ) })?; - let data = &konst.data(Interner); - if let chalk_ir::ConstValue::Concrete(c) = &data.value { - self.allocate_const_in_heap(&c, &data.ty, locals, &konst)? - } else { - not_supported!("unevaluatable static"); - } + self.allocate_const_in_heap(locals, &konst)? 
} else { let ty = &self.db.infer(st.into())[self.db.body(st.into()).body_expr]; let Some((size, align)) = self.size_align_of(&ty, locals)? else { not_supported!("unsized extern static"); }; - let addr = self.heap_allocate(size, align); + let addr = self.heap_allocate(size, align)?; Interval::new(addr, size) }; - let addr = self.heap_allocate(self.ptr_size(), self.ptr_size()); + let addr = self.heap_allocate(self.ptr_size(), self.ptr_size())?; self.write_memory(addr, &result.addr.to_bytes())?; self.static_locations.insert(st, addr); Ok(addr) @@ -2011,13 +2445,13 @@ impl Evaluator<'_> { } } - fn drop_place(&mut self, place: &Place, locals: &mut Locals<'_>, span: MirSpan) -> Result<()> { + fn drop_place(&mut self, place: &Place, locals: &mut Locals, span: MirSpan) -> Result<()> { let (addr, ty, metadata) = self.place_addr_and_ty_and_metadata(place, locals)?; if !locals.drop_flags.remove_place(place) { return Ok(()); } let metadata = match metadata { - Some(x) => x.get(self)?.to_vec(), + Some(it) => it.get(self)?.to_vec(), None => vec![], }; self.run_drop_glue_deep(ty, locals, addr, &metadata, span) @@ -2026,7 +2460,7 @@ impl Evaluator<'_> { fn run_drop_glue_deep( &mut self, ty: Ty, - locals: &Locals<'_>, + locals: &Locals, addr: Address, _metadata: &[u8], span: MirSpan, @@ -2039,20 +2473,19 @@ impl Evaluator<'_> { // we can ignore drop in them. return Ok(()); }; - let (impl_drop_candidate, subst) = lookup_impl_method( - self.db, - self.trait_env.clone(), - drop_fn, - Substitution::from1(Interner, ty.clone()), - ); - if impl_drop_candidate != drop_fn { + + let generic_args = Substitution::from1(Interner, ty.clone()); + if let Ok(MirOrDynIndex::Mir(body)) = + self.get_mir_or_dyn_index(drop_fn, generic_args, locals, span) + { self.exec_looked_up_function( - subst, + body, locals, - impl_drop_candidate, - vec![addr.to_bytes()], + drop_fn, + [IntervalOrOwned::Owned(addr.to_bytes())].into_iter(), span, Interval { addr: Address::Invalid(0), size: 0 }, + None, )?; } match ty.kind(Interner) { @@ -2121,10 +2554,77 @@ impl Evaluator<'_> { } } -pub fn pad16(x: &[u8], is_signed: bool) -> [u8; 16] { - let is_negative = is_signed && x.last().unwrap_or(&0) > &128; +pub fn render_const_using_debug_impl( + db: &dyn HirDatabase, + owner: ConstId, + c: &Const, +) -> Result<String> { + let mut evaluator = Evaluator::new(db, owner.into(), false, None); + let locals = &Locals { + ptr: ArenaMap::new(), + body: db + .mir_body(owner.into()) + .map_err(|_| MirEvalError::NotSupported("unreachable".to_string()))?, + drop_flags: DropFlags::default(), + }; + let data = evaluator.allocate_const_in_heap(locals, c)?; + let resolver = owner.resolver(db.upcast()); + let Some(TypeNs::TraitId(debug_trait)) = resolver.resolve_path_in_type_ns_fully( + db.upcast(), + &hir_def::path::Path::from_known_path_with_no_generic(ModPath::from_segments( + hir_expand::mod_path::PathKind::Abs, + [name![core], name![fmt], name![Debug]].into_iter(), + )), + ) else { + not_supported!("core::fmt::Debug not found"); + }; + let Some(debug_fmt_fn) = db.trait_data(debug_trait).method_by_name(&name![fmt]) else { + not_supported!("core::fmt::Debug::fmt not found"); + }; + // a1 = &[""] + let a1 = evaluator.heap_allocate(evaluator.ptr_size() * 2, evaluator.ptr_size())?; + // a2 = &[::core::fmt::ArgumentV1::new(&(THE_CONST), ::core::fmt::Debug::fmt)] + // FIXME: we should call the said function, but since its name is going to break in the next rustc version + // and its ABI doesn't break yet, we put it in memory manually. 
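For orientation, the `fmt::Arguments` value being hand-assembled in memory above corresponds to what ordinary Rust builds through `format_args!`. A minimal standalone sketch of the surface-level behavior being emulated (plain std Rust; none of these names are rust-analyzer APIs):

```rust
use std::fmt::Debug;

// Render a value through its Debug impl and collect the output, which is
// what `render_const_using_debug_impl` does at the MIR level.
fn render_debug<T: Debug>(value: &T) -> String {
    // `format!("{value:?}")` expands to
    // `std::fmt::format(format_args!("{value:?}"))`: `format_args!` pairs a
    // slice of literal pieces with a slice of (argument, formatter-fn)
    // entries, the same two slices the evaluator writes by hand above.
    std::fmt::format(format_args!("{value:?}"))
}

fn main() {
    assert_eq!(render_debug(&(1, "x")), "(1, \"x\")");
}
```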
+ let a2 = evaluator.heap_allocate(evaluator.ptr_size() * 2, evaluator.ptr_size())?; + evaluator.write_memory(a2, &data.addr.to_bytes())?; + let debug_fmt_fn_ptr = evaluator.vtable_map.id(TyKind::FnDef( + db.intern_callable_def(debug_fmt_fn.into()).into(), + Substitution::from1(Interner, c.data(Interner).ty.clone()), + ) + .intern(Interner)); + evaluator.write_memory(a2.offset(evaluator.ptr_size()), &debug_fmt_fn_ptr.to_le_bytes())?; + // a3 = ::core::fmt::Arguments::new_v1(a1, a2) + // FIXME: similarly, we should call function here, not directly working with memory. + let a3 = evaluator.heap_allocate(evaluator.ptr_size() * 6, evaluator.ptr_size())?; + evaluator.write_memory(a3.offset(2 * evaluator.ptr_size()), &a1.to_bytes())?; + evaluator.write_memory(a3.offset(3 * evaluator.ptr_size()), &[1])?; + evaluator.write_memory(a3.offset(4 * evaluator.ptr_size()), &a2.to_bytes())?; + evaluator.write_memory(a3.offset(5 * evaluator.ptr_size()), &[1])?; + let Some(ValueNs::FunctionId(format_fn)) = resolver.resolve_path_in_value_ns_fully( + db.upcast(), + &hir_def::path::Path::from_known_path_with_no_generic(ModPath::from_segments( + hir_expand::mod_path::PathKind::Abs, + [name![std], name![fmt], name![format]].into_iter(), + )), + ) else { + not_supported!("std::fmt::format not found"); + }; + let message_string = evaluator.interpret_mir( + db.mir_body(format_fn.into()).map_err(|e| MirEvalError::MirLowerError(format_fn, e))?, + [IntervalOrOwned::Borrowed(Interval { addr: a3, size: evaluator.ptr_size() * 6 })] + .into_iter(), + )?; + let addr = + Address::from_bytes(&message_string[evaluator.ptr_size()..2 * evaluator.ptr_size()])?; + let size = from_bytes!(usize, message_string[2 * evaluator.ptr_size()..]); + Ok(std::string::String::from_utf8_lossy(evaluator.read_memory(addr, size)?).into_owned()) +} + +pub fn pad16(it: &[u8], is_signed: bool) -> [u8; 16] { + let is_negative = is_signed && it.last().unwrap_or(&0) > &127; let fill_with = if is_negative { 255 } else { 0 }; - x.iter() + it.iter() .copied() .chain(iter::repeat(fill_with)) .take(16) diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs index 3b9ef03c3..b2e29fd34 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs @@ -3,20 +3,26 @@ use std::cmp; +use chalk_ir::TyKind; +use hir_def::resolver::HasResolver; +use hir_expand::mod_path::ModPath; + use super::*; +mod simd; + macro_rules! from_bytes { ($ty:tt, $value:expr) => { ($ty::from_le_bytes(match ($value).try_into() { - Ok(x) => x, + Ok(it) => it, Err(_) => return Err(MirEvalError::TypeError("mismatched size")), })) }; } macro_rules! 
not_supported { - ($x: expr) => { - return Err(MirEvalError::NotSupported(format!($x))) + ($it: expr) => { + return Err(MirEvalError::NotSupported(format!($it))) }; } @@ -26,10 +32,13 @@ impl Evaluator<'_> { def: FunctionId, args: &[IntervalAndTy], generic_args: &Substitution, - locals: &Locals<'_>, + locals: &Locals, destination: Interval, span: MirSpan, ) -> Result<bool> { + if self.not_special_fn_cache.borrow().contains(&def) { + return Ok(false); + } let function_data = self.db.function_data(def); let is_intrinsic = match &function_data.abi { Some(abi) => *abi == Interned::new_str("rust-intrinsic"), @@ -53,6 +62,28 @@ impl Evaluator<'_> { )?; return Ok(true); } + let is_platform_intrinsic = match &function_data.abi { + Some(abi) => *abi == Interned::new_str("platform-intrinsic"), + None => match def.lookup(self.db.upcast()).container { + hir_def::ItemContainerId::ExternBlockId(block) => { + let id = block.lookup(self.db.upcast()).id; + id.item_tree(self.db.upcast())[id.value].abi.as_deref() + == Some("platform-intrinsic") + } + _ => false, + }, + }; + if is_platform_intrinsic { + self.exec_platform_intrinsic( + function_data.name.as_text().unwrap_or_default().as_str(), + args, + generic_args, + destination, + &locals, + span, + )?; + return Ok(true); + } let is_extern_c = match def.lookup(self.db.upcast()).container { hir_def::ItemContainerId::ExternBlockId(block) => { let id = block.lookup(self.db.upcast()).id; @@ -74,31 +105,110 @@ impl Evaluator<'_> { let alloc_fn = function_data .attrs .iter() - .filter_map(|x| x.path().as_ident()) - .filter_map(|x| x.as_str()) - .find(|x| { + .filter_map(|it| it.path().as_ident()) + .filter_map(|it| it.as_str()) + .find(|it| { [ "rustc_allocator", "rustc_deallocator", "rustc_reallocator", "rustc_allocator_zeroed", ] - .contains(x) + .contains(it) }); if let Some(alloc_fn) = alloc_fn { self.exec_alloc_fn(alloc_fn, args, destination)?; return Ok(true); } - if let Some(x) = self.detect_lang_function(def) { + if let Some(it) = self.detect_lang_function(def) { let arg_bytes = - args.iter().map(|x| Ok(x.get(&self)?.to_owned())).collect::<Result<Vec<_>>>()?; - let result = self.exec_lang_item(x, generic_args, &arg_bytes, locals, span)?; + args.iter().map(|it| Ok(it.get(&self)?.to_owned())).collect::<Result<Vec<_>>>()?; + let result = self.exec_lang_item(it, generic_args, &arg_bytes, locals, span)?; destination.write_from_bytes(self, &result)?; return Ok(true); } + if let ItemContainerId::TraitId(t) = def.lookup(self.db.upcast()).container { + if self.db.lang_attr(t.into()) == Some(LangItem::Clone) { + let [self_ty] = generic_args.as_slice(Interner) else { + not_supported!("wrong generic arg count for clone"); + }; + let Some(self_ty) = self_ty.ty(Interner) else { + not_supported!("wrong generic arg kind for clone"); + }; + // Clone has special impls for tuples and function pointers + if matches!(self_ty.kind(Interner), TyKind::Function(_) | TyKind::Tuple(..)) { + self.exec_clone(def, args, self_ty.clone(), locals, destination, span)?; + return Ok(true); + } + // Return early to prevent caching clone as non special fn. 
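The special-casing exists because `Clone` for tuples and function pointers comes from compiler-built-in impls that have no MIR body to look up and interpret. A small illustration of the two cases the shim reimplements (standard Rust, nothing evaluator-specific):

```rust
fn main() {
    // Function pointers: cloning is a bitwise copy of the pointer value,
    // which is what the `TyKind::Function` arm of `exec_clone` does.
    let f: fn(i32) -> i32 = |x| x + 1;
    let g = f.clone();
    assert_eq!(g(1), 2);

    // Tuples clone field by field; the `TyKind::Tuple` arm mirrors this by
    // recursing into each field at its layout offset.
    let t = (String::from("a"), 1u8);
    let u = t.clone();
    assert_eq!(u, (String::from("a"), 1u8));
}
```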
+ return Ok(false); + } + } + self.not_special_fn_cache.borrow_mut().insert(def); Ok(false) } + /// Clone has special impls for tuples and function pointers + fn exec_clone( + &mut self, + def: FunctionId, + args: &[IntervalAndTy], + self_ty: Ty, + locals: &Locals, + destination: Interval, + span: MirSpan, + ) -> Result<()> { + match self_ty.kind(Interner) { + TyKind::Function(_) => { + let [arg] = args else { + not_supported!("wrong arg count for clone"); + }; + let addr = Address::from_bytes(arg.get(self)?)?; + return destination + .write_from_interval(self, Interval { addr, size: destination.size }); + } + TyKind::Tuple(_, subst) => { + let [arg] = args else { + not_supported!("wrong arg count for clone"); + }; + let addr = Address::from_bytes(arg.get(self)?)?; + let layout = self.layout(&self_ty)?; + for (i, ty) in subst.iter(Interner).enumerate() { + let ty = ty.assert_ty_ref(Interner); + let size = self.layout(ty)?.size.bytes_usize(); + let tmp = self.heap_allocate(self.ptr_size(), self.ptr_size())?; + let arg = IntervalAndTy { + interval: Interval { addr: tmp, size: self.ptr_size() }, + ty: TyKind::Ref(Mutability::Not, static_lifetime(), ty.clone()) + .intern(Interner), + }; + let offset = layout.fields.offset(i).bytes_usize(); + self.write_memory(tmp, &addr.offset(offset).to_bytes())?; + self.exec_clone( + def, + &[arg], + ty.clone(), + locals, + destination.slice(offset..offset + size), + span, + )?; + } + } + _ => { + self.exec_fn_with_args( + def, + args, + Substitution::from1(Interner, self_ty), + locals, + destination, + None, + span, + )?; + } + } + Ok(()) + } + fn exec_alloc_fn( &mut self, alloc_fn: &str, @@ -112,7 +222,7 @@ impl Evaluator<'_> { }; let size = from_bytes!(usize, size.get(self)?); let align = from_bytes!(usize, align.get(self)?); - let result = self.heap_allocate(size, align); + let result = self.heap_allocate(size, align)?; destination.write_from_bytes(self, &result.to_bytes())?; } "rustc_deallocator" => { /* no-op for now */ } @@ -120,14 +230,18 @@ impl Evaluator<'_> { let [ptr, old_size, align, new_size] = args else { return Err(MirEvalError::TypeError("rustc_allocator args are not provided")); }; - let ptr = Address::from_bytes(ptr.get(self)?)?; let old_size = from_bytes!(usize, old_size.get(self)?); let new_size = from_bytes!(usize, new_size.get(self)?); - let align = from_bytes!(usize, align.get(self)?); - let result = self.heap_allocate(new_size, align); - Interval { addr: result, size: old_size } - .write_from_interval(self, Interval { addr: ptr, size: old_size })?; - destination.write_from_bytes(self, &result.to_bytes())?; + if old_size >= new_size { + destination.write_from_interval(self, ptr.interval)?; + } else { + let ptr = Address::from_bytes(ptr.get(self)?)?; + let align = from_bytes!(usize, align.get(self)?); + let result = self.heap_allocate(new_size, align)?; + Interval { addr: result, size: old_size } + .write_from_interval(self, Interval { addr: ptr, size: old_size })?; + destination.write_from_bytes(self, &result.to_bytes())?; + } } _ => not_supported!("unknown alloc function"), } @@ -136,7 +250,7 @@ impl Evaluator<'_> { fn detect_lang_function(&self, def: FunctionId) -> Option<LangItem> { use LangItem::*; - let candidate = lang_attr(self.db.upcast(), def)?; + let candidate = self.db.lang_attr(def.into())?; // We want to execute these functions with special logic if [PanicFmt, BeginPanic, SliceLen, DropInPlace].contains(&candidate) { return Some(candidate); @@ -146,56 +260,35 @@ impl Evaluator<'_> { fn exec_lang_item( &mut self, - x: 
LangItem, + it: LangItem, generic_args: &Substitution, args: &[Vec<u8>], - locals: &Locals<'_>, + locals: &Locals, span: MirSpan, ) -> Result<Vec<u8>> { use LangItem::*; let mut args = args.iter(); - match x { + match it { BeginPanic => Err(MirEvalError::Panic("<unknown-panic-payload>".to_string())), PanicFmt => { let message = (|| { - let arguments_struct = - self.db.lang_item(self.crate_id, LangItem::FormatArguments)?.as_struct()?; - let arguments_layout = self - .layout_adt(arguments_struct.into(), Substitution::empty(Interner)) - .ok()?; - let arguments_field_pieces = - self.db.struct_data(arguments_struct).variant_data.field(&name![pieces])?; - let pieces_offset = arguments_layout - .fields - .offset(u32::from(arguments_field_pieces.into_raw()) as usize) - .bytes_usize(); - let ptr_size = self.ptr_size(); - let arg = args.next()?; - let pieces_array_addr = - Address::from_bytes(&arg[pieces_offset..pieces_offset + ptr_size]).ok()?; - let pieces_array_len = usize::from_le_bytes( - (&arg[pieces_offset + ptr_size..pieces_offset + 2 * ptr_size]) - .try_into() - .ok()?, - ); - let mut message = "".to_string(); - for i in 0..pieces_array_len { - let piece_ptr_addr = pieces_array_addr.offset(2 * i * ptr_size); - let piece_addr = - Address::from_bytes(self.read_memory(piece_ptr_addr, ptr_size).ok()?) - .ok()?; - let piece_len = usize::from_le_bytes( - self.read_memory(piece_ptr_addr.offset(ptr_size), ptr_size) - .ok()? - .try_into() - .ok()?, - ); - let piece_data = self.read_memory(piece_addr, piece_len).ok()?; - message += &std::string::String::from_utf8_lossy(piece_data); - } - Some(message) + let resolver = self.db.crate_def_map(self.crate_id).crate_root().resolver(self.db.upcast()); + let Some(format_fn) = resolver.resolve_path_in_value_ns_fully( + self.db.upcast(), + &hir_def::path::Path::from_known_path_with_no_generic(ModPath::from_segments( + hir_expand::mod_path::PathKind::Abs, + [name![std], name![fmt], name![format]].into_iter(), + )), + ) else { + not_supported!("std::fmt::format not found"); + }; + let hir_def::resolver::ValueNs::FunctionId(format_fn) = format_fn else { not_supported!("std::fmt::format is not a function") }; + let message_string = self.interpret_mir(self.db.mir_body(format_fn.into()).map_err(|e| MirEvalError::MirLowerError(format_fn, e))?, args.map(|x| IntervalOrOwned::Owned(x.clone())))?; + let addr = Address::from_bytes(&message_string[self.ptr_size()..2 * self.ptr_size()])?; + let size = from_bytes!(usize, message_string[2 * self.ptr_size()..]); + Ok(std::string::String::from_utf8_lossy(self.read_memory(addr, size)?).into_owned()) })() - .unwrap_or_else(|| "<format-args-evaluation-failed>".to_string()); + .unwrap_or_else(|e| format!("Failed to render panic format args: {e:?}")); Err(MirEvalError::Panic(message)) } SliceLen => { @@ -207,7 +300,7 @@ impl Evaluator<'_> { } DropInPlace => { let ty = - generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)).ok_or( + generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)).ok_or( MirEvalError::TypeError( "generic argument of drop_in_place is not provided", ), @@ -224,7 +317,35 @@ impl Evaluator<'_> { )?; Ok(vec![]) } - x => not_supported!("Executing lang item {x:?}"), + it => not_supported!("Executing lang item {it:?}"), + } + } + + fn exec_syscall( + &mut self, + id: i64, + args: &[IntervalAndTy], + destination: Interval, + _locals: &Locals, + _span: MirSpan, + ) -> Result<()> { + match id { + 318 => { + // SYS_getrandom + let [buf, len, _flags] = args else { + return 
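// 318 is the x86-64 syscall number for `getrandom`: fill the caller's
// buffer and report the number of bytes written. The shim below sources
// the bytes from the evaluator's internal RNG, one byte per `rand_u64`
// call, and copies `len` into the destination as the success result.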
Err(MirEvalError::TypeError("SYS_getrandom args are not provided")); + }; + let addr = Address::from_bytes(buf.get(self)?)?; + let size = from_bytes!(usize, len.get(self)?); + for i in 0..size { + let rand_byte = self.random_state.rand_u64() as u8; + self.write_memory(addr.offset(i), &[rand_byte])?; + } + destination.write_from_interval(self, len.interval) + } + _ => { + not_supported!("Unknown syscall id {id:?}") + } } } @@ -234,8 +355,8 @@ impl Evaluator<'_> { args: &[IntervalAndTy], _generic_args: &Substitution, destination: Interval, - locals: &Locals<'_>, - _span: MirSpan, + locals: &Locals, + span: MirSpan, ) -> Result<()> { match as_str { "memcmp" => { @@ -299,7 +420,9 @@ impl Evaluator<'_> { } "pthread_getspecific" => { let Some(arg0) = args.get(0) else { - return Err(MirEvalError::TypeError("pthread_getspecific arg0 is not provided")); + return Err(MirEvalError::TypeError( + "pthread_getspecific arg0 is not provided", + )); }; let key = from_bytes!(usize, &pad16(arg0.get(self)?, false)[0..8]); let value = self.thread_local_storage.get_key(key)?; @@ -308,11 +431,15 @@ impl Evaluator<'_> { } "pthread_setspecific" => { let Some(arg0) = args.get(0) else { - return Err(MirEvalError::TypeError("pthread_setspecific arg0 is not provided")); + return Err(MirEvalError::TypeError( + "pthread_setspecific arg0 is not provided", + )); }; let key = from_bytes!(usize, &pad16(arg0.get(self)?, false)[0..8]); let Some(arg1) = args.get(1) else { - return Err(MirEvalError::TypeError("pthread_setspecific arg1 is not provided")); + return Err(MirEvalError::TypeError( + "pthread_setspecific arg1 is not provided", + )); }; let value = from_bytes!(u128, pad16(arg1.get(self)?, false)); self.thread_local_storage.set_key(key, value)?; @@ -326,17 +453,52 @@ impl Evaluator<'_> { destination.write_from_bytes(self, &0u64.to_le_bytes()[0..destination.size])?; Ok(()) } + "syscall" => { + let Some((id, rest)) = args.split_first() else { + return Err(MirEvalError::TypeError( + "syscall arg1 is not provided", + )); + }; + let id = from_bytes!(i64, id.get(self)?); + self.exec_syscall(id, rest, destination, locals, span) + } + "sched_getaffinity" => { + let [_pid, _set_size, set] = args else { + return Err(MirEvalError::TypeError("libc::write args are not provided")); + }; + let set = Address::from_bytes(set.get(self)?)?; + // Only enable core 0 (we are single threaded anyway), which is bitset 0x0000001 + self.write_memory(set, &[1])?; + // return 0 as success + self.write_memory_using_ref(destination.addr, destination.size)?.fill(0); + Ok(()) + } _ => not_supported!("unknown external function {as_str}"), } } + fn exec_platform_intrinsic( + &mut self, + name: &str, + args: &[IntervalAndTy], + generic_args: &Substitution, + destination: Interval, + locals: &Locals, + span: MirSpan, + ) -> Result<()> { + if let Some(name) = name.strip_prefix("simd_") { + return self.exec_simd_intrinsic(name, args, generic_args, destination, locals, span); + } + not_supported!("unknown platform intrinsic {name}"); + } + fn exec_intrinsic( &mut self, name: &str, args: &[IntervalAndTy], generic_args: &Substitution, destination: Interval, - locals: &Locals<'_>, + locals: &Locals, span: MirSpan, ) -> Result<()> { if let Some(name) = name.strip_prefix("atomic_") { @@ -347,7 +509,9 @@ impl Evaluator<'_> { "sqrt" | "sin" | "cos" | "exp" | "exp2" | "log" | "log10" | "log2" | "fabs" | "floor" | "ceil" | "trunc" | "rint" | "nearbyint" | "round" | "roundeven" => { let [arg] = args else { - return Err(MirEvalError::TypeError("f64 intrinsic signature 
doesn't match fn (f64) -> f64")); + return Err(MirEvalError::TypeError( + "f64 intrinsic signature doesn't match fn (f64) -> f64", + )); }; let arg = from_bytes!(f64, arg.get(self)?); match name { @@ -373,7 +537,9 @@ impl Evaluator<'_> { } "pow" | "minnum" | "maxnum" | "copysign" => { let [arg1, arg2] = args else { - return Err(MirEvalError::TypeError("f64 intrinsic signature doesn't match fn (f64, f64) -> f64")); + return Err(MirEvalError::TypeError( + "f64 intrinsic signature doesn't match fn (f64, f64) -> f64", + )); }; let arg1 = from_bytes!(f64, arg1.get(self)?); let arg2 = from_bytes!(f64, arg2.get(self)?); @@ -387,7 +553,9 @@ impl Evaluator<'_> { } "powi" => { let [arg1, arg2] = args else { - return Err(MirEvalError::TypeError("powif64 signature doesn't match fn (f64, i32) -> f64")); + return Err(MirEvalError::TypeError( + "powif64 signature doesn't match fn (f64, i32) -> f64", + )); }; let arg1 = from_bytes!(f64, arg1.get(self)?); let arg2 = from_bytes!(i32, arg2.get(self)?); @@ -395,7 +563,9 @@ impl Evaluator<'_> { } "fma" => { let [arg1, arg2, arg3] = args else { - return Err(MirEvalError::TypeError("fmaf64 signature doesn't match fn (f64, f64, f64) -> f64")); + return Err(MirEvalError::TypeError( + "fmaf64 signature doesn't match fn (f64, f64, f64) -> f64", + )); }; let arg1 = from_bytes!(f64, arg1.get(self)?); let arg2 = from_bytes!(f64, arg2.get(self)?); @@ -411,7 +581,9 @@ impl Evaluator<'_> { "sqrt" | "sin" | "cos" | "exp" | "exp2" | "log" | "log10" | "log2" | "fabs" | "floor" | "ceil" | "trunc" | "rint" | "nearbyint" | "round" | "roundeven" => { let [arg] = args else { - return Err(MirEvalError::TypeError("f32 intrinsic signature doesn't match fn (f32) -> f32")); + return Err(MirEvalError::TypeError( + "f32 intrinsic signature doesn't match fn (f32) -> f32", + )); }; let arg = from_bytes!(f32, arg.get(self)?); match name { @@ -437,7 +609,9 @@ impl Evaluator<'_> { } "pow" | "minnum" | "maxnum" | "copysign" => { let [arg1, arg2] = args else { - return Err(MirEvalError::TypeError("f32 intrinsic signature doesn't match fn (f32, f32) -> f32")); + return Err(MirEvalError::TypeError( + "f32 intrinsic signature doesn't match fn (f32, f32) -> f32", + )); }; let arg1 = from_bytes!(f32, arg1.get(self)?); let arg2 = from_bytes!(f32, arg2.get(self)?); @@ -451,7 +625,9 @@ impl Evaluator<'_> { } "powi" => { let [arg1, arg2] = args else { - return Err(MirEvalError::TypeError("powif32 signature doesn't match fn (f32, i32) -> f32")); + return Err(MirEvalError::TypeError( + "powif32 signature doesn't match fn (f32, i32) -> f32", + )); }; let arg1 = from_bytes!(f32, arg1.get(self)?); let arg2 = from_bytes!(i32, arg2.get(self)?); @@ -459,7 +635,9 @@ impl Evaluator<'_> { } "fma" => { let [arg1, arg2, arg3] = args else { - return Err(MirEvalError::TypeError("fmaf32 signature doesn't match fn (f32, f32, f32) -> f32")); + return Err(MirEvalError::TypeError( + "fmaf32 signature doesn't match fn (f32, f32, f32) -> f32", + )); }; let arg1 = from_bytes!(f32, arg1.get(self)?); let arg2 = from_bytes!(f32, arg2.get(self)?); @@ -472,21 +650,77 @@ impl Evaluator<'_> { } match name { "size_of" => { - let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { return Err(MirEvalError::TypeError("size_of generic arg is not provided")); }; let size = self.size_of_sized(ty, locals, "size_of arg")?; destination.write_from_bytes(self, &size.to_le_bytes()[0..destination.size]) } 
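These arms implement the semantics that `std::mem` exposes on top of the corresponding intrinsics; the values can be sanity-checked in plain Rust (sizes assume a typical 64-bit target):

```rust
use std::mem;

fn main() {
    // `size_of` / `min_align_of` for sized types.
    assert_eq!(mem::size_of::<u64>(), 8);
    assert_eq!(mem::align_of::<u64>(), 8);

    // For an unsized value, `size_of_val` needs the pointer metadata. For
    // a slice that metadata is the element count stored next to the data
    // pointer, which is why the shim reads the `ptr_size..2 * ptr_size`
    // bytes of the fat pointer.
    let xs: &[u32] = &[1, 2, 3];
    assert_eq!(mem::size_of_val(xs), 12);
    assert_eq!(mem::align_of_val(xs), 4);
}
```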
"min_align_of" | "pref_align_of" => { - let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) else { return Err(MirEvalError::TypeError("align_of generic arg is not provided")); }; let align = self.layout(ty)?.align.abi.bytes(); destination.write_from_bytes(self, &align.to_le_bytes()[0..destination.size]) } + "size_of_val" => { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { + return Err(MirEvalError::TypeError("size_of_val generic arg is not provided")); + }; + let [arg] = args else { + return Err(MirEvalError::TypeError("size_of_val args are not provided")); + }; + if let Some((size, _)) = self.size_align_of(ty, locals)? { + destination.write_from_bytes(self, &size.to_le_bytes()) + } else { + let metadata = arg.interval.slice(self.ptr_size()..self.ptr_size() * 2); + let (size, _) = self.size_align_of_unsized(ty, metadata, locals)?; + destination.write_from_bytes(self, &size.to_le_bytes()) + } + } + "min_align_of_val" => { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) else { + return Err(MirEvalError::TypeError("min_align_of_val generic arg is not provided")); + }; + let [arg] = args else { + return Err(MirEvalError::TypeError("min_align_of_val args are not provided")); + }; + if let Some((_, align)) = self.size_align_of(ty, locals)? { + destination.write_from_bytes(self, &align.to_le_bytes()) + } else { + let metadata = arg.interval.slice(self.ptr_size()..self.ptr_size() * 2); + let (_, align) = self.size_align_of_unsized(ty, metadata, locals)?; + destination.write_from_bytes(self, &align.to_le_bytes()) + } + } + "type_name" => { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { + return Err(MirEvalError::TypeError("type_name generic arg is not provided")); + }; + let ty_name = match ty.display_source_code( + self.db, + locals.body.owner.module(self.db.upcast()), + true, + ) { + Ok(ty_name) => ty_name, + // Fallback to human readable display in case of `Err`. Ideally we want to use `display_source_code` to + // render full paths. + Err(_) => ty.display(self.db).to_string(), + }; + let len = ty_name.len(); + let addr = self.heap_allocate(len, 1)?; + self.write_memory(addr, ty_name.as_bytes())?; + destination.slice(0..self.ptr_size()).write_from_bytes(self, &addr.to_bytes())?; + destination + .slice(self.ptr_size()..2 * self.ptr_size()) + .write_from_bytes(self, &len.to_le_bytes()) + } "needs_drop" => { - let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { return Err(MirEvalError::TypeError("size_of generic arg is not provided")); }; let result = !ty.clone().is_copy(self.db, locals.body.owner); @@ -501,13 +735,17 @@ impl Evaluator<'_> { let ans = lhs.get(self)? 
== rhs.get(self)?; destination.write_from_bytes(self, &[u8::from(ans)]) } - "saturating_add" => { + "saturating_add" | "saturating_sub" => { let [lhs, rhs] = args else { return Err(MirEvalError::TypeError("saturating_add args are not provided")); }; let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false)); let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false)); - let ans = lhs.saturating_add(rhs); + let ans = match name { + "saturating_add" => lhs.saturating_add(rhs), + "saturating_sub" => lhs.saturating_sub(rhs), + _ => unreachable!(), + }; let bits = destination.size * 8; // FIXME: signed let is_signed = false; @@ -526,7 +764,22 @@ impl Evaluator<'_> { let ans = lhs.wrapping_add(rhs); destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]) } - "wrapping_sub" | "unchecked_sub" | "ptr_offset_from_unsigned" | "ptr_offset_from" => { + "ptr_offset_from_unsigned" | "ptr_offset_from" => { + let [lhs, rhs] = args else { + return Err(MirEvalError::TypeError("wrapping_sub args are not provided")); + }; + let lhs = i128::from_le_bytes(pad16(lhs.get(self)?, false)); + let rhs = i128::from_le_bytes(pad16(rhs.get(self)?, false)); + let ans = lhs.wrapping_sub(rhs); + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { + return Err(MirEvalError::TypeError("ptr_offset_from generic arg is not provided")); + }; + let size = self.size_of_sized(ty, locals, "ptr_offset_from arg")? as i128; + let ans = ans / size; + destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]) + } + "wrapping_sub" | "unchecked_sub" => { let [lhs, rhs] = args else { return Err(MirEvalError::TypeError("wrapping_sub args are not provided")); }; @@ -544,6 +797,26 @@ impl Evaluator<'_> { let ans = lhs.wrapping_mul(rhs); destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]) } + "wrapping_shl" | "unchecked_shl" => { + // FIXME: signed + let [lhs, rhs] = args else { + return Err(MirEvalError::TypeError("unchecked_shl args are not provided")); + }; + let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false)); + let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false)); + let ans = lhs.wrapping_shl(rhs as u32); + destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]) + } + "wrapping_shr" | "unchecked_shr" => { + // FIXME: signed + let [lhs, rhs] = args else { + return Err(MirEvalError::TypeError("unchecked_shr args are not provided")); + }; + let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false)); + let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false)); + let ans = lhs.wrapping_shr(rhs as u32); + destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size]) + } "unchecked_rem" => { // FIXME: signed let [lhs, rhs] = args else { @@ -588,7 +861,7 @@ impl Evaluator<'_> { _ => unreachable!(), }; let is_overflow = u128overflow - || ans.to_le_bytes()[op_size..].iter().any(|&x| x != 0 && x != 255); + || ans.to_le_bytes()[op_size..].iter().any(|&it| it != 0 && it != 255); let is_overflow = vec![u8::from(is_overflow)]; let layout = self.layout(&result_ty)?; let result = self.make_by_layout( @@ -603,10 +876,15 @@ impl Evaluator<'_> { } "copy" | "copy_nonoverlapping" => { let [src, dst, offset] = args else { - return Err(MirEvalError::TypeError("copy_nonoverlapping args are not provided")); + return Err(MirEvalError::TypeError( + "copy_nonoverlapping args are not provided", + )); }; - let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { - return 
Err(MirEvalError::TypeError("copy_nonoverlapping generic arg is not provided")); + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { + return Err(MirEvalError::TypeError( + "copy_nonoverlapping generic arg is not provided", + )); }; let src = Address::from_bytes(src.get(self)?)?; let dst = Address::from_bytes(dst.get(self)?)?; @@ -621,7 +899,8 @@ impl Evaluator<'_> { let [ptr, offset] = args else { return Err(MirEvalError::TypeError("offset args are not provided")); }; - let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { return Err(MirEvalError::TypeError("offset generic arg is not provided")); }; let ptr = u128::from_le_bytes(pad16(ptr.get(self)?, false)); @@ -652,20 +931,106 @@ impl Evaluator<'_> { } "ctpop" => { let [arg] = args else { - return Err(MirEvalError::TypeError("likely arg is not provided")); + return Err(MirEvalError::TypeError("ctpop arg is not provided")); }; let result = u128::from_le_bytes(pad16(arg.get(self)?, false)).count_ones(); destination .write_from_bytes(self, &(result as u128).to_le_bytes()[0..destination.size]) } + "ctlz" | "ctlz_nonzero" => { + let [arg] = args else { + return Err(MirEvalError::TypeError("cttz arg is not provided")); + }; + let result = + u128::from_le_bytes(pad16(arg.get(self)?, false)).leading_zeros() as usize; + let result = result - (128 - arg.interval.size * 8); + destination + .write_from_bytes(self, &(result as u128).to_le_bytes()[0..destination.size]) + } "cttz" | "cttz_nonzero" => { let [arg] = args else { - return Err(MirEvalError::TypeError("likely arg is not provided")); + return Err(MirEvalError::TypeError("cttz arg is not provided")); }; let result = u128::from_le_bytes(pad16(arg.get(self)?, false)).trailing_zeros(); destination .write_from_bytes(self, &(result as u128).to_le_bytes()[0..destination.size]) } + "rotate_left" => { + let [lhs, rhs] = args else { + return Err(MirEvalError::TypeError("rotate_left args are not provided")); + }; + let lhs = &lhs.get(self)?[0..destination.size]; + let rhs = rhs.get(self)?[0] as u32; + match destination.size { + 1 => { + let r = from_bytes!(u8, lhs).rotate_left(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 2 => { + let r = from_bytes!(u16, lhs).rotate_left(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 4 => { + let r = from_bytes!(u32, lhs).rotate_left(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 8 => { + let r = from_bytes!(u64, lhs).rotate_left(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 16 => { + let r = from_bytes!(u128, lhs).rotate_left(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + s => not_supported!("destination with size {s} for rotate_left"), + } + } + "rotate_right" => { + let [lhs, rhs] = args else { + return Err(MirEvalError::TypeError("rotate_right args are not provided")); + }; + let lhs = &lhs.get(self)?[0..destination.size]; + let rhs = rhs.get(self)?[0] as u32; + match destination.size { + 1 => { + let r = from_bytes!(u8, lhs).rotate_right(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 2 => { + let r = from_bytes!(u16, lhs).rotate_right(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 4 => { + let r = from_bytes!(u32, lhs).rotate_right(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 8 => { + let r = from_bytes!(u64, 
lhs).rotate_right(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + 16 => { + let r = from_bytes!(u128, lhs).rotate_right(rhs); + destination.write_from_bytes(self, &r.to_le_bytes()) + } + s => not_supported!("destination with size {s} for rotate_right"), + } + } + "discriminant_value" => { + let [arg] = args else { + return Err(MirEvalError::TypeError("discriminant_value arg is not provided")); + }; + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { + return Err(MirEvalError::TypeError( + "discriminant_value generic arg is not provided", + )); + }; + let addr = Address::from_bytes(arg.get(self)?)?; + let size = self.size_of_sized(ty, locals, "discriminant_value ptr type")?; + let interval = Interval { addr, size }; + let r = self.compute_discriminant(ty.clone(), interval.get(self)?)?; + destination.write_from_bytes(self, &r.to_le_bytes()[0..destination.size]) + } "const_eval_select" => { let [tuple, const_fn, _] = args else { return Err(MirEvalError::TypeError("const_eval_select args are not provided")); @@ -681,24 +1046,126 @@ impl Evaluator<'_> { let addr = tuple.interval.addr.offset(offset); args.push(IntervalAndTy::new(addr, field, self, locals)?); } - self.exec_fn_trait(&args, destination, locals, span) + if let Some(target) = self.db.lang_item(self.crate_id, LangItem::FnOnce) { + if let Some(def) = target + .as_trait() + .and_then(|it| self.db.trait_data(it).method_by_name(&name![call_once])) + { + self.exec_fn_trait( + def, + &args, + // FIXME: wrong for manual impls of `FnOnce` + Substitution::empty(Interner), + locals, + destination, + None, + span, + )?; + return Ok(()); + } + } + not_supported!("FnOnce was not available for executing const_eval_select"); + } + "read_via_copy" | "volatile_load" => { + let [arg] = args else { + return Err(MirEvalError::TypeError("read_via_copy args are not provided")); + }; + let addr = Address::from_bytes(arg.interval.get(self)?)?; + destination.write_from_interval(self, Interval { addr, size: destination.size }) + } + "write_bytes" => { + let [dst, val, count] = args else { + return Err(MirEvalError::TypeError("write_bytes args are not provided")); + }; + let count = from_bytes!(usize, count.get(self)?); + let val = from_bytes!(u8, val.get(self)?); + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) + else { + return Err(MirEvalError::TypeError( + "write_bytes generic arg is not provided", + )); + }; + let dst = Address::from_bytes(dst.get(self)?)?; + let size = self.size_of_sized(ty, locals, "copy_nonoverlapping ptr type")?; + let size = count * size; + self.write_memory_using_ref(dst, size)?.fill(val); + Ok(()) } _ => not_supported!("unknown intrinsic {name}"), } } + fn size_align_of_unsized( + &mut self, + ty: &Ty, + metadata: Interval, + locals: &Locals, + ) -> Result<(usize, usize)> { + Ok(match ty.kind(Interner) { + TyKind::Str => (from_bytes!(usize, metadata.get(self)?), 1), + TyKind::Slice(inner) => { + let len = from_bytes!(usize, metadata.get(self)?); + let (size, align) = self.size_align_of_sized(inner, locals, "slice inner type")?; + (size * len, align) + } + TyKind::Dyn(_) => self.size_align_of_sized( + self.vtable_map.ty_of_bytes(metadata.get(self)?)?, + locals, + "dyn concrete type", + )?, + TyKind::Adt(id, subst) => { + let id = id.0; + let layout = self.layout_adt(id, subst.clone())?; + let id = match id { + AdtId::StructId(s) => s, + _ => not_supported!("unsized enum or union"), + }; + let field_types = 
&self.db.field_types(id.into()); + let last_field_ty = + field_types.iter().rev().next().unwrap().1.clone().substitute(Interner, subst); + let sized_part_size = + layout.fields.offset(field_types.iter().count() - 1).bytes_usize(); + let sized_part_align = layout.align.abi.bytes() as usize; + let (unsized_part_size, unsized_part_align) = + self.size_align_of_unsized(&last_field_ty, metadata, locals)?; + let align = sized_part_align.max(unsized_part_align) as isize; + let size = (sized_part_size + unsized_part_size) as isize; + // Must add any necessary padding to `size` + // (to make it a multiple of `align`) before returning it. + // + // Namely, the returned size should be, in C notation: + // + // `size + ((size & (align-1)) ? align : 0)` + // + // emulated via the semi-standard fast bit trick: + // + // `(size + (align-1)) & -align` + let size = (size + (align - 1)) & (-align); + (size as usize, align as usize) + } + _ => not_supported!("unsized type other than str, slice, struct and dyn"), + }) + } + fn exec_atomic_intrinsic( &mut self, name: &str, args: &[IntervalAndTy], generic_args: &Substitution, destination: Interval, - locals: &Locals<'_>, + locals: &Locals, _span: MirSpan, ) -> Result<()> { // We are a single threaded runtime with no UB checking and no optimization, so - // we can implement these as normal functions. - let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { + // we can implement atomic intrinsics as normal functions. + + if name.starts_with("singlethreadfence_") || name.starts_with("fence_") { + return Ok(()); + } + + // The rest of atomic intrinsics have exactly one generic arg + + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) else { return Err(MirEvalError::TypeError("atomic intrinsic generic arg is not provided")); }; let Some(arg0) = args.get(0) else { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim/simd.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim/simd.rs new file mode 100644 index 000000000..ec7463104 --- /dev/null +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim/simd.rs @@ -0,0 +1,177 @@ +//! Shim implementation for simd intrinsics + +use std::cmp::Ordering; + +use crate::TyKind; + +use super::*; + +macro_rules! from_bytes { + ($ty:tt, $value:expr) => { + ($ty::from_le_bytes(match ($value).try_into() { + Ok(it) => it, + Err(_) => return Err(MirEvalError::TypeError("mismatched size")), + })) + }; +} + +macro_rules! 
not_supported { + ($it: expr) => { + return Err(MirEvalError::NotSupported(format!($it))) + }; +} + +impl Evaluator<'_> { + fn detect_simd_ty(&self, ty: &Ty) -> Result<(usize, Ty)> { + match ty.kind(Interner) { + TyKind::Adt(id, subst) => { + let len = match subst.as_slice(Interner).get(1).and_then(|it| it.constant(Interner)) + { + Some(len) => len, + _ => { + if let AdtId::StructId(id) = id.0 { + let struct_data = self.db.struct_data(id); + let fields = struct_data.variant_data.fields(); + let Some((first_field, _)) = fields.iter().next() else { + not_supported!("simd type with no field"); + }; + let field_ty = self.db.field_types(id.into())[first_field] + .clone() + .substitute(Interner, subst); + return Ok((fields.len(), field_ty)); + } + return Err(MirEvalError::TypeError("simd type with no len param")); + } + }; + match try_const_usize(self.db, len) { + Some(len) => { + let Some(ty) = subst.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) else { + return Err(MirEvalError::TypeError("simd type with no ty param")); + }; + Ok((len as usize, ty.clone())) + } + None => Err(MirEvalError::TypeError("simd type with unevaluatable len param")), + } + } + _ => Err(MirEvalError::TypeError("simd type which is not a struct")), + } + } + + pub(super) fn exec_simd_intrinsic( + &mut self, + name: &str, + args: &[IntervalAndTy], + _generic_args: &Substitution, + destination: Interval, + _locals: &Locals, + _span: MirSpan, + ) -> Result<()> { + match name { + "and" | "or" | "xor" => { + let [left, right] = args else { + return Err(MirEvalError::TypeError("simd bit op args are not provided")); + }; + let result = left + .get(self)? + .iter() + .zip(right.get(self)?) + .map(|(&it, &y)| match name { + "and" => it & y, + "or" => it | y, + "xor" => it ^ y, + _ => unreachable!(), + }) + .collect::<Vec<_>>(); + destination.write_from_bytes(self, &result) + } + "eq" | "ne" | "lt" | "le" | "gt" | "ge" => { + let [left, right] = args else { + return Err(MirEvalError::TypeError("simd args are not provided")); + }; + let (len, ty) = self.detect_simd_ty(&left.ty)?; + let is_signed = matches!(ty.as_builtin(), Some(BuiltinType::Int(_))); + let size = left.interval.size / len; + let dest_size = destination.size / len; + let mut destination_bytes = vec![]; + let vector = left.get(self)?.chunks(size).zip(right.get(self)?.chunks(size)); + for (l, r) in vector { + let mut result = Ordering::Equal; + for (l, r) in l.iter().zip(r).rev() { + let it = l.cmp(r); + if it != Ordering::Equal { + result = it; + break; + } + } + if is_signed { + if let Some((&l, &r)) = l.iter().zip(r).rev().next() { + if l != r { + result = (l as i8).cmp(&(r as i8)); + } + } + } + let result = match result { + Ordering::Less => ["lt", "le", "ne"].contains(&name), + Ordering::Equal => ["ge", "le", "eq"].contains(&name), + Ordering::Greater => ["ge", "gt", "ne"].contains(&name), + }; + let result = if result { 255 } else { 0 }; + destination_bytes.extend(std::iter::repeat(result).take(dest_size)); + } + + destination.write_from_bytes(self, &destination_bytes) + } + "bitmask" => { + let [op] = args else { + return Err(MirEvalError::TypeError("simd_bitmask args are not provided")); + }; + let (op_len, _) = self.detect_simd_ty(&op.ty)?; + let op_count = op.interval.size / op_len; + let mut result: u64 = 0; + for (i, val) in op.get(self)?.chunks(op_count).enumerate() { + if !val.iter().all(|&it| it == 0) { + result |= 1 << i; + } + } + destination.write_from_bytes(self, &result.to_le_bytes()[0..destination.size]) + } + "shuffle" => { + let [left, 
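// `simd_shuffle` selects lanes out of the concatenation of the two input
// vectors: each element of the constant index array names a source lane,
// with indices 0..left_len taken from `left` and the rest from `right`,
// which is why the lane chunks of both operands are chained below before
// indexing.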
right, index] = args else { + return Err(MirEvalError::TypeError("simd_shuffle args are not provided")); + }; + let TyKind::Array(_, index_len) = index.ty.kind(Interner) else { + return Err(MirEvalError::TypeError( + "simd_shuffle index argument has non-array type", + )); + }; + let index_len = match try_const_usize(self.db, index_len) { + Some(it) => it as usize, + None => { + return Err(MirEvalError::TypeError( + "simd type with unevaluatable len param", + )) + } + }; + let (left_len, _) = self.detect_simd_ty(&left.ty)?; + let left_size = left.interval.size / left_len; + let vector = + left.get(self)?.chunks(left_size).chain(right.get(self)?.chunks(left_size)); + let mut result = vec![]; + for index in index.get(self)?.chunks(index.interval.size / index_len) { + let index = from_bytes!(u32, index) as usize; + let val = match vector.clone().nth(index) { + Some(it) => it, + None => { + return Err(MirEvalError::TypeError( + "out of bound access in simd shuffle", + )) + } + }; + result.extend(val); + } + destination.write_from_bytes(self, &result) + } + _ => not_supported!("unknown simd intrinsic {name}"), + } + } +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs index ca4268b8f..46165cf3d 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/tests.rs @@ -30,7 +30,7 @@ fn eval_main(db: &TestDB, file_id: FileId) -> Result<(String, String), MirEvalEr db.trait_environment(func_id.into()), ) .map_err(|e| MirEvalError::MirLowerError(func_id.into(), e))?; - let (result, stdout, stderr) = interpret_mir(db, &body, false); + let (result, stdout, stderr) = interpret_mir(db, body, false, None); result?; Ok((stdout, stderr)) } @@ -183,6 +183,50 @@ fn main() { } #[test] +fn drop_struct_field() { + check_pass( + r#" +//- minicore: drop, add, option, cell, builtin_impls + +use core::cell::Cell; + +fn should_not_reach() { + _ // FIXME: replace this function with panic when that works +} + +struct X<'a>(&'a Cell<i32>); +impl<'a> Drop for X<'a> { + fn drop(&mut self) { + self.0.set(self.0.get() + 1) + } +} + +struct Tuple<'a>(X<'a>, X<'a>, X<'a>); + +fn main() { + let s = Cell::new(0); + { + let x0 = X(&s); + let xt = Tuple(x0, X(&s), X(&s)); + let x1 = xt.1; + if s.get() != 0 { + should_not_reach(); + } + drop(xt.0); + if s.get() != 1 { + should_not_reach(); + } + } + // FIXME: this should be 3 + if s.get() != 2 { + should_not_reach(); + } +} +"#, + ); +} + +#[test] fn drop_in_place() { check_pass( r#" @@ -614,6 +658,78 @@ fn main() { } #[test] +fn self_with_capital_s() { + check_pass( + r#" +//- minicore: fn, add, copy + +struct S1; + +impl S1 { + fn f() { + Self; + } +} + +struct S2 { + f1: i32, +} + +impl S2 { + fn f() { + Self { f1: 5 }; + } +} + +struct S3(i32); + +impl S3 { + fn f() { + Self(2); + Self; + let this = Self; + this(2); + } +} + +fn main() { + S1::f(); + S2::f(); + S3::f(); +} + "#, + ); +} + +#[test] +fn syscalls() { + check_pass( + r#" +//- minicore: option + +extern "C" { + pub unsafe extern "C" fn syscall(num: i64, ...) 
-> i64; +} + +const SYS_getrandom: i64 = 318; + +fn should_not_reach() { + _ // FIXME: replace this function with panic when that works +} + +fn main() { + let mut x: i32 = 0; + let r = syscall(SYS_getrandom, &mut x, 4usize, 0); + if r != 4 { + should_not_reach(); + } +} + +"#, + ) +} + +#[test] fn posix_tls() { check_pass( r#" diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs index 2cb29b4ab..718df8331 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower.rs @@ -146,12 +146,12 @@ impl MirLowerError { ConstEvalError::MirEvalError(e) => e.pretty_print(f, db, span_formatter)?, } } - MirLowerError::MissingFunctionDefinition(owner, x) => { + MirLowerError::MissingFunctionDefinition(owner, it) => { let body = db.body(*owner); writeln!( f, "Missing function definition for {}", - body.pretty_print_expr(db.upcast(), *owner, *x) + body.pretty_print_expr(db.upcast(), *owner, *it) )?; } MirLowerError::TypeMismatch(e) => { @@ -202,15 +202,15 @@ impl MirLowerError { } macro_rules! not_supported { - ($x: expr) => { - return Err(MirLowerError::NotSupported(format!($x))) + ($it: expr) => { + return Err(MirLowerError::NotSupported(format!($it))) }; } macro_rules! implementation_error { - ($x: expr) => {{ - ::stdx::never!("MIR lower implementation bug: {}", format!($x)); - return Err(MirLowerError::ImplementationError(format!($x))); + ($it: expr) => {{ + ::stdx::never!("MIR lower implementation bug: {}", format!($it)); + return Err(MirLowerError::ImplementationError(format!($it))); }}; } @@ -310,24 +310,30 @@ impl<'ctx> MirLowerCtx<'ctx> { self.lower_expr_to_place_with_adjust(expr_id, temp.into(), current, rest) } Adjust::Deref(_) => { - let Some((p, current)) = self.lower_expr_as_place_with_adjust(current, expr_id, true, adjustments)? else { - return Ok(None); - }; + let Some((p, current)) = + self.lower_expr_as_place_with_adjust(current, expr_id, true, adjustments)? + else { + return Ok(None); + }; self.push_assignment(current, place, Operand::Copy(p).into(), expr_id.into()); Ok(Some(current)) } Adjust::Borrow(AutoBorrow::Ref(m) | AutoBorrow::RawPtr(m)) => { - let Some((p, current)) = self.lower_expr_as_place_with_adjust(current, expr_id, true, rest)? else { - return Ok(None); - }; + let Some((p, current)) = + self.lower_expr_as_place_with_adjust(current, expr_id, true, rest)? + else { + return Ok(None); + }; let bk = BorrowKind::from_chalk(*m); self.push_assignment(current, place, Rvalue::Ref(bk, p), expr_id.into()); Ok(Some(current)) } Adjust::Pointer(cast) => { - let Some((p, current)) = self.lower_expr_as_place_with_adjust(current, expr_id, true, rest)? else { - return Ok(None); - }; + let Some((p, current)) = + self.lower_expr_as_place_with_adjust(current, expr_id, true, rest)? + else { + return Ok(None); + }; self.push_assignment( current, place, @@ -373,45 +379,49 @@ impl<'ctx> MirLowerCtx<'ctx> { } } Err(MirLowerError::IncompleteExpr) - }, + } Expr::Path(p) => { - let pr = if let Some((assoc, subst)) = self - .infer - .assoc_resolutions_for_expr(expr_id) - { - match assoc { - hir_def::AssocItemId::ConstId(c) => { - self.lower_const(c.into(), current, place, subst, expr_id.into(), self.expr_ty_without_adjust(expr_id))?; - return Ok(Some(current)) - }, - hir_def::AssocItemId::FunctionId(_) => { - // FnDefs are zero sized, no action is needed. 
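The "zero sized" remark is the load-bearing fact here: a path that resolves to a function item has its own unique `FnDef` type occupying no bytes, so lowering it to a place writes nothing. A standalone check (standard Rust; the pointer-size assertion assumes a usize-sized fn pointer, as on mainstream targets):

```rust
fn answer() -> i32 {
    42
}

fn main() {
    // The function item `answer` has a zero-sized type of its own.
    assert_eq!(std::mem::size_of_val(&answer), 0);

    // Only coercion to a function pointer materialises actual bytes.
    let f: fn() -> i32 = answer;
    assert_eq!(std::mem::size_of_val(&f), std::mem::size_of::<usize>());
    assert_eq!(f(), 42);
}
```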
- return Ok(Some(current)) + let pr = + if let Some((assoc, subst)) = self.infer.assoc_resolutions_for_expr(expr_id) { + match assoc { + hir_def::AssocItemId::ConstId(c) => { + self.lower_const( + c.into(), + current, + place, + subst, + expr_id.into(), + self.expr_ty_without_adjust(expr_id), + )?; + return Ok(Some(current)); + } + hir_def::AssocItemId::FunctionId(_) => { + // FnDefs are zero sized, no action is needed. + return Ok(Some(current)); + } + hir_def::AssocItemId::TypeAliasId(_) => { + // FIXME: If it is unreachable, use proper error instead of `not_supported`. + not_supported!("associated functions and types") + } } - hir_def::AssocItemId::TypeAliasId(_) => { - // FIXME: If it is unreachable, use proper error instead of `not_supported`. - not_supported!("associated functions and types") - }, - } - } else if let Some(variant) = self - .infer - .variant_resolution_for_expr(expr_id) - { - match variant { - VariantId::EnumVariantId(e) => ValueNs::EnumVariantId(e), - VariantId::StructId(s) => ValueNs::StructId(s), - VariantId::UnionId(_) => implementation_error!("Union variant as path"), - } - } else { - let unresolved_name = || MirLowerError::unresolved_path(self.db, p); - let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id); - resolver - .resolve_path_in_value_ns_fully(self.db.upcast(), p) - .ok_or_else(unresolved_name)? - }; + } else if let Some(variant) = self.infer.variant_resolution_for_expr(expr_id) { + match variant { + VariantId::EnumVariantId(e) => ValueNs::EnumVariantId(e), + VariantId::StructId(s) => ValueNs::StructId(s), + VariantId::UnionId(_) => implementation_error!("Union variant as path"), + } + } else { + let unresolved_name = || MirLowerError::unresolved_path(self.db, p); + let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id); + resolver + .resolve_path_in_value_ns_fully(self.db.upcast(), p) + .ok_or_else(unresolved_name)? + }; match pr { ValueNs::LocalBinding(_) | ValueNs::StaticId(_) => { - let Some((temp, current)) = self.lower_expr_as_place_without_adjust(current, expr_id, false)? else { + let Some((temp, current)) = + self.lower_expr_as_place_without_adjust(current, expr_id, false)? + else { return Ok(None); }; self.push_assignment( @@ -423,11 +433,19 @@ impl<'ctx> MirLowerCtx<'ctx> { Ok(Some(current)) } ValueNs::ConstId(const_id) => { - self.lower_const(const_id.into(), current, place, Substitution::empty(Interner), expr_id.into(), self.expr_ty_without_adjust(expr_id))?; + self.lower_const( + const_id.into(), + current, + place, + Substitution::empty(Interner), + expr_id.into(), + self.expr_ty_without_adjust(expr_id), + )?; Ok(Some(current)) } ValueNs::EnumVariantId(variant_id) => { - let variant_data = &self.db.enum_data(variant_id.parent).variants[variant_id.local_id]; + let variant_data = + &self.db.enum_data(variant_id.parent).variants[variant_id.local_id]; if variant_data.variant_data.kind() == StructKind::Unit { let ty = self.infer.type_of_expr[expr_id].clone(); current = self.lower_enum_variant( @@ -468,17 +486,16 @@ impl<'ctx> MirLowerCtx<'ctx> { ); Ok(Some(current)) } - ValueNs::FunctionId(_) | ValueNs::StructId(_) => { + ValueNs::FunctionId(_) | ValueNs::StructId(_) | ValueNs::ImplSelf(_) => { // It's probably a unit struct or a zero sized function, so no action is needed. Ok(Some(current)) } - x => { - not_supported!("unknown name {x:?} in value name space"); - } } } Expr::If { condition, then_branch, else_branch } => { - let Some((discr, current)) = self.lower_expr_to_some_operand(*condition, current)? 
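// Lowering an `if` produces a small CFG diamond: the condition is lowered
// to an operand, a `SwitchInt` terminator branches on it to freshly
// created then/else blocks, and the two ends are merged back into a
// single successor block, as the block-creation calls below show.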
else { + let Some((discr, current)) = + self.lower_expr_to_some_operand(*condition, current)? + else { return Ok(None); }; let start_of_then = self.new_basic_block(); @@ -501,15 +518,12 @@ impl<'ctx> MirLowerCtx<'ctx> { Ok(self.merge_blocks(end_of_then, end_of_else, expr_id.into())) } Expr::Let { pat, expr } => { - let Some((cond_place, current)) = self.lower_expr_as_place(current, *expr, true)? else { + let Some((cond_place, current)) = self.lower_expr_as_place(current, *expr, true)? + else { return Ok(None); }; - let (then_target, else_target) = self.pattern_match( - current, - None, - cond_place, - *pat, - )?; + let (then_target, else_target) = + self.pattern_match(current, None, cond_place, *pat)?; self.write_bytes_to_place( then_target, place.clone(), @@ -533,49 +547,35 @@ impl<'ctx> MirLowerCtx<'ctx> { } Expr::Block { id: _, statements, tail, label } => { if let Some(label) = label { - self.lower_loop(current, place.clone(), Some(*label), expr_id.into(), |this, begin| { - if let Some(current) = this.lower_block_to_place(statements, begin, *tail, place, expr_id.into())? { - let end = this.current_loop_end()?; - this.set_goto(current, end, expr_id.into()); - } - Ok(()) - }) + self.lower_loop( + current, + place.clone(), + Some(*label), + expr_id.into(), + |this, begin| { + if let Some(current) = this.lower_block_to_place( + statements, + begin, + *tail, + place, + expr_id.into(), + )? { + let end = this.current_loop_end()?; + this.set_goto(current, end, expr_id.into()); + } + Ok(()) + }, + ) } else { self.lower_block_to_place(statements, current, *tail, place, expr_id.into()) } } - Expr::Loop { body, label } => self.lower_loop(current, place, *label, expr_id.into(), |this, begin| { - let scope = this.push_drop_scope(); - if let Some((_, mut current)) = this.lower_expr_as_place(begin, *body, true)? { - current = scope.pop_and_drop(this, current); - this.set_goto(current, begin, expr_id.into()); - } else { - scope.pop_assume_dropped(this); - } - Ok(()) - }), - Expr::While { condition, body, label } => { - self.lower_loop(current, place, *label, expr_id.into(),|this, begin| { + Expr::Loop { body, label } => { + self.lower_loop(current, place, *label, expr_id.into(), |this, begin| { let scope = this.push_drop_scope(); - let Some((discr, to_switch)) = this.lower_expr_to_some_operand(*condition, begin)? else { - return Ok(()); - }; - let fail_cond = this.new_basic_block(); - let after_cond = this.new_basic_block(); - this.set_terminator( - to_switch, - TerminatorKind::SwitchInt { - discr, - targets: SwitchTargets::static_if(1, after_cond, fail_cond), - }, - expr_id.into(), - ); - let fail_cond = this.drop_until_scope(this.drop_scopes.len() - 1, fail_cond); - let end = this.current_loop_end()?; - this.set_goto(fail_cond, end, expr_id.into()); - if let Some((_, block)) = this.lower_expr_as_place(after_cond, *body, true)? { - let block = scope.pop_and_drop(this, block); - this.set_goto(block, begin, expr_id.into()); + if let Some((_, mut current)) = this.lower_expr_as_place(begin, *body, true)? { + current = scope.pop_and_drop(this, current); + this.set_goto(current, begin, expr_id.into()); } else { scope.pop_assume_dropped(this); } @@ -583,8 +583,7 @@ impl<'ctx> MirLowerCtx<'ctx> { }) } Expr::Call { callee, args, .. 
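The `Expr::If` lowering above allocates fresh basic blocks for the two branches, terminates the current block with a `SwitchInt` on the discriminant, and merges the branch ends afterwards. A toy sketch of that control-flow shape, using made-up block and terminator types rather than rust-analyzer's:

#[derive(Debug)]
enum Terminator {
    SwitchInt { discr: &'static str, then_bb: usize, else_bb: usize },
    Goto(usize),
}

#[derive(Default, Debug)]
struct Body {
    blocks: Vec<Option<Terminator>>, // one terminator slot per block
}

impl Body {
    fn new_block(&mut self) -> usize {
        self.blocks.push(None);
        self.blocks.len() - 1
    }
}

fn main() {
    // Lower `if cond { A } else { B }`: current block switches into the
    // branch blocks, and both branch ends jump to a common merge block.
    let mut body = Body::default();
    let current = body.new_block();
    let start_of_then = body.new_block();
    let start_of_else = body.new_block();
    let merged = body.new_block();
    body.blocks[current] = Some(Terminator::SwitchInt {
        discr: "cond",
        then_bb: start_of_then,
        else_bb: start_of_else,
    });
    body.blocks[start_of_then] = Some(Terminator::Goto(merged));
    body.blocks[start_of_else] = Some(Terminator::Goto(merged));
    println!("{body:?}");
}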
} => { - if let Some((func_id, generic_args)) = - self.infer.method_resolution(expr_id) { + if let Some((func_id, generic_args)) = self.infer.method_resolution(expr_id) { let ty = chalk_ir::TyKind::FnDef( CallableDefId::FunctionId(func_id).to_chalk(self.db), generic_args, @@ -601,24 +600,51 @@ impl<'ctx> MirLowerCtx<'ctx> { ); } let callee_ty = self.expr_ty_after_adjustments(*callee); - match &callee_ty.data(Interner).kind { + match &callee_ty.kind(Interner) { chalk_ir::TyKind::FnDef(..) => { let func = Operand::from_bytes(vec![], callee_ty.clone()); - self.lower_call_and_args(func, args.iter().copied(), place, current, self.is_uninhabited(expr_id), expr_id.into()) + self.lower_call_and_args( + func, + args.iter().copied(), + place, + current, + self.is_uninhabited(expr_id), + expr_id.into(), + ) } chalk_ir::TyKind::Function(_) => { - let Some((func, current)) = self.lower_expr_to_some_operand(*callee, current)? else { + let Some((func, current)) = + self.lower_expr_to_some_operand(*callee, current)? + else { return Ok(None); }; - self.lower_call_and_args(func, args.iter().copied(), place, current, self.is_uninhabited(expr_id), expr_id.into()) + self.lower_call_and_args( + func, + args.iter().copied(), + place, + current, + self.is_uninhabited(expr_id), + expr_id.into(), + ) + } + TyKind::Closure(_, _) => { + not_supported!( + "method resolution not emitted for closure (Are Fn traits available?)" + ); + } + TyKind::Error => { + return Err(MirLowerError::MissingFunctionDefinition(self.owner, expr_id)) } - TyKind::Error => return Err(MirLowerError::MissingFunctionDefinition(self.owner, expr_id)), _ => return Err(MirLowerError::TypeError("function call on bad type")), } } Expr::MethodCall { receiver, args, method_name, .. } => { let (func_id, generic_args) = - self.infer.method_resolution(expr_id).ok_or_else(|| MirLowerError::UnresolvedMethod(method_name.display(self.db.upcast()).to_string()))?; + self.infer.method_resolution(expr_id).ok_or_else(|| { + MirLowerError::UnresolvedMethod( + method_name.display(self.db.upcast()).to_string(), + ) + })?; let func = Operand::from_fn(self.db, func_id, generic_args); self.lower_call_and_args( func, @@ -630,23 +656,27 @@ impl<'ctx> MirLowerCtx<'ctx> { ) } Expr::Match { expr, arms } => { - let Some((cond_place, mut current)) = self.lower_expr_as_place(current, *expr, true)? + let Some((cond_place, mut current)) = + self.lower_expr_as_place(current, *expr, true)? else { return Ok(None); }; let mut end = None; for MatchArm { pat, guard, expr } in arms.iter() { - let (then, mut otherwise) = self.pattern_match( - current, - None, - cond_place.clone(), - *pat, - )?; + let (then, mut otherwise) = + self.pattern_match(current, None, cond_place.clone(), *pat)?; let then = if let &Some(guard) = guard { let next = self.new_basic_block(); let o = otherwise.get_or_insert_with(|| self.new_basic_block()); if let Some((discr, c)) = self.lower_expr_to_some_operand(guard, then)? 
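The call lowering above branches on the callee's type: a `TyKind::FnDef` is zero-sized, so the operand is built `from_bytes(vec![], ..)` and carries only its type, while a `TyKind::Function` is an actual fn pointer whose value must be evaluated. The surface-level distinction it mirrors:

fn add(a: i32, b: i32) -> i32 { a + b }

fn main() {
    let def = add;                      // FnDef-like: a unique zero-sized type
    let ptr: fn(i32, i32) -> i32 = add; // Function-like: a real pointer value
    assert_eq!(std::mem::size_of_val(&def), 0);
    assert_eq!(std::mem::size_of_val(&ptr), std::mem::size_of::<usize>());
    assert_eq!(def(1, 2), ptr(1, 2));
}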
{ - self.set_terminator(c, TerminatorKind::SwitchInt { discr, targets: SwitchTargets::static_if(1, next, *o) }, expr_id.into()); + self.set_terminator( + c, + TerminatorKind::SwitchInt { + discr, + targets: SwitchTargets::static_if(1, next, *o), + }, + expr_id.into(), + ); } next } else { @@ -672,33 +702,53 @@ impl<'ctx> MirLowerCtx<'ctx> { } Expr::Continue { label } => { let loop_data = match label { - Some(l) => self.labeled_loop_blocks.get(l).ok_or(MirLowerError::UnresolvedLabel)?, - None => self.current_loop_blocks.as_ref().ok_or(MirLowerError::ContinueWithoutLoop)?, + Some(l) => { + self.labeled_loop_blocks.get(l).ok_or(MirLowerError::UnresolvedLabel)? + } + None => self + .current_loop_blocks + .as_ref() + .ok_or(MirLowerError::ContinueWithoutLoop)?, }; let begin = loop_data.begin; current = self.drop_until_scope(loop_data.drop_scope_index, current); self.set_goto(current, begin, expr_id.into()); Ok(None) - }, + } &Expr::Break { expr, label } => { if let Some(expr) = expr { let loop_data = match label { - Some(l) => self.labeled_loop_blocks.get(&l).ok_or(MirLowerError::UnresolvedLabel)?, - None => self.current_loop_blocks.as_ref().ok_or(MirLowerError::BreakWithoutLoop)?, + Some(l) => self + .labeled_loop_blocks + .get(&l) + .ok_or(MirLowerError::UnresolvedLabel)?, + None => self + .current_loop_blocks + .as_ref() + .ok_or(MirLowerError::BreakWithoutLoop)?, }; - let Some(c) = self.lower_expr_to_place(expr, loop_data.place.clone(), current)? else { + let Some(c) = + self.lower_expr_to_place(expr, loop_data.place.clone(), current)? + else { return Ok(None); }; current = c; } let (end, drop_scope) = match label { Some(l) => { - let loop_blocks = self.labeled_loop_blocks.get(&l).ok_or(MirLowerError::UnresolvedLabel)?; - (loop_blocks.end.expect("We always generate end for labeled loops"), loop_blocks.drop_scope_index) - }, - None => { - (self.current_loop_end()?, self.current_loop_blocks.as_ref().unwrap().drop_scope_index) - }, + let loop_blocks = self + .labeled_loop_blocks + .get(&l) + .ok_or(MirLowerError::UnresolvedLabel)?; + ( + loop_blocks.end.expect("We always generate end for labeled loops"), + loop_blocks.drop_scope_index, + ) + } + None => ( + self.current_loop_end()?, + self.current_loop_blocks.as_ref().unwrap().drop_scope_index, + ), }; current = self.drop_until_scope(drop_scope, current); self.set_goto(current, end, expr_id.into()); @@ -706,7 +756,9 @@ impl<'ctx> MirLowerCtx<'ctx> { } Expr::Return { expr } => { if let Some(expr) = expr { - if let Some(c) = self.lower_expr_to_place(*expr, return_slot().into(), current)? { + if let Some(c) = + self.lower_expr_to_place(*expr, return_slot().into(), current)? + { current = c; } else { return Ok(None); @@ -719,19 +771,17 @@ impl<'ctx> MirLowerCtx<'ctx> { Expr::Yield { .. } => not_supported!("yield"), Expr::RecordLit { fields, path, spread, ellipsis: _, is_assignee_expr: _ } => { let spread_place = match spread { - &Some(x) => { - let Some((p, c)) = self.lower_expr_as_place(current, x, true)? else { + &Some(it) => { + let Some((p, c)) = self.lower_expr_as_place(current, it, true)? 
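The `Expr::Break` lowering above first writes the break value into the target loop's result place, then unwinds drop scopes and jumps to the loop's end block (which is always generated for labeled loops). In surface syntax that is labeled break with a value:

fn main() {
    let found = 'outer: loop {
        for i in 0..10 {
            if i == 7 {
                // Lowered roughly as: assign `i` to the 'outer loop's place,
                // drop intermediate scopes, goto the loop's end block.
                break 'outer i;
            }
        }
        break 0;
    };
    assert_eq!(found, 7);
}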
else { return Ok(None); }; current = c; Some(p) - }, + } None => None, }; - let variant_id = self - .infer - .variant_resolution_for_expr(expr_id) - .ok_or_else(|| match path { + let variant_id = + self.infer.variant_resolution_for_expr(expr_id).ok_or_else(|| match path { Some(p) => MirLowerError::UnresolvedName(p.display(self.db).to_string()), None => MirLowerError::RecordLiteralWithoutPath, })?; @@ -746,7 +796,8 @@ impl<'ctx> MirLowerCtx<'ctx> { for RecordLitField { name, expr } in fields.iter() { let field_id = variant_data.field(name).ok_or(MirLowerError::UnresolvedField)?; - let Some((op, c)) = self.lower_expr_to_some_operand(*expr, current)? else { + let Some((op, c)) = self.lower_expr_to_some_operand(*expr, current)? + else { return Ok(None); }; current = c; @@ -758,18 +809,23 @@ impl<'ctx> MirLowerCtx<'ctx> { Rvalue::Aggregate( AggregateKind::Adt(variant_id, subst), match spread_place { - Some(sp) => operands.into_iter().enumerate().map(|(i, x)| { - match x { - Some(x) => x, + Some(sp) => operands + .into_iter() + .enumerate() + .map(|(i, it)| match it { + Some(it) => it, None => { - let p = sp.project(ProjectionElem::Field(FieldId { - parent: variant_id, - local_id: LocalFieldId::from_raw(RawIdx::from(i as u32)), - })); + let p = + sp.project(ProjectionElem::Field(FieldId { + parent: variant_id, + local_id: LocalFieldId::from_raw( + RawIdx::from(i as u32), + ), + })); Operand::Copy(p) - }, - } - }).collect(), + } + }) + .collect(), None => operands.into_iter().collect::<Option<_>>().ok_or( MirLowerError::TypeError("missing field in record literal"), )?, @@ -785,7 +841,10 @@ impl<'ctx> MirLowerCtx<'ctx> { }; let local_id = variant_data.field(name).ok_or(MirLowerError::UnresolvedField)?; - let place = place.project(PlaceElem::Field(FieldId { parent: union_id.into(), local_id })); + let place = place.project(PlaceElem::Field(FieldId { + parent: union_id.into(), + local_id, + })); self.lower_expr_to_place(*expr, place, current) } } @@ -795,11 +854,18 @@ impl<'ctx> MirLowerCtx<'ctx> { Expr::Async { .. } => not_supported!("async block"), &Expr::Const(id) => { let subst = self.placeholder_subst(); - self.lower_const(id.into(), current, place, subst, expr_id.into(), self.expr_ty_without_adjust(expr_id))?; + self.lower_const( + id.into(), + current, + place, + subst, + expr_id.into(), + self.expr_ty_without_adjust(expr_id), + )?; Ok(Some(current)) - }, + } Expr::Cast { expr, type_ref: _ } => { - let Some((x, current)) = self.lower_expr_to_some_operand(*expr, current)? else { + let Some((it, current)) = self.lower_expr_to_some_operand(*expr, current)? else { return Ok(None); }; let source_ty = self.infer[*expr].clone(); @@ -807,7 +873,7 @@ impl<'ctx> MirLowerCtx<'ctx> { self.push_assignment( current, place, - Rvalue::Cast(cast_kind(&source_ty, &target_ty)?, x, target_ty), + Rvalue::Cast(cast_kind(&source_ty, &target_ty)?, it, target_ty), expr_id.into(), ); Ok(Some(current)) @@ -822,23 +888,37 @@ impl<'ctx> MirLowerCtx<'ctx> { } Expr::Box { expr } => { let ty = self.expr_ty_after_adjustments(*expr); - self.push_assignment(current, place.clone(), Rvalue::ShallowInitBoxWithAlloc(ty), expr_id.into()); - let Some((operand, current)) = self.lower_expr_to_some_operand(*expr, current)? else { + self.push_assignment( + current, + place.clone(), + Rvalue::ShallowInitBoxWithAlloc(ty), + expr_id.into(), + ); + let Some((operand, current)) = self.lower_expr_to_some_operand(*expr, current)? 
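In the `RecordLit` handling above, `spread_place` supplies every field the literal does not mention: each missing operand becomes an `Operand::Copy` of the matching field projection of the spread place. This is the functional-update syntax in surface Rust:

#[derive(Debug)]
struct Config {
    retries: u32,
    verbose: bool,
    name: &'static str,
}

fn main() {
    let base = Config { retries: 3, verbose: false, name: "default" };
    // `..base` plays the role of `spread_place`: `verbose` and `name` are
    // copied from it field by field, exactly as the operand loop above does.
    let custom = Config { retries: 5, ..base };
    assert!(!custom.verbose);
    println!("{custom:?}");
}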
+ else { return Ok(None); }; let p = place.project(ProjectionElem::Deref); self.push_assignment(current, p, operand.into(), expr_id.into()); Ok(Some(current)) - }, - Expr::Field { .. } | Expr::Index { .. } | Expr::UnaryOp { op: hir_def::hir::UnaryOp::Deref, .. } => { - let Some((p, current)) = self.lower_expr_as_place_without_adjust(current, expr_id, true)? else { + } + Expr::Field { .. } + | Expr::Index { .. } + | Expr::UnaryOp { op: hir_def::hir::UnaryOp::Deref, .. } => { + let Some((p, current)) = + self.lower_expr_as_place_without_adjust(current, expr_id, true)? + else { return Ok(None); }; self.push_assignment(current, place, Operand::Copy(p).into(), expr_id.into()); Ok(Some(current)) } - Expr::UnaryOp { expr, op: op @ (hir_def::hir::UnaryOp::Not | hir_def::hir::UnaryOp::Neg) } => { - let Some((operand, current)) = self.lower_expr_to_some_operand(*expr, current)? else { + Expr::UnaryOp { + expr, + op: op @ (hir_def::hir::UnaryOp::Not | hir_def::hir::UnaryOp::Neg), + } => { + let Some((operand, current)) = self.lower_expr_to_some_operand(*expr, current)? + else { return Ok(None); }; let operation = match op { @@ -853,7 +933,7 @@ impl<'ctx> MirLowerCtx<'ctx> { expr_id.into(), ); Ok(Some(current)) - }, + } Expr::BinaryOp { lhs, rhs, op } => { let op = op.ok_or(MirLowerError::IncompleteExpr)?; let is_builtin = 'b: { @@ -861,16 +941,19 @@ impl<'ctx> MirLowerCtx<'ctx> { // for binary operator, and use without adjust to simplify our conditions. let lhs_ty = self.expr_ty_without_adjust(*lhs); let rhs_ty = self.expr_ty_without_adjust(*rhs); - if matches!(op ,BinaryOp::CmpOp(syntax::ast::CmpOp::Eq { .. })) { + if matches!(op, BinaryOp::CmpOp(syntax::ast::CmpOp::Eq { .. })) { if lhs_ty.as_raw_ptr().is_some() && rhs_ty.as_raw_ptr().is_some() { break 'b true; } } let builtin_inequal_impls = matches!( op, - BinaryOp::ArithOp(ArithOp::Shl | ArithOp::Shr) | BinaryOp::Assignment { op: Some(ArithOp::Shl | ArithOp::Shr) } + BinaryOp::ArithOp(ArithOp::Shl | ArithOp::Shr) + | BinaryOp::Assignment { op: Some(ArithOp::Shl | ArithOp::Shr) } ); - lhs_ty.is_scalar() && rhs_ty.is_scalar() && (lhs_ty == rhs_ty || builtin_inequal_impls) + lhs_ty.is_scalar() + && rhs_ty.is_scalar() + && (lhs_ty == rhs_ty || builtin_inequal_impls) }; if !is_builtin { if let Some((func_id, generic_args)) = self.infer.method_resolution(expr_id) { @@ -892,34 +975,34 @@ impl<'ctx> MirLowerCtx<'ctx> { .infer .expr_adjustments .get(lhs) - .and_then(|x| x.split_last()) - .map(|x| x.1) - .ok_or(MirLowerError::TypeError("adjustment of binary op was missing"))?; + .and_then(|it| it.split_last()) + .map(|it| it.1) + .ok_or(MirLowerError::TypeError( + "adjustment of binary op was missing", + ))?; let Some((lhs_place, current)) = self.lower_expr_as_place_with_adjust(current, *lhs, false, adjusts)? else { return Ok(None); }; - let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? else { + let Some((rhs_op, current)) = + self.lower_expr_to_some_operand(*rhs, current)? + else { return Ok(None); }; - let r_value = Rvalue::CheckedBinaryOp(op.into(), Operand::Copy(lhs_place.clone()), rhs_op); + let r_value = Rvalue::CheckedBinaryOp( + op.into(), + Operand::Copy(lhs_place.clone()), + rhs_op, + ); self.push_assignment(current, lhs_place, r_value, expr_id.into()); return Ok(Some(current)); } else { - let Some((lhs_place, current)) = - self.lower_expr_as_place(current, *lhs, false)? - else { - return Ok(None); - }; - let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? 
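The `builtin_inequal_impls` check above singles out `<<` and `>>` (and their assignment forms): they are the only built-in binary operators whose two scalar operands may have different types, so the `lhs_ty == rhs_ty` requirement is relaxed for them:

fn main() {
    let x: u64 = 1;
    let amount: u32 = 5;       // a different scalar type than `x`
    let shifted = x << amount; // built-in: u64 << u32 is allowed
    assert_eq!(shifted, 32);
    // By contrast, `x + amount` would not compile: built-in addition
    // requires both operands to have the same type.
}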
else { - return Ok(None); - }; - self.push_assignment(current, lhs_place, rhs_op.into(), expr_id.into()); - return Ok(Some(current)); + return self.lower_assignment(current, *lhs, *rhs, expr_id.into()); } } - let Some((lhs_op, current)) = self.lower_expr_to_some_operand(*lhs, current)? else { + let Some((lhs_op, current)) = self.lower_expr_to_some_operand(*lhs, current)? + else { return Ok(None); }; if let hir_def::hir::BinaryOp::LogicOp(op) = op { @@ -928,22 +1011,31 @@ impl<'ctx> MirLowerCtx<'ctx> { syntax::ast::LogicOp::Or => 1, }; let start_of_then = self.new_basic_block(); - self.push_assignment(start_of_then, place.clone(), lhs_op.clone().into(), expr_id.into()); + self.push_assignment( + start_of_then, + place.clone(), + lhs_op.clone().into(), + expr_id.into(), + ); let end_of_then = Some(start_of_then); let start_of_else = self.new_basic_block(); - let end_of_else = - self.lower_expr_to_place(*rhs, place, start_of_else)?; + let end_of_else = self.lower_expr_to_place(*rhs, place, start_of_else)?; self.set_terminator( current, TerminatorKind::SwitchInt { discr: lhs_op, - targets: SwitchTargets::static_if(value_to_short, start_of_then, start_of_else), + targets: SwitchTargets::static_if( + value_to_short, + start_of_then, + start_of_else, + ), }, expr_id.into(), ); return Ok(self.merge_blocks(end_of_then, end_of_else, expr_id.into())); } - let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? else { + let Some((rhs_op, current)) = self.lower_expr_to_some_operand(*rhs, current)? + else { return Ok(None); }; self.push_assignment( @@ -976,15 +1068,15 @@ impl<'ctx> MirLowerCtx<'ctx> { }; let mut lp = None; let mut rp = None; - if let Some(x) = lhs { - let Some((o, c)) = self.lower_expr_to_some_operand(x, current)? else { + if let Some(it) = lhs { + let Some((o, c)) = self.lower_expr_to_some_operand(it, current)? else { return Ok(None); }; lp = Some(o); current = c; } - if let Some(x) = rhs { - let Some((o, c)) = self.lower_expr_to_some_operand(x, current)? else { + if let Some(it) = rhs { + let Some((o, c)) = self.lower_expr_to_some_operand(it, current)? else { return Ok(None); }; rp = Some(o); @@ -995,20 +1087,28 @@ impl<'ctx> MirLowerCtx<'ctx> { place, Rvalue::Aggregate( AggregateKind::Adt(st.into(), subst.clone()), - self.db.struct_data(st).variant_data.fields().iter().map(|x| { - let o = match x.1.name.as_str() { - Some("start") => lp.take(), - Some("end") => rp.take(), - Some("exhausted") => Some(Operand::from_bytes(vec![0], TyBuilder::bool())), - _ => None, - }; - o.ok_or(MirLowerError::UnresolvedField) - }).collect::<Result<_>>()?, + self.db + .struct_data(st) + .variant_data + .fields() + .iter() + .map(|it| { + let o = match it.1.name.as_str() { + Some("start") => lp.take(), + Some("end") => rp.take(), + Some("exhausted") => { + Some(Operand::from_bytes(vec![0], TyBuilder::bool())) + } + _ => None, + }; + o.ok_or(MirLowerError::UnresolvedField) + }) + .collect::<Result<_>>()?, ), expr_id.into(), ); Ok(Some(current)) - }, + } Expr::Closure { .. 
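For `LogicOp`, the lowering above switches on `value_to_short` (0 for `&&`, 1 for `||`): when the LHS already decides the result, the block that would evaluate the RHS is never entered. A behavioral sketch of that short-circuiting, written as a plain function rather than MIR:

fn and(lhs: bool, mut rhs: impl FnMut() -> bool) -> bool {
    // Mirrors SwitchInt { discr: lhs, .. }: for `&&`, lhs == false (the
    // shorting value 0) jumps straight to the result block.
    if !lhs { false } else { rhs() }
}

fn main() {
    let mut evaluated = false;
    let r = and(false, || { evaluated = true; true });
    assert_eq!((r, evaluated), (false, false)); // the RHS never ran
}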
} => { let ty = self.expr_ty_without_adjust(expr_id); let TyKind::Closure(id, _) = ty.kind(Interner) else { @@ -1020,22 +1120,33 @@ impl<'ctx> MirLowerCtx<'ctx> { for capture in captures.iter() { let p = Place { local: self.binding_local(capture.place.local)?, - projection: capture.place.projections.clone().into_iter().map(|x| { - match x { + projection: capture + .place + .projections + .clone() + .into_iter() + .map(|it| match it { ProjectionElem::Deref => ProjectionElem::Deref, - ProjectionElem::Field(x) => ProjectionElem::Field(x), - ProjectionElem::TupleOrClosureField(x) => ProjectionElem::TupleOrClosureField(x), - ProjectionElem::ConstantIndex { offset, from_end } => ProjectionElem::ConstantIndex { offset, from_end }, - ProjectionElem::Subslice { from, to } => ProjectionElem::Subslice { from, to }, - ProjectionElem::OpaqueCast(x) => ProjectionElem::OpaqueCast(x), - ProjectionElem::Index(x) => match x { }, - } - }).collect(), + ProjectionElem::Field(it) => ProjectionElem::Field(it), + ProjectionElem::TupleOrClosureField(it) => { + ProjectionElem::TupleOrClosureField(it) + } + ProjectionElem::ConstantIndex { offset, from_end } => { + ProjectionElem::ConstantIndex { offset, from_end } + } + ProjectionElem::Subslice { from, to } => { + ProjectionElem::Subslice { from, to } + } + ProjectionElem::OpaqueCast(it) => ProjectionElem::OpaqueCast(it), + ProjectionElem::Index(it) => match it {}, + }) + .collect(), }; match &capture.kind { CaptureKind::ByRef(bk) => { let placeholder_subst = self.placeholder_subst(); - let tmp_ty = capture.ty.clone().substitute(Interner, &placeholder_subst); + let tmp_ty = + capture.ty.clone().substitute(Interner, &placeholder_subst); let tmp: Place = self.temp(tmp_ty, current, capture.span)?.into(); self.push_assignment( current, @@ -1044,7 +1155,7 @@ impl<'ctx> MirLowerCtx<'ctx> { capture.span, ); operands.push(Operand::Move(tmp)); - }, + } CaptureKind::ByValue => operands.push(Operand::Move(p)), } } @@ -1055,18 +1166,18 @@ impl<'ctx> MirLowerCtx<'ctx> { expr_id.into(), ); Ok(Some(current)) - }, + } Expr::Tuple { exprs, is_assignee_expr: _ } => { let Some(values) = exprs - .iter() - .map(|x| { - let Some((o, c)) = self.lower_expr_to_some_operand(*x, current)? else { - return Ok(None); - }; - current = c; - Ok(Some(o)) - }) - .collect::<Result<Option<_>>>()? + .iter() + .map(|it| { + let Some((o, c)) = self.lower_expr_to_some_operand(*it, current)? else { + return Ok(None); + }; + current = c; + Ok(Some(o)) + }) + .collect::<Result<Option<_>>>()? else { return Ok(None); }; @@ -1079,7 +1190,7 @@ impl<'ctx> MirLowerCtx<'ctx> { } Expr::Array(l) => match l { Array::ElementList { elements, .. } => { - let elem_ty = match &self.expr_ty_without_adjust(expr_id).data(Interner).kind { + let elem_ty = match &self.expr_ty_without_adjust(expr_id).kind(Interner) { TyKind::Array(ty, _) => ty.clone(), _ => { return Err(MirLowerError::TypeError( @@ -1088,30 +1199,29 @@ impl<'ctx> MirLowerCtx<'ctx> { } }; let Some(values) = elements - .iter() - .map(|x| { - let Some((o, c)) = self.lower_expr_to_some_operand(*x, current)? else { - return Ok(None); - }; - current = c; - Ok(Some(o)) - }) - .collect::<Result<Option<_>>>()? + .iter() + .map(|it| { + let Some((o, c)) = self.lower_expr_to_some_operand(*it, current)? else { + return Ok(None); + }; + current = c; + Ok(Some(o)) + }) + .collect::<Result<Option<_>>>()? 
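The closure lowering above distinguishes `CaptureKind::ByRef`, where a temporary reference is taken and moved into the closure aggregate, from `CaptureKind::ByValue`, where the captured place itself is moved in. The surface-level counterpart:

fn main() {
    let s = String::from("hi");
    let by_ref = || s.len();        // CaptureKind::ByRef: the closure stores &s
    assert_eq!(by_ref(), 2);
    let by_value = move || s.len(); // CaptureKind::ByValue: `s` moves into the closure
    assert_eq!(by_value(), 2);
    // `s` is no longer usable here; it lives inside `by_value`.
}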
else { return Ok(None); }; - let r = Rvalue::Aggregate( - AggregateKind::Array(elem_ty), - values, - ); + let r = Rvalue::Aggregate(AggregateKind::Array(elem_ty), values); self.push_assignment(current, place, r, expr_id.into()); Ok(Some(current)) } Array::Repeat { initializer, .. } => { - let Some((init, current)) = self.lower_expr_to_some_operand(*initializer, current)? else { + let Some((init, current)) = + self.lower_expr_to_some_operand(*initializer, current)? + else { return Ok(None); }; - let len = match &self.expr_ty_without_adjust(expr_id).data(Interner).kind { + let len = match &self.expr_ty_without_adjust(expr_id).kind(Interner) { TyKind::Array(_, len) => len.clone(), _ => { return Err(MirLowerError::TypeError( @@ -1122,7 +1232,7 @@ impl<'ctx> MirLowerCtx<'ctx> { let r = Rvalue::Repeat(init, len); self.push_assignment(current, place, r, expr_id.into()); Ok(Some(current)) - }, + } }, Expr::Literal(l) => { let ty = self.expr_ty_without_adjust(expr_id); @@ -1134,9 +1244,33 @@ impl<'ctx> MirLowerCtx<'ctx> { } } + fn lower_assignment( + &mut self, + current: BasicBlockId, + lhs: ExprId, + rhs: ExprId, + span: MirSpan, + ) -> Result<Option<BasicBlockId>> { + let Some((rhs_op, current)) = + self.lower_expr_to_some_operand(rhs, current)? + else { + return Ok(None); + }; + if matches!(&self.body.exprs[lhs], Expr::Underscore) { + return Ok(Some(current)); + } + let Some((lhs_place, current)) = + self.lower_expr_as_place(current, lhs, false)? + else { + return Ok(None); + }; + self.push_assignment(current, lhs_place, rhs_op.into(), span); + Ok(Some(current)) + } + fn placeholder_subst(&mut self) -> Substitution { let placeholder_subst = match self.owner.as_generic_def_id() { - Some(x) => TyBuilder::placeholder_subst(self.db, x), + Some(it) => TyBuilder::placeholder_subst(self.db, it), None => Substitution::empty(Interner), }; placeholder_subst @@ -1192,7 +1326,7 @@ impl<'ctx> MirLowerCtx<'ctx> { fn lower_literal_to_operand(&mut self, ty: Ty, l: &Literal) -> Result<Operand> { let size = self .db - .layout_of_ty(ty.clone(), self.owner.module(self.db.upcast()).krate())? + .layout_of_ty(ty.clone(), self.db.trait_environment_for_body(self.owner))? .size .bytes_usize(); let bytes = match l { @@ -1206,7 +1340,6 @@ impl<'ctx> MirLowerCtx<'ctx> { return Ok(Operand::from_concrete_const(data, mm, ty)); } hir_def::hir::Literal::CString(b) => { - let b = b.as_bytes(); let bytes = b.iter().copied().chain(iter::once(0)).collect::<Vec<_>>(); let mut data = Vec::with_capacity(mem::size_of::<usize>() * 2); @@ -1226,8 +1359,8 @@ impl<'ctx> MirLowerCtx<'ctx> { } hir_def::hir::Literal::Char(c) => u32::from(*c).to_le_bytes().into(), hir_def::hir::Literal::Bool(b) => vec![*b as u8], - hir_def::hir::Literal::Int(x, _) => x.to_le_bytes()[0..size].into(), - hir_def::hir::Literal::Uint(x, _) => x.to_le_bytes()[0..size].into(), + hir_def::hir::Literal::Int(it, _) => it.to_le_bytes()[0..size].into(), + hir_def::hir::Literal::Uint(it, _) => it.to_le_bytes()[0..size].into(), hir_def::hir::Literal::Float(f, _) => match size { 8 => f.into_f64().to_le_bytes().into(), 4 => f.into_f32().to_le_bytes().into(), @@ -1269,7 +1402,7 @@ impl<'ctx> MirLowerCtx<'ctx> { } else { let name = const_id.name(self.db.upcast()); self.db - .const_eval(const_id.into(), subst) + .const_eval(const_id.into(), subst, None) .map_err(|e| MirLowerError::ConstEvalError(name, Box::new(e)))? 
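The new `lower_assignment` helper evaluates the RHS first and then checks for `Expr::Underscore`, so `_ = rhs` runs the RHS for its effect without ever creating an LHS place. For example:

fn side_effect() -> i32 {
    println!("evaluated");
    7
}

fn main() {
    _ = side_effect(); // RHS is lowered; no place is built for the `_` LHS
    let x;
    x = side_effect(); // ordinary case: lower RHS, then the LHS place, then assign
    assert_eq!(x, 7);
}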
}; Ok(Operand::Constant(c)) @@ -1377,9 +1510,9 @@ impl<'ctx> MirLowerCtx<'ctx> { fn expr_ty_after_adjustments(&self, e: ExprId) -> Ty { let mut ty = None; - if let Some(x) = self.infer.expr_adjustments.get(&e) { - if let Some(x) = x.last() { - ty = Some(x.target.clone()); + if let Some(it) = self.infer.expr_adjustments.get(&e) { + if let Some(it) = it.last() { + ty = Some(it.target.clone()); } } ty.unwrap_or_else(|| self.expr_ty_without_adjust(e)) @@ -1401,7 +1534,7 @@ impl<'ctx> MirLowerCtx<'ctx> { fn discr_temp_place(&mut self, current: BasicBlockId) -> Place { match &self.discr_temp { - Some(x) => x.clone(), + Some(it) => it.clone(), None => { let tmp: Place = self .temp(TyBuilder::discr_ty(), current, MirSpan::Unknown) @@ -1448,7 +1581,7 @@ impl<'ctx> MirLowerCtx<'ctx> { } fn has_adjustments(&self, expr_id: ExprId) -> bool { - !self.infer.expr_adjustments.get(&expr_id).map(|x| x.is_empty()).unwrap_or(true) + !self.infer.expr_adjustments.get(&expr_id).map(|it| it.is_empty()).unwrap_or(true) } fn merge_blocks( @@ -1478,7 +1611,7 @@ impl<'ctx> MirLowerCtx<'ctx> { ))? .end { - Some(x) => x, + Some(it) => it, None => { let s = self.new_basic_block(); self.current_loop_blocks @@ -1602,10 +1735,10 @@ impl<'ctx> MirLowerCtx<'ctx> { pick_binding: impl Fn(BindingId) -> bool, ) -> Result<BasicBlockId> { let base_param_count = self.result.param_locals.len(); - self.result.param_locals.extend(params.clone().map(|(x, ty)| { + self.result.param_locals.extend(params.clone().map(|(it, ty)| { let local_id = self.result.locals.alloc(Local { ty }); self.drop_scopes.last_mut().unwrap().locals.push(local_id); - if let Pat::Bind { id, subpat: None } = self.body[x] { + if let Pat::Bind { id, subpat: None } = self.body[it] { if matches!( self.body.bindings[id].mode, BindingAnnotation::Unannotated | BindingAnnotation::Mutable @@ -1646,7 +1779,7 @@ impl<'ctx> MirLowerCtx<'ctx> { fn binding_local(&self, b: BindingId) -> Result<LocalId> { match self.result.binding_locals.get(b) { - Some(x) => Ok(*x), + Some(it) => Ok(*it), None => { // FIXME: It should never happens, but currently it will happen in `const_dependent_on_local` test, which // is a hir lowering problem IMO. 
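`expr_ty_after_adjustments` above returns the `.target` of the last recorded adjustment, i.e. the type an expression has after coercions, which can differ from the type as written. A concrete case:

fn takes_slice(s: &[u8]) -> usize {
    s.len()
}

fn main() {
    let arr = [1u8, 2, 3];
    // `&arr` has unadjusted type `&[u8; 3]`; an unsize adjustment gives it
    // the call-site type `&[u8]` — the `.target` of the last adjustment.
    assert_eq!(takes_slice(&arr), 3);
}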
@@ -1731,6 +1864,7 @@ impl<'ctx> MirLowerCtx<'ctx> { fn cast_kind(source_ty: &Ty, target_ty: &Ty) -> Result<CastKind> { Ok(match (source_ty.kind(Interner), target_ty.kind(Interner)) { + (TyKind::FnDef(..), TyKind::Function(_)) => CastKind::Pointer(PointerCast::ReifyFnPointer), (TyKind::Scalar(s), TyKind::Scalar(t)) => match (s, t) { (chalk_ir::Scalar::Float(_), chalk_ir::Scalar::Float(_)) => CastKind::FloatToFloat, (chalk_ir::Scalar::Float(_), _) => CastKind::FloatToInt, @@ -1742,17 +1876,17 @@ fn cast_kind(source_ty: &Ty, target_ty: &Ty) -> Result<CastKind> { (TyKind::Raw(_, a) | TyKind::Ref(_, _, a), TyKind::Raw(_, b) | TyKind::Ref(_, _, b)) => { CastKind::Pointer(if a == b { PointerCast::MutToConstPointer - } else if matches!(a.kind(Interner), TyKind::Slice(_) | TyKind::Str) - && matches!(b.kind(Interner), TyKind::Slice(_) | TyKind::Str) + } else if matches!(b.kind(Interner), TyKind::Slice(_)) + && matches!(a.kind(Interner), TyKind::Array(_, _)) + || matches!(b.kind(Interner), TyKind::Dyn(_)) { - // slice to slice cast is no-op (metadata is not touched), so we use this - PointerCast::MutToConstPointer - } else if matches!(b.kind(Interner), TyKind::Slice(_) | TyKind::Dyn(_)) { PointerCast::Unsize } else if matches!(a.kind(Interner), TyKind::Slice(s) if s == b) { PointerCast::ArrayToPointer } else { - // cast between two sized pointer, like *const i32 to *const i8. There is no specific variant + // cast between two sized pointer, like *const i32 to *const i8, or two unsized pointer, like + // slice to slice, slice to str, ... . These are no-ops (even in the unsized case, no metadata + // will be touched) but there is no specific variant // for it in `PointerCast` so we use `MutToConstPointer` PointerCast::MutToConstPointer }) @@ -1796,7 +1930,7 @@ pub fn mir_body_for_closure_query( implementation_error!("closure has not callable sig"); }; let current = ctx.lower_params_and_bindings( - args.iter().zip(sig.params().iter()).map(|(x, y)| (*x, y.clone())), + args.iter().zip(sig.params().iter()).map(|(it, y)| (*it, y.clone())), |_| true, )?; if let Some(current) = ctx.lower_expr_to_place(*root, return_slot().into(), current)? 
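The updated `cast_kind` above adds `ReifyFnPointer` for `FnDef` → fn-pointer casts and classifies array-to-slice and `dyn` casts as `PointerCast::Unsize`, while casts that leave pointer metadata untouched keep reusing `MutToConstPointer` as a no-op. The corresponding surface casts:

fn f(x: i32) -> i64 { x as i64 }

fn main() {
    let reify: fn(i32) -> i64 = f;      // ReifyFnPointer: FnDef -> fn pointer
    let arr = [1u8, 2, 3];
    let slice: &[u8] = &arr;            // Unsize: &[u8; 3] -> &[u8]
    let p: *const [u8; 3] = &arr;
    let elem = p as *const u8;          // ArrayToPointer
    let c: *const u8 = elem as *mut u8; // and back: MutToConstPointer is a no-op
    assert_eq!(unsafe { *c }, 1);
    assert_eq!((reify(2), slice.len()), (2i64, 3));
}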
{ @@ -1815,34 +1949,35 @@ pub fn mir_body_for_closure_query( FnTrait::FnMut | FnTrait::Fn => vec![ProjectionElem::Deref], }; ctx.result.walk_places(|p| { - if let Some(x) = upvar_map.get(&p.local) { - let r = x.iter().find(|x| { - if p.projection.len() < x.0.place.projections.len() { + if let Some(it) = upvar_map.get(&p.local) { + let r = it.iter().find(|it| { + if p.projection.len() < it.0.place.projections.len() { return false; } - for (x, y) in p.projection.iter().zip(x.0.place.projections.iter()) { - match (x, y) { + for (it, y) in p.projection.iter().zip(it.0.place.projections.iter()) { + match (it, y) { (ProjectionElem::Deref, ProjectionElem::Deref) => (), - (ProjectionElem::Field(x), ProjectionElem::Field(y)) if x == y => (), + (ProjectionElem::Field(it), ProjectionElem::Field(y)) if it == y => (), ( - ProjectionElem::TupleOrClosureField(x), + ProjectionElem::TupleOrClosureField(it), ProjectionElem::TupleOrClosureField(y), - ) if x == y => (), + ) if it == y => (), _ => return false, } } true }); match r { - Some(x) => { + Some(it) => { p.local = closure_local; let mut next_projs = closure_projection.clone(); - next_projs.push(PlaceElem::TupleOrClosureField(x.1)); + next_projs.push(PlaceElem::TupleOrClosureField(it.1)); let prev_projs = mem::take(&mut p.projection); - if x.0.kind != CaptureKind::ByValue { + if it.0.kind != CaptureKind::ByValue { next_projs.push(ProjectionElem::Deref); } - next_projs.extend(prev_projs.iter().cloned().skip(x.0.place.projections.len())); + next_projs + .extend(prev_projs.iter().cloned().skip(it.0.place.projections.len())); p.projection = next_projs.into(); } None => err = Some(p.clone()), @@ -1902,8 +2037,8 @@ pub fn lower_to_mir( // need to take this input explicitly. root_expr: ExprId, ) -> Result<MirBody> { - if let Some((_, x)) = infer.type_mismatches().next() { - return Err(MirLowerError::TypeMismatch(x.clone())); + if let Some((_, it)) = infer.type_mismatches().next() { + return Err(MirLowerError::TypeMismatch(it.clone())); } let mut ctx = MirLowerCtx::new(db, owner, body, infer); // 0 is return local @@ -1929,7 +2064,7 @@ pub fn lower_to_mir( body.params .iter() .zip(callable_sig.params().iter()) - .map(|(x, y)| (*x, y.clone())), + .map(|(it, y)| (*it, y.clone())), binding_picker, )?; } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs index d2c8d9a08..213f151ab 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/as_place.rs @@ -1,12 +1,12 @@ //! MIR lowering for places use super::*; -use hir_def::{lang_item::lang_attr, FunctionId}; +use hir_def::FunctionId; use hir_expand::name; macro_rules! not_supported { - ($x: expr) => { - return Err(MirLowerError::NotSupported(format!($x))) + ($it: expr) => { + return Err(MirLowerError::NotSupported(format!($it))) }; } @@ -18,7 +18,9 @@ impl MirLowerCtx<'_> { ) -> Result<Option<(Place, BasicBlockId)>> { let ty = self.expr_ty_without_adjust(expr_id); let place = self.temp(ty, prev_block, expr_id.into())?; - let Some(current) = self.lower_expr_to_place_without_adjust(expr_id, place.into(), prev_block)? else { + let Some(current) = + self.lower_expr_to_place_without_adjust(expr_id, place.into(), prev_block)? 
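In `mir_body_for_closure_query` above, the closure self-place gets an extra `Deref` projection for `FnMut`/`Fn` but not for `FnOnce`, because only `FnOnce` receives the closure by value. A sketch of the calling conventions this mirrors:

fn call_once<F: FnOnce() -> String>(f: F) -> String { f() }
fn call_mut<F: FnMut() -> String>(f: &mut F) -> String { f() }

fn main() {
    let name = String::from("mir");
    let mut f = move || name.clone();
    assert_eq!(call_mut(&mut f), "mir"); // closure behind &mut: Deref projection needed
    assert_eq!(call_once(f), "mir");     // closure consumed by value: no Deref
}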
+ else { return Ok(None); }; Ok(Some((place.into(), current))) @@ -32,10 +34,12 @@ impl MirLowerCtx<'_> { ) -> Result<Option<(Place, BasicBlockId)>> { let ty = adjustments .last() - .map(|x| x.target.clone()) + .map(|it| it.target.clone()) .unwrap_or_else(|| self.expr_ty_without_adjust(expr_id)); let place = self.temp(ty, prev_block, expr_id.into())?; - let Some(current) = self.lower_expr_to_place_with_adjust(expr_id, place.into(), prev_block, adjustments)? else { + let Some(current) = + self.lower_expr_to_place_with_adjust(expr_id, place.into(), prev_block, adjustments)? + else { return Ok(None); }; Ok(Some((place.into(), current))) @@ -57,16 +61,17 @@ impl MirLowerCtx<'_> { if let Some((last, rest)) = adjustments.split_last() { match last.kind { Adjust::Deref(None) => { - let Some(mut x) = self.lower_expr_as_place_with_adjust( + let Some(mut it) = self.lower_expr_as_place_with_adjust( current, expr_id, upgrade_rvalue, rest, - )? else { + )? + else { return Ok(None); }; - x.0 = x.0.project(ProjectionElem::Deref); - Ok(Some(x)) + it.0 = it.0.project(ProjectionElem::Deref); + Ok(Some(it)) } Adjust::Deref(Some(od)) => { let Some((r, current)) = self.lower_expr_as_place_with_adjust( @@ -74,14 +79,15 @@ impl MirLowerCtx<'_> { expr_id, upgrade_rvalue, rest, - )? else { + )? + else { return Ok(None); }; self.lower_overloaded_deref( current, r, rest.last() - .map(|x| x.target.clone()) + .map(|it| it.target.clone()) .unwrap_or_else(|| self.expr_ty_without_adjust(expr_id)), last.target.clone(), expr_id.into(), @@ -156,7 +162,7 @@ impl MirLowerCtx<'_> { let is_builtin = match self.expr_ty_without_adjust(*expr).kind(Interner) { TyKind::Ref(..) | TyKind::Raw(..) => true, TyKind::Adt(id, _) => { - if let Some(lang_item) = lang_attr(self.db.upcast(), id.0) { + if let Some(lang_item) = self.db.lang_attr(id.0.into()) { lang_item == LangItem::OwnedBox } else { false @@ -165,7 +171,8 @@ impl MirLowerCtx<'_> { _ => false, }; if !is_builtin { - let Some((p, current)) = self.lower_expr_as_place(current, *expr, true)? else { + let Some((p, current)) = self.lower_expr_as_place(current, *expr, true)? + else { return Ok(None); }; return self.lower_overloaded_deref( @@ -192,7 +199,8 @@ impl MirLowerCtx<'_> { }, ); } - let Some((mut r, current)) = self.lower_expr_as_place(current, *expr, true)? else { + let Some((mut r, current)) = self.lower_expr_as_place(current, *expr, true)? + else { return Ok(None); }; r = r.project(ProjectionElem::Deref); @@ -217,12 +225,18 @@ impl MirLowerCtx<'_> { ) { let Some(index_fn) = self.infer.method_resolution(expr_id) else { - return Err(MirLowerError::UnresolvedMethod("[overloaded index]".to_string())); + return Err(MirLowerError::UnresolvedMethod( + "[overloaded index]".to_string(), + )); }; - let Some((base_place, current)) = self.lower_expr_as_place(current, *base, true)? else { + let Some((base_place, current)) = + self.lower_expr_as_place(current, *base, true)? + else { return Ok(None); }; - let Some((index_operand, current)) = self.lower_expr_to_some_operand(*index, current)? else { + let Some((index_operand, current)) = + self.lower_expr_to_some_operand(*index, current)? + else { return Ok(None); }; return self.lower_overloaded_index( @@ -239,8 +253,8 @@ impl MirLowerCtx<'_> { .infer .expr_adjustments .get(base) - .and_then(|x| x.split_last()) - .map(|x| x.1) + .and_then(|it| it.split_last()) + .map(|it| it.1) .unwrap_or(&[]); let Some((mut p_base, current)) = self.lower_expr_as_place_with_adjust(current, *base, true, adjusts)? 
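The place lowering above treats references, raw pointers, and `Box` (recognized via the `OwnedBox` lang item) as builtin derefs; any other type goes through `lower_overloaded_deref`, i.e. an explicit call to `Deref::deref` followed by a deref of the returned reference. A minimal overloaded-deref type for comparison:

use std::ops::Deref;

struct Wrapper(i32);

impl Deref for Wrapper {
    type Target = i32;
    fn deref(&self) -> &i32 { &self.0 }
}

fn main() {
    let r = &5;
    assert_eq!(*r, 5);       // builtin deref
    let b = Box::new(5);
    assert_eq!(*b, 5);       // Box is special-cased as builtin (OwnedBox lang item)
    let w = Wrapper(5);
    assert_eq!(*w, 5);       // overloaded: lowered as *Deref::deref(&w)
}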
@@ -249,7 +263,8 @@ impl MirLowerCtx<'_> { }; let l_index = self.temp(self.expr_ty_after_adjustments(*index), current, expr_id.into())?; - let Some(current) = self.lower_expr_to_place(*index, l_index.into(), current)? else { + let Some(current) = self.lower_expr_to_place(*index, l_index.into(), current)? + else { return Ok(None); }; p_base = p_base.project(ProjectionElem::Index(l_index)); @@ -282,7 +297,15 @@ impl MirLowerCtx<'_> { ) .intern(Interner), ); - let Some(current) = self.lower_call(index_fn_op, Box::new([Operand::Copy(place), index_operand]), result.clone(), current, false, span)? else { + let Some(current) = self.lower_call( + index_fn_op, + Box::new([Operand::Copy(place), index_operand]), + result.clone(), + current, + false, + span, + )? + else { return Ok(None); }; result = result.project(ProjectionElem::Deref); @@ -329,7 +352,15 @@ impl MirLowerCtx<'_> { .intern(Interner), ); let mut result: Place = self.temp(target_ty_ref, current, span)?.into(); - let Some(current) = self.lower_call(deref_fn_op, Box::new([Operand::Copy(ref_place)]), result.clone(), current, false, span)? else { + let Some(current) = self.lower_call( + deref_fn_op, + Box::new([Operand::Copy(ref_place)]), + result.clone(), + current, + false, + span, + )? + else { return Ok(None); }; result = result.project(ProjectionElem::Deref); diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs index ff43c64a9..3354cbd76 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/lower/pattern_matching.rs @@ -307,6 +307,11 @@ impl MirLowerCtx<'_> { mode, )?, None => { + // The path is not a variant, so it is a const + if mode != MatchingMode::Check { + // A const don't bind anything. Only needs check. + return Ok((current, current_else)); + } let unresolved_name = || MirLowerError::unresolved_path(self.db, p); let resolver = self.owner.resolver(self.db.upcast()); let pr = resolver @@ -362,8 +367,8 @@ impl MirLowerCtx<'_> { }, Pat::Lit(l) => match &self.body.exprs[*l] { Expr::Literal(l) => { - let c = self.lower_literal_to_operand(self.infer[pattern].clone(), l)?; if mode == MatchingMode::Check { + let c = self.lower_literal_to_operand(self.infer[pattern].clone(), l)?; self.pattern_match_const(current_else, current, c, cond_place, pattern)? } else { (current, current_else) diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs index ce3f7a8e5..c565228d9 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/monomorphization.rs @@ -13,15 +13,14 @@ use chalk_ir::{ fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable}, ConstData, DebruijnIndex, }; -use hir_def::{DefWithBodyId, GeneralConstId}; +use hir_def::DefWithBodyId; use triomphe::Arc; use crate::{ - consteval::unknown_const, + consteval::{intern_const_scalar, unknown_const}, db::HirDatabase, from_placeholder_idx, infer::normalize, - method_resolution::lookup_impl_const, utils::{generics, Generics}, ClosureId, Const, Interner, ProjectionTy, Substitution, TraitEnvironment, Ty, TyKind, }; @@ -29,8 +28,8 @@ use crate::{ use super::{MirBody, MirLowerError, Operand, Rvalue, StatementKind, TerminatorKind}; macro_rules! 
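The new early return in the pattern-matching code is justified by its comment ("A const don't bind anything", i.e. a const pattern binds nothing): a path pattern naming a const is pure comparison, so the binding pass can skip it entirely. For instance:

const ZERO: i32 = 0;

fn classify(n: i32) -> &'static str {
    match n {
        ZERO => "zero",        // checked by comparison; no binding is introduced
        _nonzero => "nonzero", // a binding pattern, by contrast, does bind
    }
}

fn main() {
    assert_eq!(classify(0), "zero");
    assert_eq!(classify(1), "nonzero");
}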
not_supported { - ($x: expr) => { - return Err(MirLowerError::NotSupported(format!($x))) + ($it: expr) => { + return Err(MirLowerError::NotSupported(format!($it))) }; } @@ -97,16 +96,16 @@ impl FallibleTypeFolder<Interner> for Filler<'_> { idx: chalk_ir::PlaceholderIndex, _outer_binder: DebruijnIndex, ) -> std::result::Result<chalk_ir::Const<Interner>, Self::Error> { - let x = from_placeholder_idx(self.db, idx); - let Some(idx) = self.generics.as_ref().and_then(|g| g.param_idx(x)) else { + let it = from_placeholder_idx(self.db, idx); + let Some(idx) = self.generics.as_ref().and_then(|g| g.param_idx(it)) else { not_supported!("missing idx in generics"); }; Ok(self .subst .as_slice(Interner) .get(idx) - .and_then(|x| x.constant(Interner)) - .ok_or_else(|| MirLowerError::GenericArgNotProvided(x, self.subst.clone()))? + .and_then(|it| it.constant(Interner)) + .ok_or_else(|| MirLowerError::GenericArgNotProvided(it, self.subst.clone()))? .clone()) } @@ -115,16 +114,16 @@ impl FallibleTypeFolder<Interner> for Filler<'_> { idx: chalk_ir::PlaceholderIndex, _outer_binder: DebruijnIndex, ) -> std::result::Result<Ty, Self::Error> { - let x = from_placeholder_idx(self.db, idx); - let Some(idx) = self.generics.as_ref().and_then(|g| g.param_idx(x)) else { + let it = from_placeholder_idx(self.db, idx); + let Some(idx) = self.generics.as_ref().and_then(|g| g.param_idx(it)) else { not_supported!("missing idx in generics"); }; Ok(self .subst .as_slice(Interner) .get(idx) - .and_then(|x| x.ty(Interner)) - .ok_or_else(|| MirLowerError::GenericArgNotProvided(x, self.subst.clone()))? + .and_then(|it| it.ty(Interner)) + .ok_or_else(|| MirLowerError::GenericArgNotProvided(it, self.subst.clone()))? .clone()) } @@ -180,7 +179,7 @@ impl Filler<'_> { MirLowerError::GenericArgNotProvided( self.generics .as_ref() - .and_then(|x| x.iter().nth(b.index)) + .and_then(|it| it.iter().nth(b.index)) .unwrap() .0, self.subst.clone(), @@ -193,25 +192,12 @@ impl Filler<'_> { | chalk_ir::ConstValue::Placeholder(_) => {} chalk_ir::ConstValue::Concrete(cc) => match &cc.interned { crate::ConstScalar::UnevaluatedConst(const_id, subst) => { - let mut const_id = *const_id; let mut subst = subst.clone(); self.fill_subst(&mut subst)?; - if let GeneralConstId::ConstId(c) = const_id { - let (c, s) = lookup_impl_const( - self.db, - self.db.trait_environment_for_body(self.owner), - c, - subst, - ); - const_id = GeneralConstId::ConstId(c); - subst = s; - } - let result = - self.db.const_eval(const_id.into(), subst).map_err(|e| { - let name = const_id.name(self.db.upcast()); - MirLowerError::ConstEvalError(name, Box::new(e)) - })?; - *c = result; + *c = intern_const_scalar( + crate::ConstScalar::UnevaluatedConst(*const_id, subst), + c.data(Interner).ty.clone(), + ); } crate::ConstScalar::Bytes(_, _) | crate::ConstScalar::Unknown => (), }, diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs index ac23e77bd..781ffaeca 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/pretty.rs @@ -135,7 +135,7 @@ impl<'a> MirPrettyCtx<'a> { fn for_closure(&mut self, closure: ClosureId) { let body = match self.db.mir_body_for_closure(closure) { - Ok(x) => x, + Ok(it) => it, Err(e) => { wln!(self, "// error in {closure:?}: {e:?}"); return; @@ -145,7 +145,7 @@ impl<'a> MirPrettyCtx<'a> { let indent = mem::take(&mut self.indent); let mut ctx = MirPrettyCtx { body: &body, - local_to_binding: 
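The `Filler` folder above replaces a placeholder type or const with the entry at the matching index of the substitution, failing with `GenericArgNotProvided` when that index is absent. A toy model of the substitution step, using an assumed, much-simplified type representation:

#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Placeholder(usize), // parameter index within the generics
    I32,
    Slice(Box<Ty>),
}

fn fill(ty: &Ty, subst: &[Ty]) -> Result<Ty, String> {
    match ty {
        Ty::Placeholder(idx) => subst
            .get(*idx)
            .cloned()
            .ok_or_else(|| format!("GenericArgNotProvided({idx})")),
        Ty::Slice(inner) => Ok(Ty::Slice(Box::new(fill(inner, subst)?))),
        other => Ok(other.clone()),
    }
}

fn main() {
    let generic = Ty::Slice(Box::new(Ty::Placeholder(0)));
    assert_eq!(fill(&generic, &[Ty::I32]), Ok(Ty::Slice(Box::new(Ty::I32))));
    assert!(fill(&generic, &[]).is_err()); // missing arg -> error, as in the folder
}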
body.binding_locals.iter().map(|(x, y)| (*y, x)).collect(), + local_to_binding: body.binding_locals.iter().map(|(it, y)| (*y, it)).collect(), result, indent, ..*self @@ -167,7 +167,7 @@ impl<'a> MirPrettyCtx<'a> { } fn new(body: &'a MirBody, hir_body: &'a Body, db: &'a dyn HirDatabase) -> Self { - let local_to_binding = body.binding_locals.iter().map(|(x, y)| (*y, x)).collect(); + let local_to_binding = body.binding_locals.iter().map(|(it, y)| (*y, it)).collect(); MirPrettyCtx { body, db, @@ -315,17 +315,17 @@ impl<'a> MirPrettyCtx<'a> { } } } - ProjectionElem::TupleOrClosureField(x) => { + ProjectionElem::TupleOrClosureField(it) => { f(this, local, head); - w!(this, ".{}", x); + w!(this, ".{}", it); } ProjectionElem::Index(l) => { f(this, local, head); w!(this, "[{}]", this.local_name(*l).display(this.db)); } - x => { + it => { f(this, local, head); - w!(this, ".{:?}", x); + w!(this, ".{:?}", it); } } } @@ -356,14 +356,14 @@ impl<'a> MirPrettyCtx<'a> { } self.place(p); } - Rvalue::Aggregate(AggregateKind::Tuple(_), x) => { + Rvalue::Aggregate(AggregateKind::Tuple(_), it) => { w!(self, "("); - self.operand_list(x); + self.operand_list(it); w!(self, ")"); } - Rvalue::Aggregate(AggregateKind::Array(_), x) => { + Rvalue::Aggregate(AggregateKind::Array(_), it) => { w!(self, "["); - self.operand_list(x); + self.operand_list(it); w!(self, "]"); } Rvalue::Repeat(op, len) => { @@ -371,19 +371,19 @@ impl<'a> MirPrettyCtx<'a> { self.operand(op); w!(self, "; {}]", len.display(self.db)); } - Rvalue::Aggregate(AggregateKind::Adt(_, _), x) => { + Rvalue::Aggregate(AggregateKind::Adt(_, _), it) => { w!(self, "Adt("); - self.operand_list(x); + self.operand_list(it); w!(self, ")"); } - Rvalue::Aggregate(AggregateKind::Closure(_), x) => { + Rvalue::Aggregate(AggregateKind::Closure(_), it) => { w!(self, "Closure("); - self.operand_list(x); + self.operand_list(it); w!(self, ")"); } - Rvalue::Aggregate(AggregateKind::Union(_, _), x) => { + Rvalue::Aggregate(AggregateKind::Union(_, _), it) => { w!(self, "Union("); - self.operand_list(x); + self.operand_list(it); w!(self, ")"); } Rvalue::Len(p) => { @@ -428,8 +428,8 @@ impl<'a> MirPrettyCtx<'a> { } } - fn operand_list(&mut self, x: &[Operand]) { - let mut it = x.iter(); + fn operand_list(&mut self, it: &[Operand]) { + let mut it = it.iter(); if let Some(first) = it.next() { self.operand(first); for op in it { diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs index 857141280..d22d0d85c 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests.rs @@ -30,7 +30,7 @@ use syntax::{ ast::{self, AstNode, HasName}, SyntaxNode, }; -use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry}; +use tracing_subscriber::{layer::SubscriberExt, Registry}; use tracing_tree::HierarchicalLayer; use triomphe::Arc; @@ -52,7 +52,8 @@ fn setup_tracing() -> Option<tracing::subscriber::DefaultGuard> { return None; } - let filter = EnvFilter::from_env("CHALK_DEBUG"); + let filter: tracing_subscriber::filter::Targets = + env::var("CHALK_DEBUG").ok().and_then(|it| it.parse().ok()).unwrap_or_default(); let layer = HierarchicalLayer::default() .with_indent_lines(true) .with_ansi(false) @@ -205,7 +206,9 @@ fn check_impl(ra_fixture: &str, allow_none: bool, only_types: bool, display_sour let Some(node) = (match expr_or_pat { hir_def::hir::ExprOrPatId::ExprId(expr) => expr_node(&body_source_map, expr, &db), hir_def::hir::ExprOrPatId::PatId(pat) => 
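The test-harness change above replaces `EnvFilter::from_env("CHALK_DEBUG")` with a hand-parsed `tracing_subscriber::filter::Targets`, matching the removal of the `env-filter` feature from Cargo.toml. A standalone sketch of the new behavior, assuming only the `tracing-subscriber` crate:

use tracing_subscriber::filter::Targets;

fn chalk_filter() -> Targets {
    // Mirrors the diff: parse CHALK_DEBUG into a Targets filter, falling back
    // to the empty (everything-off) filter when unset or unparsable.
    std::env::var("CHALK_DEBUG").ok().and_then(|it| it.parse().ok()).unwrap_or_default()
}

fn main() {
    std::env::set_var("CHALK_DEBUG", "chalk_solve=debug");
    println!("{:?}", chalk_filter());
}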
pat_node(&body_source_map, pat, &db), - }) else { continue; }; + }) else { + continue; + }; let range = node.as_ref().original_file_range(&db); let actual = format!( "expected {}, got {}", diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/display_source_code.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/display_source_code.rs index 425432479..e75b037e3 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/display_source_code.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/display_source_code.rs @@ -227,3 +227,22 @@ fn f(a: impl Foo<i8, Assoc<i16> = i32>) { "#, ); } + +#[test] +fn fn_def_is_shown_as_fn_ptr() { + check_types_source_code( + r#" +fn foo(_: i32) -> i64 { 42 } +struct S<T>(T); +enum E { A(usize) } +fn test() { + let f = foo; + //^ fn(i32) -> i64 + let f = S::<i8>; + //^ fn(i8) -> S<i8> + let f = E::A; + //^ fn(usize) -> E +} +"#, + ); +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs index 111ac0b61..1e6e946a1 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/macros.rs @@ -202,13 +202,15 @@ fn expr_macro_def_expanded_in_various_places() { 100..119 'for _ ...!() {}': IntoIterator::IntoIter<isize> 100..119 'for _ ...!() {}': &mut IntoIterator::IntoIter<isize> 100..119 'for _ ...!() {}': fn next<IntoIterator::IntoIter<isize>>(&mut IntoIterator::IntoIter<isize>) -> Option<<IntoIterator::IntoIter<isize> as Iterator>::Item> - 100..119 'for _ ...!() {}': Option<Iterator::Item<IntoIterator::IntoIter<isize>>> + 100..119 'for _ ...!() {}': Option<IntoIterator::Item<isize>> 100..119 'for _ ...!() {}': () 100..119 'for _ ...!() {}': () 100..119 'for _ ...!() {}': () - 104..105 '_': Iterator::Item<IntoIterator::IntoIter<isize>> + 104..105 '_': IntoIterator::Item<isize> 117..119 '{}': () 124..134 '|| spam!()': impl Fn() -> isize + 140..156 'while ...!() {}': ! + 140..156 'while ...!() {}': () 140..156 'while ...!() {}': () 154..156 '{}': () 161..174 'break spam!()': ! @@ -293,13 +295,15 @@ fn expr_macro_rules_expanded_in_various_places() { 114..133 'for _ ...!() {}': IntoIterator::IntoIter<isize> 114..133 'for _ ...!() {}': &mut IntoIterator::IntoIter<isize> 114..133 'for _ ...!() {}': fn next<IntoIterator::IntoIter<isize>>(&mut IntoIterator::IntoIter<isize>) -> Option<<IntoIterator::IntoIter<isize> as Iterator>::Item> - 114..133 'for _ ...!() {}': Option<Iterator::Item<IntoIterator::IntoIter<isize>>> + 114..133 'for _ ...!() {}': Option<IntoIterator::Item<isize>> 114..133 'for _ ...!() {}': () 114..133 'for _ ...!() {}': () 114..133 'for _ ...!() {}': () - 118..119 '_': Iterator::Item<IntoIterator::IntoIter<isize>> + 118..119 '_': IntoIterator::Item<isize> 131..133 '{}': () 138..148 '|| spam!()': impl Fn() -> isize + 154..170 'while ...!() {}': ! + 154..170 'while ...!() {}': () 154..170 'while ...!() {}': () 168..170 '{}': () 175..188 'break spam!()': ! 
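The new `fn_def_is_shown_as_fn_ptr` test asserts that plain functions, tuple-struct constructors, and tuple-variant constructors are all rendered as `fn(...) -> ...`. That matches the language semantics: each of these is a function item coercible to a fn pointer:

struct S<T>(T);
enum E { A(usize) }

fn main() {
    let f: fn(i8) -> S<i8> = S::<i8>; // tuple-struct constructor reified to a fn pointer
    let g: fn(usize) -> E = E::A;     // enum-variant constructor likewise
    let s = f(3);
    let n = match g(9) { E::A(n) => n };
    assert_eq!((s.0, n), (3, 9));
}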
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs index 1e57a4ae2..c837fae3f 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/method_resolution.rs @@ -1216,6 +1216,73 @@ fn main() { } #[test] +fn inherent_method_deref_raw() { + check_types( + r#" +struct Val; + +impl Val { + pub fn method(self: *const Val) -> u32 { + 0 + } +} + +fn main() { + let foo: *const Val; + foo.method(); + // ^^^^^^^^^^^^ u32 +} +"#, + ); +} + +#[test] +fn inherent_method_ref_self_deref_raw() { + check_types( + r#" +struct Val; + +impl Val { + pub fn method(&self) -> u32 { + 0 + } +} + +fn main() { + let foo: *const Val; + foo.method(); + // ^^^^^^^^^^^^ {unknown} +} +"#, + ); +} + +#[test] +fn trait_method_deref_raw() { + check_types( + r#" +trait Trait { + fn method(self: *const Self) -> u32; +} + +struct Val; + +impl Trait for Val { + fn method(self: *const Self) -> u32 { + 0 + } +} + +fn main() { + let foo: *const Val; + foo.method(); + // ^^^^^^^^^^^^ u32 +} +"#, + ); +} + +#[test] fn method_on_dyn_impl() { check_types( r#" diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs index 59046c043..5d809b823 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/never_type.rs @@ -412,17 +412,23 @@ fn diverging_expression_3_break() { 355..654 '{ ...; }; }': () 398..399 'x': u32 407..433 '{ whil...; }; }': u32 + 409..430 'while ...eak; }': ! + 409..430 'while ...eak; }': () 409..430 'while ...eak; }': () 415..419 'true': bool 420..430 '{ break; }': () 422..427 'break': ! 537..538 'x': u32 546..564 '{ whil... {}; }': u32 + 548..561 'while true {}': ! + 548..561 'while true {}': () 548..561 'while true {}': () 554..558 'true': bool 559..561 '{}': () 615..616 'x': u32 624..651 '{ whil...; }; }': u32 + 626..648 'while ...urn; }': ! + 626..648 'while ...urn; }': () 626..648 'while ...urn; }': () 632..636 'true': bool 637..648 '{ return; }': () diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs index 047900a32..6ea059065 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/regression.rs @@ -1240,11 +1240,11 @@ fn test() { 16..66 'for _ ... }': IntoIterator::IntoIter<()> 16..66 'for _ ... }': &mut IntoIterator::IntoIter<()> 16..66 'for _ ... }': fn next<IntoIterator::IntoIter<()>>(&mut IntoIterator::IntoIter<()>) -> Option<<IntoIterator::IntoIter<()> as Iterator>::Item> - 16..66 'for _ ... }': Option<Iterator::Item<IntoIterator::IntoIter<()>>> + 16..66 'for _ ... }': Option<IntoIterator::Item<()>> 16..66 'for _ ... }': () 16..66 'for _ ... }': () 16..66 'for _ ... }': () - 20..21 '_': Iterator::Item<IntoIterator::IntoIter<()>> + 20..21 '_': IntoIterator::Item<()> 25..39 '{ let x = 0; }': () 31..32 'x': i32 35..36 '0': i32 @@ -1267,6 +1267,8 @@ fn test() { "#, expect![[r#" 10..59 '{ ... } }': () + 16..57 'while ... }': ! + 16..57 'while ... }': () 16..57 'while ... 
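The three new method-resolution tests probe raw-pointer receivers: a `self: *const Val` or `*const Self` receiver resolves, but a plain `&self` method is expected to yield `{unknown}`, because autoderef never implicitly dereferences a raw pointer. On stable Rust that last rule is directly observable:

struct Val;

impl Val {
    fn method(&self) -> u32 { 0 }
}

fn main() {
    let v = Val;
    let p: *const Val = &v;
    // `p.method()` would not resolve: autoderef stops at raw pointers,
    // which is why the `&self` test above infers `{unknown}`.
    assert_eq!(unsafe { (*p).method() }, 0); // an explicit deref is required
}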
}': () 22..30 '{ true }': bool 24..28 'true': bool @@ -1978,3 +1980,23 @@ fn x(a: [i32; 4]) { "#, ); } + +#[test] +fn dont_unify_on_casts() { + // #15246 + check_types( + r#" +fn unify(_: [bool; 1]) {} +fn casted(_: *const bool) {} +fn default<T>() -> T { loop {} } + +fn test() { + let foo = default(); + //^^^ [bool; 1] + + casted(&foo as *const _); + unify(foo); +} +"#, + ); +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs index a0ff62843..2ad7946c8 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/simple.rs @@ -3513,7 +3513,6 @@ fn func() { ); } -// FIXME #[test] fn castable_to() { check_infer( @@ -3538,10 +3537,10 @@ fn func() { 120..122 '{}': () 138..184 '{ ...0]>; }': () 148..149 'x': Box<[i32; 0]> - 152..160 'Box::new': fn new<[{unknown}; 0]>([{unknown}; 0]) -> Box<[{unknown}; 0]> - 152..164 'Box::new([])': Box<[{unknown}; 0]> + 152..160 'Box::new': fn new<[i32; 0]>([i32; 0]) -> Box<[i32; 0]> + 152..164 'Box::new([])': Box<[i32; 0]> 152..181 'Box::n...2; 0]>': Box<[i32; 0]> - 161..163 '[]': [{unknown}; 0] + 161..163 '[]': [i32; 0] "#]], ); } @@ -3578,6 +3577,21 @@ fn f<T>(t: Ark<T>) { } #[test] +fn ref_to_array_to_ptr_cast() { + check_types( + r#" +fn default<T>() -> T { loop {} } +fn foo() { + let arr = [default()]; + //^^^ [i32; 1] + let ref_to_arr = &arr; + let casted = ref_to_arr as *const i32; +} +"#, + ); +} + +#[test] fn const_dependent_on_local() { check_types( r#" diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs b/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs index 97ae732a9..542df8b34 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/tests/traits.rs @@ -4149,6 +4149,30 @@ where } #[test] +fn gats_in_bounds_for_assoc() { + check_types( + r#" +trait Trait { + type Assoc: Another<Gat<i32> = usize>; + type Assoc2<T>: Another<Gat<T> = T>; +} +trait Another { + type Gat<T>; + fn foo(&self) -> Self::Gat<i32>; + fn bar<T>(&self) -> Self::Gat<T>; +} + +fn test<T: Trait>(a: T::Assoc, b: T::Assoc2<isize>) { + let v = a.foo(); + //^ usize + let v = b.bar::<isize>(); + //^ isize +} +"#, + ); +} + +#[test] fn bin_op_with_scalar_fallback() { // Extra impls are significant so that chalk doesn't give us definite guidances. 
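The new `gats_in_bounds_for_assoc` test exercises equality bounds on a generic associated type (`Another<Gat<i32> = usize>`). A compilable sketch of the same shape, with an impl body assumed for illustration but the same normalization through the bound:

trait Another {
    type Gat<T>;
    fn bar<T>(&self, t: T) -> Self::Gat<T>;
}

struct Sizer;
impl Another for Sizer {
    type Gat<T> = usize;
    fn bar<T>(&self, _t: T) -> usize { std::mem::size_of::<T>() }
}

// The equality bound lets callers normalize `Gat<i32>` to `usize`,
// which is exactly what the inference test checks.
fn test<A: Another<Gat<i32> = usize>>(a: A) -> usize {
    a.bar(3i32)
}

fn main() {
    assert_eq!(test(Sizer), 4);
}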
check_types( @@ -4410,3 +4434,47 @@ fn test(v: S<i32>) { "#, ); } + +#[test] +fn associated_type_in_argument() { + check( + r#" + trait A { + fn m(&self) -> i32; + } + + fn x<T: B>(k: &<T as B>::Ty) { + k.m(); + } + + struct X; + struct Y; + + impl A for X { + fn m(&self) -> i32 { + 8 + } + } + + impl A for Y { + fn m(&self) -> i32 { + 32 + } + } + + trait B { + type Ty: A; + } + + impl B for u16 { + type Ty = X; + } + + fn ttt() { + let inp = Y; + x::<u16>(&inp); + //^^^^ expected &X, got &Y + } + "#, + ); +} diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs b/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs index f40b7db3a..3c7cfbaed 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/traits.rs @@ -170,7 +170,7 @@ fn solve( struct LoggingRustIrDatabaseLoggingOnDrop<'a>(LoggingRustIrDatabase<Interner, ChalkContext<'a>>); -impl<'a> Drop for LoggingRustIrDatabaseLoggingOnDrop<'a> { +impl Drop for LoggingRustIrDatabaseLoggingOnDrop<'_> { fn drop(&mut self) { eprintln!("chalk program:\n{}", self.0); } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs b/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs index 363658063..75b8b9afa 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/utils.rs @@ -28,14 +28,15 @@ use intern::Interned; use rustc_hash::FxHashSet; use smallvec::{smallvec, SmallVec}; use stdx::never; +use triomphe::Arc; use crate::{ consteval::unknown_const, db::HirDatabase, layout::{Layout, TagEncoding}, mir::pad16, - ChalkTraitId, Const, ConstScalar, GenericArg, Interner, Substitution, TraitRef, TraitRefExt, - Ty, WhereClause, + ChalkTraitId, Const, ConstScalar, GenericArg, Interner, Substitution, TraitEnvironment, + TraitRef, TraitRefExt, Ty, WhereClause, }; pub(crate) fn fn_traits( @@ -89,7 +90,7 @@ struct SuperTraits<'a> { seen: FxHashSet<ChalkTraitId>, } -impl<'a> SuperTraits<'a> { +impl SuperTraits<'_> { fn elaborate(&mut self, trait_ref: &TraitRef) { direct_super_trait_refs(self.db, trait_ref, |trait_ref| { if !self.seen.contains(&trait_ref.trait_id) { @@ -99,7 +100,7 @@ impl<'a> SuperTraits<'a> { } } -impl<'a> Iterator for SuperTraits<'a> { +impl Iterator for SuperTraits<'_> { type Item = TraitRef; fn next(&mut self) -> Option<Self::Item> { @@ -417,7 +418,7 @@ impl FallibleTypeFolder<Interner> for UnevaluatedConstEvaluatorFolder<'_> { ) -> Result<Const, Self::Error> { if let chalk_ir::ConstValue::Concrete(c) = &constant.data(Interner).value { if let ConstScalar::UnevaluatedConst(id, subst) = &c.interned { - if let Ok(eval) = self.db.const_eval(*id, subst.clone()) { + if let Ok(eval) = self.db.const_eval(*id, subst.clone(), None) { return Ok(eval); } else { return Ok(unknown_const(constant.data(Interner).ty.clone())); @@ -431,10 +432,11 @@ impl FallibleTypeFolder<Interner> for UnevaluatedConstEvaluatorFolder<'_> { pub(crate) fn detect_variant_from_bytes<'a>( layout: &'a Layout, db: &dyn HirDatabase, - krate: CrateId, + trait_env: Arc<TraitEnvironment>, b: &[u8], e: EnumId, ) -> Option<(LocalEnumVariantId, &'a Layout)> { + let krate = trait_env.krate; let (var_id, var_layout) = match &layout.variants { hir_def::layout::Variants::Single { index } => (index.0, &*layout), hir_def::layout::Variants::Multiple { tag, tag_encoding, variants, .. } => { |
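The `associated_type_in_argument` test above checks that a parameter of type `&<T as B>::Ty` normalizes through the impl (`Ty = X` for `u16`), so that passing a `&Y` reports "expected &X, got &Y". The accepted side of that contract compiles as written:

trait A {
    fn m(&self) -> i32;
}

struct X;
impl A for X {
    fn m(&self) -> i32 { 8 }
}

trait B {
    type Ty: A;
}
impl B for u16 {
    type Ty = X;
}

fn x<T: B>(k: &<T as B>::Ty) -> i32 {
    k.m() // allowed because of the `Ty: A` bound
}

fn main() {
    assert_eq!(x::<u16>(&X), 8); // the parameter normalizes to &X for T = u16
    // Passing a `&Y` here is exactly the "expected &X, got &Y" mismatch the test expects.
}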