path: root/compiler/rustc_ty_utils
author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:32 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:32 +0000
commit      4547b622d8d29df964fa2914213088b148c498fc (patch)
tree        9fc6b25f3c3add6b745be9a2400a6e96140046e9 /compiler/rustc_ty_utils
parent      Releasing progress-linux version 1.66.0+dfsg1-1~progress7.99u1. (diff)
download    rustc-4547b622d8d29df964fa2914213088b148c498fc.tar.xz
            rustc-4547b622d8d29df964fa2914213088b148c498fc.zip
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_ty_utils')
-rw-r--r--   compiler/rustc_ty_utils/Cargo.toml                      2
-rw-r--r--   compiler/rustc_ty_utils/src/consts.rs                 572
-rw-r--r--   compiler/rustc_ty_utils/src/instance.rs                10
-rw-r--r--   compiler/rustc_ty_utils/src/layout.rs                 991
-rw-r--r--   compiler/rustc_ty_utils/src/layout_sanity_check.rs    536
-rw-r--r--   compiler/rustc_ty_utils/src/lib.rs                       2
-rw-r--r--   compiler/rustc_ty_utils/src/structural_match.rs         44
-rw-r--r--   compiler/rustc_ty_utils/src/ty.rs                       73
8 files changed, 640 insertions, 1590 deletions
diff --git a/compiler/rustc_ty_utils/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml
index 5e4ba4730..52fbd3ae0 100644
--- a/compiler/rustc_ty_utils/Cargo.toml
+++ b/compiler/rustc_ty_utils/Cargo.toml
@@ -4,8 +4,6 @@ version = "0.0.0"
edition = "2021"
[dependencies]
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
tracing = "0.1"
rustc_middle = { path = "../rustc_middle" }
rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs
index e057bb668..f8ff31f97 100644
--- a/compiler/rustc_ty_utils/src/consts.rs
+++ b/compiler/rustc_ty_utils/src/consts.rs
@@ -1,10 +1,11 @@
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
-use rustc_index::vec::IndexVec;
use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput};
-use rustc_middle::ty::abstract_const::{CastKind, Node, NodeId};
-use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_middle::thir::visit;
+use rustc_middle::thir::visit::Visitor;
+use rustc_middle::ty::abstract_const::CastKind;
+use rustc_middle::ty::{self, Expr, TyCtxt, TypeVisitable};
use rustc_middle::{mir, thir};
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
@@ -31,10 +32,8 @@ pub(crate) fn destructure_const<'tcx>(
let (fields, variant) = match const_.ty().kind() {
ty::Array(inner_ty, _) | ty::Slice(inner_ty) => {
// construct the consts for the elements of the array/slice
- let field_consts = branches
- .iter()
- .map(|b| tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Value(*b), ty: *inner_ty }))
- .collect::<Vec<_>>();
+ let field_consts =
+ branches.iter().map(|b| tcx.mk_const(*b, *inner_ty)).collect::<Vec<_>>();
debug!(?field_consts);
(field_consts, None)
@@ -52,10 +51,7 @@ pub(crate) fn destructure_const<'tcx>(
for (field, field_valtree) in iter::zip(fields, branches) {
let field_ty = field.ty(tcx, substs);
- let field_const = tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Value(*field_valtree),
- ty: field_ty,
- });
+ let field_const = tcx.mk_const(*field_valtree, field_ty);
field_consts.push(field_const);
}
debug!(?field_consts);
@@ -64,12 +60,7 @@ pub(crate) fn destructure_const<'tcx>(
}
ty::Tuple(elem_tys) => {
let fields = iter::zip(*elem_tys, branches)
- .map(|(elem_ty, elem_valtree)| {
- tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Value(*elem_valtree),
- ty: elem_ty,
- })
- })
+ .map(|(elem_ty, elem_valtree)| tcx.mk_const(*elem_valtree, elem_ty))
.collect::<Vec<_>>();
(fields, None)
@@ -82,328 +73,278 @@ pub(crate) fn destructure_const<'tcx>(
ty::DestructuredConst { variant, fields }
}
-pub struct AbstractConstBuilder<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
- body_id: thir::ExprId,
- body: &'a thir::Thir<'tcx>,
- /// The current WIP node tree.
- nodes: IndexVec<NodeId, Node<'tcx>>,
-}
-
-impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
- fn root_span(&self) -> Span {
- self.body.exprs[self.body_id].span
- }
-
- fn error(&mut self, sub: GenericConstantTooComplexSub) -> Result<!, ErrorGuaranteed> {
- let reported = self.tcx.sess.emit_err(GenericConstantTooComplex {
- span: self.root_span(),
- maybe_supported: None,
- sub,
- });
-
- Err(reported)
+/// We do not allow all binary operations in abstract consts, so filter disallowed ones.
+fn check_binop(op: mir::BinOp) -> bool {
+ use mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr | Eq | Lt | Le | Ne
+ | Ge | Gt => true,
+ Offset => false,
}
+}
- fn maybe_supported_error(
- &mut self,
- sub: GenericConstantTooComplexSub,
- ) -> Result<!, ErrorGuaranteed> {
- let reported = self.tcx.sess.emit_err(GenericConstantTooComplex {
- span: self.root_span(),
- maybe_supported: Some(()),
- sub,
- });
-
- Err(reported)
+/// While we currently allow all unary operations, we still want to explicitly guard against
+/// future changes here.
+fn check_unop(op: mir::UnOp) -> bool {
+ use mir::UnOp::*;
+ match op {
+ Not | Neg => true,
}
+}
- #[instrument(skip(tcx, body, body_id), level = "debug")]
- pub fn new(
- tcx: TyCtxt<'tcx>,
- (body, body_id): (&'a thir::Thir<'tcx>, thir::ExprId),
- ) -> Result<Option<AbstractConstBuilder<'a, 'tcx>>, ErrorGuaranteed> {
- let builder = AbstractConstBuilder { tcx, body_id, body, nodes: IndexVec::new() };
-
- struct IsThirPolymorphic<'a, 'tcx> {
- is_poly: bool,
- thir: &'a thir::Thir<'tcx>,
+fn recurse_build<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &thir::Thir<'tcx>,
+ node: thir::ExprId,
+ root_span: Span,
+) -> Result<ty::Const<'tcx>, ErrorGuaranteed> {
+ use thir::ExprKind;
+ let node = &body.exprs[node];
+
+ let maybe_supported_error = |a| maybe_supported_error(tcx, a, root_span);
+ let error = |a| error(tcx, a, root_span);
+
+ Ok(match &node.kind {
+ // I dont know if handling of these 3 is correct
+ &ExprKind::Scope { value, .. } => recurse_build(tcx, body, value, root_span)?,
+ &ExprKind::PlaceTypeAscription { source, .. }
+ | &ExprKind::ValueTypeAscription { source, .. } => {
+ recurse_build(tcx, body, source, root_span)?
}
-
- use crate::rustc_middle::thir::visit::Visitor;
- use thir::visit;
-
- impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> {
- fn expr_is_poly(&mut self, expr: &thir::Expr<'tcx>) -> bool {
- if expr.ty.has_non_region_param() {
- return true;
+ &ExprKind::Literal { lit, neg } => {
+ let sp = node.span;
+ match tcx.at(sp).lit_to_const(LitToConstInput { lit: &lit.node, ty: node.ty, neg }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported(guar)) => {
+ tcx.const_error_with_guaranteed(node.ty, guar)
}
-
- match expr.kind {
- thir::ExprKind::NamedConst { substs, .. } => substs.has_non_region_param(),
- thir::ExprKind::ConstParam { .. } => true,
- thir::ExprKind::Repeat { value, count } => {
- self.visit_expr(&self.thir()[value]);
- count.has_non_region_param()
- }
- _ => false,
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in lit_to_const")
}
}
+ }
+ &ExprKind::NonHirLiteral { lit, user_ty: _ } => {
+ let val = ty::ValTree::from_scalar_int(lit);
+ tcx.mk_const(val, node.ty)
+ }
+ &ExprKind::ZstLiteral { user_ty: _ } => {
+ let val = ty::ValTree::zst();
+ tcx.mk_const(val, node.ty)
+ }
+ &ExprKind::NamedConst { def_id, substs, user_ty: _ } => {
+ let uneval = ty::UnevaluatedConst::new(ty::WithOptConstParam::unknown(def_id), substs);
+ tcx.mk_const(uneval, node.ty)
+ }
+ ExprKind::ConstParam { param, .. } => tcx.mk_const(*param, node.ty),
- fn pat_is_poly(&mut self, pat: &thir::Pat<'tcx>) -> bool {
- if pat.ty.has_non_region_param() {
- return true;
- }
+ ExprKind::Call { fun, args, .. } => {
+ let fun = recurse_build(tcx, body, *fun, root_span)?;
- match pat.kind {
- thir::PatKind::Constant { value } => value.has_non_region_param(),
- thir::PatKind::Range(box thir::PatRange { lo, hi, .. }) => {
- lo.has_non_region_param() || hi.has_non_region_param()
- }
- _ => false,
- }
+ let mut new_args = Vec::<ty::Const<'tcx>>::with_capacity(args.len());
+ for &id in args.iter() {
+ new_args.push(recurse_build(tcx, body, id, root_span)?);
}
+ let new_args = tcx.mk_const_list(new_args.iter());
+ tcx.mk_const(Expr::FunctionCall(fun, new_args), node.ty)
}
-
- impl<'a, 'tcx> visit::Visitor<'a, 'tcx> for IsThirPolymorphic<'a, 'tcx> {
- fn thir(&self) -> &'a thir::Thir<'tcx> {
- &self.thir
- }
-
- #[instrument(skip(self), level = "debug")]
- fn visit_expr(&mut self, expr: &thir::Expr<'tcx>) {
- self.is_poly |= self.expr_is_poly(expr);
- if !self.is_poly {
- visit::walk_expr(self, expr)
- }
+ &ExprKind::Binary { op, lhs, rhs } if check_binop(op) => {
+ let lhs = recurse_build(tcx, body, lhs, root_span)?;
+ let rhs = recurse_build(tcx, body, rhs, root_span)?;
+ tcx.mk_const(Expr::Binop(op, lhs, rhs), node.ty)
+ }
+ &ExprKind::Unary { op, arg } if check_unop(op) => {
+ let arg = recurse_build(tcx, body, arg, root_span)?;
+ tcx.mk_const(Expr::UnOp(op, arg), node.ty)
+ }
+ // This is necessary so that the following compiles:
+ //
+ // ```
+ // fn foo<const N: usize>(a: [(); N + 1]) {
+ // bar::<{ N + 1 }>();
+ // }
+ // ```
+ ExprKind::Block { block } => {
+ if let thir::Block { stmts: box [], expr: Some(e), .. } = &body.blocks[*block] {
+ recurse_build(tcx, body, *e, root_span)?
+ } else {
+ maybe_supported_error(GenericConstantTooComplexSub::BlockNotSupported(node.span))?
}
-
- #[instrument(skip(self), level = "debug")]
- fn visit_pat(&mut self, pat: &thir::Pat<'tcx>) {
- self.is_poly |= self.pat_is_poly(pat);
- if !self.is_poly {
- visit::walk_pat(self, pat);
- }
+ }
+ // `ExprKind::Use` happens when a `hir::ExprKind::Cast` is a
+ // "coercion cast" i.e. using a coercion or is a no-op.
+ // This is important so that `N as usize as usize` doesnt unify with `N as usize`. (untested)
+ &ExprKind::Use { source } => {
+ let arg = recurse_build(tcx, body, source, root_span)?;
+ tcx.mk_const(Expr::Cast(CastKind::Use, arg, node.ty), node.ty)
+ }
+ &ExprKind::Cast { source } => {
+ let arg = recurse_build(tcx, body, source, root_span)?;
+ tcx.mk_const(Expr::Cast(CastKind::As, arg, node.ty), node.ty)
+ }
+ ExprKind::Borrow { arg, .. } => {
+ let arg_node = &body.exprs[*arg];
+
+ // Skip reborrows for now until we allow Deref/Borrow/AddressOf
+ // expressions.
+ // FIXME(generic_const_exprs): Verify/explain why this is sound
+ if let ExprKind::Deref { arg } = arg_node.kind {
+ recurse_build(tcx, body, arg, root_span)?
+ } else {
+ maybe_supported_error(GenericConstantTooComplexSub::BorrowNotSupported(node.span))?
}
}
-
- let mut is_poly_vis = IsThirPolymorphic { is_poly: false, thir: body };
- visit::walk_expr(&mut is_poly_vis, &body[body_id]);
- debug!("AbstractConstBuilder: is_poly={}", is_poly_vis.is_poly);
- if !is_poly_vis.is_poly {
- return Ok(None);
+ // FIXME(generic_const_exprs): We may want to support these.
+ ExprKind::AddressOf { .. } | ExprKind::Deref { .. } => maybe_supported_error(
+ GenericConstantTooComplexSub::AddressAndDerefNotSupported(node.span),
+ )?,
+ ExprKind::Repeat { .. } | ExprKind::Array { .. } => {
+ maybe_supported_error(GenericConstantTooComplexSub::ArrayNotSupported(node.span))?
}
+ ExprKind::NeverToAny { .. } => {
+ maybe_supported_error(GenericConstantTooComplexSub::NeverToAnyNotSupported(node.span))?
+ }
+ ExprKind::Tuple { .. } => {
+ maybe_supported_error(GenericConstantTooComplexSub::TupleNotSupported(node.span))?
+ }
+ ExprKind::Index { .. } => {
+ maybe_supported_error(GenericConstantTooComplexSub::IndexNotSupported(node.span))?
+ }
+ ExprKind::Field { .. } => {
+ maybe_supported_error(GenericConstantTooComplexSub::FieldNotSupported(node.span))?
+ }
+ ExprKind::ConstBlock { .. } => {
+ maybe_supported_error(GenericConstantTooComplexSub::ConstBlockNotSupported(node.span))?
+ }
+ ExprKind::Adt(_) => {
+ maybe_supported_error(GenericConstantTooComplexSub::AdtNotSupported(node.span))?
+ }
+ // dont know if this is correct
+ ExprKind::Pointer { .. } => {
+ error(GenericConstantTooComplexSub::PointerNotSupported(node.span))?
+ }
+ ExprKind::Yield { .. } => {
+ error(GenericConstantTooComplexSub::YieldNotSupported(node.span))?
+ }
+ ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Loop { .. } => {
+ error(GenericConstantTooComplexSub::LoopNotSupported(node.span))?
+ }
+ ExprKind::Box { .. } => error(GenericConstantTooComplexSub::BoxNotSupported(node.span))?,
- Ok(Some(builder))
- }
-
- /// We do not allow all binary operations in abstract consts, so filter disallowed ones.
- fn check_binop(op: mir::BinOp) -> bool {
- use mir::BinOp::*;
- match op {
- Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr | Eq | Lt | Le
- | Ne | Ge | Gt => true,
- Offset => false,
+ ExprKind::Unary { .. } => unreachable!(),
+ // we handle valid unary/binary ops above
+ ExprKind::Binary { .. } => {
+ error(GenericConstantTooComplexSub::BinaryNotSupported(node.span))?
+ }
+ ExprKind::LogicalOp { .. } => {
+ error(GenericConstantTooComplexSub::LogicalOpNotSupported(node.span))?
+ }
+ ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
+ error(GenericConstantTooComplexSub::AssignNotSupported(node.span))?
+ }
+ ExprKind::Closure { .. } | ExprKind::Return { .. } => {
+ error(GenericConstantTooComplexSub::ClosureAndReturnNotSupported(node.span))?
+ }
+ // let expressions imply control flow
+ ExprKind::Match { .. } | ExprKind::If { .. } | ExprKind::Let { .. } => {
+ error(GenericConstantTooComplexSub::ControlFlowNotSupported(node.span))?
+ }
+ ExprKind::InlineAsm { .. } => {
+ error(GenericConstantTooComplexSub::InlineAsmNotSupported(node.span))?
}
- }
- /// While we currently allow all unary operations, we still want to explicitly guard against
- /// future changes here.
- fn check_unop(op: mir::UnOp) -> bool {
- use mir::UnOp::*;
- match op {
- Not | Neg => true,
+ // we dont permit let stmts so `VarRef` and `UpvarRef` cant happen
+ ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::ThreadLocalRef(_) => {
+ error(GenericConstantTooComplexSub::OperationNotSupported(node.span))?
}
- }
+ })
+}
- /// Builds the abstract const by walking the thir and bailing out when
- /// encountering an unsupported operation.
- pub fn build(mut self) -> Result<&'tcx [Node<'tcx>], ErrorGuaranteed> {
- debug!("AbstractConstBuilder::build: body={:?}", &*self.body);
- self.recurse_build(self.body_id)?;
+struct IsThirPolymorphic<'a, 'tcx> {
+ is_poly: bool,
+ thir: &'a thir::Thir<'tcx>,
+}
- Ok(self.tcx.arena.alloc_from_iter(self.nodes.into_iter()))
- }
+fn error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sub: GenericConstantTooComplexSub,
+ root_span: Span,
+) -> Result<!, ErrorGuaranteed> {
+ let reported = tcx.sess.emit_err(GenericConstantTooComplex {
+ span: root_span,
+ maybe_supported: None,
+ sub,
+ });
+
+ Err(reported)
+}
- fn recurse_build(&mut self, node: thir::ExprId) -> Result<NodeId, ErrorGuaranteed> {
- use thir::ExprKind;
- let node = &self.body.exprs[node];
- Ok(match &node.kind {
- // I dont know if handling of these 3 is correct
- &ExprKind::Scope { value, .. } => self.recurse_build(value)?,
- &ExprKind::PlaceTypeAscription { source, .. }
- | &ExprKind::ValueTypeAscription { source, .. } => self.recurse_build(source)?,
- &ExprKind::Literal { lit, neg } => {
- let sp = node.span;
- let constant = match self.tcx.at(sp).lit_to_const(LitToConstInput {
- lit: &lit.node,
- ty: node.ty,
- neg,
- }) {
- Ok(c) => c,
- Err(LitToConstError::Reported) => self.tcx.const_error(node.ty),
- Err(LitToConstError::TypeError) => {
- bug!("encountered type error in lit_to_const")
- }
- };
-
- self.nodes.push(Node::Leaf(constant))
- }
- &ExprKind::NonHirLiteral { lit, user_ty: _ } => {
- let val = ty::ValTree::from_scalar_int(lit);
- self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, val, node.ty)))
- }
- &ExprKind::ZstLiteral { user_ty: _ } => {
- let val = ty::ValTree::zst();
- self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, val, node.ty)))
- }
- &ExprKind::NamedConst { def_id, substs, user_ty: _ } => {
- let uneval =
- ty::UnevaluatedConst::new(ty::WithOptConstParam::unknown(def_id), substs);
+fn maybe_supported_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sub: GenericConstantTooComplexSub,
+ root_span: Span,
+) -> Result<!, ErrorGuaranteed> {
+ let reported = tcx.sess.emit_err(GenericConstantTooComplex {
+ span: root_span,
+ maybe_supported: Some(()),
+ sub,
+ });
+
+ Err(reported)
+}
- let constant = self
- .tcx
- .mk_const(ty::ConstS { kind: ty::ConstKind::Unevaluated(uneval), ty: node.ty });
+impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> {
+ fn expr_is_poly(&mut self, expr: &thir::Expr<'tcx>) -> bool {
+ if expr.ty.has_non_region_param() {
+ return true;
+ }
- self.nodes.push(Node::Leaf(constant))
+ match expr.kind {
+ thir::ExprKind::NamedConst { substs, .. } => substs.has_non_region_param(),
+ thir::ExprKind::ConstParam { .. } => true,
+ thir::ExprKind::Repeat { value, count } => {
+ self.visit_expr(&self.thir()[value]);
+ count.has_non_region_param()
}
+ _ => false,
+ }
+ }
+ fn pat_is_poly(&mut self, pat: &thir::Pat<'tcx>) -> bool {
+ if pat.ty.has_non_region_param() {
+ return true;
+ }
- ExprKind::ConstParam { param, .. } => {
- let const_param = self
- .tcx
- .mk_const(ty::ConstS { kind: ty::ConstKind::Param(*param), ty: node.ty });
- self.nodes.push(Node::Leaf(const_param))
+ match pat.kind {
+ thir::PatKind::Constant { value } => value.has_non_region_param(),
+ thir::PatKind::Range(box thir::PatRange { lo, hi, .. }) => {
+ lo.has_non_region_param() || hi.has_non_region_param()
}
+ _ => false,
+ }
+ }
+}
- ExprKind::Call { fun, args, .. } => {
- let fun = self.recurse_build(*fun)?;
-
- let mut new_args = Vec::<NodeId>::with_capacity(args.len());
- for &id in args.iter() {
- new_args.push(self.recurse_build(id)?);
- }
- let new_args = self.tcx.arena.alloc_slice(&new_args);
- self.nodes.push(Node::FunctionCall(fun, new_args))
- }
- &ExprKind::Binary { op, lhs, rhs } if Self::check_binop(op) => {
- let lhs = self.recurse_build(lhs)?;
- let rhs = self.recurse_build(rhs)?;
- self.nodes.push(Node::Binop(op, lhs, rhs))
- }
- &ExprKind::Unary { op, arg } if Self::check_unop(op) => {
- let arg = self.recurse_build(arg)?;
- self.nodes.push(Node::UnaryOp(op, arg))
- }
- // This is necessary so that the following compiles:
- //
- // ```
- // fn foo<const N: usize>(a: [(); N + 1]) {
- // bar::<{ N + 1 }>();
- // }
- // ```
- ExprKind::Block { block } => {
- if let thir::Block { stmts: box [], expr: Some(e), .. } = &self.body.blocks[*block]
- {
- self.recurse_build(*e)?
- } else {
- self.maybe_supported_error(GenericConstantTooComplexSub::BlockNotSupported(
- node.span,
- ))?
- }
- }
- // `ExprKind::Use` happens when a `hir::ExprKind::Cast` is a
- // "coercion cast" i.e. using a coercion or is a no-op.
- // This is important so that `N as usize as usize` doesnt unify with `N as usize`. (untested)
- &ExprKind::Use { source } => {
- let arg = self.recurse_build(source)?;
- self.nodes.push(Node::Cast(CastKind::Use, arg, node.ty))
- }
- &ExprKind::Cast { source } => {
- let arg = self.recurse_build(source)?;
- self.nodes.push(Node::Cast(CastKind::As, arg, node.ty))
- }
- ExprKind::Borrow { arg, .. } => {
- let arg_node = &self.body.exprs[*arg];
-
- // Skip reborrows for now until we allow Deref/Borrow/AddressOf
- // expressions.
- // FIXME(generic_const_exprs): Verify/explain why this is sound
- if let ExprKind::Deref { arg } = arg_node.kind {
- self.recurse_build(arg)?
- } else {
- self.maybe_supported_error(GenericConstantTooComplexSub::BorrowNotSupported(
- node.span,
- ))?
- }
- }
- // FIXME(generic_const_exprs): We may want to support these.
- ExprKind::AddressOf { .. } | ExprKind::Deref { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::AddressAndDerefNotSupported(node.span),
- )?,
- ExprKind::Repeat { .. } | ExprKind::Array { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::ArrayNotSupported(node.span),
- )?,
- ExprKind::NeverToAny { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::NeverToAnyNotSupported(node.span),
- )?,
- ExprKind::Tuple { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::TupleNotSupported(node.span),
- )?,
- ExprKind::Index { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::IndexNotSupported(node.span),
- )?,
- ExprKind::Field { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::FieldNotSupported(node.span),
- )?,
- ExprKind::ConstBlock { .. } => self.maybe_supported_error(
- GenericConstantTooComplexSub::ConstBlockNotSupported(node.span),
- )?,
- ExprKind::Adt(_) => self
- .maybe_supported_error(GenericConstantTooComplexSub::AdtNotSupported(node.span))?,
- // dont know if this is correct
- ExprKind::Pointer { .. } => {
- self.error(GenericConstantTooComplexSub::PointerNotSupported(node.span))?
- }
- ExprKind::Yield { .. } => {
- self.error(GenericConstantTooComplexSub::YieldNotSupported(node.span))?
- }
- ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Loop { .. } => {
- self.error(GenericConstantTooComplexSub::LoopNotSupported(node.span))?
- }
- ExprKind::Box { .. } => {
- self.error(GenericConstantTooComplexSub::BoxNotSupported(node.span))?
- }
+impl<'a, 'tcx> visit::Visitor<'a, 'tcx> for IsThirPolymorphic<'a, 'tcx> {
+ fn thir(&self) -> &'a thir::Thir<'tcx> {
+ &self.thir
+ }
- ExprKind::Unary { .. } => unreachable!(),
- // we handle valid unary/binary ops above
- ExprKind::Binary { .. } => {
- self.error(GenericConstantTooComplexSub::BinaryNotSupported(node.span))?
- }
- ExprKind::LogicalOp { .. } => {
- self.error(GenericConstantTooComplexSub::LogicalOpNotSupported(node.span))?
- }
- ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
- self.error(GenericConstantTooComplexSub::AssignNotSupported(node.span))?
- }
- ExprKind::Closure { .. } | ExprKind::Return { .. } => {
- self.error(GenericConstantTooComplexSub::ClosureAndReturnNotSupported(node.span))?
- }
- // let expressions imply control flow
- ExprKind::Match { .. } | ExprKind::If { .. } | ExprKind::Let { .. } => {
- self.error(GenericConstantTooComplexSub::ControlFlowNotSupported(node.span))?
- }
- ExprKind::InlineAsm { .. } => {
- self.error(GenericConstantTooComplexSub::InlineAsmNotSupported(node.span))?
- }
+ #[instrument(skip(self), level = "debug")]
+ fn visit_expr(&mut self, expr: &thir::Expr<'tcx>) {
+ self.is_poly |= self.expr_is_poly(expr);
+ if !self.is_poly {
+ visit::walk_expr(self, expr)
+ }
+ }
- // we dont permit let stmts so `VarRef` and `UpvarRef` cant happen
- ExprKind::VarRef { .. }
- | ExprKind::UpvarRef { .. }
- | ExprKind::StaticRef { .. }
- | ExprKind::ThreadLocalRef(_) => {
- self.error(GenericConstantTooComplexSub::OperationNotSupported(node.span))?
- }
- })
+ #[instrument(skip(self), level = "debug")]
+ fn visit_pat(&mut self, pat: &thir::Pat<'tcx>) {
+ self.is_poly |= self.pat_is_poly(pat);
+ if !self.is_poly {
+ visit::walk_pat(self, pat);
+ }
}
}
@@ -411,7 +352,7 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
pub fn thir_abstract_const<'tcx>(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
-) -> Result<Option<&'tcx [Node<'tcx>]>, ErrorGuaranteed> {
+) -> Result<Option<ty::Const<'tcx>>, ErrorGuaranteed> {
if tcx.features().generic_const_exprs {
match tcx.def_kind(def.did) {
// FIXME(generic_const_exprs): We currently only do this for anonymous constants,
@@ -424,10 +365,17 @@ pub fn thir_abstract_const<'tcx>(
}
let body = tcx.thir_body(def)?;
+ let (body, body_id) = (&*body.0.borrow(), body.1);
+
+ let mut is_poly_vis = IsThirPolymorphic { is_poly: false, thir: body };
+ visit::walk_expr(&mut is_poly_vis, &body[body_id]);
+ if !is_poly_vis.is_poly {
+ return Ok(None);
+ }
+
+ let root_span = body.exprs[body_id].span;
- AbstractConstBuilder::new(tcx, (&*body.0.borrow(), body.1))?
- .map(AbstractConstBuilder::build)
- .transpose()
+ Some(recurse_build(tcx, body, body_id, root_span)).transpose()
} else {
Ok(None)
}
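
A note on the shape of the consts.rs refactor above: the AbstractConstBuilder, which collected an arena-allocated Node tree, is replaced by the free function recurse_build, which interns ty::Const values directly. A minimal sketch of the call-site difference, distilled from the hunks above (the valtree, param, op, lhs, rhs and ty bindings are placeholders, not names from the patch):

    // 1.66: every constant spelled out ty::ConstS and was pushed as a Node into an IndexVec.
    let c = tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Value(valtree), ty });

    // 1.67: mk_const takes the kind (value, param, unevaluated, expression) plus the
    // type, and recurse_build returns the interned constant itself.
    let c = tcx.mk_const(valtree, ty);                    // a value tree
    let p = tcx.mk_const(param, ty);                      // a const parameter
    let e = tcx.mk_const(Expr::Binop(op, lhs, rhs), ty);  // a const expression

The IsThirPolymorphic walk that used to gate AbstractConstBuilder::new now runs in thir_abstract_const itself, so a non-polymorphic body still short-circuits to Ok(None) before any building happens.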
diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs
index 6436713b3..c6f2b16ca 100644
--- a/compiler/rustc_ty_utils/src/instance.rs
+++ b/compiler/rustc_ty_utils/src/instance.rs
@@ -202,8 +202,14 @@ fn resolve_associated_item<'tcx>(
)),
substs: generator_data.substs,
}),
+ traits::ImplSource::Future(future_data) => Some(Instance {
+ def: ty::InstanceDef::Item(ty::WithOptConstParam::unknown(
+ future_data.generator_def_id,
+ )),
+ substs: future_data.substs,
+ }),
traits::ImplSource::Closure(closure_data) => {
- let trait_closure_kind = tcx.fn_trait_kind_from_lang_item(trait_id).unwrap();
+ let trait_closure_kind = tcx.fn_trait_kind_from_def_id(trait_id).unwrap();
Instance::resolve_closure(
tcx,
closure_data.closure_def_id,
@@ -264,8 +270,6 @@ fn resolve_associated_item<'tcx>(
traits::ImplSource::AutoImpl(..)
| traits::ImplSource::Param(..)
| traits::ImplSource::TraitAlias(..)
- | traits::ImplSource::DiscriminantKind(..)
- | traits::ImplSource::Pointee(..)
| traits::ImplSource::TraitUpcasting(_)
| traits::ImplSource::ConstDestruct(_) => None,
})
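
For context on the new ImplSource::Future arm in instance.rs: an async fn body is lowered to a generator, and this arm resolves the Future::poll obligation on the returned opaque future to that generator body, mirroring the ImplSource::Generator arm directly above it. A hedged illustration; the function below is hypothetical and not part of the patch:

    // Hypothetical user code.
    async fn add_one(x: u32) -> u32 { x + 1 }
    // The future returned by add_one is backed by a compiler-generated generator;
    // resolving <_ as Future>::poll on it now produces
    // InstanceDef::Item(future_data.generator_def_id) with the future's substs,
    // exactly as the hunk shows.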
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 52ba0eee9..fbc055b5d 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -13,13 +13,8 @@ use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;
-use std::cmp::{self, Ordering};
+use std::fmt::Debug;
use std::iter;
-use std::num::NonZeroUsize;
-use std::ops::Bound;
-
-use rand::{seq::SliceRandom, SeedableRng};
-use rand_xoshiro::Xoshiro128StarStar;
use crate::layout_sanity_check::sanity_check_layout;
@@ -66,16 +61,6 @@ fn layout_of<'tcx>(
Ok(layout)
}
-#[derive(Copy, Clone, Debug)]
-enum StructKind {
- /// A tuple, closure, or univariant which cannot be coerced to unsized.
- AlwaysSized,
- /// A univariant, the last field of which may be coerced to unsized.
- MaybeUnsized,
- /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
- Prefixed(Size, Align),
-}
-
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
@@ -89,40 +74,13 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
inverse
}
-fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
- let dl = cx.data_layout();
- let b_align = b.align(dl);
- let align = a.align(dl).max(b_align).max(dl.aggregate_align);
- let b_offset = a.size(dl).align_to(b_align.abi);
- let size = (b_offset + b.size(dl)).align_to(align.abi);
-
- // HACK(nox): We iter on `b` and then `a` because `max_by_key`
- // returns the last maximum.
- let largest_niche = Niche::from_scalar(dl, b_offset, b)
- .into_iter()
- .chain(Niche::from_scalar(dl, Size::ZERO, a))
- .max_by_key(|niche| niche.available(dl));
-
- LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO, b_offset],
- memory_index: vec![0, 1],
- },
- abi: Abi::ScalarPair(a, b),
- largest_niche,
- align,
- size,
- }
-}
-
fn univariant_uninterned<'tcx>(
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
ty: Ty<'tcx>,
fields: &[TyAndLayout<'_>],
repr: &ReprOptions,
kind: StructKind,
-) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
let dl = cx.data_layout();
let pack = repr.pack;
if pack.is_some() && repr.align.is_some() {
@@ -130,208 +88,7 @@ fn univariant_uninterned<'tcx>(
return Err(LayoutError::Unknown(ty));
}
- let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
-
- let optimize = !repr.inhibit_struct_field_reordering_opt();
- if optimize {
- let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
- let optimizing = &mut inverse_memory_index[..end];
- let field_align = |f: &TyAndLayout<'_>| {
- if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
- };
-
- // If `-Z randomize-layout` was enabled for the type definition we can shuffle
- // the field ordering to try and catch some code making assumptions about layouts
- // we don't guarantee
- if repr.can_randomize_type_layout() {
- // `ReprOptions.layout_seed` is a deterministic seed that we can use to
- // randomize field ordering with
- let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
- // Shuffle the ordering of the fields
- optimizing.shuffle(&mut rng);
-
- // Otherwise we just leave things alone and actually optimize the type's fields
- } else {
- match kind {
- StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- optimizing.sort_by_key(|&x| {
- // Place ZSTs first to avoid "interesting offsets",
- // especially with only one or two non-ZST fields.
- let f = &fields[x as usize];
- (!f.is_zst(), cmp::Reverse(field_align(f)))
- });
- }
-
- StructKind::Prefixed(..) => {
- // Sort in ascending alignment so that the layout stays optimal
- // regardless of the prefix
- optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
- }
- }
-
- // FIXME(Kixiron): We can always shuffle fields within a given alignment class
- // regardless of the status of `-Z randomize-layout`
- }
- }
-
- // inverse_memory_index holds field indices by increasing memory offset.
- // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
- // We now write field offsets to the corresponding offset slot;
- // field 5 with offset 0 puts 0 in offsets[5].
- // At the bottom of this function, we invert `inverse_memory_index` to
- // produce `memory_index` (see `invert_mapping`).
-
- let mut sized = true;
- let mut offsets = vec![Size::ZERO; fields.len()];
- let mut offset = Size::ZERO;
- let mut largest_niche = None;
- let mut largest_niche_available = 0;
-
- if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
- let prefix_align =
- if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
- align = align.max(AbiAndPrefAlign::new(prefix_align));
- offset = prefix_size.align_to(prefix_align);
- }
-
- for &i in &inverse_memory_index {
- let field = fields[i as usize];
- if !sized {
- cx.tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!(
- "univariant: field #{} of `{}` comes after unsized field",
- offsets.len(),
- ty
- ),
- );
- }
-
- if field.is_unsized() {
- sized = false;
- }
-
- // Invariant: offset < dl.obj_size_bound() <= 1<<61
- let field_align = if let Some(pack) = pack {
- field.align.min(AbiAndPrefAlign::new(pack))
- } else {
- field.align
- };
- offset = offset.align_to(field_align.abi);
- align = align.max(field_align);
-
- debug!("univariant offset: {:?} field: {:#?}", offset, field);
- offsets[i as usize] = offset;
-
- if let Some(mut niche) = field.largest_niche {
- let available = niche.available(dl);
- if available > largest_niche_available {
- largest_niche_available = available;
- niche.offset += offset;
- largest_niche = Some(niche);
- }
- }
-
- offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- }
-
- if let Some(repr_align) = repr.align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- debug!("univariant min_size: {:?}", offset);
- let min_size = offset;
-
- // As stated above, inverse_memory_index holds field indices by increasing offset.
- // This makes it an already-sorted view of the offsets vec.
- // To invert it, consider:
- // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
- // Field 5 would be the first element, so memory_index is i:
- // Note: if we didn't optimize, it's already right.
-
- let memory_index =
- if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
-
- let size = min_size.align_to(align.abi);
- let mut abi = Abi::Aggregate { sized };
-
- // Unpack newtype ABIs and find scalar pairs.
- if sized && size.bytes() > 0 {
- // All other fields must be ZSTs.
- let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
- match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
- // We have exactly one non-ZST field.
- (Some((i, field)), None, None) => {
- // Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
- match field.abi {
- // For plain scalars, or vectors of them, we can't unpack
- // newtypes for `#[repr(C)]`, as that affects C ABIs.
- Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi;
- }
- // But scalar pairs are Rust-specific and get
- // treated as aggregates by C ABIs anyway.
- Abi::ScalarPair(..) => {
- abi = field.abi;
- }
- _ => {}
- }
- }
- }
-
- // Two non-ZST fields, and they're both scalars.
- (Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
- (Abi::Scalar(a), Abi::Scalar(b)) => {
- // Order by the memory placement, not source order.
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
- ((i, a), (j, b))
- } else {
- ((j, b), (i, a))
- };
- let pair = scalar_pair(cx, a, b);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!(),
- };
- if offsets[i] == pair_offsets[0]
- && offsets[j] == pair_offsets[1]
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- _ => {}
- }
- }
-
- _ => {}
- }
- }
-
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- }
-
- Ok(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary { offsets, memory_index },
- abi,
- largest_niche,
- align,
- size,
- })
+ cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
}
fn layout_of_uncached<'tcx>(
@@ -382,14 +139,7 @@ fn layout_of_uncached<'tcx>(
}
// The never type.
- ty::Never => tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Primitive,
- abi: Abi::Uninhabited,
- largest_niche: None,
- align: dl.i8_align,
- size: Size::ZERO,
- }),
+ ty::Never => tcx.intern_layout(cx.layout_of_never_type()),
// Potentially-wide pointers.
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
@@ -418,7 +168,7 @@ fn layout_of_uncached<'tcx>(
};
// Effectively a (ptr, meta) tuple.
- tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
+ tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
}
ty::Dynamic(_, _, ty::DynStar) => {
@@ -426,7 +176,7 @@ fn layout_of_uncached<'tcx>(
data.valid_range_mut().start = 0;
let mut vtable = scalar_unit(Pointer);
vtable.valid_range_mut().start = 1;
- tcx.intern_layout(scalar_pair(cx, data, vtable))
+ tcx.intern_layout(cx.scalar_pair(data, vtable))
}
// Arrays and slices.
@@ -442,8 +192,7 @@ fn layout_of_uncached<'tcx>(
let element = cx.layout_of(element)?;
let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- let abi = if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty))
- {
+ let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
Abi::Uninhabited
} else {
Abi::Aggregate { sized: true }
@@ -576,8 +325,8 @@ fn layout_of_uncached<'tcx>(
// Extract the number of elements from the layout of the array field:
let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
- return Err(LayoutError::Unknown(ty));
- };
+ return Err(LayoutError::Unknown(ty));
+ };
(*e_ty, *count, true)
} else {
@@ -602,14 +351,14 @@ fn layout_of_uncached<'tcx>(
// Compute the ABI of the element type:
let e_ly = cx.layout_of(e_ty)?;
let Abi::Scalar(e_abi) = e_ly.abi else {
- // This error isn't caught in typeck, e.g., if
- // the element type of the vector is generic.
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` with a non-primitive-scalar \
- (integer/float/pointer) element type `{}`",
- ty, e_ty
- ))
- };
+ // This error isn't caught in typeck, e.g., if
+ // the element type of the vector is generic.
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with a non-primitive-scalar \
+ (integer/float/pointer) element type `{}`",
+ ty, e_ty
+ ))
+ };
// Compute the size and alignment of the vector:
let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
@@ -656,681 +405,41 @@ fn layout_of_uncached<'tcx>(
return Err(LayoutError::Unknown(ty));
}
- let mut align =
- if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- if let Some(repr_align) = def.repr().align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- let optimize = !def.repr().inhibit_union_abi_opt();
- let mut size = Size::ZERO;
- let mut abi = Abi::Aggregate { sized: true };
- let index = VariantIdx::new(0);
- for field in &variants[index] {
- assert!(!field.is_unsized());
- align = align.max(field.align);
-
- // If all non-ZST fields have the same ABI, forward this ABI
- if optimize && !field.is_zst() {
- // Discard valid range information and allow undef
- let field_abi = match field.abi {
- Abi::Scalar(x) => Abi::Scalar(x.to_union()),
- Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
- Abi::Vector { element: x, count } => {
- Abi::Vector { element: x.to_union(), count }
- }
- Abi::Uninhabited | Abi::Aggregate { .. } => {
- Abi::Aggregate { sized: true }
- }
- };
-
- if size == Size::ZERO {
- // first non ZST: initialize 'abi'
- abi = field_abi;
- } else if abi != field_abi {
- // different fields have different ABI: reset to Aggregate
- abi = Abi::Aggregate { sized: true };
- }
- }
-
- size = cmp::max(size, field.size);
- }
-
- if let Some(pack) = def.repr().pack {
- align = align.min(AbiAndPrefAlign::new(pack));
- }
-
- return Ok(tcx.intern_layout(LayoutS {
- variants: Variants::Single { index },
- fields: FieldsShape::Union(
- NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
- ),
- abi,
- largest_niche: None,
- align,
- size: size.align_to(align.abi),
- }));
- }
-
- // A variant is absent if it's uninhabited and only has ZST fields.
- // Present uninhabited variants only require space for their fields,
- // but *not* an encoding of the discriminant (e.g., a tag value).
- // See issue #49298 for more details on the need to leave space
- // for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &[TyAndLayout<'_>]| {
- let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
- let is_zst = fields.iter().all(|f| f.is_zst());
- uninhabited && is_zst
- };
- let (present_first, present_second) = {
- let mut present_variants = variants
- .iter_enumerated()
- .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
- (present_variants.next(), present_variants.next())
- };
- let present_first = match present_first {
- Some(present_first) => present_first,
- // Uninhabited because it has no variants, or only absent ones.
- None if def.is_enum() => {
- return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
- }
- // If it's a struct, still compute a layout so that we can still compute the
- // field offsets.
- None => VariantIdx::new(0),
- };
-
- let is_struct = !def.is_enum() ||
- // Only one variant is present.
- (present_second.is_none() &&
- // Representation optimizations are allowed.
- !def.repr().inhibit_enum_layout_opt());
- if is_struct {
- // Struct, or univariant enum equivalent to a struct.
- // (Typechecking will reject discriminant-sizing attrs.)
-
- let v = present_first;
- let kind = if def.is_enum() || variants[v].is_empty() {
- StructKind::AlwaysSized
- } else {
- let param_env = tcx.param_env(def.did());
- let last_field = def.variant(v).fields.last().unwrap();
- let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
- if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
- };
-
- let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
- st.variants = Variants::Single { index: v };
-
- if def.is_unsafe_cell() {
- let hide_niches = |scalar: &mut _| match scalar {
- Scalar::Initialized { value, valid_range } => {
- *valid_range = WrappingRange::full(value.size(dl))
- }
- // Already doesn't have any niches
- Scalar::Union { .. } => {}
- };
- match &mut st.abi {
- Abi::Uninhabited => {}
- Abi::Scalar(scalar) => hide_niches(scalar),
- Abi::ScalarPair(a, b) => {
- hide_niches(a);
- hide_niches(b);
- }
- Abi::Vector { element, count: _ } => hide_niches(element),
- Abi::Aggregate { sized: _ } => {}
- }
- st.largest_niche = None;
- return Ok(tcx.intern_layout(st));
- }
-
- let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
- match st.abi {
- Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
- // the asserts ensure that we are not using the
- // `#[rustc_layout_scalar_valid_range(n)]`
- // attribute to widen the range of anything as that would probably
- // result in UB somewhere
- // FIXME(eddyb) the asserts are probably not needed,
- // as larger validity ranges would result in missed
- // optimizations, *not* wrongly assuming the inner
- // value is valid. e.g. unions enlarge validity ranges,
- // because the values may be uninitialized.
- if let Bound::Included(start) = start {
- // FIXME(eddyb) this might be incorrect - it doesn't
- // account for wrap-around (end < start) ranges.
- let valid_range = scalar.valid_range_mut();
- assert!(valid_range.start <= start);
- valid_range.start = start;
- }
- if let Bound::Included(end) = end {
- // FIXME(eddyb) this might be incorrect - it doesn't
- // account for wrap-around (end < start) ranges.
- let valid_range = scalar.valid_range_mut();
- assert!(valid_range.end >= end);
- valid_range.end = end;
- }
-
- // Update `largest_niche` if we have introduced a larger niche.
- let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
- if let Some(niche) = niche {
- match st.largest_niche {
- Some(largest_niche) => {
- // Replace the existing niche even if they're equal,
- // because this one is at a lower offset.
- if largest_niche.available(dl) <= niche.available(dl) {
- st.largest_niche = Some(niche);
- }
- }
- None => st.largest_niche = Some(niche),
- }
- }
- }
- _ => assert!(
- start == Bound::Unbounded && end == Bound::Unbounded,
- "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
- def,
- st,
- ),
- }
-
- return Ok(tcx.intern_layout(st));
- }
-
- // At this point, we have handled all unions and
- // structs. (We have also handled univariant enums
- // that allow representation optimization.)
- assert!(def.is_enum());
-
- // Until we've decided whether to use the tagged or
- // niche filling LayoutS, we don't want to intern the
- // variant layouts, so we can't store them in the
- // overall LayoutS. Store the overall LayoutS
- // and the variant LayoutSs here until then.
- struct TmpLayout<'tcx> {
- layout: LayoutS<'tcx>,
- variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
+ return Ok(tcx.intern_layout(
+ cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
+ ));
}
- let calculate_niche_filling_layout =
- || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
- // The current code for niche-filling relies on variant indices
- // instead of actual discriminants, so enums with
- // explicit discriminants (RFC #2363) would misbehave.
- if def.repr().inhibit_enum_layout_opt()
+ tcx.intern_layout(
+ cx.layout_of_struct_or_enum(
+ &def.repr(),
+ &variants,
+ def.is_enum(),
+ def.is_unsafe_cell(),
+ tcx.layout_scalar_valid_range(def.did()),
+ |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
+ def.is_enum()
+ .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
+ .into_iter()
+ .flatten(),
+ def.repr().inhibit_enum_layout_opt()
|| def
.variants()
.iter_enumerated()
- .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
- {
- return Ok(None);
- }
-
- if variants.len() < 2 {
- return Ok(None);
- }
-
- let mut align = dl.aggregate_align;
- let mut variant_layouts = variants
- .iter_enumerated()
- .map(|(j, v)| {
- let mut st = univariant_uninterned(
- cx,
- ty,
- v,
- &def.repr(),
- StructKind::AlwaysSized,
- )?;
- st.variants = Variants::Single { index: j };
-
- align = align.max(st.align);
-
- Ok(st)
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- let largest_variant_index = match variant_layouts
- .iter_enumerated()
- .max_by_key(|(_i, layout)| layout.size.bytes())
- .map(|(i, _layout)| i)
- {
- None => return Ok(None),
- Some(i) => i,
- };
-
- let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
- let needs_disc = |index: VariantIdx| {
- index != largest_variant_index && !absent(&variants[index])
- };
- let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
- ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
-
- let count = niche_variants.size_hint().1.unwrap() as u128;
-
- // Find the field with the largest niche
- let (field_index, niche, (niche_start, niche_scalar)) = match variants
- [largest_variant_index]
- .iter()
- .enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche?)))
- .max_by_key(|(_, niche)| niche.available(dl))
- .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
+ .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
{
- None => return Ok(None),
- Some(x) => x,
- };
-
- let niche_offset = niche.offset
- + variant_layouts[largest_variant_index].fields.offset(field_index);
- let niche_size = niche.value.size(dl);
- let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
-
- let all_variants_fit =
- variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
- if i == largest_variant_index {
- return true;
- }
-
- layout.largest_niche = None;
-
- if layout.size <= niche_offset {
- // This variant will fit before the niche.
- return true;
- }
-
- // Determine if it'll fit after the niche.
- let this_align = layout.align.abi;
- let this_offset = (niche_offset + niche_size).align_to(this_align);
-
- if this_offset + layout.size > size {
- return false;
- }
-
- // It'll fit, but we need to make some adjustments.
- match layout.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for (j, offset) in offsets.iter_mut().enumerate() {
- if !variants[i][j].is_zst() {
- *offset += this_offset;
- }
- }
- }
- _ => {
- panic!("Layout of fields should be Arbitrary for variants")
- }
- }
-
- // It can't be a Scalar or ScalarPair because the offset isn't 0.
- if !layout.abi.is_uninhabited() {
- layout.abi = Abi::Aggregate { sized: true };
- }
- layout.size += this_offset;
-
- true
- });
-
- if !all_variants_fit {
- return Ok(None);
- }
-
- let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
-
- let others_zst = variant_layouts
- .iter_enumerated()
- .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
- let same_size = size == variant_layouts[largest_variant_index].size;
- let same_align = align == variant_layouts[largest_variant_index].align;
-
- let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
- Abi::Uninhabited
- } else if same_size && same_align && others_zst {
- match variant_layouts[largest_variant_index].abi {
- // When the total alignment and size match, we can use the
- // same ABI as the scalar variant with the reserved niche.
- Abi::Scalar(_) => Abi::Scalar(niche_scalar),
- Abi::ScalarPair(first, second) => {
- // Only the niche is guaranteed to be initialised,
- // so use union layouts for the other primitive.
- if niche_offset == Size::ZERO {
- Abi::ScalarPair(niche_scalar, second.to_union())
- } else {
- Abi::ScalarPair(first.to_union(), niche_scalar)
- }
- }
- _ => Abi::Aggregate { sized: true },
- }
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let layout = LayoutS {
- variants: Variants::Multiple {
- tag: niche_scalar,
- tag_encoding: TagEncoding::Niche {
- untagged_variant: largest_variant_index,
- niche_variants,
- niche_start,
- },
- tag_field: 0,
- variants: IndexVec::new(),
- },
- fields: FieldsShape::Arbitrary {
- offsets: vec![niche_offset],
- memory_index: vec![0],
- },
- abi,
- largest_niche,
- size,
- align,
- };
-
- Ok(Some(TmpLayout { layout, variants: variant_layouts }))
- };
-
- let niche_filling_layout = calculate_niche_filling_layout()?;
-
- let (mut min, mut max) = (i128::MAX, i128::MIN);
- let discr_type = def.repr().discr_type();
- let bits = Integer::from_attr(cx, discr_type).size().bits();
- for (i, discr) in def.discriminants(tcx) {
- if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
- continue;
- }
- let mut x = discr.val as i128;
- if discr_type.is_signed() {
- // sign extend the raw representation to be an i128
- x = (x << (128 - bits)) >> (128 - bits);
- }
- if x < min {
- min = x;
- }
- if x > max {
- max = x;
- }
- }
- // We might have no inhabited variants, so pretend there's at least one.
- if (min, max) == (i128::MAX, i128::MIN) {
- min = 0;
- max = 0;
- }
- assert!(min <= max, "discriminant range is {}...{}", min, max);
- let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
-
- let mut align = dl.aggregate_align;
- let mut size = Size::ZERO;
-
- // We're interested in the smallest alignment, so start large.
- let mut start_align = Align::from_bytes(256).unwrap();
- assert_eq!(Integer::for_align(dl, start_align), None);
-
- // repr(C) on an enum tells us to make a (tag, union) layout,
- // so we need to grow the prefix alignment to be at least
- // the alignment of the union. (This value is used both for
- // determining the alignment of the overall enum, and the
- // determining the alignment of the payload after the tag.)
- let mut prefix_align = min_ity.align(dl).abi;
- if def.repr().c() {
- for fields in &variants {
- for field in fields {
- prefix_align = prefix_align.max(field.align.abi);
- }
- }
- }
-
- // Create the set of structs that represent each variant.
- let mut layout_variants = variants
- .iter_enumerated()
- .map(|(i, field_layouts)| {
- let mut st = univariant_uninterned(
- cx,
- ty,
- &field_layouts,
- &def.repr(),
- StructKind::Prefixed(min_ity.size(), prefix_align),
- )?;
- st.variants = Variants::Single { index: i };
- // Find the first field we can't move later
- // to make room for a larger discriminant.
- for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
- if !field.is_zst() || field.align.abi.bytes() != 1 {
- start_align = start_align.min(field.align.abi);
- break;
- }
- }
- size = cmp::max(size, st.size);
- align = align.max(st.align);
- Ok(st)
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- // Align the maximum variant size to the largest alignment.
- size = size.align_to(align.abi);
-
- if size.bytes() >= dl.obj_size_bound() {
- return Err(LayoutError::SizeOverflow(ty));
- }
-
- let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
- if typeck_ity < min_ity {
- // It is a bug if Layout decided on a greater discriminant size than typeck for
- // some reason at this point (based on values discriminant can take on). Mostly
- // because this discriminant will be loaded, and then stored into variable of
- // type calculated by typeck. Consider such case (a bug): typeck decided on
- // byte-sized discriminant, but layout thinks we need a 16-bit to store all
- // discriminant values. That would be a bug, because then, in codegen, in order
- // to store this 16-bit discriminant into 8-bit sized temporary some of the
- // space necessary to represent would have to be discarded (or layout is wrong
- // on thinking it needs 16 bits)
- bug!(
- "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
- min_ity,
- typeck_ity
- );
- // However, it is fine to make discr type however large (as an optimisation)
- // after this point – we’ll just truncate the value we load in codegen.
- }
-
- // Check to see if we should use a different type for the
- // discriminant. We can safely use a type with the same size
- // as the alignment of the first field of each variant.
- // We increase the size of the discriminant to avoid LLVM copying
- // padding when it doesn't need to. This normally causes unaligned
- // load/stores and excessive memcpy/memset operations. By using a
- // bigger integer size, LLVM can be sure about its contents and
- // won't be so conservative.
-
- // Use the initial field alignment
- let mut ity = if def.repr().c() || def.repr().int.is_some() {
- min_ity
- } else {
- Integer::for_align(dl, start_align).unwrap_or(min_ity)
- };
-
- // If the alignment is not larger than the chosen discriminant size,
- // don't use the alignment as the final size.
- if ity <= min_ity {
- ity = min_ity;
- } else {
- // Patch up the variants' first few fields.
- let old_ity_size = min_ity.size();
- let new_ity_size = ity.size();
- for variant in &mut layout_variants {
- match variant.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for i in offsets {
- if *i <= old_ity_size {
- assert_eq!(*i, old_ity_size);
- *i = new_ity_size;
+ let param_env = tcx.param_env(def.did());
+ def.is_struct()
+ && match def.variants().iter().next().and_then(|x| x.fields.last()) {
+ Some(last_field) => {
+ tcx.type_of(last_field.did).is_sized(tcx, param_env)
}
+ None => false,
}
- // We might be making the struct larger.
- if variant.size <= old_ity_size {
- variant.size = new_ity_size;
- }
- }
- _ => bug!(),
- }
- }
- }
-
- let tag_mask = ity.size().unsigned_int_max();
- let tag = Scalar::Initialized {
- value: Int(ity, signed),
- valid_range: WrappingRange {
- start: (min as u128 & tag_mask),
- end: (max as u128 & tag_mask),
- },
- };
- let mut abi = Abi::Aggregate { sized: true };
-
- if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- } else if tag.size(dl) == size {
- // Make sure we only use scalar layout when the enum is entirely its
- // own tag (i.e. it has no padding nor any non-ZST variant fields).
- abi = Abi::Scalar(tag);
- } else {
- // Try to use a ScalarPair for all tagged enums.
- let mut common_prim = None;
- let mut common_prim_initialized_in_all_variants = true;
- for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
- let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
- bug!();
- };
- let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
- let (field, offset) = match (fields.next(), fields.next()) {
- (None, None) => {
- common_prim_initialized_in_all_variants = false;
- continue;
- }
- (Some(pair), None) => pair,
- _ => {
- common_prim = None;
- break;
- }
- };
- let prim = match field.abi {
- Abi::Scalar(scalar) => {
- common_prim_initialized_in_all_variants &=
- matches!(scalar, Scalar::Initialized { .. });
- scalar.primitive()
- }
- _ => {
- common_prim = None;
- break;
- }
- };
- if let Some(pair) = common_prim {
- // This is pretty conservative. We could go fancier
- // by conflating things like i32 and u32, or even
- // realising that (u8, u8) could just cohabit with
- // u16 or even u32.
- if pair != (prim, offset) {
- common_prim = None;
- break;
- }
- } else {
- common_prim = Some((prim, offset));
- }
- }
- if let Some((prim, offset)) = common_prim {
- let prim_scalar = if common_prim_initialized_in_all_variants {
- scalar_unit(prim)
- } else {
- // Common prim might be uninit.
- Scalar::Union { value: prim }
- };
- let pair = scalar_pair(cx, tag, prim_scalar);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!(),
- };
- if pair_offsets[0] == Size::ZERO
- && pair_offsets[1] == *offset
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- }
-
- // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
- // variants to ensure they are consistent. This is because a downcast is
- // semantically a NOP, and thus should not affect layout.
- if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
- for variant in &mut layout_variants {
- // We only do this for variants with fields; the others are not accessed anyway.
- // Also do not overwrite any already existing "clever" ABIs.
- if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
- variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
- variant.size = cmp::max(variant.size, size);
- variant.align.abi = cmp::max(variant.align.abi, align.abi);
- }
- }
- }
-
- let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
- let tagged_layout = LayoutS {
- variants: Variants::Multiple {
- tag,
- tag_encoding: TagEncoding::Direct,
- tag_field: 0,
- variants: IndexVec::new(),
- },
- fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
- largest_niche,
- abi,
- align,
- size,
- };
-
- let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
- let mut best_layout = match (tagged_layout, niche_filling_layout) {
- (tl, Some(nl)) => {
- // Pick the smaller layout; otherwise,
- // pick the layout with the larger niche; otherwise,
- // pick tagged as it has simpler codegen.
- use Ordering::*;
- let niche_size = |tmp_l: &TmpLayout<'_>| {
- tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
- };
- match (
- tl.layout.size.cmp(&nl.layout.size),
- niche_size(&tl).cmp(&niche_size(&nl)),
- ) {
- (Greater, _) => nl,
- (Equal, Less) => nl,
- _ => tl,
- }
- }
- (tl, None) => tl,
- };
-
- // Now we can intern the variant layouts and store them in the enum layout.
- best_layout.layout.variants = match best_layout.layout.variants {
- Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
- tag,
- tag_encoding,
- tag_field,
- variants: best_layout
- .variants
- .into_iter()
- .map(|layout| tcx.intern_layout(layout))
- .collect(),
- },
- _ => bug!(),
- };
-
- tcx.intern_layout(best_layout.layout)
+ },
+ )
+ .ok_or(LayoutError::SizeOverflow(ty))?,
+ )
}
// Types with no meaningful known layout.
@@ -1488,8 +597,8 @@ fn generator_layout<'tcx>(
let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
let Some(info) = tcx.generator_layout(def_id) else {
- return Err(LayoutError::Unknown(ty));
- };
+ return Err(LayoutError::Unknown(ty));
+ };
let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
// Build a prefix layout, including "promoting" all ineligible
@@ -1592,8 +701,8 @@ fn generator_layout<'tcx>(
variant.variants = Variants::Single { index };
let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
- bug!();
- };
+ bug!();
+ };
// Now, stitch the promoted and variant-only fields back together in
// the order they are mentioned by our GeneratorLayout.
@@ -1640,13 +749,13 @@ fn generator_layout<'tcx>(
size = size.max(variant.size);
align = align.max(variant.align);
- Ok(tcx.intern_layout(variant))
+ Ok(variant)
})
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
size = size.align_to(align.abi);
- let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+ let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
Abi::Uninhabited
} else {
Abi::Aggregate { sized: true }
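
The hunk above drops the hand-rolled enum layout code, whose final step chose between the tagged layout and an optional niche-filling layout: prefer the smaller of the two, then the one with the larger niche, then fall back to tagged for its simpler codegen. Below is a minimal, self-contained sketch of just that tie-breaking rule; `Candidate`, its fields, and `pick_enum_layout` are names invented for the illustration, not rustc types.

use std::cmp::Ordering;

// Illustrative stand-in for a computed candidate layout (not a rustc type).
#[derive(Clone, Copy, Debug, PartialEq)]
struct Candidate {
    size_bytes: u64,
    // How many invalid bit-patterns the layout leaves available as a niche.
    niche_available: u128,
}

// Mirrors the removed tie-breaking rule: prefer the smaller layout, then the
// one with the larger niche, and otherwise keep the tagged layout because it
// has simpler codegen.
fn pick_enum_layout(tagged: Candidate, niche_filling: Option<Candidate>) -> Candidate {
    match niche_filling {
        None => tagged,
        Some(nf) => match (
            tagged.size_bytes.cmp(&nf.size_bytes),
            tagged.niche_available.cmp(&nf.niche_available),
        ) {
            (Ordering::Greater, _) => nf,
            (Ordering::Equal, Ordering::Less) => nf,
            _ => tagged,
        },
    }
}

fn main() {
    // The niche-filling candidate is strictly smaller, so it wins on size
    // alone (this is what makes `Option<&u8>` pointer-sized).
    let tagged = Candidate { size_bytes: 16, niche_available: 0 };
    let niche = Candidate { size_bytes: 8, niche_available: 0 };
    assert_eq!(pick_enum_layout(tagged, Some(niche)), niche);

    // Equal sizes: the candidate that exposes the larger remaining niche wins.
    let tagged = Candidate { size_bytes: 8, niche_available: 0 };
    let niche = Candidate { size_bytes: 8, niche_available: 255 };
    assert_eq!(pick_enum_layout(tagged, Some(niche)), niche);
}
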
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index 100926ad4..a5311dbd1 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -12,7 +12,7 @@ pub(super) fn sanity_check_layout<'tcx>(
layout: &TyAndLayout<'tcx>,
) {
// Type-level uninhabitedness should always imply ABI uninhabitedness.
- if cx.tcx.conservative_is_privately_uninhabited(cx.param_env.and(layout.ty)) {
+ if layout.ty.is_privately_uninhabited(cx.tcx, cx.param_env) {
assert!(layout.abi.is_uninhabited());
}
@@ -20,283 +20,293 @@ pub(super) fn sanity_check_layout<'tcx>(
bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
}
- if cfg!(debug_assertions) {
- /// Yields non-ZST fields of the type
- fn non_zst_fields<'tcx, 'a>(
- cx: &'a LayoutCx<'tcx, TyCtxt<'tcx>>,
- layout: &'a TyAndLayout<'tcx>,
- ) -> impl Iterator<Item = (Size, TyAndLayout<'tcx>)> + 'a {
- (0..layout.layout.fields().count()).filter_map(|i| {
- let field = layout.field(cx, i);
- // Also checking `align == 1` here leads to test failures in
- // `layout/zero-sized-array-union.rs`, where a type has a zero-size field with
- // alignment 4 that still gets ignored during layout computation (which is okay
- // since other fields already force alignment 4).
- let zst = field.is_zst();
- (!zst).then(|| (layout.fields.offset(i), field))
- })
- }
+ if !cfg!(debug_assertions) {
+ // Stop here, the rest is kind of expensive.
+ return;
+ }
- fn skip_newtypes<'tcx>(
- cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
- layout: &TyAndLayout<'tcx>,
- ) -> TyAndLayout<'tcx> {
- if matches!(layout.layout.variants(), Variants::Multiple { .. }) {
- // Definitely not a newtype of anything.
- return *layout;
- }
- let mut fields = non_zst_fields(cx, layout);
- let Some(first) = fields.next() else {
- // No fields here, so this could be a primitive or enum -- either way it's not a newtype around a thing
- return *layout
- };
- if fields.next().is_none() {
- let (offset, first) = first;
- if offset == Size::ZERO && first.layout.size() == layout.size {
- // This is a newtype, so keep recursing.
- // FIXME(RalfJung): I don't think it would be correct to do any checks for
- // alignment here, so we don't. Is that correct?
- return skip_newtypes(cx, &first);
- }
+ /// Yields non-ZST fields of the type
+ fn non_zst_fields<'tcx, 'a>(
+ cx: &'a LayoutCx<'tcx, TyCtxt<'tcx>>,
+ layout: &'a TyAndLayout<'tcx>,
+ ) -> impl Iterator<Item = (Size, TyAndLayout<'tcx>)> + 'a {
+ (0..layout.layout.fields().count()).filter_map(|i| {
+ let field = layout.field(cx, i);
+ // Also checking `align == 1` here leads to test failures in
+ // `layout/zero-sized-array-union.rs`, where a type has a zero-size field with
+ // alignment 4 that still gets ignored during layout computation (which is okay
+ // since other fields already force alignment 4).
+ let zst = field.is_zst();
+ (!zst).then(|| (layout.fields.offset(i), field))
+ })
+ }
+
+ fn skip_newtypes<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ layout: &TyAndLayout<'tcx>,
+ ) -> TyAndLayout<'tcx> {
+ if matches!(layout.layout.variants(), Variants::Multiple { .. }) {
+ // Definitely not a newtype of anything.
+ return *layout;
+ }
+ let mut fields = non_zst_fields(cx, layout);
+ let Some(first) = fields.next() else {
+ // No fields here, so this could be a primitive or enum -- either way it's not a newtype around a thing
+ return *layout
+ };
+ if fields.next().is_none() {
+ let (offset, first) = first;
+ if offset == Size::ZERO && first.layout.size() == layout.size {
+ // This is a newtype, so keep recursing.
+ // FIXME(RalfJung): I don't think it would be correct to do any checks for
+ // alignment here, so we don't. Is that correct?
+ return skip_newtypes(cx, &first);
}
- // No more newtypes here.
- *layout
}
+ // No more newtypes here.
+ *layout
+ }
- fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayout<'tcx>) {
- match layout.layout.abi() {
- Abi::Scalar(scalar) => {
- // No padding in scalars.
- let size = scalar.size(cx);
- let align = scalar.align(cx).abi;
- assert_eq!(
- layout.layout.size(),
- size,
- "size mismatch between ABI and layout in {layout:#?}"
- );
- assert_eq!(
- layout.layout.align().abi,
- align,
- "alignment mismatch between ABI and layout in {layout:#?}"
- );
- // Check that this matches the underlying field.
- let inner = skip_newtypes(cx, layout);
- assert!(
- matches!(inner.layout.abi(), Abi::Scalar(_)),
- "`Scalar` type {} is newtype around non-`Scalar` type {}",
- layout.ty,
- inner.ty
- );
- match inner.layout.fields() {
- FieldsShape::Primitive => {
- // Fine.
- }
- FieldsShape::Union(..) => {
- // FIXME: I guess we could also check something here? Like, look at all fields?
- return;
- }
- FieldsShape::Arbitrary { .. } => {
- // Should be an enum, the only field is the discriminant.
- assert!(
- inner.ty.is_enum(),
- "`Scalar` layout for non-primitive non-enum type {}",
- inner.ty
- );
- assert_eq!(
- inner.layout.fields().count(),
- 1,
- "`Scalar` layout for multiple-field type in {inner:#?}",
- );
- let offset = inner.layout.fields().offset(0);
- let field = inner.field(cx, 0);
- // The field should be at the right offset, and match the `scalar` layout.
- assert_eq!(
- offset,
- Size::ZERO,
- "`Scalar` field at non-0 offset in {inner:#?}",
- );
- assert_eq!(
- field.size, size,
- "`Scalar` field with bad size in {inner:#?}",
- );
- assert_eq!(
- field.align.abi, align,
- "`Scalar` field with bad align in {inner:#?}",
- );
- assert!(
- matches!(field.abi, Abi::Scalar(_)),
- "`Scalar` field with bad ABI in {inner:#?}",
- );
- }
- _ => {
- panic!("`Scalar` layout for non-primitive non-enum type {}", inner.ty);
- }
+ fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayout<'tcx>) {
+ match layout.layout.abi() {
+ Abi::Scalar(scalar) => {
+ // No padding in scalars.
+ let size = scalar.size(cx);
+ let align = scalar.align(cx).abi;
+ assert_eq!(
+ layout.layout.size(),
+ size,
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ assert_eq!(
+ layout.layout.align().abi,
+ align,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ // Check that this matches the underlying field.
+ let inner = skip_newtypes(cx, layout);
+ assert!(
+ matches!(inner.layout.abi(), Abi::Scalar(_)),
+ "`Scalar` type {} is newtype around non-`Scalar` type {}",
+ layout.ty,
+ inner.ty
+ );
+ match inner.layout.fields() {
+ FieldsShape::Primitive => {
+ // Fine.
}
- }
- Abi::ScalarPair(scalar1, scalar2) => {
- // Sanity-check scalar pairs. These are a bit more flexible and support
- // padding, but we can at least ensure both fields actually fit into the layout
- // and the alignment requirement has not been weakened.
- let size1 = scalar1.size(cx);
- let align1 = scalar1.align(cx).abi;
- let size2 = scalar2.size(cx);
- let align2 = scalar2.align(cx).abi;
- assert!(
- layout.layout.align().abi >= cmp::max(align1, align2),
- "alignment mismatch between ABI and layout in {layout:#?}",
- );
- let field2_offset = size1.align_to(align2);
- assert!(
- layout.layout.size() >= field2_offset + size2,
- "size mismatch between ABI and layout in {layout:#?}"
- );
- // Check that the underlying pair of fields matches.
- let inner = skip_newtypes(cx, layout);
- assert!(
- matches!(inner.layout.abi(), Abi::ScalarPair(..)),
- "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
- layout.ty,
- inner.ty
- );
- if matches!(inner.layout.variants(), Variants::Multiple { .. }) {
- // FIXME: ScalarPair for enums is enormously complicated and it is very hard
- // to check anything about them.
+ FieldsShape::Union(..) => {
+ // FIXME: I guess we could also check something here? Like, look at all fields?
return;
}
- match inner.layout.fields() {
- FieldsShape::Arbitrary { .. } => {
- // Checked below.
- }
- FieldsShape::Union(..) => {
- // FIXME: I guess we could also check something here? Like, look at all fields?
- return;
- }
- _ => {
- panic!("`ScalarPair` layout with unexpected field shape in {inner:#?}");
- }
+ FieldsShape::Arbitrary { .. } => {
+ // Should be an enum, the only field is the discriminant.
+ assert!(
+ inner.ty.is_enum(),
+ "`Scalar` layout for non-primitive non-enum type {}",
+ inner.ty
+ );
+ assert_eq!(
+ inner.layout.fields().count(),
+ 1,
+ "`Scalar` layout for multiple-field type in {inner:#?}",
+ );
+ let offset = inner.layout.fields().offset(0);
+ let field = inner.field(cx, 0);
+ // The field should be at the right offset, and match the `scalar` layout.
+ assert_eq!(
+ offset,
+ Size::ZERO,
+ "`Scalar` field at non-0 offset in {inner:#?}",
+ );
+ assert_eq!(field.size, size, "`Scalar` field with bad size in {inner:#?}",);
+ assert_eq!(
+ field.align.abi, align,
+ "`Scalar` field with bad align in {inner:#?}",
+ );
+ assert!(
+ matches!(field.abi, Abi::Scalar(_)),
+ "`Scalar` field with bad ABI in {inner:#?}",
+ );
+ }
+ _ => {
+ panic!("`Scalar` layout for non-primitive non-enum type {}", inner.ty);
}
- let mut fields = non_zst_fields(cx, &inner);
- let (offset1, field1) = fields.next().unwrap_or_else(|| {
- panic!("`ScalarPair` layout for type with not even one non-ZST field: {inner:#?}")
- });
- let (offset2, field2) = fields.next().unwrap_or_else(|| {
- panic!("`ScalarPair` layout for type with less than two non-ZST fields: {inner:#?}")
- });
- assert!(
- fields.next().is_none(),
- "`ScalarPair` layout for type with at least three non-ZST fields: {inner:#?}"
- );
- // The fields might be in opposite order.
- let (offset1, field1, offset2, field2) = if offset1 <= offset2 {
- (offset1, field1, offset2, field2)
- } else {
- (offset2, field2, offset1, field1)
- };
- // The fields should be at the right offset, and match the `scalar` layout.
- assert_eq!(
- offset1,
- Size::ZERO,
- "`ScalarPair` first field at non-0 offset in {inner:#?}",
- );
- assert_eq!(
- field1.size, size1,
- "`ScalarPair` first field with bad size in {inner:#?}",
- );
- assert_eq!(
- field1.align.abi, align1,
- "`ScalarPair` first field with bad align in {inner:#?}",
- );
- assert!(
- matches!(field1.abi, Abi::Scalar(_)),
- "`ScalarPair` first field with bad ABI in {inner:#?}",
- );
- assert_eq!(
- offset2, field2_offset,
- "`ScalarPair` second field at bad offset in {inner:#?}",
- );
- assert_eq!(
- field2.size, size2,
- "`ScalarPair` second field with bad size in {inner:#?}",
- );
- assert_eq!(
- field2.align.abi, align2,
- "`ScalarPair` second field with bad align in {inner:#?}",
- );
- assert!(
- matches!(field2.abi, Abi::Scalar(_)),
- "`ScalarPair` second field with bad ABI in {inner:#?}",
- );
}
- Abi::Vector { count, element } => {
- // No padding in vectors. Alignment can be strengthened, though.
- assert!(
- layout.layout.align().abi >= element.align(cx).abi,
- "alignment mismatch between ABI and layout in {layout:#?}"
- );
- let size = element.size(cx) * count;
- assert_eq!(
- layout.layout.size(),
- size.align_to(cx.data_layout().vector_align(size).abi),
- "size mismatch between ABI and layout in {layout:#?}"
- );
+ }
+ Abi::ScalarPair(scalar1, scalar2) => {
+ // Sanity-check scalar pairs. Computing the expected size and alignment is a bit of work.
+ let size1 = scalar1.size(cx);
+ let align1 = scalar1.align(cx).abi;
+ let size2 = scalar2.size(cx);
+ let align2 = scalar2.align(cx).abi;
+ let align = cmp::max(align1, align2);
+ let field2_offset = size1.align_to(align2);
+ let size = (field2_offset + size2).align_to(align);
+ assert_eq!(
+ layout.layout.size(),
+ size,
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ assert_eq!(
+ layout.layout.align().abi,
+ align,
+ "alignment mismatch between ABI and layout in {layout:#?}",
+ );
+ // Check that the underlying pair of fields matches.
+ let inner = skip_newtypes(cx, layout);
+ assert!(
+ matches!(inner.layout.abi(), Abi::ScalarPair(..)),
+ "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
+ layout.ty,
+ inner.ty
+ );
+ if matches!(inner.layout.variants(), Variants::Multiple { .. }) {
+ // FIXME: ScalarPair for enums is enormously complicated and it is very hard
+ // to check anything about them.
+ return;
+ }
+ match inner.layout.fields() {
+ FieldsShape::Arbitrary { .. } => {
+ // Checked below.
+ }
+ FieldsShape::Union(..) => {
+ // FIXME: I guess we could also check something here? Like, look at all fields?
+ return;
+ }
+ _ => {
+ panic!("`ScalarPair` layout with unexpected field shape in {inner:#?}");
+ }
}
- Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
+ let mut fields = non_zst_fields(cx, &inner);
+ let (offset1, field1) = fields.next().unwrap_or_else(|| {
+ panic!(
+ "`ScalarPair` layout for type with not even one non-ZST field: {inner:#?}"
+ )
+ });
+ let (offset2, field2) = fields.next().unwrap_or_else(|| {
+ panic!(
+ "`ScalarPair` layout for type with less than two non-ZST fields: {inner:#?}"
+ )
+ });
+ assert!(
+ fields.next().is_none(),
+ "`ScalarPair` layout for type with at least three non-ZST fields: {inner:#?}"
+ );
+ // The fields might be in opposite order.
+ let (offset1, field1, offset2, field2) = if offset1 <= offset2 {
+ (offset1, field1, offset2, field2)
+ } else {
+ (offset2, field2, offset1, field1)
+ };
+ // The fields should be at the right offset, and match the `scalar` layout.
+ assert_eq!(
+ offset1,
+ Size::ZERO,
+ "`ScalarPair` first field at non-0 offset in {inner:#?}",
+ );
+ assert_eq!(
+ field1.size, size1,
+ "`ScalarPair` first field with bad size in {inner:#?}",
+ );
+ assert_eq!(
+ field1.align.abi, align1,
+ "`ScalarPair` first field with bad align in {inner:#?}",
+ );
+ assert!(
+ matches!(field1.abi, Abi::Scalar(_)),
+ "`ScalarPair` first field with bad ABI in {inner:#?}",
+ );
+ assert_eq!(
+ offset2, field2_offset,
+ "`ScalarPair` second field at bad offset in {inner:#?}",
+ );
+ assert_eq!(
+ field2.size, size2,
+ "`ScalarPair` second field with bad size in {inner:#?}",
+ );
+ assert_eq!(
+ field2.align.abi, align2,
+ "`ScalarPair` second field with bad align in {inner:#?}",
+ );
+ assert!(
+ matches!(field2.abi, Abi::Scalar(_)),
+ "`ScalarPair` second field with bad ABI in {inner:#?}",
+ );
}
+ Abi::Vector { count, element } => {
+ // No padding in vectors, except possibly for trailing padding to make the size a multiple of align.
+ let size = element.size(cx) * count;
+ let align = cx.data_layout().vector_align(size).abi;
+ let size = size.align_to(align); // needed e.g. for vectors of size 3
+ assert!(align >= element.align(cx).abi); // just sanity-checking `vector_align`.
+ assert_eq!(
+ layout.layout.size(),
+ size,
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ assert_eq!(
+ layout.layout.align().abi,
+ align,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ // FIXME: Do some kind of check of the inner type, like for Scalar and ScalarPair.
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
}
+ }
- check_layout_abi(cx, layout);
+ check_layout_abi(cx, layout);
- if let Variants::Multiple { variants, .. } = &layout.variants {
- for variant in variants.iter() {
- // No nested "multiple".
- assert!(matches!(variant.variants(), Variants::Single { .. }));
- // Variants should have the same or a smaller size as the full thing,
- // and same for alignment.
- if variant.size() > layout.size {
- bug!(
- "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
- layout.size.bytes(),
- variant.size().bytes(),
- )
- }
- if variant.align().abi > layout.align.abi {
- bug!(
- "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
- layout.align.abi.bytes(),
- variant.align().abi.bytes(),
- )
- }
- // Skip empty variants.
- if variant.size() == Size::ZERO
- || variant.fields().count() == 0
- || variant.abi().is_uninhabited()
- {
- // These are never actually accessed anyway, so we can skip the coherence check
- // for them. They also fail that check, since they have
-                // `Aggregate`/`Uninhabited` ABI even when the main type is
- // `Scalar`/`ScalarPair`. (Note that sometimes, variants with fields have size
- // 0, and sometimes, variants without fields have non-0 size.)
- continue;
- }
- // The top-level ABI and the ABI of the variants should be coherent.
- let scalar_coherent = |s1: Scalar, s2: Scalar| {
- s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx)
- };
- let abi_coherent = match (layout.abi, variant.abi()) {
- (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
- (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
- scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
- }
- (Abi::Uninhabited, _) => true,
- (Abi::Aggregate { .. }, _) => true,
- _ => false,
- };
- if !abi_coherent {
- bug!(
- "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
- variant
- );
+ if let Variants::Multiple { variants, .. } = &layout.variants {
+ for variant in variants.iter() {
+ // No nested "multiple".
+ assert!(matches!(variant.variants, Variants::Single { .. }));
+ // Variants should have the same or a smaller size as the full thing,
+ // and same for alignment.
+ if variant.size > layout.size {
+ bug!(
+ "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
+ layout.size.bytes(),
+ variant.size.bytes(),
+ )
+ }
+ if variant.align.abi > layout.align.abi {
+ bug!(
+ "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
+ layout.align.abi.bytes(),
+ variant.align.abi.bytes(),
+ )
+ }
+ // Skip empty variants.
+ if variant.size == Size::ZERO
+ || variant.fields.count() == 0
+ || variant.abi.is_uninhabited()
+ {
+ // These are never actually accessed anyway, so we can skip the coherence check
+ // for them. They also fail that check, since they have
+            // `Aggregate`/`Uninhabited` ABI even when the main type is
+ // `Scalar`/`ScalarPair`. (Note that sometimes, variants with fields have size
+ // 0, and sometimes, variants without fields have non-0 size.)
+ continue;
+ }
+ // The top-level ABI and the ABI of the variants should be coherent.
+ let scalar_coherent =
+ |s1: Scalar, s2: Scalar| s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx);
+ let abi_coherent = match (layout.abi, variant.abi) {
+ (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
+ (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+ scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
}
+ (Abi::Uninhabited, _) => true,
+ (Abi::Aggregate { .. }, _) => true,
+ _ => false,
+ };
+ if !abi_coherent {
+ bug!(
+ "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
+ variant
+ );
}
}
}
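
The reworked `ScalarPair` branch above now recomputes the exact size and alignment it expects: the second field starts at `size1` rounded up to `align2`, and the whole pair is `field2_offset + size2` rounded up to the pair's alignment, `max(align1, align2)`. A small, self-contained sketch of that arithmetic follows; the `align_to` and `scalar_pair_size_align` helpers are invented for the example and are not the rustc `Size`/`Align` API.

// Round `x` up to the next multiple of `align` (which must be a power of two).
fn align_to(x: u64, align: u64) -> u64 {
    (x + align - 1) & !(align - 1)
}

// Expected size and alignment of a `ScalarPair`, mirroring the assertions in
// the check above: field 2 starts at `size1` aligned to `align2`, and the
// whole pair is padded out to its own alignment.
fn scalar_pair_size_align(size1: u64, align1: u64, size2: u64, align2: u64) -> (u64, u64) {
    let align = align1.max(align2);
    let field2_offset = align_to(size1, align2);
    let size = align_to(field2_offset + size2, align);
    (size, align)
}

fn main() {
    // A pair shaped like (u8, u32): the second field is pushed to offset 4,
    // giving an 8-byte, 4-aligned layout with 3 bytes of interior padding.
    assert_eq!(scalar_pair_size_align(1, 1, 4, 4), (8, 4));
    // A pair shaped like (u32, u8): no interior padding, but 3 bytes of tail
    // padding keep the size a multiple of the alignment.
    assert_eq!(scalar_pair_size_align(4, 4, 1, 1), (8, 4));
}
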
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
index cce5a79dd..7ad5cbc01 100644
--- a/compiler/rustc_ty_utils/src/lib.rs
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -29,6 +29,7 @@ mod layout;
mod layout_sanity_check;
mod needs_drop;
pub mod representability;
+mod structural_match;
mod ty;
pub fn provide(providers: &mut Providers) {
@@ -42,4 +43,5 @@ pub fn provide(providers: &mut Providers) {
representability::provide(providers);
ty::provide(providers);
instance::provide(providers);
+ structural_match::provide(providers);
}
diff --git a/compiler/rustc_ty_utils/src/structural_match.rs b/compiler/rustc_ty_utils/src/structural_match.rs
new file mode 100644
index 000000000..a55bb7e7e
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/structural_match.rs
@@ -0,0 +1,44 @@
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_trait_selection::traits::{ObligationCause, ObligationCtxt};
+
+/// This method returns true if and only if `adt_ty` itself has been marked as
+/// eligible for structural-match: namely, if it implements both
+/// `StructuralPartialEq` and `StructuralEq` (which are respectively injected by
+/// `#[derive(PartialEq)]` and `#[derive(Eq)]`).
+///
+/// Note that this does *not* recursively check if the substructure of `adt_ty`
+/// implements the traits.
+fn has_structural_eq_impls<'tcx>(tcx: TyCtxt<'tcx>, adt_ty: Ty<'tcx>) -> bool {
+ let ref infcx = tcx.infer_ctxt().build();
+ let cause = ObligationCause::dummy();
+
+ let ocx = ObligationCtxt::new(infcx);
+ // require `#[derive(PartialEq)]`
+ let structural_peq_def_id =
+ infcx.tcx.require_lang_item(LangItem::StructuralPeq, Some(cause.span));
+ ocx.register_bound(cause.clone(), ty::ParamEnv::empty(), adt_ty, structural_peq_def_id);
+ // for now, require `#[derive(Eq)]`. (Doing so is a hack to work around
+ // the type `for<'a> fn(&'a ())` failing to implement `Eq` itself.)
+ let structural_teq_def_id =
+ infcx.tcx.require_lang_item(LangItem::StructuralTeq, Some(cause.span));
+ ocx.register_bound(cause, ty::ParamEnv::empty(), adt_ty, structural_teq_def_id);
+
+ // We deliberately skip *reporting* fulfillment errors (via
+ // `report_fulfillment_errors`), for two reasons:
+ //
+ // 1. The error messages would mention `std::marker::StructuralPartialEq`
+ // (a trait which is solely meant as an implementation detail
+ // for now), and
+ //
+ // 2. We are sometimes doing future-incompatibility lints for
+ // now, so we do not want unconditional errors here.
+ ocx.select_all_or_error().is_empty()
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.has_structural_eq_impls = has_structural_eq_impls;
+}
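
The new `has_structural_eq_impls` query above only asks whether the ADT itself carries the two marker impls injected by `#[derive(PartialEq)]` and `#[derive(Eq)]`; those impls are what allow a constant of that type to be used as a pattern. The snippet below is an ordinary user-level illustration of the property being queried, not compiler code.

// Deriving both traits injects the `StructuralPartialEq`/`StructuralEq`
// marker impls, so a constant of this type is accepted as a pattern.
#[derive(PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32,
}

const ORIGIN: Point = Point { x: 0, y: 0 };

fn is_origin(p: Point) -> bool {
    // This constant-as-pattern use is what the structural-match check guards;
    // without `#[derive(PartialEq, Eq)]` on `Point` it would be rejected.
    matches!(p, ORIGIN)
}

fn main() {
    assert!(is_origin(Point { x: 0, y: 0 }));
    assert!(!is_origin(Point { x: 1, y: 0 }));
}
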
diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs
index 3eebb4ace..5fc9bcac1 100644
--- a/compiler/rustc_ty_utils/src/ty.rs
+++ b/compiler/rustc_ty_utils/src/ty.rs
@@ -49,12 +49,9 @@ fn sized_constraint_for_ty<'tcx>(
// it on the impl.
let Some(sized_trait) = tcx.lang_items().sized_trait() else { return vec![ty] };
- let sized_predicate = ty::Binder::dummy(ty::TraitRef {
- def_id: sized_trait,
- substs: tcx.mk_substs_trait(ty, &[]),
- })
- .without_const()
- .to_predicate(tcx);
+ let sized_predicate = ty::Binder::dummy(tcx.mk_trait_ref(sized_trait, [ty]))
+ .without_const()
+ .to_predicate(tcx);
let predicates = tcx.predicates_of(adtdef.did()).predicates;
if predicates.iter().any(|(p, _)| *p == sized_predicate) { vec![] } else { vec![ty] }
}
@@ -108,12 +105,7 @@ fn adt_sized_constraint(tcx: TyCtxt<'_>, def_id: DefId) -> &[Ty<'_>] {
/// See `ParamEnv` struct definition for details.
fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
- // The param_env of an impl Trait type is its defining function's param_env
- if let Some(parent) = ty::is_impl_trait_defn(tcx, def_id) {
- return param_env(tcx, parent.to_def_id());
- }
// Compute the bounds on Self and the type parameters.
-
let ty::InstantiatedPredicates { mut predicates, .. } =
tcx.predicates_of(def_id).instantiate_identity(tcx);
@@ -413,63 +405,7 @@ fn issue33140_self_ty(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Ty<'_>> {
/// Check if a function is async.
fn asyncness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::IsAsync {
let node = tcx.hir().get_by_def_id(def_id.expect_local());
- if let Some(fn_kind) = node.fn_kind() { fn_kind.asyncness() } else { hir::IsAsync::NotAsync }
-}
-
-/// Don't call this directly: use ``tcx.conservative_is_privately_uninhabited`` instead.
-pub fn conservative_is_privately_uninhabited_raw<'tcx>(
- tcx: TyCtxt<'tcx>,
- param_env_and: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
-) -> bool {
- let (param_env, ty) = param_env_and.into_parts();
- match ty.kind() {
- ty::Never => {
- debug!("ty::Never =>");
- true
- }
- ty::Adt(def, _) if def.is_union() => {
- debug!("ty::Adt(def, _) if def.is_union() =>");
- // For now, `union`s are never considered uninhabited.
- false
- }
- ty::Adt(def, substs) => {
- debug!("ty::Adt(def, _) if def.is_not_union() =>");
- // Any ADT is uninhabited if either:
- // (a) It has no variants (i.e. an empty `enum`);
- // (b) Each of its variants (a single one in the case of a `struct`) has at least
- // one uninhabited field.
- def.variants().iter().all(|var| {
- var.fields.iter().any(|field| {
- let ty = tcx.bound_type_of(field.did).subst(tcx, substs);
- tcx.conservative_is_privately_uninhabited(param_env.and(ty))
- })
- })
- }
- ty::Tuple(fields) => {
- debug!("ty::Tuple(..) =>");
- fields.iter().any(|ty| tcx.conservative_is_privately_uninhabited(param_env.and(ty)))
- }
- ty::Array(ty, len) => {
- debug!("ty::Array(ty, len) =>");
- match len.try_eval_usize(tcx, param_env) {
- Some(0) | None => false,
- // If the array is definitely non-empty, it's uninhabited if
- // the type of its elements is uninhabited.
- Some(1..) => tcx.conservative_is_privately_uninhabited(param_env.and(*ty)),
- }
- }
- ty::Ref(..) => {
- debug!("ty::Ref(..) =>");
- // References to uninitialised memory is valid for any type, including
- // uninhabited types, in unsafe code, so we treat all references as
- // inhabited.
- false
- }
- _ => {
- debug!("_ =>");
- false
- }
- }
+ node.fn_sig().map_or(hir::IsAsync::NotAsync, |sig| sig.header.asyncness)
}
pub fn provide(providers: &mut ty::query::Providers) {
@@ -481,7 +417,6 @@ pub fn provide(providers: &mut ty::query::Providers) {
instance_def_size_estimate,
issue33140_self_ty,
impl_defaultness,
- conservative_is_privately_uninhabited: conservative_is_privately_uninhabited_raw,
..*providers
};
}
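
For reference, the deleted `conservative_is_privately_uninhabited_raw` above encoded a small recursive rule set: `!` is uninhabited, an ADT is uninhabited when it has no variants or when every variant contains an uninhabited field (unions excepted), a tuple is uninhabited when any element is, a definitely non-empty array is uninhabited when its element type is, and references are always treated as inhabited. Its caller in the layout sanity check now uses `is_privately_uninhabited` instead. Purely as an illustration of those rules, here is a toy model over an invented `Ty` enum; it is not the rustc type representation.

// Toy model of the cases the removed query distinguished (illustrative only).
enum Ty {
    Never,
    Tuple(Vec<Ty>),
    Array(Box<Ty>, usize),
    // Variants, each a list of field types (a struct is a single variant).
    Adt(Vec<Vec<Ty>>),
    Ref(Box<Ty>),
    Other,
}

// Conservative uninhabitedness, mirroring the removed rules: references are
// always treated as inhabited, an ADT with no variants is uninhabited, and
// other aggregates are uninhabited only when recursion finds an uninhabited
// component in every variant (or in the tuple/array element).
fn is_uninhabited(ty: &Ty) -> bool {
    match ty {
        Ty::Never => true,
        Ty::Tuple(fields) => fields.iter().any(is_uninhabited),
        Ty::Array(elem, len) => *len > 0 && is_uninhabited(elem),
        Ty::Adt(variants) => variants.iter().all(|fields| fields.iter().any(is_uninhabited)),
        Ty::Ref(_) | Ty::Other => false,
    }
}

fn main() {
    // `enum Void {}` has no variants, so the `all` over an empty iterator is true.
    assert!(is_uninhabited(&Ty::Adt(vec![])));
    // A tuple or non-empty array with an uninhabited component is uninhabited.
    assert!(is_uninhabited(&Ty::Tuple(vec![Ty::Other, Ty::Never])));
    assert!(is_uninhabited(&Ty::Array(Box::new(Ty::Never), 3)));
    // References are always treated as inhabited by this conservative check.
    assert!(!is_uninhabited(&Ty::Ref(Box::new(Ty::Never))));
}
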