Diffstat (limited to 'compiler/rustc_const_eval')
-rw-r--r--  compiler/rustc_const_eval/Cargo.toml                                  |   1
-rw-r--r--  compiler/rustc_const_eval/locales/en-US.ftl                           |  83
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs              |   6
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs                |   5
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs                   |  23
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs                       |   4
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs                  |  13
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs                               |  18
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs                       |  22
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs               | 238
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs               |  10
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs                     |  26
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs                 | 118
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs |  11
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs                    |  40
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs                     |  70
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs                        |   1
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs                    | 198
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs                   |  10
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs                      | 191
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs                 |   6
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs                       |  20
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs                 | 118
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs                       |   8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs                   |  32
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs                    |  16
-rw-r--r--  compiler/rustc_const_eval/src/lib.rs                                  |  11
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs         |  14
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs           |   4
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs           |  73
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs       |   4
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs             |  42
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs                   |  65
-rw-r--r--  compiler/rustc_const_eval/src/util/aggregate.rs                       |  77
-rw-r--r--  compiler/rustc_const_eval/src/util/call_kind.rs                       |   3
-rw-r--r--  compiler/rustc_const_eval/src/util/check_validity_requirement.rs (renamed from compiler/rustc_const_eval/src/util/might_permit_raw_init.rs) | 65
-rw-r--r--  compiler/rustc_const_eval/src/util/mod.rs                             |   6
-rw-r--r--  compiler/rustc_const_eval/src/util/type_name.rs                       |   1
38 files changed, 913 insertions, 740 deletions
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
index 51489e293..98ac36c1c 100644
--- a/compiler/rustc_const_eval/Cargo.toml
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -19,7 +19,6 @@ rustc_infer = { path = "../rustc_infer" }
rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
-rustc_query_system = { path = "../rustc_query_system" }
rustc_session = { path = "../rustc_session" }
rustc_target = { path = "../rustc_target" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_const_eval/locales/en-US.ftl b/compiler/rustc_const_eval/locales/en-US.ftl
new file mode 100644
index 000000000..33bb116d6
--- /dev/null
+++ b/compiler/rustc_const_eval/locales/en-US.ftl
@@ -0,0 +1,83 @@
+const_eval_unstable_in_stable =
+ const-stable function cannot use `#[feature({$gate})]`
+ .unstable_sugg = if it is not part of the public API, make this function unstably const
+ .bypass_sugg = otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks
+
+const_eval_thread_local_access =
+ thread-local statics cannot be accessed at compile-time
+
+const_eval_static_access =
+ {$kind}s cannot refer to statics
+ .help = consider extracting the value of the `static` to a `const`, and referring to that
+ .teach_note = `static` and `const` variables can refer to other `const` variables. A `const` variable, however, cannot refer to a `static` variable.
+ .teach_help = To fix this, the value can be extracted to a `const` and then used.
+
+const_eval_raw_ptr_to_int =
+ pointers cannot be cast to integers during const eval
+ .note = at compile-time, pointers do not have an integer value
+ .note2 = avoiding this restriction via `transmute`, `union`, or raw pointers leads to compile-time undefined behavior
+
+const_eval_raw_ptr_comparison =
+ pointers cannot be reliably compared during const eval
+ .note = see issue #53020 <https://github.com/rust-lang/rust/issues/53020> for more information
+
+const_eval_panic_non_str = argument to `panic!()` in a const context must have type `&str`
+
+const_eval_mut_deref =
+ mutation through a reference is not allowed in {$kind}s
+
+const_eval_transient_mut_borrow = mutable references are not allowed in {$kind}s
+
+const_eval_transient_mut_borrow_raw = raw mutable references are not allowed in {$kind}s
+
+const_eval_max_num_nodes_in_const = maximum number of nodes exceeded in constant {$global_const_id}
+
+const_eval_unallowed_fn_pointer_call = function pointer calls are not allowed in {$kind}s
+
+const_eval_unstable_const_fn = `{$def_path}` is not yet stable as a const fn
+
+const_eval_unallowed_mutable_refs =
+ mutable references are not allowed in the final value of {$kind}s
+ .teach_note =
+ References in statics and constants may only refer to immutable values.\n\n
+ Statics are shared everywhere, and if they refer to mutable data one might violate memory
+ safety since holding multiple mutable references to shared data is not allowed.\n\n
+ If you really want global mutable state, try using static mut or a global UnsafeCell.
+
+const_eval_unallowed_mutable_refs_raw =
+ raw mutable references are not allowed in the final value of {$kind}s
+ .teach_note =
+ References in statics and constants may only refer to immutable values.\n\n
+ Statics are shared everywhere, and if they refer to mutable data one might violate memory
+ safety since holding multiple mutable references to shared data is not allowed.\n\n
+ If you really want global mutable state, try using static mut or a global UnsafeCell.
+
+const_eval_non_const_fmt_macro_call =
+ cannot call non-const formatting macro in {$kind}s
+
+const_eval_non_const_fn_call =
+ cannot call non-const fn `{$def_path_str}` in {$kind}s
+
+const_eval_unallowed_op_in_const_context =
+ {$msg}
+
+const_eval_unallowed_heap_allocations =
+ allocations are not allowed in {$kind}s
+ .label = allocation not allowed in {$kind}s
+ .teach_note =
+ The value of statics and constants must be known at compile time, and they live for the entire lifetime of a program. Creating a boxed value allocates memory on the heap at runtime, and therefore cannot be done at compile time.
+
+const_eval_unallowed_inline_asm =
+ inline assembly is not allowed in {$kind}s
+
+const_eval_interior_mutable_data_refer =
+ {$kind}s cannot refer to interior mutable data
+ .label = this borrow of an interior mutable value may end up in the final value
+ .help = to fix this, the value can be extracted to a separate `static` item and then referenced
+ .teach_note =
+ A constant containing interior mutable data behind a reference can allow you to modify that data.
+ This would allow multiple uses of a constant to see different values and circumvent
+ the `Send` and `Sync` requirements for shared mutable data, which is unsound.
+
+const_eval_interior_mutability_borrow =
+ cannot borrow here, since the borrowed element may contain interior mutability
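The Fluent entries above are consumed by `#[derive(Diagnostic)]` structs in `src/errors.rs`; the hunks for that file further down show the slugs being prefixed (`teach_note` becoming `const_eval_teach_note`). As a rough sketch of that pairing, modeled on those hunks (the struct shown is assumed for illustration and compiles only inside rustc, where `rustc_macros` and `rustc_span` exist):

```rust
use rustc_macros::Diagnostic;
use rustc_span::Span;

// `#[diag(...)]` names the Fluent message `const_eval_thread_local_access`
// from the .ftl file above; `{$kind}`-style placeholders in other messages
// are filled from struct fields with matching names.
#[derive(Diagnostic)]
#[diag(const_eval_thread_local_access)]
pub(crate) struct ThreadLocalAccessErr {
    #[primary_span]
    pub span: Span,
}
```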
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 18e01567c..7564ba17b 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -54,7 +54,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
trace!(
"eval_body_using_ecx: pushing stack frame for global: {}{}",
- with_no_trimmed_paths!(ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()))),
+ with_no_trimmed_paths!(ecx.tcx.def_path_str(cid.instance.def_id())),
cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
);
@@ -180,13 +180,13 @@ pub(super) fn op_to_const<'tcx>(
(ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
}
(None, _offset) => (
- ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
+ ecx.tcx.mk_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
b"" as &[u8],
)),
0,
),
};
- let len = b.to_machine_usize(ecx).unwrap();
+ let len = b.to_target_usize(ecx).unwrap();
let start = start.try_into().unwrap();
let len: usize = len.try_into().unwrap();
ConstValue::Slice { data, start, end: start + len }
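A recurring theme in this diff is the rename of the `machine_usize`/`machine_isize` helpers to `target_usize`/`target_isize`: the interpreter runs on the host but must model integers at the *target's* pointer width. A minimal, self-contained sketch of the distinction (the helper below is hypothetical, not rustc's API):

```rust
// Hypothetical helper: the widest usize value for a target with the
// given pointer width, independent of the host's own usize.
fn target_usize_max(pointer_width_bits: u32) -> u128 {
    (1u128 << pointer_width_bits) - 1
}

fn main() {
    // A 64-bit host cross-compiling for a 32-bit target must still
    // wrap/saturate at the 32-bit boundary.
    assert_eq!(target_usize_max(32), u32::MAX as u128);
    assert_eq!(target_usize_max(64), u64::MAX as u128);
}
```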
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index 351c70130..9eaab1f47 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -17,7 +17,8 @@ pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
let parent_id = tcx.local_parent(def_id);
- tcx.def_kind(parent_id) == DefKind::Impl && tcx.constness(parent_id) == hir::Constness::Const
+ matches!(tcx.def_kind(parent_id), DefKind::Impl { .. })
+ && tcx.constness(parent_id) == hir::Constness::Const
}
/// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
@@ -66,7 +67,7 @@ fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
if cfg!(debug_assertions) && stab.promotable {
let sig = tcx.fn_sig(def_id);
assert_eq!(
- sig.unsafety(),
+ sig.skip_binder().unsafety(),
hir::Unsafety::Normal,
"don't mark const unsafe fns as promotable",
// https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 4709514c8..a44f70ed0 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -244,7 +244,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
assert_eq!(args.len(), 2);
let ptr = self.read_pointer(&args[0])?;
- let target_align = self.read_scalar(&args[1])?.to_machine_usize(self)?;
+ let target_align = self.read_scalar(&args[1])?.to_target_usize(self)?;
if !target_align.is_power_of_two() {
throw_ub_format!("`align_offset` called with non-power-of-two align: {}", target_align);
@@ -276,7 +276,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
Ok(ControlFlow::Break(()))
} else {
// Not alignable in const, return `usize::MAX`.
- let usize_max = Scalar::from_machine_usize(self.machine_usize_max(), self);
+ let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
self.write_scalar(usize_max, dest)?;
self.return_to_block(ret)?;
Ok(ControlFlow::Break(()))
@@ -470,8 +470,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
}
sym::const_allocate => {
- let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
- let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+ let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
+ let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
let align = match Align::from_bytes(align) {
Ok(a) => a,
@@ -487,8 +487,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
}
sym::const_deallocate => {
let ptr = ecx.read_pointer(&args[0])?;
- let size = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
- let align = ecx.read_scalar(&args[2])?.to_machine_usize(ecx)?;
+ let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
+ let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;
let size = Size::from_bytes(size);
let align = match Align::from_bytes(align) {
@@ -561,8 +561,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
}
- fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
- // The step limit has already been hit in a previous call to `before_terminator`.
+ fn increment_const_eval_counter(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ // The step limit has already been hit in a previous call to `increment_const_eval_counter`.
if ecx.machine.steps_remaining == 0 {
return Ok(());
}
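`before_terminator` becomes `increment_const_eval_counter`, driven by the new `StatementKind::ConstEvalCounter` (see the machine.rs hunks further down). The underlying mechanism is a plain step budget; a stand-alone sketch with illustrative names, not the interpreter's real API:

```rust
// Decrement a budget each time the counter statement is hit and
// report when it runs out.
struct Budget {
    steps_remaining: usize,
}

impl Budget {
    fn tick(&mut self) -> Result<(), &'static str> {
        if self.steps_remaining == 0 {
            // Real const eval raises a lint / hard error at this point.
            return Err("constant evaluation is taking a long time");
        }
        self.steps_remaining -= 1;
        Ok(())
    }
}

fn main() {
    let mut budget = Budget { steps_remaining: 2 };
    assert!(budget.tick().is_ok());
    assert!(budget.tick().is_ok());
    assert!(budget.tick().is_err()); // limit hit on the third step
}
```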
@@ -622,10 +622,9 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
let alloc = alloc.inner();
if is_write {
// Write access. These are never allowed, but we give a targeted error message.
- if alloc.mutability == Mutability::Not {
- Err(err_ub!(WriteToReadOnly(alloc_id)).into())
- } else {
- Err(ConstEvalErrKind::ModifiedGlobal.into())
+ match alloc.mutability {
+ Mutability::Not => Err(err_ub!(WriteToReadOnly(alloc_id)).into()),
+ Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal.into()),
}
} else {
// Read access. These are usually allowed, with some exceptions.
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 01b2b4b5d..3cdf1e6e3 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -107,7 +107,7 @@ pub(crate) fn try_destructure_mir_constant<'tcx>(
// We go to `usize` as we cannot allocate anything bigger anyway.
let (field_count, variant, down) = match val.ty().kind() {
- ty::Array(_, len) => (len.eval_usize(tcx, param_env) as usize, None, op),
+ ty::Array(_, len) => (len.eval_target_usize(tcx, param_env) as usize, None, op),
ty::Adt(def, _) if def.variants().is_empty() => {
throw_ub!(Unreachable)
}
@@ -155,7 +155,7 @@ pub(crate) fn deref_mir_constant<'tcx>(
// In case of unsized types, figure out the real type behind.
MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
ty::Str => bug!("there's no sized equivalent of a `str`"),
- ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
+ ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_target_usize(&tcx).unwrap()),
_ => bug!(
"type {} should not have metadata, but had {:?}",
mplace.layout.ty,
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 498c00873..a73f778d4 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -151,7 +151,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
// FIXME(oli-obk): we can probably encode closures just like structs
| ty::Closure(..)
| ty::Generator(..)
- | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+ | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => Err(ValTreeCreationError::NonSupportedType),
}
}
@@ -193,7 +193,7 @@ fn get_info_on_unsized_field<'tcx>(
// Have to adjust type for ty::Str
let unsized_inner_ty = match unsized_inner_ty.kind() {
- ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+ ty::Str => tcx.types.u8,
_ => unsized_inner_ty,
};
@@ -216,7 +216,7 @@ fn create_pointee_place<'tcx>(
let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
let unsized_inner_ty = match unsized_inner_ty.kind() {
- ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+ ty::Str => tcx.types.u8,
_ => unsized_inner_ty,
};
let unsized_inner_ty_size =
@@ -239,7 +239,7 @@ fn create_pointee_place<'tcx>(
MPlaceTy::from_aligned_ptr_with_meta(
ptr.into(),
layout,
- MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
+ MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx)),
)
} else {
create_mplace_from_layout(ecx, ty)
@@ -314,6 +314,7 @@ pub fn valtree_to_const_value<'tcx>(
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
+ | ty::GeneratorWitnessMIR(..)
| ty::FnPtr(_)
| ty::RawPtr(_)
| ty::Str
@@ -354,7 +355,7 @@ fn valtree_into_mplace<'tcx>(
let imm = match inner_ty.kind() {
ty::Slice(_) | ty::Str => {
let len = valtree.unwrap_branch().len();
- let len_scalar = Scalar::from_machine_usize(len as u64, &tcx);
+ let len_scalar = Scalar::from_target_usize(len as u64, &tcx);
Immediate::ScalarPair(
Scalar::from_maybe_pointer((*pointee_place).ptr, &tcx),
@@ -425,7 +426,7 @@ fn valtree_into_mplace<'tcx>(
place
.offset_with_meta(
offset,
- MemPlaceMeta::Meta(Scalar::from_machine_usize(
+ MemPlaceMeta::Meta(Scalar::from_target_usize(
num_elems as u64,
&tcx,
)),
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 4b0550767..f8b7cc6d7 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -9,12 +9,12 @@ pub(crate) struct UnstableInStable {
#[primary_span]
pub span: Span,
#[suggestion(
- unstable_sugg,
+ const_eval_unstable_sugg,
code = "#[rustc_const_unstable(feature = \"...\", issue = \"...\")]\n",
applicability = "has-placeholders"
)]
#[suggestion(
- bypass_sugg,
+ const_eval_bypass_sugg,
code = "#[rustc_allow_const_fn_unstable({gate})]\n",
applicability = "has-placeholders"
)]
@@ -35,15 +35,15 @@ pub(crate) struct StaticAccessErr {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
- #[note(teach_note)]
- #[help(teach_help)]
+ #[note(const_eval_teach_note)]
+ #[help(const_eval_teach_help)]
pub teach: Option<()>,
}
#[derive(Diagnostic)]
#[diag(const_eval_raw_ptr_to_int)]
#[note]
-#[note(note2)]
+#[note(const_eval_note2)]
pub(crate) struct RawPtrToIntErr {
#[primary_span]
pub span: Span,
@@ -118,7 +118,7 @@ pub(crate) struct UnallowedMutableRefs {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
- #[note(teach_note)]
+ #[note(const_eval_teach_note)]
pub teach: Option<()>,
}
@@ -128,7 +128,7 @@ pub(crate) struct UnallowedMutableRefsRaw {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
- #[note(teach_note)]
+ #[note(const_eval_teach_note)]
pub teach: Option<()>,
}
#[derive(Diagnostic)]
@@ -163,7 +163,7 @@ pub(crate) struct UnallowedHeapAllocations {
#[label]
pub span: Span,
pub kind: ConstContext,
- #[note(teach_note)]
+ #[note(const_eval_teach_note)]
pub teach: Option<()>,
}
@@ -184,7 +184,7 @@ pub(crate) struct InteriorMutableDataRefer {
#[help]
pub opt_help: Option<()>,
pub kind: ConstContext,
- #[note(teach_note)]
+ #[note(const_eval_teach_note)]
pub teach: Option<()>,
}
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index b2c847d3f..2be5ed896 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -126,7 +126,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
let vtable = Scalar::from_maybe_pointer(vtable, self);
let data = self.read_immediate(src)?.to_scalar();
- let _assert_pointer_sized = data.to_pointer(self)?;
+ let _assert_pointer_like = data.to_pointer(self)?;
let val = Immediate::ScalarPair(data, vtable);
self.write_immediate(val, dest)?;
} else {
@@ -231,7 +231,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// First cast to usize.
let scalar = src.to_scalar();
let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
- let addr = addr.to_machine_usize(self)?;
+ let addr = addr.to_target_usize(self)?;
// Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(&self, addr)?;
@@ -312,6 +312,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
+ /// `src` is a *pointer to* a `source_ty`, and in `dest` we should store a pointer to the same
+ /// data at type `cast_ty`.
fn unsize_into_ptr(
&mut self,
src: &OpTy<'tcx, M::Provenance>,
@@ -328,11 +330,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(&ty::Array(_, length), &ty::Slice(_)) => {
let ptr = self.read_scalar(src)?;
// u64 cast is from usize to u64, which is always good
- let val =
- Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+ let val = Immediate::new_slice(
+ ptr,
+ length.eval_target_usize(*self.tcx, self.param_env),
+ self,
+ );
self.write_immediate(val, dest)
}
- (ty::Dynamic(data_a, ..), ty::Dynamic(data_b, ..)) => {
+ (ty::Dynamic(data_a, _, ty::Dyn), ty::Dynamic(data_b, _, ty::Dyn)) => {
let val = self.read_immediate(src)?;
if data_a.principal() == data_b.principal() {
// A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
@@ -356,7 +361,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => {
- span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+ span_bug!(
+ self.cur_span(),
+ "invalid pointer unsizing {:?} -> {:?}",
+ src.layout.ty,
+ cast_ty
+ )
}
}
}
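For context on `unsize_into_ptr`: the array-to-slice arm builds a wide pointer whose metadata is the array length, which is exactly the unsizing coercion Rust performs at the language level. A small runnable illustration:

```rust
fn main() {
    let array: &[i32; 3] = &[1, 2, 3];
    let slice: &[i32] = array; // unsizing coercion; length 3 becomes metadata
    assert_eq!(slice.len(), 3);
    // Wide pointers carry (data ptr, len), so they are twice the size of
    // thin ones on the same target.
    assert_eq!(
        std::mem::size_of::<&[i32]>(),
        2 * std::mem::size_of::<&[i32; 3]>()
    );
}
```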
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
new file mode 100644
index 000000000..557e72124
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -0,0 +1,238 @@
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
+
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Writes the discriminant of the given variant.
+ #[instrument(skip(self), level = "trace")]
+ pub fn write_discriminant(
+ &mut self,
+ variant_index: VariantIdx,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ // Layout computation excludes uninhabited variants from consideration,
+ // so there's no way to represent those variants in the given layout.
+ // Essentially, uninhabited variants do not have a tag that corresponds to their
+ // discriminant, so we cannot do anything here.
+ // When evaluating we will always error before even getting here, but ConstProp 'executes'
+ // dead code, so we cannot ICE here.
+ if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantWritten)
+ }
+
+ match dest.layout.variants {
+ abi::Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ abi::Variants::Multiple {
+ tag_encoding: TagEncoding::Direct,
+ tag: tag_layout,
+ tag_field,
+ ..
+ } => {
+ // No need to validate the discriminant here because the
+ // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+ let discr_val =
+ dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+ // raw discriminants for enums are isize or bigger during
+ // their computation, but the in-memory tag is the smallest possible
+ // representation
+ let size = tag_layout.size(self);
+ let tag_val = size.truncate(discr_val);
+
+ let tag_dest = self.place_field(dest, tag_field)?;
+ self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+ }
+ abi::Variants::Multiple {
+ tag_encoding:
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
+ tag: tag_layout,
+ tag_field,
+ ..
+ } => {
+ // No need to validate the discriminant here because the
+ // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+ if variant_index != untagged_variant {
+ let variants_start = niche_variants.start().as_u32();
+ let variant_index_relative = variant_index
+ .as_u32()
+ .checked_sub(variants_start)
+ .expect("overflow computing relative variant idx");
+ // We need to use machine arithmetic when taking into account `niche_start`:
+ // tag_val = variant_index_relative + niche_start_val
+ let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
+ let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+ let variant_index_relative_val =
+ ImmTy::from_uint(variant_index_relative, tag_layout);
+ let tag_val = self.binary_op(
+ mir::BinOp::Add,
+ &variant_index_relative_val,
+ &niche_start_val,
+ )?;
+ // Write result.
+ let niche_dest = self.place_field(dest, tag_field)?;
+ self.write_immediate(*tag_val, &niche_dest)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Read discriminant, return the runtime value as well as the variant index.
+ /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
+ #[instrument(skip(self), level = "trace")]
+ pub fn read_discriminant(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
+ trace!("read_discriminant_value {:#?}", op.layout);
+ // Get type and layout of the discriminant.
+ let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+ trace!("discriminant type: {:?}", discr_layout.ty);
+
+ // We use "discriminant" to refer to the value associated with a particular enum variant.
+ // This is not to be confused with its "variant index", which just gives its position in the
+ // declared list of variants -- they can differ with explicitly assigned discriminants.
+ // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+ // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
+ let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+ Variants::Single { index } => {
+ let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+ Some(discr) => {
+ // This type actually has discriminants.
+ assert_eq!(discr.ty, discr_layout.ty);
+ Scalar::from_uint(discr.val, discr_layout.size)
+ }
+ None => {
+ // On a type without actual discriminants, variant is 0.
+ assert_eq!(index.as_u32(), 0);
+ Scalar::from_uint(index.as_u32(), discr_layout.size)
+ }
+ };
+ return Ok((discr, index));
+ }
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+ (tag, tag_encoding, tag_field)
+ }
+ };
+
+ // There are *three* layouts that come into play here:
+ // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+ // the `Scalar` we return.
+ // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+ // and used to interpret the value we read from the tag field.
+ // For the return value, a cast to `discr_layout` is performed.
+ // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+ // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+ // Get layout for tag.
+ let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
+
+ // Read tag and sanity-check `tag_layout`.
+ let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+ assert_eq!(tag_layout.size, tag_val.layout.size);
+ assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+ trace!("tag value: {}", tag_val);
+
+ // Figure out which discriminant and variant this corresponds to.
+ Ok(match *tag_encoding {
+ TagEncoding::Direct => {
+ let scalar = tag_val.to_scalar();
+ // Generate a specific error if `tag_val` is not an integer.
+ // (`tag_bits` itself is only used for error messages below.)
+ let tag_bits = scalar
+ .try_to_int()
+ .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+ .assert_bits(tag_layout.size);
+ // Cast bits from tag layout to discriminant layout.
+ // After the checks we did above, this cannot fail, as
+ // discriminants are int-like.
+ let discr_val =
+ self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
+ let discr_bits = discr_val.assert_bits(discr_layout.size);
+ // Convert discriminant to variant index, and catch invalid discriminants.
+ let index = match *op.layout.ty.kind() {
+ ty::Adt(adt, _) => {
+ adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+ }
+ ty::Generator(def_id, substs, _) => {
+ let substs = substs.as_generator();
+ substs
+ .discriminants(def_id, *self.tcx)
+ .find(|(_, var)| var.val == discr_bits)
+ }
+ _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+ }
+ .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
+ // Return the cast value, and the index.
+ (discr_val, index.0)
+ }
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
+ let tag_val = tag_val.to_scalar();
+ // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+ // discriminant (encoded in niche/tag) and variant index are the same.
+ let variants_start = niche_variants.start().as_u32();
+ let variants_end = niche_variants.end().as_u32();
+ let variant = match tag_val.try_to_int() {
+ Err(dbg_val) => {
+ // So this is a pointer then, and casting to an int failed.
+ // Can only happen during CTFE.
+ // The niche must be just 0, and the ptr not null, then we know this is
+ // okay. Everything else, we conservatively reject.
+ let ptr_valid = niche_start == 0
+ && variants_start == variants_end
+ && !self.scalar_may_be_null(tag_val)?;
+ if !ptr_valid {
+ throw_ub!(InvalidTag(dbg_val))
+ }
+ untagged_variant
+ }
+ Ok(tag_bits) => {
+ let tag_bits = tag_bits.assert_bits(tag_layout.size);
+ // We need to use machine arithmetic to get the relative variant idx:
+ // variant_index_relative = tag_val - niche_start_val
+ let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+ let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+ let variant_index_relative_val =
+ self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+ let variant_index_relative =
+ variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+ // Check if this is in the range that indicates an actual discriminant.
+ if variant_index_relative <= u128::from(variants_end - variants_start) {
+ let variant_index_relative = u32::try_from(variant_index_relative)
+ .expect("we checked that this fits into a u32");
+ // Then computing the absolute variant idx should not overflow any more.
+ let variant_index = variants_start
+ .checked_add(variant_index_relative)
+ .expect("overflow computing absolute variant idx");
+ let variants_len = op
+ .layout
+ .ty
+ .ty_adt_def()
+ .expect("tagged layout for non adt")
+ .variants()
+ .len();
+ assert!(usize::try_from(variant_index).unwrap() < variants_len);
+ VariantIdx::from_u32(variant_index)
+ } else {
+ untagged_variant
+ }
+ }
+ };
+ // Compute the size of the scalar we need to return.
+ // No need to cast, because the variant index directly serves as discriminant and is
+ // encoded in the tag.
+ (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+ }
+ })
+ }
+}
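The niche arithmetic in this new file reduces to wrapping add/sub at the tag's width plus a range check. A stand-alone sketch of the encode/decode round trip (illustrative function names; the real code's truncation to the tag size and checked arithmetic are elided):

```rust
// tag = (variant_index - variants_start) + niche_start, with wrapping math.
fn encode_niche_tag(variant_index: u32, variants_start: u32, niche_start: u128) -> u128 {
    let relative = (variant_index - variants_start) as u128;
    relative.wrapping_add(niche_start)
}

// Invert: a tag in [niche_start, niche_start + range] names a niched
// variant; anything else decodes to the untagged variant.
fn decode_niche_tag(
    tag: u128,
    variants_start: u32,
    variants_end: u32,
    niche_start: u128,
    untagged: u32,
) -> u32 {
    let relative = tag.wrapping_sub(niche_start);
    if relative <= (variants_end - variants_start) as u128 {
        variants_start + relative as u32
    } else {
        untagged
    }
}

fn main() {
    // Round trip: a variant in the niche range survives encode/decode.
    assert_eq!(decode_niche_tag(encode_niche_tag(3, 1, 0x80), 1, 5, 0x80, 0), 3);
    // A tag outside the range decodes to the untagged variant.
    assert_eq!(decode_niche_tag(0x10, 1, 5, 0x80, 0), 0);
}
```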
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index d13fed7a9..3db102e48 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -489,7 +489,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Call this on things you got out of the MIR (so it is as generic as the current
/// stack frame), to bring it into the proper environment for this interpreter.
- pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+ pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<
+ T: TypeFoldable<TyCtxt<'tcx>>,
+ >(
&self,
value: T,
) -> Result<T, InterpError<'tcx>> {
@@ -498,7 +500,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Call this on things you got out of the MIR (so it is as generic as the provided
/// stack frame), to bring it into the proper environment for this interpreter.
- pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+ pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<TyCtxt<'tcx>>>(
&self,
frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
value: T,
@@ -632,14 +634,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Ok(Some((size, align)))
}
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
let vtable = metadata.unwrap_meta().to_pointer(self)?;
// Read size and align from vtable (already checks size).
Ok(Some(self.get_vtable_size_and_align(vtable)?))
}
ty::Slice(_) | ty::Str => {
- let len = metadata.unwrap_meta().to_machine_usize(self)?;
+ let len = metadata.unwrap_meta().to_target_usize(self)?;
let elem = layout.field(self, 0);
// Make sure the slice is not too big.
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 54528b1db..b220d21f6 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -30,15 +30,15 @@ use super::{
use crate::const_eval;
pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
- 'mir,
- 'tcx,
- MemoryKind = T,
- Provenance = AllocId,
- ExtraFnVal = !,
- FrameExtra = (),
- AllocExtra = (),
- MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
->;
+ 'mir,
+ 'tcx,
+ MemoryKind = T,
+ Provenance = AllocId,
+ ExtraFnVal = !,
+ FrameExtra = (),
+ AllocExtra = (),
+ MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
+ >;
struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
/// The ectx from which we intern.
@@ -135,7 +135,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
};
// link the alloc id to the actual allocation
leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id));
- let alloc = tcx.intern_const_alloc(alloc);
+ let alloc = tcx.mk_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
None
}
@@ -242,7 +242,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
let mplace = self.ecx.ref_to_mplace(&value)?;
assert_eq!(mplace.layout.ty, referenced_ty);
// Handle trait object vtables.
- if let ty::Dynamic(..) =
+ if let ty::Dynamic(_, _, ty::Dyn) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{
let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
@@ -437,7 +437,7 @@ pub fn intern_const_alloc_recursive<
alloc.mutability = Mutability::Not;
}
}
- let alloc = tcx.intern_const_alloc(alloc);
+ let alloc = tcx.mk_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() {
if leftover_allocations.insert(alloc_id) {
@@ -479,6 +479,6 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
f(self, &dest.into())?;
let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
alloc.mutability = Mutability::Not;
- Ok(self.tcx.intern_const_alloc(alloc))
+ Ok(self.tcx.mk_const_alloc(alloc))
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index cc7b6c91b..a29cdade0 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -11,7 +11,7 @@ use rustc_middle::mir::{
BinOp, NonDivergingIntrinsic,
};
use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf as _;
+use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
@@ -45,7 +45,7 @@ fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
let path = crate::util::type_name(tcx, ty);
let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
- tcx.intern_const_alloc(alloc)
+ tcx.mk_const_alloc(alloc)
}
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
@@ -71,7 +71,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
sym::pref_align_of => {
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
- ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
+ ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx)
}
sym::type_id => {
ensure_monomorphic_enough(tcx, tp_ty)?;
@@ -79,7 +79,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
}
sym::variant_count => match tp_ty.kind() {
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
- ty::Adt(adt, _) => ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx),
+ ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
throw_inval!(TooGeneric)
}
@@ -101,9 +101,10 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
| ty::Closure(_, _)
| ty::Generator(_, _, _)
| ty::GeneratorWitness(_)
+ | ty::GeneratorWitnessMIR(_, _)
| ty::Never
| ty::Tuple(_)
- | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
+ | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
},
other => bug!("`{}` is not a zero arg intrinsic", other),
})
@@ -155,7 +156,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => bug!(),
};
- self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
+ self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
}
sym::pref_align_of
@@ -209,19 +210,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
self.write_scalar(out_val, dest)?;
}
- sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
- let lhs = self.read_immediate(&args[0])?;
- let rhs = self.read_immediate(&args[1])?;
- let bin_op = match intrinsic_name {
- sym::add_with_overflow => BinOp::Add,
- sym::sub_with_overflow => BinOp::Sub,
- sym::mul_with_overflow => BinOp::Mul,
- _ => bug!(),
- };
- self.binop_with_overflow(
- bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest,
- )?;
- }
sym::saturating_add | sym::saturating_sub => {
let l = self.read_immediate(&args[0])?;
let r = self.read_immediate(&args[1])?;
@@ -301,7 +289,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
sym::offset => {
let ptr = self.read_pointer(&args[0])?;
- let offset_count = self.read_machine_isize(&args[1])?;
+ let offset_count = self.read_target_isize(&args[1])?;
let pointee_ty = substs.type_at(0);
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
@@ -309,7 +297,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
sym::arith_offset => {
let ptr = self.read_pointer(&args[0])?;
- let offset_count = self.read_machine_isize(&args[1])?;
+ let offset_count = self.read_target_isize(&args[1])?;
let pointee_ty = substs.type_at(0);
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -375,7 +363,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The signed form of the intrinsic allows this. If we interpret the
// difference as isize, we'll get the proper signed difference. If that
// seems *positive*, they were more than isize::MAX apart.
- let dist = val.to_machine_isize(self)?;
+ let dist = val.to_target_isize(self)?;
if dist >= 0 {
throw_ub_format!(
"`{}` called when first pointer is too far before second",
@@ -385,7 +373,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dist
} else {
// b >= a
- let dist = val.to_machine_isize(self)?;
+ let dist = val.to_target_isize(self)?;
// If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart.
if dist < 0 {
@@ -410,10 +398,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Perform division by size to compute return value.
let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
- assert!(0 <= dist && dist <= self.machine_isize_max());
+ assert!(0 <= dist && dist <= self.target_isize_max());
usize_layout
} else {
- assert!(self.machine_isize_min() <= dist && dist <= self.machine_isize_max());
+ assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
isize_layout
};
let pointee_layout = self.layout_of(substs.type_at(0))?;
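These bounds checks implement the semantics of the `ptr_offset_from` intrinsics that back `<*const T>::offset_from` and its unsigned sibling: a signed element distance for the signed form, a non-negative one for the unsigned form. A runnable illustration of the signed form:

```rust
fn main() {
    let a = [0u32; 8];
    let first: *const u32 = &a[1];
    let second: *const u32 = &a[5];
    // Signed distance in *elements*; both pointers lie in one allocation,
    // as the interpreter's checks require.
    assert_eq!(unsafe { second.offset_from(first) }, 4);
    assert_eq!(unsafe { first.offset_from(second) }, -4);
}
```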
@@ -430,48 +418,36 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| sym::assert_zero_valid
| sym::assert_mem_uninitialized_valid => {
let ty = instance.substs.type_at(0);
- let layout = self.layout_of(ty)?;
-
- // For *all* intrinsics we first check `is_uninhabited` to give a more specific
- // error message.
- if layout.abi.is_uninhabited() {
- // The run-time intrinsic panics just to get a good backtrace; here we abort
- // since there is no problem showing a backtrace even for aborts.
- M::abort(
- self,
- format!(
+ let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
+
+ let should_panic = !self
+ .tcx
+ .check_validity_requirement((requirement, self.param_env.and(ty)))
+ .map_err(|_| err_inval!(TooGeneric))?;
+
+ if should_panic {
+ let layout = self.layout_of(ty)?;
+
+ let msg = match requirement {
+ // For *all* intrinsics we first check `is_uninhabited` to give a more specific
+ // error message.
+ _ if layout.abi.is_uninhabited() => format!(
"aborted execution: attempted to instantiate uninhabited type `{}`",
ty
),
- )?;
- }
-
- if intrinsic_name == sym::assert_zero_valid {
- let should_panic = !self.tcx.permits_zero_init(layout);
-
- if should_panic {
- M::abort(
- self,
- format!(
- "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
- ty
- ),
- )?;
- }
- }
+ ValidityRequirement::Inhabited => bug!("handled earlier"),
+ ValidityRequirement::Zero => format!(
+ "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
+ ty
+ ),
+ ValidityRequirement::UninitMitigated0x01Fill => format!(
+ "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
+ ty
+ ),
+ ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
+ };
- if intrinsic_name == sym::assert_mem_uninitialized_valid {
- let should_panic = !self.tcx.permits_uninit_init(layout);
-
- if should_panic {
- M::abort(
- self,
- format!(
- "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
- ty
- ),
- )?;
- }
+ M::abort(self, msg)?;
}
}
sym::simd_insert => {
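The refactor above routes all three assert intrinsics through the `check_validity_requirement` query (note the renamed `util/check_validity_requirement.rs` in the diffstat). At the language level these intrinsics back the `mem::zeroed`/`mem::uninitialized` mitigations; a small illustration:

```rust
use std::mem;

fn main() {
    // u32 permits the all-zero bit pattern, so assert_zero_valid lets
    // this through.
    let x: u32 = unsafe { mem::zeroed() };
    assert_eq!(x, 0);
    // By contrast, `let r: &u8 = unsafe { mem::zeroed() };` aborts with
    // "attempted to zero-initialize type `&u8`, which is invalid" --
    // the message assembled in the hunk above.
}
```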
@@ -482,7 +458,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(input_len, dest_len, "Return vector length must match input length");
assert!(
index < dest_len,
- "Index `{}` must be in bounds of vector with length {}`",
+ "Index `{}` must be in bounds of vector with length {}",
index,
dest_len
);
@@ -502,7 +478,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (input, input_len) = self.operand_to_simd(&args[0])?;
assert!(
index < input_len,
- "index `{}` must be in bounds of vector with length `{}`",
+ "index `{}` must be in bounds of vector with length {}",
index,
input_len
);
@@ -524,12 +500,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::vtable_size => {
let ptr = self.read_pointer(&args[0])?;
let (size, _align) = self.get_vtable_size_and_align(ptr)?;
- self.write_scalar(Scalar::from_machine_usize(size.bytes(), self), dest)?;
+ self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
}
sym::vtable_align => {
let ptr = self.read_pointer(&args[0])?;
let (_size, align) = self.get_vtable_size_and_align(ptr)?;
- self.write_scalar(Scalar::from_machine_usize(align.bytes(), self), dest)?;
+ self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
}
_ => return Ok(false),
@@ -668,10 +644,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
- let count = self.read_machine_usize(&count)?;
+ let count = self.read_target_usize(&count)?;
let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
let (size, align) = (layout.size, layout.align.abi);
- // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+ // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
// but no actual allocation can be big enough for the difference to be noticeable.
let size = size.checked_mul(count, self).ok_or_else(|| {
err_ub_format!(
@@ -696,9 +672,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let dst = self.read_pointer(&dst)?;
let byte = self.read_scalar(&byte)?.to_u8()?;
- let count = self.read_machine_usize(&count)?;
+ let count = self.read_target_usize(&count)?;
- // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+ // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
// but no actual allocation can be big enough for the difference to be noticeable.
let len = layout
.size
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index 77c7b4bac..cf52299b7 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -78,13 +78,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
col: u32,
) -> MPlaceTy<'tcx, M::Provenance> {
let loc_details = &self.tcx.sess.opts.unstable_opts.location_detail;
+ // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+ // pointless, since that would require allocating more memory than these short strings.
let file = if loc_details.file {
self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
+ .unwrap()
} else {
// FIXME: This creates a new allocation each time. It might be preferable to
// perform this allocation only once, and re-use the `MPlaceTy`.
// See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
- self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not)
+ self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
};
let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
@@ -92,11 +95,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Allocate memory for `CallerLocation` struct.
let loc_ty = self
.tcx
- .bound_type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
- .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+ .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
+ .subst(*self.tcx, self.tcx.mk_substs(&[self.tcx.lifetimes.re_erased.into()]));
let loc_layout = self.layout_of(loc_ty).unwrap();
- // This can fail if rustc runs out of memory right here. Trying to emit an error would be
- // pointless, since that would require allocating more memory than a Location.
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields.
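The struct being allocated here is the one surfaced as `core::panic::Location`, populated through `#[track_caller]`. A runnable illustration of the same data on the language side:

```rust
// `#[track_caller]` makes Location::caller() report the *call site*,
// which is the (file, line, column) triple the interpreter writes here.
#[track_caller]
fn where_am_i() -> &'static std::panic::Location<'static> {
    std::panic::Location::caller()
}

fn main() {
    let loc = where_am_i();
    println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
}
```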
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 248953de8..92fa59aec 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -16,8 +16,8 @@ use rustc_target::spec::abi::Abi as CallAbi;
use crate::const_eval::CheckAlignment;
use super::{
- AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
- MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
+ AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx,
+ InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
};
/// Data returned by Machine::stack_pop,
@@ -105,10 +105,16 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Extra data stored in every allocation.
type AllocExtra: Debug + Clone + 'static;
+ /// Type for the bytes of the allocation.
+ type Bytes: AllocBytes + 'static;
+
/// Memory's allocation map
type MemoryMap: AllocMap<
AllocId,
- (MemoryKind<Self::MemoryKind>, Allocation<Self::Provenance, Self::AllocExtra>),
+ (
+ MemoryKind<Self::MemoryKind>,
+ Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>,
+ ),
> + Default
+ Clone;
@@ -147,8 +153,9 @@ pub trait Machine<'mir, 'tcx>: Sized {
true
}
- /// Whether CheckedBinOp MIR statements should actually check for overflow.
- fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+ /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually
+ /// check for overflow.
+ fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
/// Entry point for obtaining the MIR of anything that should get evaluated.
/// So not just functions and shims, but also const/static initializers, anonymous
@@ -244,12 +251,18 @@ pub trait Machine<'mir, 'tcx>: Sized {
}
/// Called before a basic block terminator is executed.
- /// You can use this to detect endlessly running programs.
#[inline]
fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
Ok(())
}
+ /// Called when the interpreter encounters a `StatementKind::ConstEvalCounter` instruction.
+ /// You can use this to detect long or endlessly running programs.
+ #[inline]
+ fn increment_const_eval_counter(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
/// Called before a global allocation is accessed.
/// `def_id` is `Some` if this is the "lazy" allocation of a static.
#[inline]
@@ -285,7 +298,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
fn adjust_alloc_base_pointer(
ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer,
- ) -> Pointer<Self::Provenance>;
+ ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
/// "Int-to-pointer cast"
fn ptr_from_addr_cast(
@@ -331,7 +344,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
id: AllocId,
alloc: Cow<'b, Allocation>,
kind: Option<MemoryKind<Self::MemoryKind>>,
- ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
+ ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>;
fn eval_inline_asm(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
@@ -452,6 +465,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
type AllocExtra = ();
type FrameExtra = ();
+ type Bytes = Box<[u8]>;
#[inline(always)]
fn use_addr_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
@@ -460,8 +474,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
}
#[inline(always)]
- fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
- true
+ fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ false
}
#[inline(always)]
@@ -499,8 +513,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
fn adjust_alloc_base_pointer(
_ecx: &InterpCx<$mir, $tcx, Self>,
ptr: Pointer<AllocId>,
- ) -> Pointer<AllocId> {
- ptr
+ ) -> InterpResult<$tcx, Pointer<AllocId>> {
+ Ok(ptr)
}
#[inline(always)]
@@ -511,7 +525,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
// Allow these casts, but make the pointer not dereferenceable.
// (I.e., they behave like transmutation.)
// This is correct because no pointers can ever be exposed in compile-time evaluation.
- Ok(Pointer::from_addr(addr))
+ Ok(Pointer::from_addr_invalid(addr))
}
#[inline(always)]
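machine.rs now threads a `type Bytes: AllocBytes` through `Allocation`, so a machine (Miri, most notably) can choose its own byte storage while const eval keeps `Box<[u8]>`. A reduced stand-alone sketch of that parameterization (trait and names here are illustrative, not rustc's):

```rust
// Stand-in for rustc's AllocBytes: byte storage that derefs to a slice.
trait AllocBytesLike: std::ops::Deref<Target = [u8]> {
    fn from_bytes(bytes: Vec<u8>) -> Self;
}

// The default storage, mirroring `type Bytes = Box<[u8]>` in the diff.
impl AllocBytesLike for Box<[u8]> {
    fn from_bytes(bytes: Vec<u8>) -> Self {
        bytes.into_boxed_slice()
    }
}

// Allocation is generic over its storage, defaulting to Box<[u8]>.
struct Allocation<Bytes: AllocBytesLike = Box<[u8]>> {
    bytes: Bytes,
}

fn main() {
    let alloc: Allocation = Allocation { bytes: vec![0u8; 4].into_boxed_slice() };
    assert_eq!(alloc.bytes.len(), 4);
}
```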
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 291bfb2b5..a3764a7d1 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -21,8 +21,9 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
use crate::const_eval::CheckAlignment;
use super::{
- alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
- InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
+ alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg,
+ GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance,
+ Scalar,
};
#[derive(Debug, PartialEq, Copy, Clone)]
@@ -114,16 +115,16 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
-pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra> {
- alloc: &'a Allocation<Prov, Extra>,
+pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
+ alloc: &'a Allocation<Prov, Extra, Bytes>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
-pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra> {
- alloc: &'a mut Allocation<Prov, Extra>,
+pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
+ alloc: &'a mut Allocation<Prov, Extra, Bytes>,
range: AllocRange,
tcx: TyCtxt<'tcx>,
alloc_id: AllocId,
@@ -171,7 +172,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => {}
}
// And we need to get the provenance.
- Ok(M::adjust_alloc_base_pointer(self, ptr))
+ M::adjust_alloc_base_pointer(self, ptr)
}
pub fn create_fn_alloc_ptr(
@@ -200,8 +201,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
- // We can `unwrap` since `alloc` contains no pointers.
- Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
+ self.allocate_raw_ptr(alloc, kind)
}
pub fn allocate_bytes_ptr(
@@ -210,10 +210,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
align: Align,
kind: MemoryKind<M::MemoryKind>,
mutability: Mutability,
- ) -> Pointer<M::Provenance> {
+ ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
let alloc = Allocation::from_bytes(bytes, align, mutability);
- // We can `unwrap` since `alloc` contains no pointers.
- self.allocate_raw_ptr(alloc, kind).unwrap()
+ self.allocate_raw_ptr(alloc, kind)
}
/// This can fail only if `alloc` contains provenance.
@@ -230,7 +229,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
);
let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
- Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
+ M::adjust_alloc_base_pointer(self, Pointer::from(id))
}
pub fn reallocate_ptr(
@@ -304,7 +303,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.into());
};
- if alloc.mutability == Mutability::Not {
+ if alloc.mutability.is_not() {
throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
}
if alloc_kind != kind {
@@ -427,7 +426,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub!(PointerOutOfBounds {
alloc_id,
alloc_size,
- ptr_offset: self.machine_usize_to_isize(offset.bytes()),
+ ptr_offset: self.target_usize_to_isize(offset.bytes()),
ptr_size: size,
msg,
})
@@ -485,7 +484,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
id: AllocId,
is_write: bool,
- ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
Some(GlobalAlloc::Memory(mem)) => {
// Memory of a constant or promoted or anonymous memory referenced by a static.
@@ -528,6 +527,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
}
+ /// Get the base address for the bytes in an `Allocation` specified by the
+ /// `AllocID` passed in; error if no such allocation exists.
+ ///
+ /// It is up to the caller to take sufficient care when using this address:
+ /// there could be provenance or uninit memory in there, and other memory
+ /// accesses could invalidate the exposed pointer.
+ pub fn alloc_base_addr(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
+ let alloc = self.get_alloc_raw(id)?;
+ Ok(alloc.base_addr())
+ }
+
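The caveat in the new `alloc_base_addr` doc comment is the usual raw-pointer hazard; in plain Rust terms (a minimal sketch, unrelated to the interpreter's types):

    fn main() {
        // The base address is only meaningful while the allocation is live
        // and unmoved; a growing Vec makes the hazard concrete.
        let mut v = vec![1u8, 2, 3];
        let base = v.as_ptr();
        v.push(4); // may reallocate: `base` can now dangle
        let _ = base; // reading through it after a reallocation would be UB
    }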
/// Gives raw access to the `Allocation`, without bounds or alignment checks.
/// The caller is responsible for calling the access hooks!
///
@@ -535,7 +545,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn get_alloc_raw(
&self,
id: AllocId,
- ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
+ ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
// The error type of the inner closure here is somewhat funny. We have two
// ways of "erroring": An actual error, or because we got a reference from
// `get_global_alloc` that we can actually use directly without inserting anything anywhere.
@@ -571,7 +581,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Align,
- ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+ {
let ptr_and_alloc = self.check_and_deref_ptr(
ptr,
size,
@@ -614,7 +625,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn get_alloc_raw_mut(
&mut self,
id: AllocId,
- ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
+ ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
// We have "NLL problem case #3" here, which cannot be worked around without loss of
// efficiency even for the common case where the key is in the map.
// <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
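For readers unfamiliar with the reference in the comment above, "NLL problem case #3" is the pattern where conditionally returning a borrow extends it over the whole function. A minimal illustration:

    use std::collections::HashMap;

    // The early `return v` would keep `map` mutably borrowed for the rest of
    // the function under the current borrow checker, so the single-lookup
    // version is rejected and code falls back to a double lookup.
    fn get_or_insert(map: &mut HashMap<u32, String>, key: u32) -> &mut String {
        // if let Some(v) = map.get_mut(&key) { return v; } // rejected today
        if !map.contains_key(&key) {
            map.insert(key, String::new());
        }
        map.get_mut(&key).unwrap()
    }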
@@ -631,7 +642,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
- if alloc.mutability == Mutability::Not {
+ if alloc.mutability.is_not() {
throw_ub!(WriteToReadOnly(id))
}
Ok((alloc, &mut self.machine))
@@ -643,7 +654,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ptr: Pointer<Option<M::Provenance>>,
size: Size,
align: Align,
- ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+ {
let parts = self.get_ptr_access(ptr, size, align)?;
if let Some((alloc_id, offset, prov)) = parts {
let tcx = *self.tcx;
@@ -692,7 +704,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(self.tcx.is_static(def_id));
assert!(!self.tcx.is_thread_local_static(def_id));
// Use size and align of the type.
- let ty = self.tcx.type_of(def_id);
+ let ty = self
+ .tcx
+ .type_of(def_id)
+ .no_bound_vars()
+ .expect("statics should not have generic parameters");
let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
assert!(layout.is_sized());
(layout.size, layout.align.abi, AllocKind::LiveData)
@@ -838,11 +854,11 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Cannot be a closure because it is generic in `Prov`, `Extra`.
- fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
+ fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
fmt: &mut std::fmt::Formatter<'_>,
tcx: TyCtxt<'tcx>,
allocs_to_print: &mut VecDeque<AllocId>,
- alloc: &Allocation<Prov, Extra>,
+ alloc: &Allocation<Prov, Extra, Bytes>,
) -> std::fmt::Result {
for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
{
@@ -910,7 +926,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
}
/// Reading and writing.
-impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
+impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes>
+ AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
+{
/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
@@ -935,7 +953,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
}
}
-impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
+impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn read_scalar(
&self,
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 2e356f67b..86de4e4e3 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -1,6 +1,7 @@
//! An interpreter for MIR used in CTFE and by miri
mod cast;
+mod discriminant;
mod eval_context;
mod intern;
mod intrinsics;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index befc0928f..5310ef0bb 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -4,13 +4,12 @@
use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty, ValTree};
use rustc_middle::{mir, ty};
use rustc_span::Span;
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
-use rustc_target::abi::{VariantIdx, Variants};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
@@ -53,7 +52,7 @@ impl<Prov: Provenance> Immediate<Prov> {
}
pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
- Immediate::ScalarPair(val, Scalar::from_machine_usize(len, cx))
+ Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
}
pub fn new_dyn_trait(
@@ -256,7 +255,22 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
- pub fn offset_with_meta(
+ /// Replace the layout of this operand. There's basically no sanity check that this makes sense,
+ /// you better know what you are doing! If this is an immediate, applying the wrong layout can
+ /// not just lead to invalid data, it can actually *shift the data around* since the offsets of
+ /// a ScalarPair are entirely determined by the layout, not the data.
+ pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self {
+ assert_eq!(
+ self.layout.size, layout.size,
+ "transmuting with a size change, that doesn't seem right"
+ );
+ OpTy { layout, ..*self }
+ }
+
+ /// Offset the operand in memory (if possible) and change its metadata.
+ ///
+ /// This can go wrong very easily if you give the wrong layout for the new place!
+ pub(super) fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -277,6 +291,9 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
+ /// Offset the operand in memory (if possible).
+ ///
+ /// This can go wrong very easily if you give the wrong layout for the new place!
pub fn offset(
&self,
offset: Size,
@@ -319,7 +336,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
let scalar = alloc.read_scalar(
alloc_range(Size::ZERO, size),
- /*read_provenance*/ s.is_ptr(),
+ /*read_provenance*/ matches!(s, abi::Pointer(_)),
)?;
Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
}
@@ -335,11 +352,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
let a_val = alloc.read_scalar(
alloc_range(Size::ZERO, a_size),
- /*read_provenance*/ a.is_ptr(),
+ /*read_provenance*/ matches!(a, abi::Pointer(_)),
)?;
let b_val = alloc.read_scalar(
alloc_range(b_offset, b_size),
- /*read_provenance*/ b.is_ptr(),
+ /*read_provenance*/ matches!(b, abi::Pointer(_)),
)?;
Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
}
@@ -415,12 +432,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.read_scalar(op)?.to_pointer(self)
}
/// Read a pointer-sized unsigned integer from a place.
- pub fn read_machine_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
- self.read_scalar(op)?.to_machine_usize(self)
+ pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+ self.read_scalar(op)?.to_target_usize(self)
}
/// Read a pointer-sized signed integer from a place.
- pub fn read_machine_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
- self.read_scalar(op)?.to_machine_isize(self)
+ pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+ self.read_scalar(op)?.to_target_isize(self)
}
/// Turn the wide MPlace into a string (must already be dereferenced!)
@@ -595,14 +612,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span: Option<Span>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // FIXME(const_prop): normalization needed b/c const prop lint in
- // `mir_drops_elaborated_and_const_checked`, which happens before
- // optimized MIR. Only after optimizing the MIR can we guarantee
- // that the `RevealAll` pass has happened and that the body's consts
- // are normalized, so any call to resolve before that needs to be
- // manually normalized.
- let val = self.tcx.normalize_erasing_regions(self.param_env, *val);
- match val {
+ match *val {
mir::ConstantKind::Ty(ct) => {
let ty = ct.ty();
let valtree = self.eval_ty_constant(ct, span)?;
@@ -657,154 +667,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
}
-
- /// Read discriminant, return the runtime value as well as the variant index.
- /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
- pub fn read_discriminant(
- &self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
- trace!("read_discriminant_value {:#?}", op.layout);
- // Get type and layout of the discriminant.
- let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
- trace!("discriminant type: {:?}", discr_layout.ty);
-
- // We use "discriminant" to refer to the value associated with a particular enum variant.
- // This is not to be confused with its "variant index", which is just determining its position in the
- // declared list of variants -- they can differ with explicitly assigned discriminants.
- // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
- // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
- let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
- Variants::Single { index } => {
- let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
- Some(discr) => {
- // This type actually has discriminants.
- assert_eq!(discr.ty, discr_layout.ty);
- Scalar::from_uint(discr.val, discr_layout.size)
- }
- None => {
- // On a type without actual discriminants, variant is 0.
- assert_eq!(index.as_u32(), 0);
- Scalar::from_uint(index.as_u32(), discr_layout.size)
- }
- };
- return Ok((discr, index));
- }
- Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
- (tag, tag_encoding, tag_field)
- }
- };
-
- // There are *three* layouts that come into play here:
- // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
- // the `Scalar` we return.
- // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
- // and used to interpret the value we read from the tag field.
- // For the return value, a cast to `discr_layout` is performed.
- // - The field storing the tag has a layout, which is very similar to `tag_layout` but
- // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
-
- // Get layout for tag.
- let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
-
- // Read tag and sanity-check `tag_layout`.
- let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
- assert_eq!(tag_layout.size, tag_val.layout.size);
- assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
- trace!("tag value: {}", tag_val);
-
- // Figure out which discriminant and variant this corresponds to.
- Ok(match *tag_encoding {
- TagEncoding::Direct => {
- let scalar = tag_val.to_scalar();
- // Generate a specific error if `tag_val` is not an integer.
- // (`tag_bits` itself is only used for error messages below.)
- let tag_bits = scalar
- .try_to_int()
- .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
- .assert_bits(tag_layout.size);
- // Cast bits from tag layout to discriminant layout.
- // After the checks we did above, this cannot fail, as
- // discriminants are int-like.
- let discr_val =
- self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
- let discr_bits = discr_val.assert_bits(discr_layout.size);
- // Convert discriminant to variant index, and catch invalid discriminants.
- let index = match *op.layout.ty.kind() {
- ty::Adt(adt, _) => {
- adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
- }
- ty::Generator(def_id, substs, _) => {
- let substs = substs.as_generator();
- substs
- .discriminants(def_id, *self.tcx)
- .find(|(_, var)| var.val == discr_bits)
- }
- _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
- }
- .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
- // Return the cast value, and the index.
- (discr_val, index.0)
- }
- TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
- let tag_val = tag_val.to_scalar();
- // Compute the variant this niche value/"tag" corresponds to. With niche layout,
- // discriminant (encoded in niche/tag) and variant index are the same.
- let variants_start = niche_variants.start().as_u32();
- let variants_end = niche_variants.end().as_u32();
- let variant = match tag_val.try_to_int() {
- Err(dbg_val) => {
- // So this is a pointer then, and casting to an int failed.
- // Can only happen during CTFE.
- // The niche must be just 0, and the ptr not null, then we know this is
- // okay. Everything else, we conservatively reject.
- let ptr_valid = niche_start == 0
- && variants_start == variants_end
- && !self.scalar_may_be_null(tag_val)?;
- if !ptr_valid {
- throw_ub!(InvalidTag(dbg_val))
- }
- untagged_variant
- }
- Ok(tag_bits) => {
- let tag_bits = tag_bits.assert_bits(tag_layout.size);
- // We need to use machine arithmetic to get the relative variant idx:
- // variant_index_relative = tag_val - niche_start_val
- let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
- let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
- let variant_index_relative_val =
- self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
- let variant_index_relative =
- variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
- // Check if this is in the range that indicates an actual discriminant.
- if variant_index_relative <= u128::from(variants_end - variants_start) {
- let variant_index_relative = u32::try_from(variant_index_relative)
- .expect("we checked that this fits into a u32");
- // Then computing the absolute variant idx should not overflow any more.
- let variant_index = variants_start
- .checked_add(variant_index_relative)
- .expect("overflow computing absolute variant idx");
- let variants_len = op
- .layout
- .ty
- .ty_adt_def()
- .expect("tagged layout for non adt")
- .variants()
- .len();
- assert!(usize::try_from(variant_index).unwrap() < variants_len);
- VariantIdx::from_u32(variant_index)
- } else {
- untagged_variant
- }
- }
- };
- // Compute the size of the scalar we need to return.
- // No need to cast, because the variant index directly serves as discriminant and is
- // encoded in the tag.
- (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
- }
- })
- }
}
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
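The block removed above is not deleted logic: per the `mod.rs` hunk earlier in this diff (`+mod discriminant;`), it moves into the new `interpret/discriminant.rs` module. Its core niche-decoding arithmetic, shown here with made-up numbers rather than any real layout, works like this:

    // Hypothetical values: niche variants span indices 1..=3 and the niche
    // starts at 251 in a u8 tag; everything else decodes to the untagged
    // variant (index 0). Wrapping arithmetic mirrors the interpreter's
    // machine arithmetic.
    fn decode_niche(tag: u8) -> u32 {
        let niche_start: u8 = 251;
        let (variants_start, variants_end) = (1u32, 3u32);
        let untagged_variant = 0u32;
        let relative = tag.wrapping_sub(niche_start) as u32;
        if relative <= variants_end - variants_start {
            variants_start + relative // tag encodes a niche variant directly
        } else {
            untagged_variant // any other bit pattern means the untagged variant
        }
    }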
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index e8ff70e3a..4decfe863 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -10,28 +10,20 @@ use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
/// and a boolean signifying the potential overflow to the destination.
- ///
- /// `force_overflow_checks` indicates whether overflow checks should be done even when
- /// `tcx.sess.overflow_checks()` is `false`.
pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
- force_overflow_checks: bool,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
- self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
+ self.tcx.mk_tup(&[ty, self.tcx.types.bool]),
dest.layout.ty,
"type mismatch for result of {:?}",
op,
);
- // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
- // component when overflow checking is disabled.
- let overflowed =
- overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self));
// Write the result to `dest`.
if let Abi::ScalarPair(..) = dest.layout.abi {
// We can use the optimized path and avoid `place_field` (which might do
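With `force_overflow_checks` gone, `binop_with_overflow` always writes the raw pair; whether a failed check panics is decided later at the `Assert` terminator (see the `ignore_checkable_overflow_assertions` hunk in terminator.rs further down). The pair itself has the standard overflowing semantics:

    fn main() {
        // The (value, overflowed) pair binop_with_overflow writes, as in the
        // standard overflowing_* operations: 200 + 100 wraps to 44 in u8.
        let (val, overflowed) = 200u8.overflowing_add(100);
        assert_eq!((val, overflowed), (44, true));
    }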
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 274af61ee..3c463500a 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -7,8 +7,8 @@ use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_middle::mir;
use rustc_middle::ty;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, VariantIdx};
use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@@ -26,6 +26,7 @@ pub enum MemPlaceMeta<Prov: Provenance = AllocId> {
}
impl<Prov: Provenance> MemPlaceMeta<Prov> {
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn unwrap_meta(self) -> Scalar<Prov> {
match self {
Self::Meta(s) => s,
@@ -147,12 +148,16 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
#[inline]
- pub fn offset_with_meta<'tcx>(
+ pub(super) fn offset_with_meta<'tcx>(
self,
offset: Size,
meta: MemPlaceMeta<Prov>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
+ debug_assert!(
+ !meta.has_meta() || self.meta.has_meta(),
+ "cannot use `offset_with_meta` to add metadata to a place"
+ );
Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
}
}
@@ -178,12 +183,15 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
assert!(layout.is_zst());
let align = layout.align.abi;
- let ptr = Pointer::from_addr(align.bytes()); // no provenance, absolute address
+ let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
}
+ /// Offset the place in memory and change its metadata.
+ ///
+ /// This can go wrong very easily if you give the wrong layout for the new place!
#[inline]
- pub fn offset_with_meta(
+ pub(crate) fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
@@ -197,6 +205,9 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
})
}
+ /// Offset the place in memory.
+ ///
+ /// This can go wrong very easily if you give the wrong layout for the new place!
pub fn offset(
&self,
offset: Size,
@@ -229,7 +240,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.kind() {
- ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
+ ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx),
_ => bug!("len not supported on unsized type {:?}", self.layout.ty),
}
} else {
@@ -241,14 +252,6 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
}
}
}
-
- #[inline]
- pub(super) fn vtable(&self) -> Scalar<Prov> {
- match self.layout.ty.kind() {
- ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
- _ => bug!("vtable not supported on type {:?}", self.layout.ty),
- }
- }
}
// These are defined here because they produce a place.
@@ -266,7 +269,12 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
- self.as_mplace_or_imm().left().unwrap()
+ self.as_mplace_or_imm().left().unwrap_or_else(|| {
+ bug!(
+ "OpTy of type {} was immediate when it was expected to be an MPlace",
+ self.layout.ty
+ )
+ })
}
}
@@ -283,7 +291,12 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
- self.as_mplace_or_local().left().unwrap()
+ self.as_mplace_or_local().left().unwrap_or_else(|| {
+ bug!(
+ "PlaceTy of type {} was a local when it was expected to be an MPlace",
+ self.layout.ty
+ )
+ })
}
}
@@ -340,7 +353,8 @@ where
pub(super) fn get_place_alloc(
&self,
place: &MPlaceTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+ {
assert!(place.layout.is_sized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
@@ -351,7 +365,8 @@ where
pub(super) fn get_place_alloc_mut(
&mut self,
place: &MPlaceTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+ {
assert!(place.layout.is_sized());
assert!(!place.meta.has_meta());
let size = place.layout.size;
@@ -754,9 +769,9 @@ where
str: &str,
kind: MemoryKind<M::MemoryKind>,
mutbl: Mutability,
- ) -> MPlaceTy<'tcx, M::Provenance> {
- let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl);
- let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
+ let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
let ty = self.tcx.mk_ref(
@@ -764,95 +779,35 @@ where
ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
);
let layout = self.layout_of(ty).unwrap();
- MPlaceTy { mplace, layout, align: layout.align.abi }
+ Ok(MPlaceTy { mplace, layout, align: layout.align.abi })
}
- /// Writes the discriminant of the given variant.
- #[instrument(skip(self), level = "debug")]
- pub fn write_discriminant(
+ /// Writes the aggregate to the destination.
+ #[instrument(skip(self), level = "trace")]
+ pub fn write_aggregate(
&mut self,
- variant_index: VariantIdx,
+ kind: &mir::AggregateKind<'tcx>,
+ operands: &[mir::Operand<'tcx>],
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- // This must be an enum or generator.
- match dest.layout.ty.kind() {
- ty::Adt(adt, _) => assert!(adt.is_enum()),
- ty::Generator(..) => {}
- _ => span_bug!(
- self.cur_span(),
- "write_discriminant called on non-variant-type (neither enum nor generator)"
- ),
- }
- // Layout computation excludes uninhabited variants from consideration
- // therefore there's no way to represent those variants in the given layout.
- // Essentially, uninhabited variants do not have a tag that corresponds to their
- // discriminant, so we cannot do anything here.
- // When evaluating we will always error before even getting here, but ConstProp 'executes'
- // dead code, so we cannot ICE here.
- if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
- throw_ub!(UninhabitedEnumVariantWritten)
- }
-
- match dest.layout.variants {
- abi::Variants::Single { index } => {
- assert_eq!(index, variant_index);
- }
- abi::Variants::Multiple {
- tag_encoding: TagEncoding::Direct,
- tag: tag_layout,
- tag_field,
- ..
- } => {
- // No need to validate that the discriminant here because the
- // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
-
- let discr_val =
- dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
-
- // raw discriminants for enums are isize or bigger during
- // their computation, but the in-memory tag is the smallest possible
- // representation
- let size = tag_layout.size(self);
- let tag_val = size.truncate(discr_val);
-
- let tag_dest = self.place_field(dest, tag_field)?;
- self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
- }
- abi::Variants::Multiple {
- tag_encoding:
- TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
- tag: tag_layout,
- tag_field,
- ..
- } => {
- // No need to validate that the discriminant here because the
- // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
-
- if variant_index != untagged_variant {
- let variants_start = niche_variants.start().as_u32();
- let variant_index_relative = variant_index
- .as_u32()
- .checked_sub(variants_start)
- .expect("overflow computing relative variant idx");
- // We need to use machine arithmetic when taking into account `niche_start`:
- // tag_val = variant_index_relative + niche_start_val
- let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
- let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
- let variant_index_relative_val =
- ImmTy::from_uint(variant_index_relative, tag_layout);
- let tag_val = self.binary_op(
- mir::BinOp::Add,
- &variant_index_relative_val,
- &niche_start_val,
- )?;
- // Write result.
- let niche_dest = self.place_field(dest, tag_field)?;
- self.write_immediate(*tag_val, &niche_dest)?;
- }
+ self.write_uninit(&dest)?;
+ let (variant_index, variant_dest, active_field_index) = match *kind {
+ mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+ let variant_dest = self.place_downcast(&dest, variant_index)?;
+ (variant_index, variant_dest, active_field_index)
}
+ _ => (VariantIdx::from_u32(0), dest.clone(), None),
+ };
+ if active_field_index.is_some() {
+ assert_eq!(operands.len(), 1);
}
-
- Ok(())
+ for (field_index, operand) in operands.iter().enumerate() {
+ let field_index = active_field_index.unwrap_or(field_index);
+ let field_dest = self.place_field(&variant_dest, field_index)?;
+ let op = self.eval_operand(operand, Some(field_dest.layout))?;
+ self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
+ }
+ self.write_discriminant(variant_index, &dest)
}
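A rough surface-level analogy for what `write_aggregate` now handles (not interpreter code): `active_field_index` is `Some` only for unions, where a single operand initializes the one active field; every other aggregate writes all fields of the chosen variant.

    // Union: active_field_index = Some(1), exactly one operand.
    union U {
        a: u8,
        b: u16,
    }

    fn main() {
        let _u = U { b: 0x0102 }; // one field written
        let _s = (1u8, 2u16); // non-union aggregate: every field written
    }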
pub fn raw_const_to_mplace(
@@ -867,11 +822,16 @@ where
}
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+ /// Also returns the vtable.
pub(super) fn unpack_dyn_trait(
&self,
mplace: &MPlaceTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- let vtable = mplace.vtable().to_pointer(self)?; // also sanity checks the type
+ ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+ assert!(
+ matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
+ "`unpack_dyn_trait` only makes sense on `dyn` types"
+ );
+ let vtable = mplace.meta.unwrap_meta().to_pointer(self)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
@@ -880,7 +840,26 @@ where
layout,
align: layout.align.abi,
};
- Ok(mplace)
+ Ok((mplace, vtable))
+ }
+
+ /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
+ /// Also returns the vtable.
+ pub(super) fn unpack_dyn_star(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+ assert!(
+ matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+ "`unpack_dyn_star` only makes sense on `dyn*` types"
+ );
+ let data = self.operand_field(&op, 0)?;
+ let vtable = self.operand_field(&op, 1)?;
+ let vtable = self.read_pointer(&vtable)?;
+ let (ty, _) = self.get_ptr_vtable(vtable)?;
+ let layout = self.layout_of(ty)?;
+ let data = data.transmute(layout);
+ Ok((data, vtable))
}
}
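A rough model of the difference between the two unpack functions (not the compiler's actual types): for `dyn Trait` behind a pointer, the data word points at the unsized object and the vtable lives in the wide-pointer metadata; for `dyn* Trait`, the first component is the pointer-sized object itself, so the pair is a sized value.

    // Illustrative layout models only.
    struct DynRefModel {
        data: *const (), // points at the unsized object
        vtable: *const (),
    }
    struct DynStarModel {
        data: usize, // the value itself, which must be pointer-sized
        vtable: *const (),
    }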
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 291464ab5..91da930db 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -319,7 +319,7 @@ where
// implement this.
ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
ty::Slice(..) => {
- let len = Scalar::from_machine_usize(inner_len, self);
+ let len = Scalar::from_target_usize(inner_len, self);
(MemPlaceMeta::Meta(len), base.layout.ty)
}
_ => {
@@ -363,7 +363,7 @@ where
Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.local_to_op(self.frame(), local, Some(layout))?;
- let n = self.read_machine_usize(&n)?;
+ let n = self.read_target_usize(&n)?;
self.place_index(base, n)?
}
ConstantIndex { offset, min_length, from_end } => {
@@ -392,7 +392,7 @@ where
Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.local_to_op(self.frame(), local, Some(layout))?;
- let n = self.read_machine_usize(&n)?;
+ let n = self.read_target_usize(&n)?;
self.operand_index(base, n)?
}
ConstantIndex { offset, min_length, from_end } => {
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index fad4cb06c..6863435e5 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -129,6 +129,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// FIXME(#73156): Handle source code coverage in const eval
Coverage(..) => {}
+ ConstEvalCounter => {
+ M::increment_const_eval_counter(self)?;
+ }
+
// Defined to do nothing. These are added by optimization passes, to avoid changing the
// size of MIR constantly.
Nop => {}
@@ -181,9 +185,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let left = self.read_immediate(&self.eval_operand(left, None)?)?;
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
- self.binop_with_overflow(
- bin_op, /*force_overflow_checks*/ false, &left, &right, &dest,
- )?;
+ self.binop_with_overflow(bin_op, &left, &right, &dest)?;
}
UnaryOp(un_op, ref operand) => {
@@ -195,13 +197,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Aggregate(box ref kind, ref operands) => {
- assert!(matches!(kind, mir::AggregateKind::Array(..)));
-
- for (field_index, operand) in operands.iter().enumerate() {
- let op = self.eval_operand(operand, None)?;
- let field_dest = self.place_field(&dest, field_index)?;
- self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
- }
+ self.write_aggregate(kind, operands, &dest)?;
}
Repeat(ref operand, _) => {
@@ -244,7 +240,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let src = self.eval_place(place)?;
let op = self.place_to_op(&src)?;
let len = op.len(self)?;
- self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
+ self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
}
Ref(_, borrow_kind, place) => {
@@ -299,7 +295,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
};
- self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?;
+ self.write_scalar(Scalar::from_target_usize(val, self), &dest)?;
}
ShallowInitBox(ref operand, _) => {
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index da320cd1c..2aea7c79b 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -73,7 +73,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let fn_sig =
self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
let extra_args = &args[fn_sig.inputs().len()..];
- let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));
+ let extra_args =
+ self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout.ty));
let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
ty::FnPtr(_sig) => {
@@ -137,8 +138,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Assert { ref cond, expected, ref msg, target, cleanup } => {
+ let ignored = M::ignore_checkable_overflow_assertions(self)
+ && match msg {
+ mir::AssertKind::OverflowNeg(..) => true,
+ mir::AssertKind::Overflow(op, ..) => op.is_checkable(),
+ _ => false,
+ };
let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
- if expected == cond_val {
+ if ignored || expected == cond_val {
self.go_to_block(target);
} else {
M::assert_panic(self, msg, cleanup)?;
@@ -541,7 +548,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let receiver_place = loop {
match receiver.layout.ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
- ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
+ ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
+ ty::Dynamic(.., ty::DynStar) => {
+ // Not clear how to handle this, so far we assume the receiver is always a pointer.
+ span_bug!(
+ self.cur_span(),
+ "by-value calls on a `dyn*`... are those a thing?"
+ );
+ }
_ => {
// Not there yet, search for the only non-ZST field.
let mut non_zst_field = None;
@@ -567,39 +581,59 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
};
- // Obtain the underlying trait we are working on.
- let receiver_tail = self
- .tcx
- .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
- let ty::Dynamic(data, ..) = receiver_tail.kind() else {
- span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
- };
- // Get the required information from the vtable.
- let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
- let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
- if dyn_trait != data.principal() {
- throw_ub_format!(
- "`dyn` call on a pointer whose vtable does not match its type"
- );
- }
+ // Obtain the underlying trait we are working on, and the adjusted receiver argument.
+ let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
+ receiver_place.layout.ty.kind()
+ {
+ let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
+ let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+ if dyn_trait != data.principal() {
+ throw_ub_format!(
+ "`dyn*` call on a pointer whose vtable does not match its type"
+ );
+ }
+ let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
+
+ (vptr, dyn_ty, recv.ptr)
+ } else {
+ // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
+ // (For that reason we also cannot use `unpack_dyn_trait`.)
+ let receiver_tail = self
+ .tcx
+ .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
+ let ty::Dynamic(data, _, ty::Dyn) = receiver_tail.kind() else {
+ span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
+ };
+ assert!(receiver_place.layout.is_unsized());
+
+ // Get the required information from the vtable.
+ let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
+ let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+ if dyn_trait != data.principal() {
+ throw_ub_format!(
+ "`dyn` call on a pointer whose vtable does not match its type"
+ );
+ }
+
+ // It might be surprising that we use a pointer as the receiver even if this
+ // is a by-val case; this works because by-val passing of an unsized `dyn
+ // Trait` to a function is actually desugared to a pointer.
+ (vptr, dyn_ty, receiver_place.ptr)
+ };
// Now determine the actual method to call. We can do that in two different ways and
// compare them to ensure everything fits.
let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
throw_ub_format!("`dyn` call trying to call something that is not a method")
};
+ trace!("Virtual call dispatches to {fn_inst:#?}");
if cfg!(debug_assertions) {
let tcx = *self.tcx;
let trait_def_id = tcx.trait_of_item(def_id).unwrap();
let virtual_trait_ref =
ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
- assert_eq!(
- receiver_tail,
- virtual_trait_ref.self_ty(),
- "mismatch in underlying dyn trait computation within Miri and MIR building",
- );
let existential_trait_ref =
ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
@@ -614,17 +648,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(fn_inst, concrete_method);
}
- // `*mut receiver_place.layout.ty` is almost the layout that we
- // want for args[0]: We have to project to field 0 because we want
- // a thin pointer.
- assert!(receiver_place.layout.is_unsized());
- let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
- let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
- // Adjust receiver argument.
- args[0] = OpTy::from(ImmTy::from_immediate(
- Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
- this_receiver_ptr,
- ));
+ // Adjust receiver argument. Layout can be any (thin) ptr.
+ args[0] = ImmTy::from_immediate(
+ Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
+ self.layout_of(self.tcx.mk_mut_ptr(dyn_ty))?,
+ )
+ .into();
trace!("Patched receiver operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(
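Stripped of the compiler machinery, the dispatch implemented above is just indexing a table of function pointers by `idx` (a sketch; real vtables also carry size, align, and drop-glue entries before the method slots):

    type Method = fn(*const ()) -> &'static str;

    struct VtableModel {
        methods: [Method; 1],
    }

    fn dog_speak(_data: *const ()) -> &'static str {
        "woof"
    }
    static DOG_VTABLE: VtableModel = VtableModel { methods: [dog_speak] };

    fn virtual_call(data: *const (), vtable: &VtableModel, idx: usize) -> &'static str {
        (vtable.methods[idx])(data)
    }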
@@ -653,15 +682,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// implementation fail -- a problem shared by rustc.
let place = self.force_allocation(place)?;
- let (instance, place) = match place.layout.ty.kind() {
- ty::Dynamic(..) => {
+ let place = match place.layout.ty.kind() {
+ ty::Dynamic(_, _, ty::Dyn) => {
// Dropping a trait object. Need to find actual drop fn.
- let place = self.unpack_dyn_trait(&place)?;
- let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
- (instance, place)
+ self.unpack_dyn_trait(&place)?.0
+ }
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // Dropping a `dyn*`. Need to find actual drop fn.
+ self.unpack_dyn_star(&place.into())?.0.assert_mem_place()
+ }
+ _ => {
+ debug_assert_eq!(
+ instance,
+ ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
+ );
+ place
}
- _ => (instance, place),
};
+ let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
let arg = ImmTy::from_immediate(
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index cabc65e2c..bf2b4ee69 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -1,5 +1,7 @@
use rustc_middle::mir::interpret::InterpResult;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use rustc_middle::ty::{
+ self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
+};
use std::ops::ControlFlow;
/// Checks whether a type contains generic parameters which require substitution.
@@ -9,7 +11,7 @@ use std::ops::ControlFlow;
/// case these parameters are unused.
pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
where
- T: TypeVisitable<'tcx>,
+ T: TypeVisitable<TyCtxt<'tcx>>,
{
debug!("ensure_monomorphic_enough: ty={:?}", ty);
if !ty.needs_subst() {
@@ -21,7 +23,7 @@ where
tcx: TyCtxt<'tcx>,
}
- impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
+ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedSubstVisitor<'tcx> {
type BreakTy = FoundParam;
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
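The visitor's `ControlFlow<Self::BreakTy>` return type is the standard short-circuiting idiom from `std`: `Break` carries the payload (here `FoundParam`), `Continue` keeps walking. In isolation:

    use std::ops::ControlFlow;

    fn find_first_even(xs: &[u32]) -> ControlFlow<u32> {
        for &x in xs {
            if x % 2 == 0 {
                return ControlFlow::Break(x); // stop and report what we found
            }
        }
        ControlFlow::Continue(()) // nothing found, caller keeps going
    }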
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 19e359986..f7881c509 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -23,18 +23,18 @@ use std::hash::Hash;
// for the validation errors
use super::UndefinedBehaviorInfo::*;
use super::{
- CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine,
- MemPlaceMeta, OpTy, Scalar, ValueVisitor,
+ AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
+ Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor,
};
macro_rules! throw_validation_failure {
- ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
+ ($where:expr, { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )?) => {{
let mut msg = String::new();
msg.push_str("encountered ");
- write!(&mut msg, $($what_fmt),+).unwrap();
+ write!(&mut msg, $($what_fmt)*).unwrap();
$(
msg.push_str(", but expected ");
- write!(&mut msg, $($expected_fmt),+).unwrap();
+ write!(&mut msg, $($expected_fmt)*).unwrap();
)?
let path = rustc_middle::ty::print::with_no_trimmed_paths!({
let where_ = &$where;
@@ -82,7 +82,7 @@ macro_rules! throw_validation_failure {
///
macro_rules! try_validation {
($e:expr, $where:expr,
- $( $( $p:pat_param )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+ $( $( $p:pat_param )|+ => { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )? ),+ $(,)?
) => {{
match $e {
Ok(x) => x,
@@ -93,7 +93,7 @@ macro_rules! try_validation {
InterpError::UndefinedBehavior($($p)|+) =>
throw_validation_failure!(
$where,
- { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+ { $( $what_fmt )* } $( expected { $( $expected_fmt )* } )?
)
),+,
#[allow(unreachable_patterns)]
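The switch from comma-separated `expr` fragments to raw `tt` fragments matters because a captured `expr` is opaque: `write!` can no longer pick a named argument like `addr = 16` out of it, while forwarded token trees pass through intact (the `pointer = ...` message added further down relies on this). A standalone sketch of the same trick:

    use std::fmt::Write;

    macro_rules! fail_msg {
        ({ $($what:tt)* }) => {{
            let mut s = String::from("encountered ");
            write!(s, $($what)*).unwrap();
            s
        }};
    }

    fn main() {
        // Named format arguments now pass through the macro unchanged.
        let msg = fail_msg!({ "a dangling pointer ({addr:#x})", addr = 16usize });
        assert_eq!(msg, "encountered a dangling pointer (0x10)");
    }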
@@ -240,10 +240,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
// https://github.com/rust-lang/project-rfc-2229/issues/46
if let Some(local_def_id) = def_id.as_local() {
- let tables = self.ecx.tcx.typeck(local_def_id);
- if let Some(captured_place) =
- tables.closure_min_captures_flattened(local_def_id).nth(field)
- {
+ let captures = self.ecx.tcx.closure_captures(local_def_id);
+ if let Some(captured_place) = captures.get(field) {
// Sometimes the index is beyond the number of upvars (seen
// for a generator).
let var_hir_id = captured_place.get_root_variable();
@@ -335,7 +333,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
) -> InterpResult<'tcx> {
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
match tail.kind() {
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
// Make sure it is a genuine vtable pointer.
let (_ty, _trait) = try_validation!(
@@ -348,7 +346,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// FIXME: check if the type/trait match what ty::Dynamic says?
}
ty::Slice(..) | ty::Str => {
- let _len = meta.unwrap_meta().to_machine_usize(self.ecx)?;
+ let _len = meta.unwrap_meta().to_target_usize(self.ecx)?;
// We do not check that `len * elem_size <= isize::MAX`:
// that is only required for references, and there it falls out of the
// "dereferenceable" check performed by Stacked Borrows.
@@ -399,12 +397,15 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
{
"an unaligned {kind} (required {} byte alignment but found {})",
required.bytes(),
- has.bytes()
+ has.bytes(),
},
DanglingIntPointer(0, _) =>
{ "a null {kind}" },
DanglingIntPointer(i, _) =>
- { "a dangling {kind} (address {i:#x} is unallocated)" },
+ {
+ "a dangling {kind} ({pointer} has no provenance)",
+ pointer = Pointer::<Option<AllocId>>::from_addr_invalid(*i),
+ },
PointerOutOfBounds { .. } =>
{ "a dangling {kind} (going beyond the bounds of its allocation)" },
// This cannot happen during const-eval (because interning already detects
@@ -602,6 +603,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Bound(..)
| ty::Param(..)
| ty::Alias(..)
+ | ty::GeneratorWitnessMIR(..)
| ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index f9efc2418..7a1445939 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -284,7 +284,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- // We `force_allocation` here so that `from_op` below can work.
+ // No need for `force_allocation` since we are just going to read from this.
ecx.place_to_op(self)
}
@@ -421,15 +421,25 @@ macro_rules! make_value_visitor {
// Special treatment for special types, where the (static) layout is not sufficient.
match *ty.kind() {
// If it is a trait object, switch to the real type that was used to create it.
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+ // vtable stored in the place metadata.
// unsized values are never immediate, so we can assert_mem_place
let op = v.to_op_for_read(self.ecx())?;
let dest = op.assert_mem_place();
- let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?;
+ let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
// recurse with the inner type
return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
},
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // DynStar types. Very different from a dyn type (but strangely part of the
+ // same variant in `TyKind`): These are pairs where the 2nd component is the
+ // vtable, and the first component is the data (which must be ptr-sized).
+ let op = v.to_op_for_proj(self.ecx())?;
+ let data = self.ecx().unpack_dyn_star(&op)?.0;
+ return self.visit_field(&v, 0, &$value_trait::from_op(&data));
+ }
// Slices do not need special handling here: they have `Array` field
// placement with length 0, so we enter the `Array` case below which
// indirectly uses the metadata to determine the actual length.
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index 57b91df2d..ed9efe568 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -34,9 +34,12 @@ pub mod interpret;
pub mod transform;
pub mod util;
+use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
+use rustc_macros::fluent_messages;
use rustc_middle::ty;
use rustc_middle::ty::query::Providers;
-use rustc_target::abi::InitKind;
+
+fluent_messages! { "../locales/en-US.ftl" }
pub fn provide(providers: &mut Providers) {
const_eval::provide(providers);
@@ -58,7 +61,7 @@ pub fn provide(providers: &mut Providers) {
let (param_env, value) = param_env_and_value.into_parts();
const_eval::deref_mir_constant(tcx, param_env, value)
};
- providers.permits_uninit_init =
- |tcx, ty| util::might_permit_raw_init(tcx, ty, InitKind::UninitMitigated0x01Fill);
- providers.permits_zero_init = |tcx, ty| util::might_permit_raw_init(tcx, ty, InitKind::Zero);
+ providers.check_validity_requirement = |tcx, (init_kind, param_env_and_ty)| {
+ util::check_validity_requirement(tcx, init_kind, param_env_and_ty)
+ };
}
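The two old queries collapse into one keyed by the init kind. These back user-facing diagnostics such as the `invalid_value` lint on `mem::zeroed`; the distinction they draw, in plain Rust:

    use std::mem;

    fn main() {
        // Zero is a valid bit pattern for usize but never for a reference.
        let ok: usize = unsafe { mem::zeroed() }; // fine: 0 is a valid usize
        // let bad: &u8 = unsafe { mem::zeroed() }; // UB: references are non-null
        let _ = ok;
    }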
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 79f1737e3..aa24d9053 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -10,7 +10,7 @@ use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceC
use rustc_middle::mir::*;
use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
-use rustc_middle::ty::{Binder, TraitRef, TypeVisitable};
+use rustc_middle::ty::{Binder, TraitRef, TypeVisitableExt};
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
@@ -332,7 +332,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
fn check_static(&mut self, def_id: DefId, span: Span) {
if self.tcx.is_thread_local_static(def_id) {
- self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef");
+ self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`");
}
self.check_op_spanned(ops::StaticAccess, span)
}
@@ -453,7 +453,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
Rvalue::Aggregate(kind, ..) => {
if let AggregateKind::Generator(def_id, ..) = kind.as_ref()
- && let Some(generator_kind @ hir::GeneratorKind::Async(..)) = self.tcx.generator_kind(def_id.to_def_id())
+ && let Some(generator_kind @ hir::GeneratorKind::Async(..)) = self.tcx.generator_kind(def_id)
{
self.check_op(ops::Generator(generator_kind));
}
@@ -473,7 +473,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// that this is merely a ZST and it is already eligible for promotion.
// This may require an RFC?
/*
- ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+ ty::Array(_, len) if len.try_eval_target_usize(cx.tcx, cx.param_env) == Some(0)
=> true,
*/
_ => false,
@@ -693,6 +693,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
| StatementKind::Intrinsic(..)
+ | StatementKind::ConstEvalCounter
| StatementKind::Nop => {}
}
}
@@ -754,12 +755,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let ocx = ObligationCtxt::new(&infcx);
let predicates = tcx.predicates_of(callee).instantiate(tcx, substs);
- let hir_id = tcx
- .hir()
- .local_def_id_to_hir_id(self.body.source.def_id().expect_local());
let cause = ObligationCause::new(
terminator.source_info.span,
- hir_id,
+ self.body.source.def_id().expect_local(),
ObligationCauseCode::ItemObligation(callee),
);
let normalized_predicates = ocx.normalize(&cause, param_env, predicates);
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
index 54868e418..0e4501922 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -68,11 +68,11 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
pub fn fn_sig(&self) -> PolyFnSig<'tcx> {
let did = self.def_id().to_def_id();
if self.tcx.is_closure(did) {
- let ty = self.tcx.type_of(did);
+ let ty = self.tcx.type_of(did).subst_identity();
let ty::Closure(_, substs) = ty.kind() else { bug!("type_of closure not ty::Closure") };
substs.as_closure().sig()
} else {
- self.tcx.fn_sig(did)
+ self.tcx.fn_sig(did).subst_identity()
}
}
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 0cb5d2ff8..3e416b89c 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -22,13 +22,7 @@ use rustc_span::{BytePos, Pos, Span, Symbol};
use rustc_trait_selection::traits::SelectionContext;
use super::ConstCx;
-use crate::errors::{
- InteriorMutabilityBorrow, InteriorMutableDataRefer, MutDerefErr, NonConstFmtMacroCall,
- NonConstFnCall, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
- TransientMutBorrowErr, TransientMutBorrowErrRaw, UnallowedFnPointerCall,
- UnallowedHeapAllocations, UnallowedInlineAsm, UnallowedMutableRefs, UnallowedMutableRefsRaw,
- UnallowedOpInConstContext, UnstableConstFn,
-};
+use crate::errors;
use crate::util::{call_kind, CallDesugaringKind, CallKind};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
@@ -99,7 +93,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(UnallowedFnPointerCall { span, kind: ccx.const_kind() })
+ ccx.tcx.sess.create_err(errors::UnallowedFnPointerCall { span, kind: ccx.const_kind() })
}
}
@@ -142,6 +136,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
&param_ty.name.as_str(),
&constraint,
None,
+ None,
);
}
}
@@ -303,10 +298,11 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
diag_trait(&mut err, self_ty, tcx.require_lang_item(LangItem::Deref, Some(span)));
err
}
- _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
- ccx.tcx.sess.create_err(NonConstFmtMacroCall { span, kind: ccx.const_kind() })
- }
- _ => ccx.tcx.sess.create_err(NonConstFnCall {
+ _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => ccx
+ .tcx
+ .sess
+ .create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() }),
+ _ => ccx.tcx.sess.create_err(errors::NonConstFnCall {
span,
def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs),
kind: ccx.const_kind(),
@@ -351,7 +347,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
let mut err = ccx
.tcx
.sess
- .create_err(UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
+ .create_err(errors::UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
if ccx.is_const_stable_const_fn() {
err.help("const-stable functions can only call other const-stable functions");
@@ -387,11 +383,11 @@ impl<'tcx> NonConstOp<'tcx> for Generator {
let msg = format!("{}s are not allowed in {}s", self.0.descr(), ccx.const_kind());
if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
ccx.tcx.sess.create_feature_err(
- UnallowedOpInConstContext { span, msg },
+ errors::UnallowedOpInConstContext { span, msg },
sym::const_async_blocks,
)
} else {
- ccx.tcx.sess.create_err(UnallowedOpInConstContext { span, msg })
+ ccx.tcx.sess.create_err(errors::UnallowedOpInConstContext { span, msg })
}
}
}
@@ -404,7 +400,7 @@ impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(UnallowedHeapAllocations {
+ ccx.tcx.sess.create_err(errors::UnallowedHeapAllocations {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0010)).then_some(()),
@@ -420,7 +416,7 @@ impl<'tcx> NonConstOp<'tcx> for InlineAsm {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(UnallowedInlineAsm { span, kind: ccx.const_kind() })
+ ccx.tcx.sess.create_err(errors::UnallowedInlineAsm { span, kind: ccx.const_kind() })
}
}
@@ -471,7 +467,9 @@ impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_feature_err(InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
+ ccx.tcx
+ .sess
+ .create_feature_err(errors::InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
}
}
@@ -488,14 +486,14 @@ impl<'tcx> NonConstOp<'tcx> for CellBorrow {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
// FIXME: Maybe a more elegant solution to this if/else case
if let hir::ConstContext::Static(_) = ccx.const_kind() {
- ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+ ccx.tcx.sess.create_err(errors::InteriorMutableDataRefer {
span,
opt_help: Some(()),
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()),
})
} else {
- ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+ ccx.tcx.sess.create_err(errors::InteriorMutableDataRefer {
span,
opt_help: None,
kind: ccx.const_kind(),
@@ -528,12 +526,12 @@ impl<'tcx> NonConstOp<'tcx> for MutBorrow {
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
match self.0 {
- hir::BorrowKind::Raw => ccx.tcx.sess.create_err(UnallowedMutableRefsRaw {
+ hir::BorrowKind::Raw => ccx.tcx.sess.create_err(errors::UnallowedMutableRefsRaw {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
}),
- hir::BorrowKind::Ref => ccx.tcx.sess.create_err(UnallowedMutableRefs {
+ hir::BorrowKind::Ref => ccx.tcx.sess.create_err(errors::UnallowedMutableRefs {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
@@ -557,14 +555,14 @@ impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let kind = ccx.const_kind();
match self.0 {
- hir::BorrowKind::Raw => ccx
- .tcx
- .sess
- .create_feature_err(TransientMutBorrowErrRaw { span, kind }, sym::const_mut_refs),
- hir::BorrowKind::Ref => ccx
- .tcx
- .sess
- .create_feature_err(TransientMutBorrowErr { span, kind }, sym::const_mut_refs),
+ hir::BorrowKind::Raw => ccx.tcx.sess.create_feature_err(
+ errors::TransientMutBorrowErrRaw { span, kind },
+ sym::const_mut_refs,
+ ),
+ hir::BorrowKind::Ref => ccx.tcx.sess.create_feature_err(
+ errors::TransientMutBorrowErr { span, kind },
+ sym::const_mut_refs,
+ ),
}
}
}
@@ -586,9 +584,10 @@ impl<'tcx> NonConstOp<'tcx> for MutDeref {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx
- .sess
- .create_feature_err(MutDerefErr { span, kind: ccx.const_kind() }, sym::const_mut_refs)
+ ccx.tcx.sess.create_feature_err(
+ errors::MutDerefErr { span, kind: ccx.const_kind() },
+ sym::const_mut_refs,
+ )
}
}
@@ -601,7 +600,7 @@ impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(PanicNonStrErr { span })
+ ccx.tcx.sess.create_err(errors::PanicNonStrErr { span })
}
}
@@ -652,7 +651,7 @@ impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(RawPtrToIntErr { span })
+ ccx.tcx.sess.create_err(errors::RawPtrToIntErr { span })
}
}
@@ -673,7 +672,7 @@ impl<'tcx> NonConstOp<'tcx> for StaticAccess {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(StaticAccessErr {
+ ccx.tcx.sess.create_err(errors::StaticAccessErr {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(&error_code!(E0013)).then_some(()),
@@ -690,7 +689,7 @@ impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.create_err(NonConstOpErr { span })
+ ccx.tcx.sess.create_err(errors::NonConstOpErr { span })
}
}
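
The ops.rs hunks above replace a six-line `use crate::errors::{...}` list with a single `use crate::errors;` import and qualify each diagnostic struct at its use site. A minimal standalone sketch of that pattern follows; every name in it is an illustrative stand-in, not rustc's real diagnostics API:

    // Sketch of the module-qualification pattern; `errors` and its
    // contents are invented stand-ins, not rustc types.
    mod errors {
        pub struct UnallowedFnPointerCall {
            pub span: usize, // stand-in for a real Span
        }
    }

    fn create_err(e: errors::UnallowedFnPointerCall) -> String {
        // A real session would build a DiagnosticBuilder here.
        format!("fn-pointer call not allowed at {}", e.span)
    }

    fn main() {
        // One short module import replaces the long struct list, and each
        // use site now says where the type comes from.
        let msg = create_err(errors::UnallowedFnPointerCall { span: 7 });
        println!("{msg}");
    }
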
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 8ca3fdf40..bb4b7ad50 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -217,10 +217,10 @@ impl Qualif for CustomEq {
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: AdtDef<'tcx>,
+ def: AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
) -> bool {
- let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+ let ty = cx.tcx.mk_adt(def, substs);
!ty.is_structural_eq_shallow(cx.tcx)
}
}
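
The qualifs.rs hunk swaps raw construction-plus-interning (`mk_ty(ty::Adt(def, substs))`) for the dedicated `mk_adt` constructor. A toy sketch of the design choice, with stand-in types rather than rustc's interner:

    // Sketch only: `Ty` and `TyCtxt` here are toy stand-ins.
    #[derive(Debug)]
    enum Ty {
        Adt { def: &'static str, substs: Vec<&'static str> },
    }

    struct TyCtxt;

    impl TyCtxt {
        // A dedicated constructor keeps variant construction (and, in the
        // real compiler, interning) in one place instead of at every caller.
        fn mk_adt(&self, def: &'static str, substs: Vec<&'static str>) -> Ty {
            Ty::Adt { def, substs }
        }
    }

    fn main() {
        let tcx = TyCtxt;
        let ty = tcx.mk_adt("Option", vec!["i32"]);
        println!("{ty:?}");
    }
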
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index fae6117f8..3f3b66b06 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -18,7 +18,7 @@ use rustc_middle::mir::traversal::ReversePostorderIter;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::subst::InternalSubsts;
-use rustc_middle::ty::{self, List, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, List, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
use rustc_index::vec::{Idx, IndexVec};
@@ -364,31 +364,33 @@ impl<'tcx> Validator<'_, 'tcx> {
ProjectionElem::Index(local) => {
let mut promotable = false;
// Only accept if we can predict the index and are indexing an array.
- let val =
- if let TempState::Defined { location: loc, .. } = self.temps[local] {
- let block = &self.body[loc.block];
- if loc.statement_index < block.statements.len() {
- let statement = &block.statements[loc.statement_index];
- match &statement.kind {
- StatementKind::Assign(box (
- _,
- Rvalue::Use(Operand::Constant(c)),
- )) => c.literal.try_eval_usize(self.tcx, self.param_env),
- _ => None,
- }
- } else {
- None
+ let val = if let TempState::Defined { location: loc, .. } =
+ self.temps[local]
+ {
+ let block = &self.body[loc.block];
+ if loc.statement_index < block.statements.len() {
+ let statement = &block.statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Use(Operand::Constant(c)),
+ )) => c.literal.try_eval_target_usize(self.tcx, self.param_env),
+ _ => None,
}
} else {
None
- };
+ }
+ } else {
+ None
+ };
if let Some(idx) = val {
// Determine the type of the thing we are indexing.
let ty = place_base.ty(self.body, self.tcx).ty;
match ty.kind() {
ty::Array(_, len) => {
// It's an array; determine its length.
- if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+ if let Some(len) =
+ len.try_eval_target_usize(self.tcx, self.param_env)
{
// If the index is in-bounds, go ahead.
if idx < len {
@@ -470,7 +472,7 @@ impl<'tcx> Validator<'_, 'tcx> {
// mutably without consequences. However, only &mut []
// is allowed right now.
if let ty::Array(_, len) = ty.kind() {
- match len.try_eval_usize(self.tcx, self.param_env) {
+ match len.try_eval_target_usize(self.tcx, self.param_env) {
Some(0) => {}
_ => return Err(Unpromotable),
}
@@ -864,7 +866,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let mut projection = vec![PlaceElem::Deref];
projection.extend(place.projection);
- place.projection = tcx.intern_place_elems(&projection);
+ place.projection = tcx.mk_place_elems(&projection);
// Create a temp to hold the promoted reference.
// This is because `*r` requires `r` to be a local,
@@ -896,7 +898,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
assert_eq!(self.new_block(), START_BLOCK);
self.visit_rvalue(
&mut rvalue,
- Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+ Location { block: START_BLOCK, statement_index: usize::MAX },
);
let span = self.promoted.span;
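
The promote_consts.rs hunks keep the promotion rule intact while renaming `try_eval_usize` to `try_eval_target_usize`: an `Index` projection is promotable only when both the index and the array length evaluate to known constants and the index is in bounds. A standalone sketch of that decision, with a toy string stand-in for "the temp's defining statement assigns a constant":

    // Stand-in for "the temp is defined by Assign(_, Use(Constant(c)))
    // and c evaluates to a target usize".
    fn const_index(defining_stmt: &str) -> Option<u64> {
        defining_stmt.strip_prefix("const ")?.parse().ok()
    }

    fn may_promote_index(defining_stmt: &str, array_len: Option<u64>) -> bool {
        match (const_index(defining_stmt), array_len) {
            // Promote only when both the index and the array length are
            // statically known and the access is in-bounds.
            (Some(idx), Some(len)) => idx < len,
            _ => false,
        }
    }

    fn main() {
        assert!(may_promote_index("const 3", Some(4)));
        assert!(!may_promote_index("const 4", Some(4))); // out of bounds
        assert!(!may_promote_index("x + 1", Some(4))); // unpredictable index
        println!("ok");
    }
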
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index dd168a9ac..fb37eb79a 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -8,12 +8,12 @@ use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
use rustc_middle::mir::visit::{PlaceContext, Visitor};
use rustc_middle::mir::{
- traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping,
- Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef,
- ProjectionElem, RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind,
- Terminator, TerminatorKind, UnOp, START_BLOCK,
+ traversal, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping, Local, Location,
+ MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef, ProjectionElem,
+ RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
+ TerminatorKind, UnOp, START_BLOCK,
};
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
@@ -230,8 +230,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// Equal types, all is good.
return true;
}
- // Normalization reveals opaque types, but we may be validating MIR while computing
- // said opaque types, causing cycles.
+
+ // We sometimes have to use `defining_opaque_types` for subtyping
+ // to succeed here, and figuring out how exactly that should work
+ // is annoying. It is harmless enough to just not validate anything
+ // in that case. We still check this after analysis, as all opaque
+ // types have been revealed at this point.
if (src, dest).has_opaque_types() {
return true;
}
@@ -311,7 +315,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
ProjectionElem::Field(f, ty) => {
- let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+ let parent = Place { local, projection: self.tcx.mk_place_elems(proj_base) };
let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
let fail_out_of_bounds = |this: &Self, location| {
this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
@@ -330,7 +334,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
let kind = match parent_ty.ty.kind() {
&ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
- self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+ self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
}
kind => kind,
};
@@ -377,12 +381,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
return;
};
- let Some(&f_ty) = layout.field_tys.get(local) else {
+ let Some(f_ty) = layout.field_tys.get(local) else {
self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
return;
};
- f_ty
+ f_ty.ty
} else {
let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
fail_out_of_bounds(self, location);
@@ -428,19 +432,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
};
}
match rvalue {
- Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
- Rvalue::Aggregate(agg_kind, _) => {
- let disallowed = match **agg_kind {
- AggregateKind::Array(..) => false,
- _ => self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup),
- };
- if disallowed {
- self.fail(
- location,
- format!("{:?} have been lowered to field assignments", rvalue),
- )
- }
- }
+ Rvalue::Use(_) | Rvalue::CopyForDeref(_) | Rvalue::Aggregate(..) => {}
Rvalue::Ref(_, BorrowKind::Shallow, _) => {
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
@@ -759,13 +751,32 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// FIXME(JakobDegen) The validator should check that `self.mir_phase <
// DropsLowered`. However, this causes ICEs with generation of drop shims, which
// seem to fail to set their `MirPhase` correctly.
- if *kind == RetagKind::Raw || *kind == RetagKind::TwoPhase {
+ if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
self.fail(location, format!("explicit `{:?}` is forbidden", kind));
}
}
- StatementKind::StorageLive(..)
- | StatementKind::StorageDead(..)
+ StatementKind::StorageLive(local) => {
+ // We check that the local is not live when entering a `StorageLive` for it.
+ // Technically, violating this restriction is only UB, not in itself a sign of
+ // ill-formed MIR. This means that an optimization which turns MIR that
+ // already has UB into MIR that fails this check is not necessarily wrong. However,
+ // we have no such optimizations at the moment, and so we include this check anyway
+ // to help us catch bugs. If you happen to write an optimization that might cause
+ // this to incorrectly fire, feel free to remove this check.
+ if self.reachable_blocks.contains(location.block) {
+ self.storage_liveness.seek_before_primary_effect(location);
+ let locals_with_storage = self.storage_liveness.get();
+ if locals_with_storage.contains(*local) {
+ self.fail(
+ location,
+ format!("StorageLive({local:?}) which already has storage here"),
+ );
+ }
+ }
+ }
+ StatementKind::StorageDead(_)
| StatementKind::Coverage(_)
+ | StatementKind::ConstEvalCounter
| StatementKind::Nop => {}
}
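
The new `StorageLive` arm in validate.rs rejects a `StorageLive(l)` that fires while `l` already has storage. The real validator consults a `MaybeStorageLive` dataflow cursor and skips unreachable blocks; the sketch below models the same rule with a plain set over a single straight-line block:

    use std::collections::HashSet;

    enum Statement {
        StorageLive(u32),
        StorageDead(u32),
    }

    fn validate(stmts: &[Statement]) -> Result<(), String> {
        let mut live: HashSet<u32> = HashSet::new();
        for (i, stmt) in stmts.iter().enumerate() {
            match stmt {
                Statement::StorageLive(l) => {
                    // Mirrors the validator: a second StorageLive without an
                    // intervening StorageDead is reported.
                    if !live.insert(*l) {
                        return Err(format!(
                            "statement {i}: StorageLive({l}) which already has storage"
                        ));
                    }
                }
                Statement::StorageDead(l) => {
                    live.remove(l);
                }
            }
        }
        Ok(())
    }

    fn main() {
        use Statement::*;
        assert!(validate(&[StorageLive(0), StorageDead(0), StorageLive(0)]).is_ok());
        assert!(validate(&[StorageLive(0), StorageLive(0)]).is_err());
        println!("ok");
    }
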
diff --git a/compiler/rustc_const_eval/src/util/aggregate.rs b/compiler/rustc_const_eval/src/util/aggregate.rs
deleted file mode 100644
index 10783c5ed..000000000
--- a/compiler/rustc_const_eval/src/util/aggregate.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use rustc_index::vec::Idx;
-use rustc_middle::mir::*;
-use rustc_middle::ty::{Ty, TyCtxt};
-use rustc_target::abi::VariantIdx;
-
-use std::iter::TrustedLen;
-
-/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
-///
-/// Produces something like
-/// ```ignore (illustrative)
-/// (lhs as Variant).field0 = arg0; // We only have a downcast if this is an enum
-/// (lhs as Variant).field1 = arg1;
-/// discriminant(lhs) = variant_index; // If lhs is an enum or generator.
-/// ```
-pub fn expand_aggregate<'tcx>(
- orig_lhs: Place<'tcx>,
- operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
- kind: AggregateKind<'tcx>,
- source_info: SourceInfo,
- tcx: TyCtxt<'tcx>,
-) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
- let mut lhs = orig_lhs;
- let mut set_discriminant = None;
- let active_field_index = match kind {
- AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
- let adt_def = tcx.adt_def(adt_did);
- if adt_def.is_enum() {
- set_discriminant = Some(Statement {
- kind: StatementKind::SetDiscriminant {
- place: Box::new(orig_lhs),
- variant_index,
- },
- source_info,
- });
- lhs = tcx.mk_place_downcast(orig_lhs, adt_def, variant_index);
- }
- active_field_index
- }
- AggregateKind::Generator(..) => {
- // Right now we only support initializing generators to
- // variant 0 (Unresumed).
- let variant_index = VariantIdx::new(0);
- set_discriminant = Some(Statement {
- kind: StatementKind::SetDiscriminant { place: Box::new(orig_lhs), variant_index },
- source_info,
- });
-
- // Operands are upvars stored on the base place, so no
- // downcast is necessary.
-
- None
- }
- _ => None,
- };
-
- let operands = operands.enumerate().map(move |(i, (op, ty))| {
- let lhs_field = if let AggregateKind::Array(_) = kind {
- let offset = u64::try_from(i).unwrap();
- tcx.mk_place_elem(
- lhs,
- ProjectionElem::ConstantIndex { offset, min_length: offset + 1, from_end: false },
- )
- } else {
- let field = Field::new(active_field_index.unwrap_or(i));
- tcx.mk_place_field(lhs, field, ty)
- };
- Statement {
- source_info,
- kind: StatementKind::Assign(Box::new((lhs_field, Rvalue::Use(op)))),
- }
- });
- [Statement { source_info, kind: StatementKind::Deinit(Box::new(orig_lhs)) }]
- .into_iter()
- .chain(operands)
- .chain(set_discriminant)
-}
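
The deleted `expand_aggregate` turned one aggregate assignment into a `Deinit`, one assignment per field, and (for enums and generators) a trailing `SetDiscriminant`. A toy sketch of that statement ordering, emitting strings instead of real MIR:

    fn expand_aggregate(lhs: &str, fields: &[&str], variant: Option<u32>) -> Vec<String> {
        // Deinit comes first, so the place is fully overwritten.
        let mut out = vec![format!("Deinit({lhs})")];
        // We only have a downcast if this is an enum variant.
        let place = match variant {
            Some(v) => format!("({lhs} as variant#{v})"),
            None => lhs.to_string(),
        };
        for (i, op) in fields.iter().enumerate() {
            out.push(format!("{place}.field{i} = {op}"));
        }
        if let Some(v) = variant {
            // Writing the discriminant last keeps the place logically
            // uninitialized until every field has been assigned.
            out.push(format!("discriminant({lhs}) = {v}"));
        }
        out
    }

    fn main() {
        for stmt in expand_aggregate("_1", &["const 1", "const 2"], Some(0)) {
            println!("{stmt}");
        }
    }
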
diff --git a/compiler/rustc_const_eval/src/util/call_kind.rs b/compiler/rustc_const_eval/src/util/call_kind.rs
index 38d9b0449..995363c0e 100644
--- a/compiler/rustc_const_eval/src/util/call_kind.rs
+++ b/compiler/rustc_const_eval/src/util/call_kind.rs
@@ -40,6 +40,7 @@ pub enum CallKind<'tcx> {
self_arg: Option<Ident>,
desugaring: Option<(CallDesugaringKind, Ty<'tcx>)>,
method_did: DefId,
+ method_substs: SubstsRef<'tcx>,
},
/// A call to `Fn(..)::call(..)`, desugared from `my_closure(a, b, c)`
FnCall { fn_trait_id: DefId, self_ty: Ty<'tcx> },
@@ -131,6 +132,6 @@ pub fn call_kind<'tcx>(
} else {
None
};
- CallKind::Normal { self_arg, desugaring, method_did }
+ CallKind::Normal { self_arg, desugaring, method_did, method_substs }
})
}
diff --git a/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 4ce107ea6..23fcd22c5 100644
--- a/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,7 +1,7 @@
-use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
-use rustc_middle::ty::{ParamEnv, TyCtxt};
+use rustc_middle::ty::layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement};
+use rustc_middle::ty::{ParamEnv, ParamEnvAnd, Ty, TyCtxt};
use rustc_session::Limit;
-use rustc_target::abi::{Abi, FieldsShape, InitKind, Scalar, Variants};
+use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
use crate::const_eval::{CheckAlignment, CompileTimeInterpreter};
use crate::interpret::{InterpCx, MemoryKind, OpTy};
@@ -18,16 +18,23 @@ use crate::interpret::{InterpCx, MemoryKind, OpTy};
/// Rust UB as long as there is no risk of miscompilations. The `strict_init_checks` can be set to
/// do a full check against Rust UB instead (in which case we will also ignore the 0x01-filling and
/// do the full uninit check).
-pub fn might_permit_raw_init<'tcx>(
+pub fn check_validity_requirement<'tcx>(
tcx: TyCtxt<'tcx>,
- ty: TyAndLayout<'tcx>,
- kind: InitKind,
-) -> bool {
- if tcx.sess.opts.unstable_opts.strict_init_checks {
- might_permit_raw_init_strict(ty, tcx, kind)
+ kind: ValidityRequirement,
+ param_env_and_ty: ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<bool, LayoutError<'tcx>> {
+ let layout = tcx.layout_of(param_env_and_ty)?;
+
+ // There is nothing strict or lax about inhabitedness.
+ if kind == ValidityRequirement::Inhabited {
+ return Ok(!layout.abi.is_uninhabited());
+ }
+
+ if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
+ might_permit_raw_init_strict(layout, tcx, kind)
} else {
- let layout_cx = LayoutCx { tcx, param_env: ParamEnv::reveal_all() };
- might_permit_raw_init_lax(ty, &layout_cx, kind)
+ let layout_cx = LayoutCx { tcx, param_env: param_env_and_ty.param_env };
+ might_permit_raw_init_lax(layout, &layout_cx, kind)
}
}
@@ -36,8 +43,8 @@ pub fn might_permit_raw_init<'tcx>(
fn might_permit_raw_init_strict<'tcx>(
ty: TyAndLayout<'tcx>,
tcx: TyCtxt<'tcx>,
- kind: InitKind,
-) -> bool {
+ kind: ValidityRequirement,
+) -> Result<bool, LayoutError<'tcx>> {
let machine = CompileTimeInterpreter::new(
Limit::new(0),
/*can_access_statics:*/ false,
@@ -50,7 +57,7 @@ fn might_permit_raw_init_strict<'tcx>(
.allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
.expect("OOM: failed to allocate for uninit check");
- if kind == InitKind::Zero {
+ if kind == ValidityRequirement::Zero {
cx.write_bytes_ptr(
allocated.ptr,
std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
@@ -64,7 +71,7 @@ fn might_permit_raw_init_strict<'tcx>(
// This does *not* actually check that references are dereferenceable, but since all types that
// require dereferenceability also require non-null, we don't actually get any false negatives
// due to this.
- cx.validate_operand(&ot).is_ok()
+ Ok(cx.validate_operand(&ot).is_ok())
}
/// Implements the 'lax' (default) version of the `might_permit_raw_init` checks; see that function for
@@ -72,15 +79,18 @@ fn might_permit_raw_init_strict<'tcx>(
fn might_permit_raw_init_lax<'tcx>(
this: TyAndLayout<'tcx>,
cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
- init_kind: InitKind,
-) -> bool {
+ init_kind: ValidityRequirement,
+) -> Result<bool, LayoutError<'tcx>> {
let scalar_allows_raw_init = move |s: Scalar| -> bool {
match init_kind {
- InitKind::Zero => {
+ ValidityRequirement::Inhabited => {
+ bug!("ValidityRequirement::Inhabited should have been handled above")
+ }
+ ValidityRequirement::Zero => {
// The range must contain 0.
s.valid_range(cx).contains(0)
}
- InitKind::UninitMitigated0x01Fill => {
+ ValidityRequirement::UninitMitigated0x01Fill => {
// The range must include an 0x01-filled buffer.
let mut val: u128 = 0x01;
for _ in 1..s.size(cx).bytes() {
@@ -89,6 +99,9 @@ fn might_permit_raw_init_lax<'tcx>(
}
s.valid_range(cx).contains(val)
}
+ ValidityRequirement::Uninit => {
+ bug!("ValidityRequirement::Uninit should have been handled above")
+ }
}
};
@@ -102,20 +115,20 @@ fn might_permit_raw_init_lax<'tcx>(
};
if !valid {
// This is definitely not okay.
- return false;
+ return Ok(false);
}
// Special magic check for references and boxes (i.e., special pointer types).
if let Some(pointee) = this.ty.builtin_deref(false) {
- let pointee = cx.layout_of(pointee.ty).expect("need to be able to compute layouts");
+ let pointee = cx.layout_of(pointee.ty)?;
// We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied.
if pointee.align.abi.bytes() > 1 {
// 0x01-filling is not aligned.
- return false;
+ return Ok(false);
}
if pointee.size.bytes() > 0 {
// A 'fake' integer pointer is not sufficiently dereferenceable.
- return false;
+ return Ok(false);
}
}
@@ -128,9 +141,9 @@ fn might_permit_raw_init_lax<'tcx>(
}
FieldsShape::Arbitrary { offsets, .. } => {
for idx in 0..offsets.len() {
- if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind) {
+ if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind)? {
// We found a field that is unhappy with this kind of initialization.
- return false;
+ return Ok(false);
}
}
}
@@ -147,5 +160,5 @@ fn might_permit_raw_init_lax<'tcx>(
}
}
- true
+ Ok(true)
}
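
The lax check's `UninitMitigated0x01Fill` arm builds a value with every byte set to 0x01 and asks whether the scalar's valid range contains it. A standalone sketch of that computation, using a plain `RangeInclusive` where the compiler uses its `WrappingRange`:

    fn fill_0x01(size_bytes: u32) -> u128 {
        let mut val: u128 = 0x01;
        for _ in 1..size_bytes {
            // Same construction as the compiler's loop: shift in one 0x01
            // byte per remaining byte of the scalar.
            val = (val << 8) | 0x01;
        }
        val
    }

    fn allows_0x01_fill(size_bytes: u32, valid: std::ops::RangeInclusive<u128>) -> bool {
        valid.contains(&fill_0x01(size_bytes))
    }

    fn main() {
        assert_eq!(fill_0x01(2), 0x0101);
        assert_eq!(fill_0x01(4), 0x0101_0101);
        // A bool's valid range 0..=1 contains the mitigation value 0x01...
        assert!(allows_0x01_fill(1, 0..=1));
        // ...but a 2-byte range that excludes 0x0101 rejects the fill.
        assert!(!allows_0x01_fill(2, 0x0102..=0xffff));
        println!("ok");
    }
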
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 76ea5a24e..c0aabd77c 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -1,16 +1,14 @@
-pub mod aggregate;
mod alignment;
mod call_kind;
+mod check_validity_requirement;
pub mod collect_writes;
mod compare_types;
mod find_self_call;
-mod might_permit_raw_init;
mod type_name;
-pub use self::aggregate::expand_aggregate;
pub use self::alignment::is_disaligned;
pub use self::call_kind::{call_kind, CallDesugaringKind, CallKind};
+pub use self::check_validity_requirement::check_validity_requirement;
pub use self::compare_types::{is_equal_up_to_subtyping, is_subtype};
pub use self::find_self_call::find_self_call;
-pub use self::might_permit_raw_init::might_permit_raw_init;
pub use self::type_name::type_name;
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index c4122f664..4e80a2851 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -64,6 +64,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
+ ty::GeneratorWitnessMIR(..) => bug!("type_name: unexpected `GeneratorWitnessMIR`"),
}
}