author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-06-19 09:25:56 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-06-19 09:25:56 +0000
commit     018c4950b9406055dec02ef0fb52f132e2bb1e2c (patch)
tree       a835ebdf2088ef88fa681f8fad45f09922c1ae9a /compiler/rustc_const_eval
parent     Adding debian version 1.75.0+dfsg1-5. (diff)
download   rustc-018c4950b9406055dec02ef0fb52f132e2bb1e2c.tar.xz
           rustc-018c4950b9406055dec02ef0fb52f132e2bb1e2c.zip
Merging upstream version 1.76.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_const_eval')
30 files changed, 614 insertions, 456 deletions
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index f926da464..4a426ed16 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -461,6 +461,9 @@ const_eval_validation_uninhabited_val = {$front_matter}: encountered a value of
 const_eval_validation_uninit = {$front_matter}: encountered uninitialized memory, but {$expected}
 const_eval_validation_unsafe_cell = {$front_matter}: encountered `UnsafeCell` in a `const`
 
+const_eval_write_through_immutable_pointer =
+    writing through a pointer that was derived from a shared (immutable) reference
+
 const_eval_write_to_read_only = writing to {$allocation} which is read-only
 
 const_eval_zst_pointer_out_of_bounds =
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index bf1e0a370..8f18cd78d 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -1,14 +1,16 @@
 use std::mem;
 
 use rustc_errors::{DiagnosticArgValue, DiagnosticMessage, IntoDiagnostic, IntoDiagnosticArg};
+use rustc_hir::CRATE_HIR_ID;
 use rustc_middle::mir::AssertKind;
+use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::TyCtxt;
 use rustc_middle::ty::{layout::LayoutError, ConstInt};
 use rustc_span::{ErrorGuaranteed, Span, Symbol, DUMMY_SP};
 
-use super::InterpCx;
+use super::{CompileTimeInterpreter, InterpCx};
 use crate::errors::{self, FrameNote, ReportErrorExt};
-use crate::interpret::{ErrorHandled, InterpError, InterpErrorInfo, Machine, MachineStopType};
+use crate::interpret::{ErrorHandled, InterpError, InterpErrorInfo, MachineStopType};
 
 /// The CTFE machine has some custom error kinds.
 #[derive(Clone, Debug)]
@@ -57,16 +59,20 @@ impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
     }
 }
 
-pub fn get_span_and_frames<'tcx, 'mir, M: Machine<'mir, 'tcx>>(
-    ecx: &InterpCx<'mir, 'tcx, M>,
+pub fn get_span_and_frames<'tcx, 'mir>(
+    tcx: TyCtxtAt<'tcx>,
+    machine: &CompileTimeInterpreter<'mir, 'tcx>,
 ) -> (Span, Vec<errors::FrameNote>)
 where
     'tcx: 'mir,
 {
-    let mut stacktrace = ecx.generate_stacktrace();
+    let mut stacktrace =
+        InterpCx::<CompileTimeInterpreter<'mir, 'tcx>>::generate_stacktrace_from_stack(
+            &machine.stack,
+        );
     // Filter out `requires_caller_location` frames.
-    stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*ecx.tcx));
-    let span = stacktrace.first().map(|f| f.span).unwrap_or(ecx.tcx.span);
+    stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*tcx));
+    let span = stacktrace.first().map(|f| f.span).unwrap_or(tcx.span);
 
     let mut frames = Vec::new();
@@ -87,7 +93,7 @@ where
     let mut last_frame: Option<errors::FrameNote> = None;
     for frame_info in &stacktrace {
-        let frame = frame_info.as_note(*ecx.tcx);
+        let frame = frame_info.as_note(*tcx);
         match last_frame.as_mut() {
             Some(last_frame)
                 if last_frame.span == frame.span
@@ -148,7 +154,7 @@ where
         let mut err = tcx.sess.create_err(err);
 
         let msg = error.diagnostic_message();
-        error.add_args(&tcx.sess.parse_sess.span_diagnostic, &mut err);
+        error.add_args(tcx.sess.dcx(), &mut err);
 
         // Use *our* span to label the interp error
         err.span_label(our_span, msg);
@@ -156,3 +162,25 @@ where
         }
     }
 }
+
+/// Emit a lint from a const-eval situation.
+// Even if this is unused, please don't remove it -- chances are we will need to emit a lint during const-eval again in the future!
+pub(super) fn lint<'tcx, 'mir, L>(
+    tcx: TyCtxtAt<'tcx>,
+    machine: &CompileTimeInterpreter<'mir, 'tcx>,
+    lint: &'static rustc_session::lint::Lint,
+    decorator: impl FnOnce(Vec<errors::FrameNote>) -> L,
+) where
+    L: for<'a> rustc_errors::DecorateLint<'a, ()>,
+{
+    let (span, frames) = get_span_and_frames(tcx, machine);
+
+    tcx.emit_spanned_lint(
+        lint,
+        // We use the root frame for this so the crate that defines the const defines whether the
+        // lint is emitted.
+        machine.stack.first().and_then(|frame| frame.lint_root()).unwrap_or(CRATE_HIR_ID),
+        span,
+        decorator(frames),
+    );
+}
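The new `lint` helper centralizes the span/frame computation and the choice of lint root for any lint raised during const evaluation. Its single caller in this patch, the `before_memory_write` hook added to `const_eval/machine.rs` further down, shows the intended shape: the decorator closure merely wraps the collected frame notes into a `LintDiagnostic` struct.

```rust
// Call site from later in this same patch (const_eval/machine.rs): report a
// write through an immutable pointer as a lint rather than a hard error.
super::lint(tcx, machine, WRITES_THROUGH_IMMUTABLE_POINTER, |frames| {
    crate::errors::WriteThroughImmutablePointer { frames }
});
```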
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 13937a941..9d22df50d 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -155,8 +155,8 @@ pub(super) fn op_to_const<'tcx>(
     match immediate {
         Left(ref mplace) => {
             // We know `offset` is relative to the allocation, so we can use `into_parts`.
-            let (alloc_id, offset) = mplace.ptr().into_parts();
-            let alloc_id = alloc_id.expect("cannot have `fake` place fot non-ZST type");
+            let (prov, offset) = mplace.ptr().into_parts();
+            let alloc_id = prov.expect("cannot have `fake` place for non-ZST type").alloc_id();
             ConstValue::Indirect { alloc_id, offset }
         }
         // see comment on `let force_as_immediate` above
@@ -178,8 +178,8 @@ pub(super) fn op_to_const<'tcx>(
             );
             let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to the beginning of an actual allocation";
             // We know `offset` is relative to the allocation, so we can use `into_parts`.
-            let (alloc_id, offset) = a.to_pointer(ecx).expect(msg).into_parts();
-            let alloc_id = alloc_id.expect(msg);
+            let (prov, offset) = a.to_pointer(ecx).expect(msg).into_parts();
+            let alloc_id = prov.expect(msg).alloc_id();
             let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
             assert!(offset == abi::Size::ZERO, "{}", msg);
             let meta = b.to_target_usize(ecx).expect(msg);
@@ -314,7 +314,7 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
     is_static: bool,
 ) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
     let res = ecx.load_mir(cid.instance.def, cid.promoted);
-    match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
+    match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, body)) {
         Err(error) => {
             let (error, backtrace) = error.into_parts();
             backtrace.print_backtrace();
@@ -338,7 +338,7 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
                 *ecx.tcx,
                 error,
                 None,
-                || super::get_span_and_frames(&ecx),
+                || super::get_span_and_frames(ecx.tcx, &ecx.machine),
                 |span, frames| ConstEvalError {
                     span,
                     error_kind: kind,
@@ -353,7 +353,7 @@ pub fn eval_in_interpreter<'mir, 'tcx>(
             let validation =
                 const_validate_mplace(&ecx, &mplace, is_static, cid.promoted.is_some());
 
-            let alloc_id = mplace.ptr().provenance.unwrap();
+            let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();
 
             // Validation failed, report an error.
             if let Err(error) = validation {
@@ -419,7 +419,7 @@ pub fn const_report_error<'mir, 'tcx>(
         *ecx.tcx,
         error,
         None,
-        || crate::const_eval::get_span_and_frames(ecx),
+        || crate::const_eval::get_span_and_frames(ecx.tcx, &ecx.machine),
        move |span, frames| errors::UndefinedBehavior { span, ub_note, frames, raw_bytes },
    )
}
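The mechanical change running through `eval_queries.rs` (and most files below) is that `into_parts()` on a CTFE pointer now yields a `CtfeProvenance` instead of a bare `Option<AllocId>`, so the allocation id must be projected out explicitly. A minimal sketch of the new decomposition, using names from `rustc_middle::mir::interpret` (the helper function itself is hypothetical):

```rust
use rustc_middle::mir::interpret::{AllocId, CtfeProvenance, Pointer};
use rustc_target::abi::Size;

// Hypothetical helper: split a CTFE pointer into its allocation and offset.
// `into_parts` returns the raw provenance; `alloc_id()` projects the id out.
fn split(ptr: Pointer<CtfeProvenance>) -> (AllocId, Size) {
    let (prov, offset) = ptr.into_parts();
    (prov.alloc_id(), offset)
}
```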
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index 9e992637f..dbc29e607 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -33,7 +33,7 @@ pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
 /// return if it has a `const` modifier. If it is an intrinsic, report whether said intrinsic
 /// has a `rustc_const_{un,}stable` attribute. Otherwise, return `Constness::NotConst`.
 fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
-    let node = tcx.hir().get_by_def_id(def_id);
+    let node = tcx.hir_node_by_def_id(def_id);
 
     match node {
         hir::Node::Ctor(_)
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 4b447229c..9aaf6c510 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -1,30 +1,30 @@
-use rustc_hir::def::DefKind;
-use rustc_hir::LangItem;
-use rustc_middle::mir;
-use rustc_middle::mir::interpret::PointerArithmetic;
-use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
-use rustc_middle::ty::{self, TyCtxt};
-use rustc_span::Span;
 use std::borrow::Borrow;
+use std::fmt;
 use std::hash::Hash;
 use std::ops::ControlFlow;
 
+use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::fx::IndexEntry;
-use std::fmt;
-
-use rustc_ast::Mutability;
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
+use rustc_hir::LangItem;
+use rustc_middle::mir;
 use rustc_middle::mir::AssertMessage;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
+use rustc_session::lint::builtin::WRITES_THROUGH_IMMUTABLE_POINTER;
 use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
 use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
 
 use crate::errors::{LongRunning, LongRunningWarn};
 use crate::fluent_generated as fluent;
 use crate::interpret::{
-    self, compile_time_machine, AllocId, ConstAllocation, FnArg, FnVal, Frame, ImmTy, InterpCx,
-    InterpResult, OpTy, PlaceTy, Pointer, Scalar,
+    self, compile_time_machine, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal,
+    Frame, ImmTy, InterpCx, InterpResult, OpTy, PlaceTy, Pointer, PointerArithmetic, Scalar,
 };
 use super::error::*;
@@ -49,7 +49,7 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
     pub(super) num_evaluated_steps: usize,
 
     /// The virtual call stack.
-    pub(super) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+    pub(super) stack: Vec<Frame<'mir, 'tcx>>,
 
     /// We need to make sure consts never point to anything mutable, even recursively. That is
     /// relied on for pattern matching on consts with references.
@@ -108,6 +108,14 @@ impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
     }
 
     #[inline(always)]
+    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+    {
+        FxIndexMap::contains_key(self, k)
+    }
+
+    #[inline(always)]
     fn insert(&mut self, k: K, v: V) -> Option<V> {
         FxIndexMap::insert(self, k, v)
     }
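The reason a by-`&self` duplicate of `contains_key` exists is spelled out in the trait documentation added later in this diff: some callers hold the map behind shared-interior-mutability wrappers. A self-contained sketch of the motivating situation, with a plain `HashMap` standing in for `FxIndexMap`:

```rust
use std::cell::RefCell;
use std::collections::HashMap;

// Through a RefCell, a `&mut self` method would need `borrow_mut()` and could
// panic if a read-only borrow is already live; a `&self` variant stays safe.
// This is what `contains_key_ref` enables for `AllocMap` implementors.
fn is_known(map: &RefCell<HashMap<u64, Vec<u8>>>, id: u64) -> bool {
    map.borrow().contains_key(&id)
}
```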
@@ -192,7 +200,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
                 &caller
                     .file
                     .name
-                    .for_scope(&self.tcx.sess, RemapPathScopeComponents::DIAGNOSTICS)
+                    .for_scope(self.tcx.sess, RemapPathScopeComponents::DIAGNOSTICS)
                     .to_string_lossy(),
             ),
             u32::try_from(caller.line).unwrap(),
@@ -383,7 +391,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         if ecx.tcx.is_ctfe_mir_available(def) {
             Ok(ecx.tcx.mir_for_ctfe(def))
         } else if ecx.tcx.def_kind(def) == DefKind::AssocConst {
-            let guar = ecx.tcx.sess.delay_span_bug(
+            let guar = ecx.tcx.sess.span_delayed_bug(
                 rustc_span::DUMMY_SP,
                 "This is likely a const item that is missing from its impl",
             );
@@ -485,7 +493,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         };
 
         let ptr = ecx.allocate_ptr(
-            Size::from_bytes(size as u64),
+            Size::from_bytes(size),
             align,
             interpret::MemoryKind::Machine(MemoryKind::Heap),
         )?;
@@ -614,7 +622,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                 let guard = ecx
                     .tcx
                     .sess
-                    .delay_span_bug(span, "The deny lint should have already errored");
+                    .span_delayed_bug(span, "The deny lint should have already errored");
                 throw_inval!(AlreadyReported(guard.into()));
             }
         } else if new_steps > start && new_steps.is_power_of_two() {
@@ -630,10 +638,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     }
 
     #[inline(always)]
-    fn expose_ptr(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        _ptr: Pointer<AllocId>,
-    ) -> InterpResult<'tcx> {
+    fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
         // This is only reachable with -Zunleash-the-miri-inside-of-you.
         throw_unsup_format!("exposing pointers is not possible at compile-time")
     }
@@ -666,7 +671,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     }
 
     fn before_access_global(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         machine: &Self,
         alloc_id: AllocId,
         alloc: ConstAllocation<'tcx>,
@@ -703,6 +708,48 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
             }
         }
     }
+
+    fn retag_ptr_value(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        val: &ImmTy<'tcx, CtfeProvenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> {
+        // If it's a frozen shared reference that's not already immutable, make it immutable.
+        // (Do nothing on `None` provenance, that cannot store immutability anyway.)
+        if let ty::Ref(_, ty, mutbl) = val.layout.ty.kind()
+            && *mutbl == Mutability::Not
+            && val.to_scalar_and_meta().0.to_pointer(ecx)?.provenance.is_some_and(|p| !p.immutable())
+            // That next check is expensive, that's why we have all the guards above.
+            && ty.is_freeze(*ecx.tcx, ecx.param_env)
+        {
+            let place = ecx.ref_to_mplace(val)?;
+            let new_place = place.map_provenance(|p| p.map(CtfeProvenance::as_immutable));
+            Ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
+        } else {
+            Ok(val.clone())
+        }
+    }
+
+    fn before_memory_write(
+        tcx: TyCtxtAt<'tcx>,
+        machine: &mut Self,
+        _alloc_extra: &mut Self::AllocExtra,
+        (_alloc_id, immutable): (AllocId, bool),
+        range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        if range.size == Size::ZERO {
+            // Nothing to check.
+            return Ok(());
+        }
+        // Reject writes through immutable pointers.
+        if immutable {
+            super::lint(tcx, machine, WRITES_THROUGH_IMMUTABLE_POINTER, |frames| {
+                crate::errors::WriteThroughImmutablePointer { frames }
+            });
+        }
+        // Everything else is fine.
+        Ok(())
+    }
 }
 
 // Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
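Taken together, `retag_ptr_value` and `before_memory_write` implement the new `const_eval_write_through_immutable_pointer` lint: retagging marks the provenance of frozen shared references as immutable, and any later write through that provenance is reported. A sketch of the kind of user code this catches (my illustration of the lint's intent, not an official test case; the const body itself may need nightly features such as `const_mut_refs` to be accepted at all):

```rust
// Shown only to illustrate the machine-level behavior.
const _: () = {
    let x = 0i32;
    let p = &x as *const i32 as *mut i32; // retag marks `&x`'s provenance immutable
    unsafe { *p = 1 }; // before_memory_write sees the immutable flag -> lint fires
};
```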
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index ed2d81727..854fe9a07 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -11,7 +11,7 @@ use rustc_middle::mir;
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{Abi, VariantIdx};
 
 #[instrument(skip(ecx), level = "debug")]
 fn branches<'tcx>(
@@ -101,11 +101,16 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
             // Not all raw pointers are allowed, as we cannot properly test them for
             // equality at compile-time (see `ptr_guaranteed_cmp`).
             // However we allow those that are just integers in disguise.
-            // (We could allow wide raw pointers where both sides are integers in the future,
-            // but for now we reject them.)
-            let Ok(val) = ecx.read_scalar(place) else {
+            // First, get the pointer. Remember it might be wide!
+            let Ok(val) = ecx.read_immediate(place) else {
                 return Err(ValTreeCreationError::Other);
             };
+            // We could allow wide raw pointers where both sides are integers in the future,
+            // but for now we reject them.
+            if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                return Err(ValTreeCreationError::Other);
+            }
+            let val = val.to_scalar();
             // We are in the CTFE machine, so ptr-to-int casts will fail.
             // This can only be `Ok` if `val` already is an integer.
             let Ok(val) = val.try_to_int() else {
@@ -382,7 +387,7 @@ fn valtree_into_mplace<'tcx>(
                 debug!(?place_inner);
                 valtree_into_mplace(ecx, &place_inner, *inner_valtree);
-                dump_place(&ecx, &place_inner);
+                dump_place(ecx, &place_inner);
             }
 
             debug!("dump of place_adjusted:");
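The effect of the new `Abi::ScalarPair` check, sketched with hypothetical constants: a thin raw pointer that is really an integer can still become a valtree, while a wide raw pointer is rejected even when both of its halves are integers.

```rust
// Thin pointer, integer in disguise: `try_to_int` succeeds, valtree construction proceeds.
const THIN: *const u8 = std::ptr::null();

// Wide pointer built purely from integers: its layout is still `Abi::ScalarPair`
// (data + length), so the check above bails out with `ValTreeCreationError::Other`.
const WIDE: *const [u8] = std::ptr::slice_from_raw_parts(std::ptr::null(), 0);
```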
.set_arg("alloc_size", alloc_size.bytes()) .set_arg("ptr_offset", ptr_offset) .set_arg("ptr_size", ptr_size.bytes()) - .set_arg("bad_pointer_message", bad_pointer_message(msg, handler)); + .set_arg("bad_pointer_message", bad_pointer_message(msg, dcx)); } DanglingIntPointer(ptr, msg) => { if ptr != 0 { builder.set_arg("pointer", format!("{ptr:#x}[noalloc]")); } - builder.set_arg("bad_pointer_message", bad_pointer_message(msg, handler)); + builder.set_arg("bad_pointer_message", bad_pointer_message(msg, dcx)); } AlignmentCheckFailed(Misalignment { required, has }, msg) => { builder.set_arg("required", required.bytes()); @@ -671,7 +674,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { } } - fn add_args<G: EmissionGuarantee>(self, handler: &Handler, err: &mut DiagnosticBuilder<'_, G>) { + fn add_args<G: EmissionGuarantee>(self, dcx: &DiagCtxt, err: &mut DiagnosticBuilder<'_, G>) { use crate::fluent_generated as fluent; use rustc_middle::mir::interpret::ValidationErrorKind::*; @@ -681,12 +684,12 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { } let message = if let Some(path) = self.path { - handler.eagerly_translate_to_string( + dcx.eagerly_translate_to_string( fluent::const_eval_validation_front_matter_invalid_value_with_path, [("path".into(), DiagnosticArgValue::Str(path.into()))].iter().map(|(a, b)| (a, b)), ) } else { - handler.eagerly_translate_to_string( + dcx.eagerly_translate_to_string( fluent::const_eval_validation_front_matter_invalid_value, [].into_iter(), ) @@ -697,7 +700,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { fn add_range_arg<G: EmissionGuarantee>( r: WrappingRange, max_hi: u128, - handler: &Handler, + dcx: &DiagCtxt, err: &mut DiagnosticBuilder<'_, G>, ) { let WrappingRange { start: lo, end: hi } = r; @@ -721,7 +724,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { ("hi".into(), DiagnosticArgValue::Str(hi.to_string().into())), ]; let args = args.iter().map(|(a, b)| (a, b)); - let message = handler.eagerly_translate_to_string(msg, args); + let message = dcx.eagerly_translate_to_string(msg, args); err.set_arg("in_range", message); } @@ -743,7 +746,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { ExpectedKind::EnumTag => fluent::const_eval_validation_expected_enum_tag, ExpectedKind::Str => fluent::const_eval_validation_expected_str, }; - let msg = handler.eagerly_translate_to_string(msg, [].into_iter()); + let msg = dcx.eagerly_translate_to_string(msg, [].into_iter()); err.set_arg("expected", msg); } InvalidEnumTag { value } @@ -754,11 +757,11 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { err.set_arg("value", value); } NullablePtrOutOfRange { range, max_value } | PtrOutOfRange { range, max_value } => { - add_range_arg(range, max_value, handler, err) + add_range_arg(range, max_value, dcx, err) } OutOfRange { range, max_value, value } => { err.set_arg("value", value); - add_range_arg(range, max_value, handler, err); + add_range_arg(range, max_value, dcx, err); } UnalignedPtr { required_bytes, found_bytes, .. 
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index f4cb12c8d..d296ff592 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -256,7 +256,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let addr = addr.to_target_usize(self)?;
 
         // Then turn address into pointer.
-        let ptr = M::ptr_from_addr_cast(&self, addr)?;
+        let ptr = M::ptr_from_addr_cast(self, addr)?;
         Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
     }
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index fd1736703..d9f583c1d 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -119,7 +119,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
             throw_ub!(UninhabitedEnumVariantRead(index))
         }
-        // For consisteny with `write_discriminant`, and to make sure that
+        // For consistency with `write_discriminant`, and to make sure that
         // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
         // for uninhabited variants.
         if op.layout().for_variant(self, index).abi.is_uninhabited() {
@@ -236,7 +236,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 variant
             }
         };
-        // For consisteny with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
+        // For consistency with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
         if op.layout().for_variant(self, index).abi.is_uninhabited() {
             throw_ub!(UninhabitedEnumVariantRead(index))
         }
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 07cab5e34..af8e5e7d1 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -7,7 +9,9 @@ use hir::CRATE_HIR_ID;
 use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
 use rustc_index::IndexVec;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
+use rustc_middle::mir::interpret::{
+    CtfeProvenance, ErrorHandled, InvalidMetaKind, ReportedErrorInfo,
+};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{
     self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
@@ -20,9 +22,9 @@ use rustc_span::Span;
 use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
 
 use super::{
-    AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
-    MemPlaceMeta, Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic,
-    Projectable, Provenance, Scalar, StackPopJump,
+    GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta,
+    Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable,
+    Provenance, Scalar, StackPopJump,
 };
 use crate::errors;
 use crate::util;
@@ -84,7 +86,7 @@ impl Drop for SpanGuard {
 }
 
 /// A stack frame.
-pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
+pub struct Frame<'mir, 'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
     ////////////////////////////////////////////////////////////////////////////////
     // Function and callsite information
     ////////////////////////////////////////////////////////////////////////////////
@@ -156,7 +158,7 @@ pub enum StackPopCleanup {
 
 /// State of a local variable including a memoized layout
 #[derive(Clone)]
-pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
+pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
     value: LocalValue<Prov>,
     /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
     /// Avoids computing the layout of locals that are never actually initialized.
@@ -173,8 +175,11 @@ impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
 }
 
 /// Current value of a local variable
+///
+/// This does not store the type of the local; the type is given by `body.local_decls` and can never
+/// change, so by not storing here we avoid having to maintain that as an invariant.
 #[derive(Copy, Clone, Debug)] // Miri debug-prints these
-pub(super) enum LocalValue<Prov: Provenance = AllocId> {
+pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
     /// This local is not currently alive, and cannot be used at all.
     Dead,
     /// A normal, live local.
@@ -279,14 +284,12 @@ impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
 impl<'tcx> fmt::Display for FrameInfo<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         ty::tls::with(|tcx| {
-            if tcx.def_key(self.instance.def_id()).disambiguated_data.data
-                == DefPathData::ClosureExpr
-            {
+            if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
                 write!(f, "inside closure")
             } else {
-                // Note: this triggers a `good_path_bug` state, which means that if we ever get here
-                // we must emit a diagnostic. We should never display a `FrameInfo` unless we
-                // actually want to emit a warning or error to the user.
+                // Note: this triggers a `good_path_delayed_bug` state, which means that if we ever
+                // get here we must emit a diagnostic. We should never display a `FrameInfo` unless
+                // we actually want to emit a warning or error to the user.
                 write!(f, "inside `{}`", self.instance)
             }
         })
@@ -296,12 +299,12 @@ impl<'tcx> FrameInfo<'tcx> {
     pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
         let span = self.span;
-        if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
+        if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
             errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
         } else {
             let instance = format!("{}", self.instance);
-            // Note: this triggers a `good_path_bug` state, which means that if we ever get here
-            // we must emit a diagnostic. We should never display a `FrameInfo` unless we
+            // Note: this triggers a `good_path_delayed_bug` state, which means that if we ever get
+            // here we must emit a diagnostic. We should never display a `FrameInfo` unless we
             // actually want to emit a warning or error to the user.
             errors::FrameNote { where_: "instance", span, instance, times: 0 }
         }
     }
@@ -456,7 +459,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         self.stack()
             .iter()
             .find_map(|frame| frame.body.source.def_id().as_local())
-            .map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id))
+            .map_or(CRATE_HIR_ID, |def_id| self.tcx.local_def_id_to_hir_id(def_id))
     }
 
     /// Turn the given error into a human-readable string. Expects the string to be printed, so if
@@ -470,12 +473,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         backtrace.print_backtrace();
         // FIXME(fee1-dead), HACK: we want to use the error as title therefore we can just extract the
        // label and arguments from the InterpError.
-        let handler = &self.tcx.sess.parse_sess.span_diagnostic;
+        let dcx = self.tcx.sess.dcx();
         #[allow(rustc::untranslatable_diagnostic)]
         let mut diag = self.tcx.sess.struct_allow("");
         let msg = e.diagnostic_message();
-        e.add_args(handler, &mut diag);
-        let s = handler.eagerly_translate_to_string(msg, diag.args());
+        e.add_args(dcx, &mut diag);
+        let s = dcx.eagerly_translate_to_string(msg, diag.args());
         diag.cancel();
         s
     }
@@ -683,14 +686,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert!(layout.fields.count() > 0);
                 trace!("DST layout: {:?}", layout);
 
-                let sized_size = layout.fields.offset(layout.fields.count() - 1);
+                let unsized_offset_unadjusted = layout.fields.offset(layout.fields.count() - 1);
                 let sized_align = layout.align.abi;
-                trace!(
-                    "DST {} statically sized prefix size: {:?} align: {:?}",
-                    layout.ty,
-                    sized_size,
-                    sized_align
-                );
 
                 // Recurse to get the size of the dynamically sized field (must be
                 // the last field).  Can't have foreign types here, how would we
@@ -704,36 +701,35 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     return Ok(None);
                 };
 
-                // FIXME (#26403, #27023): We should be adding padding
-                // to `sized_size` (to accommodate the `unsized_align`
-                // required of the unsized field that follows) before
-                // summing it with `sized_size`. (Note that since #26403
-                // is unfixed, we do not yet add the necessary padding
-                // here. But this is where the add would go.)
-
-                // Return the sum of sizes and max of aligns.
-                let size = sized_size + unsized_size; // `Size` addition
+                // # First compute the dynamic alignment
 
-                // Packed types ignore the alignment of their fields.
+                // Packed type alignment needs to be capped.
                 if let ty::Adt(def, _) = layout.ty.kind() {
-                    if def.repr().packed() {
-                        unsized_align = sized_align;
+                    if let Some(packed) = def.repr().pack {
+                        unsized_align = unsized_align.min(packed);
                     }
                 }
 
                 // Choose max of two known alignments (combined value must
                 // be aligned according to more restrictive of the two).
-                let align = sized_align.max(unsized_align);
+                let full_align = sized_align.max(unsized_align);
 
-                // Issue #27023: must add any necessary padding to `size`
-                // (to make it a multiple of `align`) before returning it.
-                let size = size.align_to(align);
+                // # Then compute the dynamic size
+
+                let unsized_offset_adjusted = unsized_offset_unadjusted.align_to(unsized_align);
+                let full_size = (unsized_offset_adjusted + unsized_size).align_to(full_align);
+
+                // Just for our sanitiy's sake, assert that this is equal to what codegen would compute.
+                assert_eq!(
+                    full_size,
+                    (unsized_offset_unadjusted + unsized_size).align_to(full_align)
+                );
 
                 // Check if this brought us over the size limit.
-                if size > self.max_size_of_val() {
+                if full_size > self.max_size_of_val() {
                     throw_ub!(InvalidMeta(InvalidMetaKind::TooBig));
                 }
-                Ok(Some((size, align)))
+                Ok(Some((full_size, full_align)))
             }
             ty::Dynamic(_, _, ty::Dyn) => {
                 let vtable = metadata.unwrap_meta().to_pointer(self)?;
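The rewritten computation addresses the long-standing padding issue tracked in #26403/#27023: the unsized tail's offset is first rounded up to the tail's own dynamic alignment before its size is added, and only then is the total rounded to the full alignment. A plain-integer model of the same arithmetic (bytes, power-of-two alignments assumed), with a worked example:

```rust
fn align_to(x: u64, align: u64) -> u64 {
    (x + align - 1) & !(align - 1)
}

// Model of the new `size_and_align_of` arithmetic for a DST with an unsized tail.
fn dst_size_align(
    unsized_offset_unadjusted: u64, // offset of the tail before padding
    sized_align: u64,               // alignment of the sized prefix
    unsized_size: u64,              // dynamic size of the tail
    unsized_align: u64,             // dynamic alignment of the tail
) -> (u64, u64) {
    let full_align = sized_align.max(unsized_align);
    let unsized_offset_adjusted = align_to(unsized_offset_unadjusted, unsized_align);
    let full_size = align_to(unsized_offset_adjusted + unsized_size, full_align);
    (full_size, full_align)
}

fn main() {
    // A 1-byte prefix followed by a tail with dynamic size 4 and align 4:
    // the tail starts at offset 4 (not 1), giving a total of (8, 4).
    assert_eq!(dst_size_align(1, 1, 4, 4), (8, 4));
}
```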
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 3d90e95c0..7931789e4 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -18,7 +18,7 @@ use super::validity::RefTracking;
 use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
 use rustc_errors::ErrorGuaranteed;
 use rustc_hir as hir;
-use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::mir::interpret::{CtfeProvenance, InterpResult};
 use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
 
 use rustc_ast::Mutability;
@@ -34,7 +34,7 @@ pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
         'mir,
         'tcx,
         MemoryKind = T,
-        Provenance = AllocId,
+        Provenance = CtfeProvenance,
         ExtraFnVal = !,
         FrameExtra = (),
         AllocExtra = (),
@@ -94,9 +94,9 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
         // If the pointer is dangling (neither in local nor global memory), we leave it
         // to validation to error -- it has the much better error messages, pointing out where
         // in the value the dangling reference lies.
-        // The `delay_span_bug` ensures that we don't forget such a check in validation.
+        // The `span_delayed_bug` ensures that we don't forget such a check in validation.
         if tcx.try_get_global_alloc(alloc_id).is_none() {
-            tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
+            tcx.sess.span_delayed_bug(ecx.tcx.span, "tried to intern dangling pointer");
         }
         // treat dangling pointers like other statics
         // just to stop trying to recurse into them
@@ -135,7 +135,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
         alloc.mutability = Mutability::Not;
     };
     // link the alloc id to the actual allocation
-    leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id));
+    leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, prov)| prov.alloc_id()));
     let alloc = tcx.mk_const_alloc(alloc);
     tcx.set_alloc_id_memory(alloc_id, alloc);
     None
@@ -178,20 +178,20 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
             tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
         {
             let ptr = mplace.meta().unwrap_meta().to_pointer(&tcx)?;
-            if let Some(alloc_id) = ptr.provenance {
+            if let Some(prov) = ptr.provenance {
                 // Explicitly choose const mode here, since vtables are immutable, even
                 // if the reference of the fat pointer is mutable.
-                self.intern_shallow(alloc_id, InternMode::Const, None);
+                self.intern_shallow(prov.alloc_id(), InternMode::Const, None);
             } else {
                 // Validation will error (with a better message) on an invalid vtable pointer.
                 // Let validation show the error message, but make sure it *does* error.
                 tcx.sess
-                    .delay_span_bug(tcx.span, "vtables pointers cannot be integer pointers");
+                    .span_delayed_bug(tcx.span, "vtables pointers cannot be integer pointers");
             }
         }
         // Check if we have encountered this pointer+layout combination before.
         // Only recurse for allocation-backed pointers.
-        if let Some(alloc_id) = mplace.ptr().provenance {
+        if let Some(prov) = mplace.ptr().provenance {
            // Compute the mode with which we intern this. Our goal here is to make as many
            // statics as we can immutable so they can be placed in read-only memory by LLVM.
            let ref_mode = match self.mode {
@@ -234,7 +234,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
                     InternMode::Const
                 }
             };
-            match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
+            match self.intern_shallow(prov.alloc_id(), ref_mode, Some(referenced_ty)) {
                 // No need to recurse, these are interned already and statics may have
                 // cycles, so we don't want to recurse there
                 Some(IsStaticOrFn) => {}
@@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
         // to avoid could be expensive: on the potentially larger types, arrays and slices,
         // rather than on all aggregates unconditionally.
         if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
-            let Some((size, _align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+            let Some((size, _align)) = self.ecx.size_and_align_of_mplace(mplace)? else {
                 // We do the walk if we can't determine the size of the mplace: we may be
                 // dealing with extern types here in the future.
                 return Ok(true);
             };
@@ -353,7 +353,7 @@ pub fn intern_const_alloc_recursive<
         leftover_allocations,
         // The outermost allocation must exist, because we allocated it with
        // `Memory::allocate`.
-        ret.ptr().provenance.unwrap(),
+        ret.ptr().provenance.unwrap().alloc_id(),
         base_intern_mode,
         Some(ret.layout.ty),
     );
@@ -375,7 +375,7 @@ pub fn intern_const_alloc_recursive<
         match res {
             Ok(()) => {}
             Err(error) => {
-                ecx.tcx.sess.delay_span_bug(
+                ecx.tcx.sess.span_delayed_bug(
                     ecx.tcx.span,
                     format!(
                         "error during interning should later cause validation failure: {}",
@@ -431,7 +431,8 @@ pub fn intern_const_alloc_recursive<
             }
             let alloc = tcx.mk_const_alloc(alloc);
             tcx.set_alloc_id_memory(alloc_id, alloc);
-            for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() {
+            for &(_, prov) in alloc.inner().provenance().ptrs().iter() {
+                let alloc_id = prov.alloc_id();
                 if leftover_allocations.insert(alloc_id) {
                     todo.push(alloc_id);
                 }
@@ -503,10 +504,11 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
         // `allocate` picks a fresh AllocId that we will associate with its data below.
         let dest = self.allocate(layout, MemoryKind::Stack)?;
         f(self, &dest.clone().into())?;
-        let mut alloc = self.memory.alloc_map.remove(&dest.ptr().provenance.unwrap()).unwrap().1;
+        let mut alloc =
+            self.memory.alloc_map.remove(&dest.ptr().provenance.unwrap().alloc_id()).unwrap().1;
         alloc.mutability = Mutability::Not;
         let alloc = self.tcx.mk_const_alloc(alloc);
-        let alloc_id = dest.ptr().provenance.unwrap(); // this was just allocated, it must have provenance
+        let alloc_id = dest.ptr().provenance.unwrap().alloc_id(); // this was just allocated, it must have provenance
         self.tcx.set_alloc_id_memory(alloc_id, alloc);
         Ok(alloc_id)
     }
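The last hunk above is the whole lifecycle of `intern_with_temp_alloc`: allocate a temporary, let the caller's closure initialize it, then freeze the allocation (`Mutability::Not`) and publish it globally. A hypothetical caller, sketched under the assumption that `u32_layout` is a layout the caller already computed (this is not a call site from the patch):

```rust
// Build a read-only global allocation holding a single u32 value.
let alloc_id = ecx.intern_with_temp_alloc(u32_layout, |ecx, dest| {
    ecx.write_scalar(Scalar::from_u32(42), dest)
})?;
```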
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index b23cafc19..c29f23b91 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -3,17 +3,22 @@
 //! and miri.
 
 use rustc_hir::def_id::DefId;
-use rustc_middle::mir::{
-    self,
-    interpret::{Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar},
-    BinOp, ConstValue, NonDivergingIntrinsic,
-};
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
 use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_middle::{
+    mir::{
+        self,
+        interpret::{
+            Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar,
+        },
+        BinOp, ConstValue, NonDivergingIntrinsic,
+    },
+    ty::layout::TyAndLayout,
+};
 use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::{Abi, Primitive, Size};
+use rustc_target::abi::Size;
 
 use super::{
     util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@@ -22,23 +27,6 @@
 
 use crate::fluent_generated as fluent;
 
-fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
-    let size = match kind {
-        Primitive::Int(integer, _) => integer.size(),
-        _ => bug!("invalid `{}` argument: {:?}", name, bits),
-    };
-    let extra = 128 - u128::from(size.bits());
-    let bits_out = match name {
-        sym::ctpop => u128::from(bits.count_ones()),
-        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
-        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
-        sym::bswap => (bits << extra).swap_bytes(),
-        sym::bitreverse => (bits << extra).reverse_bits(),
-        _ => bug!("not a numeric intrinsic: {}", name),
-    };
-    Scalar::from_uint(bits_out, size)
-}
-
 /// Directly returns an `Allocation` containing an absolute path representation of the given type.
 pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
     let path = crate::util::type_name(tcx, ty);
@@ -179,30 +167,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             | sym::bswap
             | sym::bitreverse => {
                 let ty = instance_args.type_at(0);
-                let layout_of = self.layout_of(ty)?;
+                let layout = self.layout_of(ty)?;
                 let val = self.read_scalar(&args[0])?;
-                let bits = val.to_bits(layout_of.size)?;
-                let kind = match layout_of.abi {
-                    Abi::Scalar(scalar) => scalar.primitive(),
-                    _ => span_bug!(
-                        self.cur_span(),
-                        "{} called on invalid type {:?}",
-                        intrinsic_name,
-                        ty
-                    ),
-                };
-                let (nonzero, actual_intrinsic_name) = match intrinsic_name {
-                    sym::cttz_nonzero => (true, sym::cttz),
-                    sym::ctlz_nonzero => (true, sym::ctlz),
-                    other => (false, other),
-                };
-                if nonzero && bits == 0 {
-                    throw_ub_custom!(
-                        fluent::const_eval_call_nonzero_intrinsic,
-                        name = intrinsic_name,
-                    );
-                }
-                let out_val = numeric_intrinsic(actual_intrinsic_name, bits, kind);
+                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout)?;
                 self.write_scalar(out_val, dest)?;
             }
             sym::saturating_add | sym::saturating_sub => {
@@ -493,6 +460,29 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
+    pub fn numeric_intrinsic(
+        &self,
+        name: Symbol,
+        val: Scalar<M::Provenance>,
+        layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
+        let bits = val.to_bits(layout.size)?;
+        let extra = 128 - u128::from(layout.size.bits());
+        let bits_out = match name {
+            sym::ctpop => u128::from(bits.count_ones()),
+            sym::ctlz_nonzero | sym::cttz_nonzero if bits == 0 => {
+                throw_ub_custom!(fluent::const_eval_call_nonzero_intrinsic, name = name,);
+            }
+            sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
+            sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
+            sym::bswap => (bits << extra).swap_bytes(),
+            sym::bitreverse => (bits << extra).reverse_bits(),
+            _ => bug!("not a numeric intrinsic: {}", name),
+        };
+        Ok(Scalar::from_uint(bits_out, layout.size))
+    }
+
     pub fn exact_div(
         &mut self,
         a: &ImmTy<'tcx, M::Provenance>,
@@ -505,7 +495,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Performs an exact division, resulting in undefined behavior where
         // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
         // First, check x % y != 0 (or if that computation overflows).
-        let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
+        let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
         assert!(!overflow); // All overflow is UB, so this should never return on overflow.
         if res.to_scalar().assert_bits(a.layout.size) != 0 {
             throw_ub_custom!(
@@ -515,7 +505,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             )
         }
         // `Rem` says this is all right, so we can let `Div` do its job.
-        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
+        self.binop_ignore_overflow(BinOp::Div, a, b, dest)
     }
 
     pub fn saturating_arith(
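The merged `numeric_intrinsic` computes all five bit-twiddling intrinsics on a `u128`-widened copy of the value: left-aligning the value by `extra` bits makes the 128-bit `leading_zeros`/`trailing_zeros` answers correct after subtracting `extra` back out, and handles `bswap`/`bitreverse` for free. A standalone model of the same arithmetic (plain integers, no `Scalar`; assumes `bits` has no bits set above `size_bits`), with worked 16-bit values:

```rust
fn numeric_intrinsic_model(name: &str, bits: u128, size_bits: u32) -> u128 {
    let extra = 128 - size_bits;
    match name {
        "ctpop" => u128::from(bits.count_ones()),
        "ctlz" => u128::from(bits.leading_zeros() - extra),
        // Shifting left adds `extra` trailing zeros, which we subtract again;
        // for bits == 0 this yields size_bits, matching the intrinsic.
        "cttz" => u128::from((bits << extra).trailing_zeros() - extra),
        "bswap" => (bits << extra).swap_bytes(),
        "bitreverse" => (bits << extra).reverse_bits(),
        _ => unreachable!(),
    }
}

fn main() {
    let v = 0x00F0u128; // a 16-bit value: 0b0000_0000_1111_0000
    assert_eq!(numeric_intrinsic_model("ctpop", v, 16), 4);
    assert_eq!(numeric_intrinsic_model("ctlz", v, 16), 8);
    assert_eq!(numeric_intrinsic_model("cttz", v, 16), 4);
    assert_eq!(numeric_intrinsic_model("bswap", v, 16), 0xF000);
    assert_eq!(numeric_intrinsic_model("bitreverse", v, 16), 0x0F00);
}
```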
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 61fe9151d..5e6996551 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -9,15 +9,17 @@ use std::hash::Hash;
 use rustc_apfloat::{Float, FloatConvert};
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_middle::mir;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty;
 use rustc_middle::ty::layout::TyAndLayout;
-use rustc_middle::ty::{self, TyCtxt};
 use rustc_span::def_id::DefId;
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
 
 use super::{
-    AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
-    InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
+    AllocBytes, AllocId, AllocKind, AllocRange, Allocation, ConstAllocation, CtfeProvenance, FnArg,
+    Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind, Misalignment, OpTy, PlaceTy,
+    Pointer, Provenance,
 };
 
 /// Data returned by Machine::stack_pop,
@@ -49,6 +51,14 @@ pub trait AllocMap<K: Hash + Eq, V> {
     where
         K: Borrow<Q>;
 
+    /// Callers should prefer [`AllocMap::contains_key`] when it is possible to call because it may
+    /// be more efficient. This function exists for callers that only have a shared reference
+    /// (which might make it slightly less efficient than `contains_key`, e.g. if
+    /// the data is stored inside a `RefCell`).
+    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
+    where
+        K: Borrow<Q>;
+
     /// Inserts a new entry into the map.
     fn insert(&mut self, k: K, v: V) -> Option<V>;
 
@@ -135,11 +145,18 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
-    /// Whether, when checking alignment, we should look at the actual address and thus support
-    /// custom alignment logic based on whatever the integer address happens to be.
-    ///
-    /// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
-    fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    /// Gives the machine a chance to detect more misalignment than the built-in checks would catch.
+    #[inline(always)]
+    fn alignment_check(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _alloc_id: AllocId,
+        _alloc_align: Align,
+        _alloc_kind: AllocKind,
+        _offset: Size,
+        _align: Align,
+    ) -> Option<Misalignment> {
+        None
+    }
 
     /// Whether to enforce the validity invariant for a specific layout.
     fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool;
@@ -277,7 +294,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// `def_id` is `Some` if this is the "lazy" allocation of a static.
     #[inline]
     fn before_access_global(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         _machine: &Self,
         _alloc_id: AllocId,
         _allocation: ConstAllocation<'tcx>,
@@ -372,7 +389,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// need to mutate.
     #[inline(always)]
     fn before_memory_read(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         _machine: &Self,
         _alloc_extra: &Self::AllocExtra,
         _prov: (AllocId, Self::ProvenanceExtra),
@@ -384,7 +401,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Hook for performing extra checks on a memory write access.
     #[inline(always)]
     fn before_memory_write(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         _machine: &mut Self,
         _alloc_extra: &mut Self::AllocExtra,
         _prov: (AllocId, Self::ProvenanceExtra),
@@ -396,7 +413,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Hook for performing extra operations on a memory deallocation.
     #[inline(always)]
     fn before_memory_deallocation(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         _machine: &mut Self,
         _alloc_extra: &mut Self::AllocExtra,
         _prov: (AllocId, Self::ProvenanceExtra),
@@ -498,8 +515,8 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
 /// (CTFE and ConstProp) use the same instance. Here, we share that code.
 pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
-    type Provenance = AllocId;
-    type ProvenanceExtra = ();
+    type Provenance = CtfeProvenance;
+    type ProvenanceExtra = bool; // the "immutable" flag
 
     type ExtraFnVal = !;
 
@@ -512,12 +529,6 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     type Bytes = Box<[u8]>;
 
     #[inline(always)]
-    fn use_addr_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-        // We do not support `use_addr`.
-        false
-    }
-
-    #[inline(always)]
     fn ignore_optional_overflow_checks(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
         false
     }
@@ -558,14 +569,14 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
         def_id: DefId,
     ) -> InterpResult<$tcx, Pointer> {
         // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
-        Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id), Size::ZERO))
+        Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id).into(), Size::ZERO))
     }
 
     #[inline(always)]
     fn adjust_alloc_base_pointer(
         _ecx: &InterpCx<$mir, $tcx, Self>,
-        ptr: Pointer<AllocId>,
-    ) -> InterpResult<$tcx, Pointer<AllocId>> {
+        ptr: Pointer<CtfeProvenance>,
+    ) -> InterpResult<$tcx, Pointer<CtfeProvenance>> {
         Ok(ptr)
     }
 
@@ -573,7 +584,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     fn ptr_from_addr_cast(
         _ecx: &InterpCx<$mir, $tcx, Self>,
         addr: u64,
-    ) -> InterpResult<$tcx, Pointer<Option<AllocId>>> {
+    ) -> InterpResult<$tcx, Pointer<Option<CtfeProvenance>>> {
         // Allow these casts, but make the pointer not dereferenceable.
        // (I.e., they behave like transmutation.)
        // This is correct because no pointers can ever be exposed in compile-time evaluation.
@@ -583,10 +594,10 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     #[inline(always)]
     fn ptr_get_alloc(
         _ecx: &InterpCx<$mir, $tcx, Self>,
-        ptr: Pointer<AllocId>,
+        ptr: Pointer<CtfeProvenance>,
     ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
         // We know `offset` is relative to the allocation, so we can use `into_parts`.
-        let (alloc_id, offset) = ptr.into_parts();
-        Some((alloc_id, offset, ()))
+        let (prov, offset) = ptr.into_parts();
+        Some((prov.alloc_id(), offset, prov.immutable()))
     }
 }
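`CtfeProvenance` is the pivot of this whole diff: CTFE pointers used to carry a bare `AllocId`, and now carry an allocation id plus an "immutable" flag, which `ptr_get_alloc` above surfaces as the machine's `ProvenanceExtra = bool`. A simplified, self-contained stand-in for the API surface the patch relies on (the real `rustc_middle` type packs the flag into spare bits of the id rather than using a separate field):

```rust
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocId(pub u64);

// Simplified model of rustc_middle::mir::interpret::CtfeProvenance.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct CtfeProvenance {
    alloc_id: AllocId,
    immutable: bool,
}

impl CtfeProvenance {
    pub fn alloc_id(self) -> AllocId { self.alloc_id }
    pub fn immutable(self) -> bool { self.immutable }
    pub fn as_immutable(self) -> Self { Self { immutable: true, ..self } }
}

impl From<AllocId> for CtfeProvenance {
    // Plain ids convert to mutable provenance by default.
    fn from(alloc_id: AllocId) -> Self { Self { alloc_id, immutable: false } }
}
```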
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 16905e93b..3fde6ae9b 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -22,8 +22,8 @@ use crate::fluent_generated as fluent;
 
 use super::{
     alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg,
-    CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Misalignment, Pointer,
-    PointerArithmetic, Provenance, Scalar,
+    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
+    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar,
 };
 
 #[derive(Debug, PartialEq, Copy, Clone)]
@@ -58,6 +58,7 @@ impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
 }
 
 /// The return value of `get_alloc_info` indicates the "kind" of the allocation.
+#[derive(Copy, Clone, PartialEq, Debug)]
 pub enum AllocKind {
     /// A regular live data allocation.
     LiveData,
@@ -158,9 +159,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     #[inline]
     pub fn global_base_pointer(
         &self,
-        ptr: Pointer<AllocId>,
+        ptr: Pointer<CtfeProvenance>,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
-        let alloc_id = ptr.provenance;
+        let alloc_id = ptr.provenance.alloc_id();
         // We need to handle `extern static`.
         match self.tcx.try_get_global_alloc(alloc_id) {
             Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
@@ -338,7 +339,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Let the machine take some extra action
         let size = alloc.size();
         M::before_memory_deallocation(
-            *self.tcx,
+            self.tcx,
             &mut self.machine,
             &mut alloc.extra,
             (alloc_id, prov),
@@ -473,8 +474,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match self.ptr_try_get_alloc_id(ptr) {
             Err(addr) => offset_misalignment(addr, align),
             Ok((alloc_id, offset, _prov)) => {
-                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
-                if M::use_addr_for_alignment_check(self) {
+                let (_size, alloc_align, kind) = self.get_alloc_info(alloc_id);
+                if let Some(misalign) =
+                    M::alignment_check(self, alloc_id, alloc_align, kind, offset, align)
+                {
+                    Some(misalign)
+                } else if M::Provenance::OFFSET_IS_ADDR {
                     // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
                     offset_misalignment(ptr.addr().bytes(), align)
                 } else {
@@ -501,6 +506,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     }
 }
 
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
+    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
+        // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
+        // is live, here all the IDs in the map are for dead allocations so we don't
+        // need to check for liveness.
+        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
+        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
+    }
+}
+
 /// Allocation accessors
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Helper function to obtain a global (tcx) allocation.
@@ -545,7 +561,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 (val, Some(def_id))
             }
         };
-        M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
+        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
         // We got tcx memory. Let the machine initialize its "extra" stuff.
         M::adjust_allocation(
             self,
@@ -610,7 +626,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         )?;
         if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
             let range = alloc_range(offset, size);
-            M::before_memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
+            M::before_memory_read(self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
             Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
         } else {
             // Even in this branch we have to be sure that we actually access the allocation, in
@@ -671,13 +687,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     {
         let parts = self.get_ptr_access(ptr, size)?;
         if let Some((alloc_id, offset, prov)) = parts {
-            let tcx = *self.tcx;
+            let tcx = self.tcx;
             // FIXME: can we somehow avoid looking up the allocation twice here?
             // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
             let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
             let range = alloc_range(offset, size);
             M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
-            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
+            Ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
         } else {
             Ok(None)
         }
@@ -692,6 +708,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok((&mut alloc.extra, machine))
     }
 
+    /// Check whether an allocation is live. This is faster than calling
+    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
+    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
+    pub fn is_alloc_live(&self, id: AllocId) -> bool {
+        self.tcx.try_get_global_alloc(id).is_some()
+            || self.memory.alloc_map.contains_key_ref(&id)
+            || self.memory.extra_fn_ptr_map.contains_key(&id)
+    }
+
     /// Obtain the size and alignment of an allocation, even if that allocation has
     /// been deallocated.
     pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
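Both `remove_unreachable_allocs` and `is_alloc_live` exist for Miri's provenance garbage collector; neither is called from within this crate. A hypothetical driver loop, where `collect_reachable_alloc_ids` is an assumption standing in for Miri's actual traversal of live values:

```rust
// Hypothetical Miri-side GC pass over an interpreter `ecx`:
let reachable: FxHashSet<AllocId> = collect_reachable_alloc_ids(&ecx);
// Drop dead_alloc_map entries that no live pointer can name anymore...
ecx.remove_unreachable_allocs(&reachable);
// ...while other GC bookkeeping can use the cheap liveness probe:
let _still_live = ecx.is_alloc_live(some_alloc_id);
```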
pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
@@ -1108,7 +1133,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let src_alloc = self.get_alloc_raw(src_alloc_id)?;
 let src_range = alloc_range(src_offset, size);
 M::before_memory_read(
- *tcx,
+ tcx,
 &self.machine,
 &src_alloc.extra,
 (src_alloc_id, src_prov),
@@ -1138,7 +1163,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
 let dest_range = alloc_range(dest_offset, size * num_copies);
 M::before_memory_write(
- *tcx,
+ tcx,
 extra,
 &mut dest_alloc.extra,
 (dest_alloc_id, dest_prov),
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 255dd1eba..b39b219b4 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -13,9 +13,9 @@ use rustc_middle::{mir, ty};
 use rustc_target::abi::{self, Abi, HasDataLayout, Size};

 use super::{
- alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
- MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy, Pointer, Projectable,
- Provenance, Scalar,
+ alloc_range, from_known_layout, mir_assign_valid_types, CtfeProvenance, Frame, InterpCx,
+ InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy, Pointer,
+ Projectable, Provenance, Scalar,
 };

 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -26,7 +26,7 @@ use super::{
 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
 /// defined on `Immediate`, and do not have to work with a `Place`.
 #[derive(Copy, Clone, Debug)]
-pub enum Immediate<Prov: Provenance = AllocId> {
+pub enum Immediate<Prov: Provenance = CtfeProvenance> {
 /// A single scalar value (must have *initialized* `Scalar` ABI).
 Scalar(Scalar<Prov>),
 /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
@@ -93,12 +93,23 @@ impl<Prov: Provenance> Immediate<Prov> {
 Immediate::Uninit => bug!("Got uninit where a scalar pair was expected"),
 }
 }
+
+ /// Returns the scalar from the first component and optionally the 2nd component as metadata.
+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn to_scalar_and_meta(self) -> (Scalar<Prov>, MemPlaceMeta<Prov>) {
+ match self {
+ Immediate::ScalarPair(val1, val2) => (val1, MemPlaceMeta::Meta(val2)),
+ Immediate::Scalar(val) => (val, MemPlaceMeta::None),
+ Immediate::Uninit => bug!("Got uninit where a scalar or scalar pair was expected"),
+ }
+ }
 }

 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
 // as input for binary and cast operations.
 #[derive(Clone)]
-pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
+pub struct ImmTy<'tcx, Prov: Provenance = CtfeProvenance> {
 imm: Immediate<Prov>,
 pub layout: TyAndLayout<'tcx>,
 }
@@ -334,13 +345,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
 /// or still in memory. The latter is an optimization, to delay reading that chunk of
 /// memory and to avoid having to store arbitrary-sized data here.
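 /// For instance (illustrative): evaluating the literal `1_u8` yields an
 /// `Operand::Immediate`, while reading from a `static` typically yields an
 /// `Operand::Indirect` pointing at its allocation.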
#[derive(Copy, Clone, Debug)] -pub(super) enum Operand<Prov: Provenance = AllocId> { +pub(super) enum Operand<Prov: Provenance = CtfeProvenance> { Immediate(Immediate<Prov>), Indirect(MemPlace<Prov>), } #[derive(Clone)] -pub struct OpTy<'tcx, Prov: Provenance = AllocId> { +pub struct OpTy<'tcx, Prov: Provenance = CtfeProvenance> { op: Operand<Prov>, // Keep this private; it helps enforce invariants. pub layout: TyAndLayout<'tcx>, } @@ -750,17 +761,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?; let imm = match val_val { mir::ConstValue::Indirect { alloc_id, offset } => { - // We rely on mutability being set correctly in that allocation to prevent writes - // where none should happen. - let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?; + // This is const data, no mutation allowed. + let ptr = self.global_base_pointer(Pointer::new( + CtfeProvenance::from(alloc_id).as_immutable(), + offset, + ))?; return Ok(self.ptr_to_mplace(ptr.into(), layout).into()); } mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(), mir::ConstValue::ZeroSized => Immediate::Uninit, mir::ConstValue::Slice { data, meta } => { - // We rely on mutability being set correctly in `data` to prevent writes - // where none should happen. - let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO); + // This is const data, no mutation allowed. + let alloc_id = self.tcx.reserve_and_set_memory_alloc(data); + let ptr = Pointer::new(CtfeProvenance::from(alloc_id).as_immutable(), Size::ZERO); Immediate::new_slice(self.global_base_pointer(ptr)?.into(), meta, self) } }; diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index a3ba9530f..eef154257 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -20,7 +20,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { right: &ImmTy<'tcx, M::Provenance>, dest: &PlaceTy<'tcx, M::Provenance>, ) -> InterpResult<'tcx> { - let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?; + let (val, overflowed) = self.overflowing_binary_op(op, left, right)?; debug_assert_eq!( Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]), dest.layout.ty, @@ -114,9 +114,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { use rustc_middle::mir::BinOp::*; // Performs appropriate non-deterministic adjustments of NaN results. - let adjust_nan = |f: F| -> F { - if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f } - }; + let adjust_nan = + |f: F| -> F { if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f } }; let val = match bin_op { Eq => ImmTy::from_bool(l == r, *self.tcx), @@ -157,41 +156,35 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Shift ops can have an RHS with a different numeric type. if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) { - let size = u128::from(left_layout.size.bits()); - // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its - // zero-extended form). This matches the codegen backend: - // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>. 
- // The overflow check is also ignorant to the sign: - // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>. - // This would behave rather strangely if we had integer types of size 256: a shift by - // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A - // shift by -1i16 though would be considered overflowing. If we had integers of size - // 512, then a shift by -1i8 would even produce a different result than one by -1i16: - // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our - // integers are maximally 128bits wide, so negative shifts *always* overflow and we have - // consistent results for the same value represented at different bit widths. - assert!(size <= 128); - let original_r = r; - let overflow = r >= size; - // The shift offset is implicitly masked to the type size, to make sure this operation - // is always defined. This is the one MIR operator that does *not* directly map to a - // single LLVM operation. See - // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158> - // for the corresponding truncation in our codegen backends. - let r = r % size; - let r = u32::try_from(r).unwrap(); // we masked so this will always fit + let size = left_layout.size.bits(); + // The shift offset is implicitly masked to the type size. (This is the one MIR operator + // that does *not* directly map to a single LLVM operation.) Compute how much we + // actually shift and whether there was an overflow due to shifting too much. + let (shift_amount, overflow) = if right_layout.abi.is_signed() { + let shift_amount = self.sign_extend(r, right_layout) as i128; + let overflow = shift_amount < 0 || shift_amount >= i128::from(size); + let masked_amount = (shift_amount as u128) % u128::from(size); + debug_assert_eq!(overflow, shift_amount != (masked_amount as i128)); + (masked_amount, overflow) + } else { + let shift_amount = r; + let masked_amount = shift_amount % u128::from(size); + (masked_amount, shift_amount != masked_amount) + }; + let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit + // Compute the shifted result. 
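+ // Worked example of the masking (for `u32`, so size = 32): a shift amount of
+ // 37 is masked to 37 % 32 = 5, with `overflow` set; a signed shift amount of
+ // -1i8 sign-extends to -1i128, so `overflow` is set and the masked amount is
+ // (-1i128 as u128) % 32 = 31.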
let result = if left_layout.abi.is_signed() { let l = self.sign_extend(l, left_layout) as i128; let result = match bin_op { - Shl | ShlUnchecked => l.checked_shl(r).unwrap(), - Shr | ShrUnchecked => l.checked_shr(r).unwrap(), + Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(), + Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(), _ => bug!(), }; result as u128 } else { match bin_op { - Shl | ShlUnchecked => l.checked_shl(r).unwrap(), - Shr | ShrUnchecked => l.checked_shr(r).unwrap(), + Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(), + Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(), _ => bug!(), } }; @@ -200,7 +193,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { throw_ub_custom!( fluent::const_eval_overflow_shift, - val = original_r, + val = if right_layout.abi.is_signed() { + (self.sign_extend(r, right_layout) as i128).to_string() + } else { + r.to_string() + }, name = intrinsic_name ); } diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 09ffdec7d..639b269ac 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -7,22 +7,21 @@ use std::assert_matches::assert_matches; use either::{Either, Left, Right}; use rustc_ast::Mutability; -use rustc_index::IndexSlice; use rustc_middle::mir; use rustc_middle::ty; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::Ty; -use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT}; +use rustc_target::abi::{Abi, Align, HasDataLayout, Size}; use super::{ - alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckAlignMsg, ImmTy, - Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy, + alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, + ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer, PointerArithmetic, Projectable, Provenance, Readable, Scalar, }; #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] /// Information required for the sound usage of a `MemPlace`. -pub enum MemPlaceMeta<Prov: Provenance = AllocId> { +pub enum MemPlaceMeta<Prov: Provenance = CtfeProvenance> { /// The unsized payload (e.g. length for slices or vtable pointer for trait objects). Meta(Scalar<Prov>), /// `Sized` types or unsized `extern type` @@ -50,7 +49,7 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> { } #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] -pub(super) struct MemPlace<Prov: Provenance = AllocId> { +pub(super) struct MemPlace<Prov: Provenance = CtfeProvenance> { /// The pointer can be a pure integer, with the `None` provenance. pub ptr: Pointer<Option<Prov>>, /// Metadata for unsized places. Interpretation is up to the type. @@ -101,7 +100,7 @@ impl<Prov: Provenance> MemPlace<Prov> { /// A MemPlace with its layout. Constructing it is only possible in this module. 
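 /// (Illustrative contrast: an `MPlaceTy` always refers to actual memory, while
 /// the `PlaceTy` type below may also denote a local that has no address.)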
#[derive(Clone, Hash, Eq, PartialEq)] -pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> { +pub struct MPlaceTy<'tcx, Prov: Provenance = CtfeProvenance> { mplace: MemPlace<Prov>, pub layout: TyAndLayout<'tcx>, } @@ -180,7 +179,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> { } #[derive(Copy, Clone, Debug)] -pub(super) enum Place<Prov: Provenance = AllocId> { +pub(super) enum Place<Prov: Provenance = CtfeProvenance> { /// A place referring to a value allocated in the `Memory` system. Ptr(MemPlace<Prov>), @@ -196,7 +195,7 @@ pub(super) enum Place<Prov: Provenance = AllocId> { } #[derive(Clone)] -pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> { +pub struct PlaceTy<'tcx, Prov: Provenance = CtfeProvenance> { place: Place<Prov>, // Keep this private; it helps enforce invariants. pub layout: TyAndLayout<'tcx>, } @@ -407,11 +406,7 @@ where let pointee_type = val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty; let layout = self.layout_of(pointee_type)?; - let (ptr, meta) = match **val { - Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None), - Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta)), - Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)), - }; + let (ptr, meta) = val.to_scalar_and_meta(); // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced; // we hence can't call `size_and_align_of` since that asserts more validity than we want. @@ -456,7 +451,7 @@ where ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>> { let (size, _align) = self - .size_and_align_of_mplace(&mplace)? + .size_and_align_of_mplace(mplace)? .unwrap_or((mplace.layout.size, mplace.layout.align.abi)); // We check alignment separately, and *after* checking everything else. // If an access is both OOB and misaligned, we want to see the bounds error. @@ -472,7 +467,7 @@ where ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>> { let (size, _align) = self - .size_and_align_of_mplace(&mplace)? + .size_and_align_of_mplace(mplace)? .unwrap_or((mplace.layout.size, mplace.layout.align.abi)); // We check alignment separately, and raise that error *after* checking everything else. // If an access is both OOB and misaligned, we want to see the bounds error. @@ -977,34 +972,6 @@ where Ok(self.ptr_with_meta_to_mplace(ptr.into(), MemPlaceMeta::Meta(meta), layout)) } - /// Writes the aggregate to the destination. 
- #[instrument(skip(self), level = "trace")]
- pub fn write_aggregate(
- &mut self,
- kind: &mir::AggregateKind<'tcx>,
- operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
- dest: &PlaceTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx> {
- self.write_uninit(dest)?;
- let (variant_index, variant_dest, active_field_index) = match *kind {
- mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
- let variant_dest = self.project_downcast(dest, variant_index)?;
- (variant_index, variant_dest, active_field_index)
- }
- _ => (FIRST_VARIANT, dest.clone(), None),
- };
- if active_field_index.is_some() {
- assert_eq!(operands.len(), 1);
- }
- for (field_index, operand) in operands.iter_enumerated() {
- let field_index = active_field_index.unwrap_or(field_index);
- let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
- let op = self.eval_operand(operand, Some(field_dest.layout))?;
- self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
- }
- self.write_discriminant(variant_index, dest)
- }
-
 pub fn raw_const_to_mplace(
 &self,
 raw: mir::ConstAlloc<'tcx>,
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 6694c43c9..9a034ba22 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -163,13 +163,26 @@ where
 // With custom DSTs, this *will* execute user-defined code, but the same
 // happens at run-time so that's okay.
 match self.size_and_align_of(&base_meta, &field_layout)? {
- Some((_, align)) => (base_meta, offset.align_to(align)),
- None => {
- // For unsized types with an extern type tail we perform no adjustments.
- // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
- assert!(matches!(base_meta, MemPlaceMeta::None));
+ Some((_, align)) => {
+ // For packed types, we need to cap alignment.
+ let align = if let ty::Adt(def, _) = base.layout().ty.kind()
+ && let Some(packed) = def.repr().pack
+ {
+ align.min(packed)
+ } else {
+ align
+ };
+ (base_meta, offset.align_to(align))
+ }
+ None if offset == Size::ZERO => {
+ // If the offset is 0, then rounding it up to alignment wouldn't change anything,
+ // so we can do this even for types where we cannot determine the alignment.
 (base_meta, offset)
 }
+ None => {
+ // We don't know the alignment of this field, so we cannot adjust.
+ throw_unsup_format!("`extern type` does not have a known offset")
+ }
 }
 } else {
 // base_meta could be present; we might be accessing a sized field of an unsized
@@ -195,6 +208,24 @@ where
 if layout.abi.is_uninhabited() {
 // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
 // us on dead code.
+ // In the future we might want to allow this to permit code like this:
+ // (this is a Rust/MIR pseudocode mix)
+ // ```
+ // enum Option2 {
+ // Some(i32, !),
+ // None,
+ // }
+ //
+ // fn panic() -> ! { panic!() }
+ //
+ // let x: Option2;
+ // x.Some.0 = 42;
+ // x.Some.1 = panic();
+ // SetDiscriminant(x, Some);
+ // ```
+ // However, for now we don't generate such MIR, and this check here *has* found real
+ // bugs (see https://github.com/rust-lang/rust/issues/115145), so we will keep rejecting
+ // it.
 throw_inval!(ConstPropNonsense)
 }
 // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
@@ -256,13 +287,13 @@ where
 }

 /// Iterates over all fields of an array.
Much more efficient than doing the - /// same by repeatedly calling `operand_index`. + /// same by repeatedly calling `project_index`. pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>( &self, base: &'a P, ) -> InterpResult<'tcx, ArrayIterator<'tcx, 'a, M::Provenance, P>> { let abi::FieldsShape::Array { stride, .. } = base.layout().fields else { - span_bug!(self.cur_span(), "operand_array_fields: expected an array layout"); + span_bug!(self.cur_span(), "project_array_fields: expected an array layout"); }; let len = base.len(self)?; let field_layout = base.layout().field(self, 0); diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index b6993d939..d48329b6c 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -4,11 +4,12 @@ use either::Either; +use rustc_index::IndexSlice; use rustc_middle::mir; -use rustc_middle::mir::interpret::{InterpResult, Scalar}; use rustc_middle::ty::layout::LayoutOf; +use rustc_target::abi::{FieldIdx, FIRST_VARIANT}; -use super::{ImmTy, InterpCx, Machine, Projectable}; +use super::{ImmTy, InterpCx, InterpResult, Machine, PlaceTy, Projectable, Scalar}; use crate::util; impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { @@ -187,34 +188,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } Repeat(ref operand, _) => { - let src = self.eval_operand(operand, None)?; - assert!(src.layout.is_sized()); - let dest = self.force_allocation(&dest)?; - let length = dest.len(self)?; - - if length == 0 { - // Nothing to copy... but let's still make sure that `dest` as a place is valid. - self.get_place_alloc_mut(&dest)?; - } else { - // Write the src to the first element. - let first = self.project_index(&dest, 0)?; - self.copy_op(&src, &first, /*allow_transmute*/ false)?; - - // This is performance-sensitive code for big static/const arrays! So we - // avoid writing each operand individually and instead just make many copies - // of the first element. - let elem_size = first.layout.size; - let first_ptr = first.ptr(); - let rest_ptr = first_ptr.offset(elem_size, self)?; - // No alignment requirement since `copy_op` above already checked it. - self.mem_copy_repeatedly( - first_ptr, - rest_ptr, - elem_size, - length - 1, - /*nonoverlapping:*/ true, - )?; - } + self.write_repeat(operand, &dest)?; } Len(place) => { @@ -307,6 +281,73 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ok(()) } + /// Writes the aggregate to the destination. 
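+ /// For example (illustrative): building `Some(42_i32)` writes uninit over the
+ /// whole destination first (so padding stays uninit), then writes `42` to
+ /// field 0 of the `Some` variant, and finally sets the discriminant.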
+ #[instrument(skip(self), level = "trace")]
+ fn write_aggregate(
+ &mut self,
+ kind: &mir::AggregateKind<'tcx>,
+ operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ self.write_uninit(dest)?; // make sure all the padding ends up as uninit
+ let (variant_index, variant_dest, active_field_index) = match *kind {
+ mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+ let variant_dest = self.project_downcast(dest, variant_index)?;
+ (variant_index, variant_dest, active_field_index)
+ }
+ _ => (FIRST_VARIANT, dest.clone(), None),
+ };
+ if active_field_index.is_some() {
+ assert_eq!(operands.len(), 1);
+ }
+ for (field_index, operand) in operands.iter_enumerated() {
+ let field_index = active_field_index.unwrap_or(field_index);
+ let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
+ let op = self.eval_operand(operand, Some(field_dest.layout))?;
+ self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
+ }
+ self.write_discriminant(variant_index, dest)
+ }
+
+ /// Repeats `operand` into the destination. `dest` must have array type, and that type
+ /// determines how often `operand` is repeated.
+ fn write_repeat(
+ &mut self,
+ operand: &mir::Operand<'tcx>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let src = self.eval_operand(operand, None)?;
+ assert!(src.layout.is_sized());
+ let dest = self.force_allocation(&dest)?;
+ let length = dest.len(self)?;
+
+ if length == 0 {
+ // Nothing to copy... but let's still make sure that `dest` as a place is valid.
+ self.get_place_alloc_mut(&dest)?;
+ } else {
+ // Write the src to the first element.
+ let first = self.project_index(&dest, 0)?;
+ self.copy_op(&src, &first, /*allow_transmute*/ false)?;
+
+ // This is performance-sensitive code for big static/const arrays! So we
+ // avoid writing each operand individually and instead just make many copies
+ // of the first element.
+ let elem_size = first.layout.size;
+ let first_ptr = first.ptr();
+ let rest_ptr = first_ptr.offset(elem_size, self)?;
+ // No alignment requirement since `copy_op` above already checked it.
+ self.mem_copy_repeatedly(
+ first_ptr,
+ rest_ptr,
+ elem_size,
+ length - 1,
+ /*nonoverlapping:*/ true,
+ )?;
+ }
+
+ Ok(())
+ }
+
 /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
 fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
 info!("{:?}", terminator.kind);
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index b54c66814..2358caffc 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -18,14 +18,14 @@ use rustc_target::abi::{
 use rustc_target::spec::abi::Abi;

 use super::{
- AllocId, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable,
- Provenance, Scalar, StackPopCleanup,
+ CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
+ Projectable, Provenance, Scalar, StackPopCleanup,
 };
 use crate::fluent_generated as fluent;

 /// An argument passed to a function.
 #[derive(Clone, Debug)]
-pub enum FnArg<'tcx, Prov: Provenance = AllocId> {
+pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
 /// Pass a copy of the given operand.
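 /// (The conservative option; `InPlace` below can avoid the copy.)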
Copy(OpTy<'tcx, Prov>),
 /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
@@ -51,7 +51,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
 match arg {
 FnArg::Copy(op) => Ok(op.clone()),
- FnArg::InPlace(place) => self.place_to_op(&place),
+ FnArg::InPlace(place) => self.place_to_op(place),
 }
 }

@@ -384,10 +384,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }

 // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
+ // `char` counts as `u32`.
 let int_ty = |ty: Ty<'tcx>| {
 Some(match ty.kind() {
 ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
 ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
+ ty::Char => (Integer::I32, /* signed */ false),
 _ => return None,
 })
 };
@@ -410,7 +412,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
 if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
 // Ensure that our checks imply actual ABI compatibility for this concrete call.
- assert!(caller_abi.eq_abi(&callee_abi));
+ assert!(caller_abi.eq_abi(callee_abi));
 return Ok(true);
 } else {
 trace!(
@@ -464,7 +466,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 // We work with a copy of the argument for now; if this is in-place argument passing, we
 // will later protect the source it comes from. This means the callee cannot observe if we
 // did in-place or by-copy argument passing, except for pointer equality tests.
- let caller_arg_copy = self.copy_fn_arg(&caller_arg)?;
+ let caller_arg_copy = self.copy_fn_arg(caller_arg)?;
 if !already_live {
 let local = callee_arg.as_local().unwrap();
 let meta = caller_arg_copy.meta();
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index d21fef58f..07500f744 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -227,7 +227,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 // Sometimes the index is beyond the number of upvars (seen
 // for a coroutine).
 let var_hir_id = captured_place.get_root_variable();
- let node = self.ecx.tcx.hir().get(var_hir_id);
+ let node = self.ecx.tcx.hir_node(var_hir_id);
 if let hir::Node::Pat(pat) = node {
 if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
 name = Some(ident.name);
@@ -896,7 +896,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };

 // Run it.
- match visitor.visit_value(&op) {
+ match visitor.visit_value(op) {
 Ok(()) => Ok(()),
 // Pass through validation failures and "invalid program" issues.
 Err(err)
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index fc21ad1f1..de0590a4b 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -21,7 +21,7 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
 /// `read_discriminant` can be hooked for better error messages.
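 /// Sketch of an override (hypothetical; `with_path_context` is an illustrative
 /// helper, not a real API):
 /// ```ignore
 /// fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
 ///     self.ecx()
 ///         .read_discriminant(&v.to_op(self.ecx())?)
 ///         .map_err(|e| with_path_context(e, &self.path))
 /// }
 /// ```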
#[inline(always)] fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> { - Ok(self.ecx().read_discriminant(&v.to_op(self.ecx())?)?) + self.ecx().read_discriminant(&v.to_op(self.ecx())?) } /// This function provides the chance to reorder the order in which fields are visited for @@ -97,14 +97,14 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized { let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0; trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout); // recurse with the inner type - return self.visit_field(&v, 0, &inner_mplace.into()); + return self.visit_field(v, 0, &inner_mplace.into()); } ty::Dynamic(_, _, ty::DynStar) => { // DynStar types. Very different from a dyn type (but strangely part of the // same variant in `TyKind`): These are pairs where the 2nd component is the // vtable, and the first component is the data (which must be ptr-sized). let data = self.ecx().unpack_dyn_star(v)?.0; - return self.visit_field(&v, 0, &data); + return self.visit_field(v, 0, &data); } // Slices do not need special handling here: they have `Array` field // placement with length 0, so we enter the `Array` case below which diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index 1e21c4940..d6c36ccc5 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -4,9 +4,9 @@ Rust MIR: a lowered representation of Rust. */ -#![cfg_attr(not(bootstrap), allow(internal_features))] -#![cfg_attr(not(bootstrap), feature(rustdoc_internals))] -#![cfg_attr(not(bootstrap), doc(rust_logo))] +#![allow(internal_features)] +#![feature(rustdoc_internals)] +#![doc(rust_logo)] #![deny(rustc::untranslatable_diagnostic)] #![feature(assert_matches)] #![feature(box_patterns)] @@ -39,11 +39,9 @@ pub mod util; pub use errors::ReportErrorExt; -use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage}; -use rustc_fluent_macro::fluent_messages; use rustc_middle::{ty, util::Providers}; -fluent_messages! { "../messages.ftl" } +rustc_fluent_macro::fluent_messages! 
{ "../messages.ftl" } pub fn provide(providers: &mut Providers) { const_eval::provide(providers); diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs index 76116e339..949606ed6 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs @@ -12,7 +12,7 @@ use rustc_middle::traits::BuiltinImplSource; use rustc_middle::ty::GenericArgs; use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, InstanceDef, Ty, TyCtxt}; use rustc_middle::ty::{TraitRef, TypeVisitableExt}; -use rustc_mir_dataflow::{self, Analysis}; +use rustc_mir_dataflow::Analysis; use rustc_span::{sym, Span, Symbol}; use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _; use rustc_trait_selection::traits::{self, ObligationCauseCode, ObligationCtxt, SelectionContext}; @@ -22,7 +22,7 @@ use std::mem; use std::ops::{ControlFlow, Deref}; use super::ops::{self, NonConstOp, Status}; -use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop}; +use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop}; use super::resolver::FlowSensitiveAnalysis; use super::{ConstCx, Qualif}; use crate::const_eval::is_unstable_const_fn; @@ -35,7 +35,7 @@ type QualifResults<'mir, 'tcx, Q> = pub struct Qualifs<'mir, 'tcx> { has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>, needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>, - // needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>, + needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>, } impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { @@ -60,9 +60,9 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { let ConstCx { tcx, body, .. } = *ccx; FlowSensitiveAnalysis::new(NeedsDrop, ccx) - .into_engine(tcx, &body) + .into_engine(tcx, body) .iterate_to_fixpoint() - .into_results_cursor(&body) + .into_results_cursor(body) }); needs_drop.seek_before_primary_effect(location); @@ -78,27 +78,25 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { local: Local, location: Location, ) -> bool { - // FIXME(effects) replace with `NeedsNonconstDrop` after const traits work again - /* let ty = ccx.body.local_decls[local].ty; - if !NeedsDrop::in_any_value_of_ty(ccx, ty) { + // Peeking into opaque types causes cycles if the current function declares said opaque + // type. Thus we avoid short circuiting on the type and instead run the more expensive + // analysis that looks at the actual usage within this function + if !ty.has_opaque_types() && !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) { return false; } let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| { let ConstCx { tcx, body, .. } = *ccx; - FlowSensitiveAnalysis::new(NeedsDrop, ccx) - .into_engine(tcx, &body) + FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx) + .into_engine(tcx, body) .iterate_to_fixpoint() - .into_results_cursor(&body) + .into_results_cursor(body) }); needs_non_const_drop.seek_before_primary_effect(location); needs_non_const_drop.get().contains(local) - */ - - self.needs_drop(ccx, local, location) } /// Returns `true` if `local` is `HasMutInterior` at the given `Location`. @@ -122,9 +120,9 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { let ConstCx { tcx, body, .. 
} = *ccx; FlowSensitiveAnalysis::new(HasMutInterior, ccx) - .into_engine(tcx, &body) + .into_engine(tcx, body) .iterate_to_fixpoint() - .into_results_cursor(&body) + .into_results_cursor(body) }); has_mut_interior.seek_before_primary_effect(location); @@ -170,9 +168,9 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { hir::ConstContext::Const { .. } | hir::ConstContext::Static(_) => { let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx) - .into_engine(ccx.tcx, &ccx.body) + .into_engine(ccx.tcx, ccx.body) .iterate_to_fixpoint() - .into_results_cursor(&ccx.body); + .into_results_cursor(ccx.body); cursor.seek_after_primary_effect(return_loc); cursor.get().contains(RETURN_PLACE) @@ -225,7 +223,7 @@ impl<'mir, 'tcx> Deref for Checker<'mir, 'tcx> { type Target = ConstCx<'mir, 'tcx>; fn deref(&self) -> &Self::Target { - &self.ccx + self.ccx } } @@ -248,7 +246,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's // no need to emit duplicate errors here. if self.ccx.is_async() || body.coroutine.is_some() { - tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`"); + tcx.sess.span_delayed_bug(body.span, "`async` functions cannot be `const fn`"); return; } @@ -272,15 +270,15 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { } if !tcx.has_attr(def_id, sym::rustc_do_not_const_check) { - self.visit_body(&body); + self.visit_body(body); } // If we got through const-checking without emitting any "primary" errors, emit any // "secondary" errors if they occurred. let secondary_errors = mem::take(&mut self.secondary_errors); if self.error_emitted.is_none() { - for mut error in secondary_errors { - self.tcx.sess.diagnostic().emit_diagnostic(&mut error); + for error in secondary_errors { + self.tcx.sess.dcx().emit_diagnostic(error); } } else { assert!(self.tcx.sess.has_errors().is_some()); @@ -357,7 +355,9 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { fn check_static(&mut self, def_id: DefId, span: Span) { if self.tcx.is_thread_local_static(def_id) { - self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`"); + self.tcx + .sess + .span_delayed_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`"); } self.check_op_spanned(ops::StaticAccess, span) } @@ -503,7 +503,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Fake, place) | Rvalue::AddressOf(Mutability::Not, place) => { let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>( - &self.ccx, + self.ccx, &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location), place.as_ref(), ); @@ -1011,9 +1011,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { let mut err_span = self.span; let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty; - // FIXME(effects) replace with `NeedsNonConstDrop` once we fix const traits let ty_needs_non_const_drop = - qualifs::NeedsDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place); + qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place); debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop); diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs index e51082e1e..4f7e165c5 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs @@ -82,8 +82,8 @@ pub fn rustc_allow_const_fn_unstable( def_id: LocalDefId, feature_gate: Symbol, ) 
-> bool { - let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(def_id)); - attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate) + let attrs = tcx.hir().attrs(tcx.local_def_id_to_hir_id(def_id)); + attr::rustc_allow_const_fn_unstable(tcx.sess, attrs).any(|name| name == feature_gate) } /// Returns `true` if the given `const fn` is "const-stable". @@ -112,7 +112,7 @@ pub fn is_const_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool { None if is_parent_const_stable_trait(tcx, def_id) => { // Remove this when `#![feature(const_trait_impl)]` is stabilized, // returning `true` unconditionally. - tcx.sess.delay_span_bug( + tcx.sess.span_delayed_bug( tcx.def_span(def_id), "trait implementations cannot be const stable yet", ); diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs index 40183bacc..2de6362b9 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs @@ -119,8 +119,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { match self_ty.kind() { Param(param_ty) => { debug!(?param_ty); - let caller_hir_id = tcx.hir().local_def_id_to_hir_id(caller); - if let Some(generics) = tcx.hir().get(caller_hir_id).generics() { + if let Some(generics) = tcx.hir_node_by_def_id(caller).generics() { let constraint = with_no_trimmed_paths!(format!( "~const {}", trait_ref.print_only_trait_path() @@ -129,7 +128,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { tcx, generics, err, - ¶m_ty.name.as_str(), + param_ty.name.as_str(), &constraint, None, None, diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs index aff256b3e..5cd13783c 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs @@ -5,7 +5,7 @@ use rustc_span::{symbol::sym, Span}; use super::check::Qualifs; use super::ops::{self, NonConstOp}; -use super::qualifs::{NeedsDrop, Qualif}; +use super::qualifs::{NeedsNonConstDrop, Qualif}; use super::ConstCx; /// Returns `true` if we should use the more precise live drop checker that runs after drop @@ -54,7 +54,7 @@ impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> { type Target = ConstCx<'mir, 'tcx>; fn deref(&self) -> &Self::Target { - &self.ccx + self.ccx } } @@ -83,8 +83,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> { mir::TerminatorKind::Drop { place: dropped_place, .. } => { let dropped_ty = dropped_place.ty(self.body, self.tcx).ty; - // FIXME(effects) use `NeedsNonConstDrop` - if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) { + if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) { // Instead of throwing a bug, we just return here. This is because we have to // run custom `const Drop` impls. 
return; diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs index de3186a53..7e1cbfe66 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs @@ -23,8 +23,7 @@ pub fn in_any_value_of_ty<'tcx>( ConstQualifs { has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty), needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty), - // FIXME(effects) - needs_non_const_drop: NeedsDrop::in_any_value_of_ty(cx, ty), + needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty), custom_eq: CustomEq::in_any_value_of_ty(cx, ty), tainted_by_errors, } @@ -155,12 +154,27 @@ impl Qualif for NeedsNonConstDrop { return false; } - // FIXME(effects) constness + // FIXME(effects): If `destruct` is not a `const_trait`, + // or effects are disabled in this crate, then give up. + let destruct_def_id = cx.tcx.require_lang_item(LangItem::Destruct, Some(cx.body.span)); + if cx.tcx.generics_of(destruct_def_id).host_effect_index.is_none() + || !cx.tcx.features().effects + { + return NeedsDrop::in_any_value_of_ty(cx, ty); + } + let obligation = Obligation::new( cx.tcx, ObligationCause::dummy_with_span(cx.body.span), cx.param_env, - ty::TraitRef::from_lang_item(cx.tcx, LangItem::Destruct, cx.body.span, [ty]), + ty::TraitRef::new( + cx.tcx, + destruct_def_id, + [ + ty::GenericArg::from(ty), + ty::GenericArg::from(cx.tcx.expected_host_effect_param_for_body(cx.def_id())), + ], + ), ); let infcx = cx.tcx.infer_ctxt().build(); diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs index 32af537e2..8b2ea2dc2 100644 --- a/compiler/rustc_const_eval/src/transform/promote_consts.rs +++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs @@ -188,7 +188,7 @@ impl<'a, 'tcx> std::ops::Deref for Validator<'a, 'tcx> { type Target = ConstCx<'a, 'tcx>; fn deref(&self) -> &Self::Target { - &self.ccx + self.ccx } } @@ -229,7 +229,7 @@ impl<'tcx> Validator<'_, 'tcx> { let statement = &self.body[loc.block].statements[loc.statement_index]; match &statement.kind { StatementKind::Assign(box (_, rhs)) => qualifs::in_rvalue::<Q, _>( - &self.ccx, + self.ccx, &mut |l| self.qualif_local::<Q>(l), rhs, ), @@ -246,7 +246,7 @@ impl<'tcx> Validator<'_, 'tcx> { match &terminator.kind { TerminatorKind::Call { .. } => { let return_ty = self.body.local_decls[local].ty; - Q::in_any_value_of_ty(&self.ccx, return_ty) + Q::in_any_value_of_ty(self.ccx, return_ty) } kind => { span_bug!(terminator.source_info.span, "{:?} not promotable", kind); @@ -864,6 +864,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { }; // Use the underlying local for this (necessarily interior) borrow. + debug_assert!(region.is_erased()); let ty = local_decls[place.local].ty; let span = statement.source_info.span; @@ -873,8 +874,6 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() }, ); - *region = tcx.lifetimes.re_erased; - let mut projection = vec![PlaceElem::Deref]; projection.extend(place.projection); place.projection = tcx.mk_place_elems(&projection); @@ -1024,36 +1023,3 @@ pub fn promote_candidates<'tcx>( promotions } - -/// This function returns `true` if the function being called in the array -/// repeat expression is a `const` function. 
-pub fn is_const_fn_in_array_repeat_expression<'tcx>(
- ccx: &ConstCx<'_, 'tcx>,
- place: &Place<'tcx>,
- body: &Body<'tcx>,
-) -> bool {
- match place.as_local() {
- // rule out cases such as: `let my_var = some_fn(); [my_var; N]`
- Some(local) if body.local_decls[local].is_user_variable() => return false,
- None => return false,
- _ => {}
- }
-
- for block in body.basic_blocks.iter() {
- if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
- &block.terminator
- {
- if let Operand::Constant(box ConstOperand { const_, .. }) = func {
- if let ty::FnDef(def_id, _) = *const_.ty().kind() {
- if destination == place {
- if ccx.tcx.is_const_fn(def_id) {
- return true;
- }
- }
- }
- }
- }
- }
-
- false
-}
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 5922922d4..cca5b90ab 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -128,9 +128,9 @@ impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
 #[track_caller]
 fn fail(&self, location: Location, msg: impl AsRef<str>) {
 let span = self.body.source_info(location).span;
- // We use `delay_span_bug` as we might see broken MIR when other errors have already
+ // We use `span_delayed_bug` as we might see broken MIR when other errors have already
 // occurred.
- self.tcx.sess.diagnostic().delay_span_bug(
+ self.tcx.sess.dcx().span_delayed_bug(
 span,
 format!(
 "broken MIR in {:?} ({}) at {:?}:\n{}",
@@ -285,6 +285,12 @@ impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
 UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (),
 }
 }
+
+ fn is_critical_call_edge(&self, target: Option<BasicBlock>, unwind: UnwindAction) -> bool {
+ let Some(target) = target else { return false };
+ matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_))
+ && self.body.basic_blocks.predecessors()[target].len() > 1
+ }
 }

 impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
@@ -425,6 +431,22 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
 }
 self.check_unwind_edge(location, *unwind);

+ // The code generation assumes that there are no critical call edges. The assumption
+ // is used to simplify inserting code that should be executed along the return edge
+ // from the call. FIXME(tmiasko): Since this is a strictly code generation concern,
+ // the code generation should be responsible for handling it.
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized)
+ && self.is_critical_call_edge(*target, *unwind)
+ {
+ self.fail(
+ location,
+ format!(
+ "encountered critical edge in `Call` terminator {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+
 // The call destination place and Operand::Move place used as an argument might be
 // passed by a reference to the callee. Consequently they must be non-overlapping
 // and cannot be packed. Currently this simply checks for duplicate places.
@@ -549,7 +571,7 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
 fn visit_source_scope(&mut self, scope: SourceScope) {
 if self.body.source_scopes.get(scope).is_none() {
- self.tcx.sess.diagnostic().delay_span_bug(
+ self.tcx.sess.dcx().span_delayed_bug(
 self.body.span,
 format!(
 "broken MIR in {:?} ({}):\ninvalid source scope {:?}",