Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/analyze.rs   |   6
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/block.rs     | 221
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/constant.rs  |  36
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/debuginfo.rs |   8
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/intrinsic.rs |  16
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/mod.rs       |  14
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/operand.rs   |   9
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/place.rs     |  19
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/rvalue.rs    |  18
-rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/statement.rs |  14
10 files changed, 216 insertions, 145 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 24da48ead..c7617d2e4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -266,7 +266,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
         result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
         mir: &mir::Body<'tcx>,
     ) {
-        for (bb, data) in mir.basic_blocks().iter_enumerated() {
+        for (bb, data) in mir.basic_blocks.iter_enumerated() {
            match data.terminator().kind {
                TerminatorKind::Goto { .. }
                | TerminatorKind::Resume
@@ -296,7 +296,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
     }

     fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
-        let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+        let mut funclet_succs = IndexVec::from_elem(None, &mir.basic_blocks);

         let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
             ref mut s @ None => {
@@ -359,7 +359,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
         }
     }

-    let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+    let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, &mir.basic_blocks);

     discover_masters(&mut result, mir);
     propagate(&mut result, mir);
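The `basic_blocks()` accessor becomes direct access to the now-public `basic_blocks` field throughout this diff, with `IndexVec::from_elem` taking a reference to the field. A minimal sketch of a call site after the change (the helper below is hypothetical, not part of this patch):

    // Hypothetical helper, assuming `Body::basic_blocks` is a public field
    // that dereferences to `IndexVec<BasicBlock, BasicBlockData<'_>>`.
    fn count_cleanup_blocks(body: &rustc_middle::mir::Body<'_>) -> usize {
        body.basic_blocks.iter().filter(|data| data.is_cleanup).count()
    }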
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 3eee58d9d..a6b226ef7 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -13,15 +13,14 @@ use rustc_ast as ast;
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_hir::lang_items::LangItem;
 use rustc_index::vec::Idx;
-use rustc_middle::mir::AssertKind;
-use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::mir::{self, AssertKind, SwitchTargets};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
 use rustc_span::source_map::Span;
 use rustc_span::{sym, Symbol};
 use rustc_symbol_mangling::typeid::typeid_for_fnabi;
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
 use rustc_target::abi::{self, HasDataLayout, WrappingRange};
 use rustc_target::spec::abi::Abi;
@@ -324,7 +323,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             bx.unreachable();
             return;
         }
-        let llval = match self.fn_abi.ret.mode {
+        let llval = match &self.fn_abi.ret.mode {
             PassMode::Ignore | PassMode::Indirect { .. } => {
                 bx.ret_void();
                 return;
@@ -339,7 +338,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }

-            PassMode::Cast(cast_ty) => {
+            PassMode::Cast(cast_ty, _) => {
                 let op = match self.locals[mir::RETURN_PLACE] {
                     LocalRef::Operand(Some(op)) => op,
                     LocalRef::Operand(None) => bug!("use of return before def"),
@@ -360,7 +359,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         llval
                     }
                 };
-                let ty = bx.cast_backend_type(&cast_ty);
+                let ty = bx.cast_backend_type(cast_ty);
                 let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
                 bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
             }
@@ -368,6 +367,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx.ret(llval);
     }

+    #[tracing::instrument(level = "trace", skip(self, helper, bx))]
     fn codegen_drop_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
@@ -398,13 +398,29 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let (drop_fn, fn_abi) = match ty.kind() {
             // FIXME(eddyb) perhaps move some of this logic into
             // `Instance::resolve_drop_in_place`?
-            ty::Dynamic(..) => {
+            ty::Dynamic(_, _, ty::Dyn) => {
+                // IN THIS ARM, WE HAVE:
+                //    ty = *mut (dyn Trait)
+                //    which is: exists<T> ( *mut T,    Vtable<T: Trait> )
+                //                          args[0]    args[1]
+                //
+                // args = ( Data, Vtable )
+                //                  |
+                //                  v
+                //                /-------\
+                //                | ...   |
+                //                \-------/
+                //
                 let virtual_drop = Instance {
                     def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
                     substs: drop_fn.substs,
                 };
+                debug!("ty = {:?}", ty);
+                debug!("drop_fn = {:?}", drop_fn);
+                debug!("args = {:?}", args);
                 let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
                 let vtable = args[1];
+                // Truncate vtable off of args list
                 args = &args[..1];
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
@@ -412,6 +428,51 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     fn_abi,
                 )
             }
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // IN THIS ARM, WE HAVE:
+                //    ty = *mut (dyn* Trait)
+                //    which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+                //
+                // args = [ * ]
+                //          |
+                //          v
+                //      ( Data, Vtable )
+                //                |
+                //                v
+                //              /-------\
+                //              | ...   |
+                //              \-------/
+                //
+                //
+                // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+                //
+                // data = &(*args[0]).0    // gives a pointer to Data above (really the same pointer)
+                // vtable = (*args[0]).1   // loads the vtable out
+                // (data, vtable)          // an equivalent Rust `*mut dyn Trait`
+                //
+                // SO THEN WE CAN USE THE ABOVE CODE.
+                let virtual_drop = Instance {
+                    def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+                    substs: drop_fn.substs,
+                };
+                debug!("ty = {:?}", ty);
+                debug!("drop_fn = {:?}", drop_fn);
+                debug!("args = {:?}", args);
+                let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+                let data = args[0];
+                let data_ty = bx.cx().backend_type(place.layout);
+                let vtable_ptr =
+                    bx.gep(data_ty, data, &[bx.cx().const_i32(0), bx.cx().const_i32(1)]);
+                let vtable = bx.load(bx.type_i8p(), vtable_ptr, abi::Align::ONE);
+                // Truncate vtable off of args list
+                args = &args[..1];
+                debug!("args' = {:?}", args);
+                (
+                    meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+                        .get_fn(&mut bx, vtable, ty, &fn_abi),
+                    fn_abi,
+                )
+            }
             _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
         };
         helper.do_call(
@@ -798,58 +859,78 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let mut op = self.codegen_operand(&mut bx, arg);

                 if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
-                    if let Pair(..) = op.val {
-                        // In the case of Rc<Self>, we need to explicitly pass a
-                        // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
-                        // that is understood elsewhere in the compiler as a method on
-                        // `dyn Trait`.
-                        // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
-                        // we get a value of a built-in pointer type
-                        'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
-                            && !op.layout.ty.is_region_ptr()
-                        {
-                            for i in 0..op.layout.fields.count() {
-                                let field = op.extract_field(&mut bx, i);
-                                if !field.layout.is_zst() {
-                                    // we found the one non-zero-sized field that is allowed
-                                    // now find *its* non-zero-sized field, or stop if it's a
-                                    // pointer
-                                    op = field;
-                                    continue 'descend_newtypes;
+                    match op.val {
+                        Pair(data_ptr, meta) => {
+                            // In the case of Rc<Self>, we need to explicitly pass a
+                            // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+                            // that is understood elsewhere in the compiler as a method on
+                            // `dyn Trait`.
+                            // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+                            // we get a value of a built-in pointer type
+                            'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+                                && !op.layout.ty.is_region_ptr()
+                            {
+                                for i in 0..op.layout.fields.count() {
+                                    let field = op.extract_field(&mut bx, i);
+                                    if !field.layout.is_zst() {
+                                        // we found the one non-zero-sized field that is allowed
+                                        // now find *its* non-zero-sized field, or stop if it's a
+                                        // pointer
+                                        op = field;
+                                        continue 'descend_newtypes;
+                                    }
                                 }
+
+                                span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
                             }

-                            span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+                            // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+                            // data pointer and vtable. Look up the method in the vtable, and pass
+                            // the data pointer as the first argument
+                            llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+                                &mut bx,
+                                meta,
+                                op.layout.ty,
+                                &fn_abi,
+                            ));
+                            llargs.push(data_ptr);
+                            continue 'make_args;
                         }
-
-                        // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
-                        // data pointer and vtable. Look up the method in the vtable, and pass
-                        // the data pointer as the first argument
-                        match op.val {
-                            Pair(data_ptr, meta) => {
-                                llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                                    &mut bx,
-                                    meta,
-                                    op.layout.ty,
-                                    &fn_abi,
-                                ));
-                                llargs.push(data_ptr);
-                                continue 'make_args;
+                        Ref(data_ptr, Some(meta), _) => {
+                            // by-value dynamic dispatch
+                            llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+                                &mut bx,
+                                meta,
+                                op.layout.ty,
+                                &fn_abi,
+                            ));
+                            llargs.push(data_ptr);
+                            continue;
+                        }
+                        Immediate(_) => {
+                            let ty::Ref(_, ty, _) = op.layout.ty.kind() else {
+                                span_bug!(span, "can't codegen a virtual call on {:#?}", op);
+                            };
+                            if !ty.is_dyn_star() {
+                                span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                             }
-                            other => bug!("expected a Pair, got {:?}", other),
+                            // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
+                            let place = op.deref(bx.cx());
+                            let data_ptr = place.project_field(&mut bx, 0);
+                            let meta_ptr = place.project_field(&mut bx, 1);
+                            let meta = bx.load_operand(meta_ptr);
+                            llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+                                &mut bx,
+                                meta.immediate(),
+                                op.layout.ty,
+                                &fn_abi,
+                            ));
+                            llargs.push(data_ptr.llval);
+                            continue;
+                        }
+                        _ => {
+                            span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                         }
-                    } else if let Ref(data_ptr, Some(meta), _) = op.val {
-                        // by-value dynamic dispatch
-                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
-                            meta,
-                            op.layout.ty,
-                            &fn_abi,
-                        ));
-                        llargs.push(data_ptr);
-                        continue;
-                    } else {
-                        span_bug!(span, "can't codegen a virtual call on {:?}", op);
                     }
                 }
@@ -1161,39 +1242,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         llargs: &mut Vec<Bx::Value>,
         arg: &ArgAbi<'tcx, Ty<'tcx>>,
     ) {
-        // Fill padding with undef value, where applicable.
-        if let Some(ty) = arg.pad {
-            llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
-        }
-
-        if arg.is_ignore() {
-            return;
-        }
-
-        if let PassMode::Pair(..) = arg.mode {
-            match op.val {
+        match arg.mode {
+            PassMode::Ignore => return,
+            PassMode::Cast(_, true) => {
+                // Fill padding with undef value, where applicable.
+                llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
+            }
+            PassMode::Pair(..) => match op.val {
                 Pair(a, b) => {
                     llargs.push(a);
                     llargs.push(b);
                     return;
                 }
                 _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
-            }
-        } else if arg.is_unsized_indirect() {
-            match op.val {
+            },
+            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
                 Ref(a, Some(b), _) => {
                     llargs.push(a);
                     llargs.push(b);
                     return;
                 }
                 _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
-            }
+            },
+            _ => {}
         }

         // Force by-ref if we have to load through a cast pointer.
         let (mut llval, align, by_ref) = match op.val {
             Immediate(_) | Pair(..) => match arg.mode {
-                PassMode::Indirect { .. } | PassMode::Cast(_) => {
+                PassMode::Indirect { .. } | PassMode::Cast(..) => {
                     let scratch = PlaceRef::alloca(bx, arg.layout);
                     op.val.store(bx, scratch);
                     (scratch.llval, scratch.align, true)
@@ -1225,8 +1302,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
-            if let PassMode::Cast(ty) = arg.mode {
-                let llty = bx.cast_backend_type(&ty);
+            if let PassMode::Cast(ty, _) = &arg.mode {
+                let llty = bx.cast_backend_type(ty);
                 let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
                 llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
             } else {
@@ -1625,7 +1702,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
-                let op = if let PassMode::Cast(_) = ret_abi.mode {
+                let op = if let PassMode::Cast(..) = ret_abi.mode {
                     let tmp = PlaceRef::alloca(bx, ret_abi.layout);
                     tmp.storage_live(bx);
                     bx.store_arg(&ret_abi, llval, tmp);
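The receiver handling above now matches on three operand shapes instead of special-casing `Pair`: `Pair(data_ptr, meta)` for fat `&dyn Trait`/`*mut dyn Trait` pointers, `Ref(.., Some(meta), ..)` for by-value unsized dispatch, and `Immediate(_)` for references to `dyn*` values, whose data and vtable words are projected out of the pointed-to pair. For orientation, a stable-Rust sketch of the call shape the `Pair` arm lowers (illustrative, not part of the patch):

    trait Speak {
        fn speak(&self) -> usize;
    }

    impl Speak for usize {
        fn speak(&self) -> usize {
            *self
        }
    }

    // A virtual call through a fat pointer: codegen splits `obj` into its
    // data pointer and vtable, loads the method's slot from the vtable, and
    // passes the data pointer as the first argument.
    fn call_virtual(obj: &dyn Speak) -> usize {
        obj.speak()
    }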
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 9a995fbf6..4c6ab457c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -25,26 +25,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         constant: &mir::Constant<'tcx>,
     ) -> Result<ConstValue<'tcx>, ErrorHandled> {
         let ct = self.monomorphize(constant.literal);
-        let ct = match ct {
-            mir::ConstantKind::Ty(ct) => ct,
+        let uv = match ct {
+            mir::ConstantKind::Ty(ct) => match ct.kind() {
+                ty::ConstKind::Unevaluated(uv) => uv.expand(),
+                ty::ConstKind::Value(val) => {
+                    return Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val)));
+                }
+                err => span_bug!(
+                    constant.span,
+                    "encountered bad ConstKind after monomorphizing: {:?}",
+                    err
+                ),
+            },
+            mir::ConstantKind::Unevaluated(uv, _) => uv,
             mir::ConstantKind::Val(val, _) => return Ok(val),
         };
-        match ct.kind() {
-            ty::ConstKind::Unevaluated(ct) => self
-                .cx
-                .tcx()
-                .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
-                .map_err(|err| {
-                    self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
-                    err
-                }),
-            ty::ConstKind::Value(val) => Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))),
-            err => span_bug!(
-                constant.span,
-                "encountered bad ConstKind after monomorphizing: {:?}",
-                err
-            ),
-        }
+
+        self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| {
+            self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+            err
+        })
     }

     /// process constant containing SIMD shuffle indices
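Both flavors of unevaluated constant (`ConstantKind::Ty` wrapping `ty::ConstKind::Unevaluated`, and the MIR-level `ConstantKind::Unevaluated`) now funnel into a single `const_eval_resolve` call, while valtrees and pre-evaluated values return early. A sketch of the kind of constant that reaches this path, one that can only be evaluated after monomorphization (illustrative only):

    // `T::ZERO` stays unevaluated until `T` is known.
    trait Zero {
        const ZERO: Self;
    }

    impl Zero for u32 {
        const ZERO: u32 = 0;
    }

    fn zero<T: Zero>() -> T {
        T::ZERO
    }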
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 8c3186efc..157c1c823 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -3,7 +3,7 @@ use rustc_index::vec::IndexVec;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{kw, Symbol};
 use rustc_span::{BytePos, Span};
@@ -93,15 +93,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     }

     /// In order to have a good line stepping behavior in debugger, we overwrite debug
-    /// locations of macro expansions with that of the outermost expansion site
-    /// (unless the crate is being compiled with `-Z debug-macros`).
+    /// locations of macro expansions with that of the outermost expansion site (when the macro is
+    /// annotated with `#[collapse_debuginfo]` or when `-Zdebug-macros` is provided).
     fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
         // Bail out if debug info emission is not enabled.
         if self.debug_context.is_none() {
             return span;
         }

-        if span.from_expansion() && !self.cx.sess().opts.unstable_opts.debug_macros {
+        if self.cx.tcx().should_collapse_debuginfo(span) {
             // Walk up the macro expansion chain until we reach a non-expanded span.
             // We also stop at the function body level because no line stepping can occur
             // at the level above that.
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 94ac71a4d..215edbe02 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -77,10 +77,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

         let llval = match name {
-            sym::assume => {
-                bx.assume(args[0].immediate());
-                return;
-            }
             sym::abort => {
                 bx.abort();
                 return;
@@ -555,14 +551,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 return;
             }

-            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+            sym::ptr_guaranteed_cmp => {
                 let a = args[0].immediate();
                 let b = args[1].immediate();
-                if name == sym::ptr_guaranteed_eq {
-                    bx.icmp(IntPredicate::IntEQ, a, b)
-                } else {
-                    bx.icmp(IntPredicate::IntNE, a, b)
-                }
+                bx.icmp(IntPredicate::IntEQ, a, b)
             }

             sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
@@ -597,8 +589,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };

         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty) = fn_abi.ret.mode {
-                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
+            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
                 let ptr = bx.pointercast(result.llval, ptr_llty);
                 bx.store(llval, ptr, result.align);
             } else {
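The debuginfo change swaps the blanket `-Zdebug-macros` check for the `should_collapse_debuginfo` query, so spans collapse to the expansion site only when the macro opts in (or the flag is passed). A sketch of the opt-in, assuming the unstable `collapse_debuginfo` feature as it existed in this era:

    #![feature(collapse_debuginfo)]

    // Debug locations inside the expansion are attributed to the call site,
    // so a debugger steps over the macro instead of into it.
    #[collapse_debuginfo]
    macro_rules! checked {
        ($e:expr) => {
            $e.unwrap()
        };
    }

Note that the removal of `sym::assume` from the intrinsic match is not a removal of the feature: `assume` is now lowered from `StatementKind::Intrinsic` in statement.rs below, and `ptr_guaranteed_eq`/`ptr_guaranteed_ne` are merged into a single `ptr_guaranteed_cmp` intrinsic, codegenned here as a pointer equality comparison.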
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 8ee375fa9..2b931bfc9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -150,13 +150,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let start_llbb = Bx::append_block(cx, llfn, "start");
     let mut bx = Bx::build(cx, start_llbb);

-    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
+    if mir.basic_blocks.iter().any(|bb| bb.is_cleanup) {
         bx.set_personality_fn(cx.eh_personality());
     }

     let cleanup_kinds = analyze::cleanup_kinds(&mir);
     let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
-        .basic_blocks()
+        .basic_blocks
         .indices()
         .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
         .collect();
@@ -172,8 +172,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         unreachable_block: None,
         double_unwind_guard: None,
         cleanup_kinds,
-        landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
-        funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
+        landing_pads: IndexVec::from_elem(None, &mir.basic_blocks),
+        funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks.len()),
         locals: IndexVec::new(),
         debug_context,
         per_local_var_debug_info: None,
@@ -191,7 +191,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             // errored or at least linted
             ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
             ErrorHandled::TooGeneric => {
-                span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
+                span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
             }
         }
     }
@@ -283,7 +283,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 for i in 0..tupled_arg_tys.len() {
                     let arg = &fx.fn_abi.args[idx];
                     idx += 1;
-                    if arg.pad.is_some() {
+                    if let PassMode::Cast(_, true) = arg.mode {
                         llarg_idx += 1;
                     }
                     let pr_field = place.project_field(bx, i);
@@ -309,7 +309,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         let arg = &fx.fn_abi.args[idx];
         idx += 1;

-        if arg.pad.is_some() {
+        if let PassMode::Cast(_, true) = arg.mode {
             llarg_idx += 1;
         }
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index c612634fc..37b1e0362 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -72,10 +72,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
     ) -> Self {
         let layout = bx.layout_of(ty);

-        if layout.is_zst() {
-            return OperandRef::new_zst(bx, layout);
-        }
-
         let val = match val {
             ConstValue::Scalar(x) => {
                 let Abi::Scalar(scalar) = layout.abi else {
@@ -84,10 +80,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                 OperandValue::Immediate(llval)
             }
-            ConstValue::ZeroSized => {
-                let llval = bx.zst_to_backend(bx.immediate_backend_type(layout));
-                OperandValue::Immediate(llval)
-            }
+            ConstValue::ZeroSized => return OperandRef::new_zst(bx, layout),
             ConstValue::Slice { data, start, end } => {
                 let Abi::ScalarPair(a_scalar, _) = layout.abi else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 268c4d765..13d8f6edd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -4,7 +4,6 @@ use super::{FunctionCx, LocalRef};
 use crate::common::IntPredicate;
 use crate::glue;
 use crate::traits::*;
-use crate::MemFlags;

 use rustc_middle::mir;
 use rustc_middle::mir::tcx::PlaceTy;
@@ -245,7 +244,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 };
                 bx.intcast(tag.immediate(), cast_to, signed)
             }
-            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                 // Rebase from niche values to discriminants, and check
                 // whether the result is in range for the niche variants.
                 let niche_llty = bx.cx().immediate_backend_type(tag.layout);
@@ -303,7 +302,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 bx.select(
                     is_niche,
                     niche_discr,
-                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                 )
             }
         }
@@ -338,21 +337,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             }
             Variants::Multiple {
                 tag_encoding:
-                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
                 tag_field,
                 ..
             } => {
-                if variant_index != dataful_variant {
-                    if bx.cx().sess().target.arch == "arm"
-                        || bx.cx().sess().target.arch == "aarch64"
-                    {
-                        // FIXME(#34427): as workaround for LLVM bug on ARM,
-                        // use memset of 0 before assigning niche value.
-                        let fill_byte = bx.cx().const_u8(0);
-                        let size = bx.cx().const_usize(self.layout.size.bytes());
-                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
-                    }
-
+                if variant_index != untagged_variant {
                     let niche = self.project_field(bx, tag_field);
                     let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                     let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
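In place.rs, `dataful_variant` becomes `untagged_variant` (a rename reflecting that this variant is the one stored without a tag), and the old ARM/AArch64 memset workaround for an LLVM bug (#34427) is dropped along with the now-unused `MemFlags` import. For reference, the kind of layout this discriminant code handles, a niche optimization that is guaranteed in stable Rust:

    // `Option<&u32>` needs no separate tag: `Some` is the untagged variant
    // stored as the bare reference, and `None` occupies the niche value 0.
    fn main() {
        assert_eq!(
            std::mem::size_of::<Option<&u32>>(),
            std::mem::size_of::<&u32>(),
        );
    }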
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 26b9fbf44..56852b0fc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -4,6 +4,7 @@ use super::{FunctionCx, LocalRef};

 use crate::base;
 use crate::common::{self, IntPredicate};
+use crate::meth::get_vtable;
 use crate::traits::*;
 use crate::MemFlags;
@@ -87,7 +88,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let size = bx.const_usize(dest.layout.size.bytes());

                 // Use llvm.memset.p0i8.* to initialize all zero arrays
-                if bx.cx().const_to_opt_uint(v) == Some(0) {
+                if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                     let fill = bx.cx().const_u8(0);
                     bx.memset(start, fill, size, dest.align, MemFlags::empty());
                     return bx;
@@ -271,6 +272,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bug!("unexpected non-pair operand");
                     }
                 }
+                mir::CastKind::DynStar => {
+                    let data = match operand.val {
+                        OperandValue::Ref(_, _, _) => todo!(),
+                        OperandValue::Immediate(v) => v,
+                        OperandValue::Pair(_, _) => todo!(),
+                    };
+                    let trait_ref =
+                        if let ty::Dynamic(data, _, ty::DynStar) = cast.ty.kind() {
+                            data.principal()
+                        } else {
+                            bug!("Only valid to do a DynStar cast into a DynStar type")
+                        };
+                    let vtable = get_vtable(bx.cx(), source.ty(self.mir, bx.tcx()), trait_ref);
+                    OperandValue::Pair(data, vtable)
+                }
                 mir::CastKind::Pointer(
                     PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                 )
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index f452f2988..1db0fb3a6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -1,4 +1,5 @@
 use rustc_middle::mir;
+use rustc_middle::mir::NonDivergingIntrinsic;

 use super::FunctionCx;
 use super::LocalRef;
@@ -73,11 +74,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
                 bx
             }
-            mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
-                ref src,
-                ref dst,
-                ref count,
-            }) => {
+            mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
+                let op_val = self.codegen_operand(&mut bx, op);
+                bx.assume(op_val.immediate());
+                bx
+            }
+            mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+                mir::CopyNonOverlapping { ref count, ref src, ref dst },
+            )) => {
                 let dst_val = self.codegen_operand(&mut bx, dst);
                 let src_val = self.codegen_operand(&mut bx, src);
                 let count = self.codegen_operand(&mut bx, count).immediate();
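The new `CastKind::DynStar` arm materializes a `dyn*` value as an `OperandValue::Pair` of the pointer-sized data word and the vtable returned by `get_vtable`, and the statement.rs hunk completes the `assume` migration noted earlier: both non-diverging intrinsics are now codegenned from `StatementKind::Intrinsic`. A source-level sketch of the cast this lowers, assuming the unstable `dyn_star` feature as it existed at the time (the syntax may differ on later toolchains):

    #![feature(dyn_star)]

    trait Speak {
        fn speak(&self) -> usize;
    }

    impl Speak for usize {
        fn speak(&self) -> usize {
            *self
        }
    }

    fn main() {
        // `CastKind::DynStar`: the usize becomes the data word, and the
        // `Speak` vtable pointer is paired with it, all pointer-sized.
        let d = 42usize as dyn* Speak;
        assert_eq!(d.speak(), 42);
    }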