Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/analyze.rs   | 23
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/block.rs     | 43
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/debuginfo.rs |  4
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 22
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/locals.rs    |  6
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/mod.rs       |  6
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/operand.rs   | 13
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/place.rs     | 55
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/rvalue.rs    |  2
-rw-r--r--   compiler/rustc_codegen_ssa/src/mir/statement.rs |  1
10 files changed, 87 insertions, 88 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 2285e7f4e..c1de9b76f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -36,13 +36,13 @@ pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // Arguments get assigned to by means of the function being called
     for arg in mir.args_iter() {
-        analyzer.assign(arg, DefLocation::Argument);
+        analyzer.define(arg, DefLocation::Argument);
     }
 
     // If there exists a local definition that dominates all uses of that local,
     // the definition should be visited first. Traverse blocks in an order that
     // is a topological sort of dominance partial order.
-    for (bb, data) in traversal::reverse_postorder(&mir) {
+    for (bb, data) in traversal::reverse_postorder(mir) {
         analyzer.visit_basic_block_data(bb, data);
     }
@@ -74,7 +74,7 @@ struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
 }
 
 impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
-    fn assign(&mut self, local: mir::Local, location: DefLocation) {
+    fn define(&mut self, local: mir::Local, location: DefLocation) {
         let kind = &mut self.locals[local];
         match *kind {
             LocalKind::ZST => {}
@@ -162,7 +162,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
         debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
 
         if let Some(local) = place.as_local() {
-            self.assign(local, DefLocation::Body(location));
+            self.define(local, DefLocation::Assignment(location));
             if self.locals[local] != LocalKind::Memory {
                 let decl_span = self.fx.mir.local_decls[local].source_info.span;
                 if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
@@ -183,9 +183,14 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
     fn visit_local(&mut self, local: mir::Local, context: PlaceContext, location: Location) {
         match context {
-            PlaceContext::MutatingUse(MutatingUseContext::Call)
-            | PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
-                self.assign(local, DefLocation::Body(location));
+            PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+                let call = location.block;
+                let TerminatorKind::Call { target, .. } =
+                    self.fx.mir.basic_blocks[call].terminator().kind
+                else {
+                    bug!()
+                };
+                self.define(local, DefLocation::CallReturn { call, target });
             }
 
             PlaceContext::NonUse(_)
@@ -197,7 +202,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
             ) => match &mut self.locals[local] {
                 LocalKind::ZST => {}
                 LocalKind::Memory => {}
-                LocalKind::SSA(def) if def.dominates(location, &self.dominators) => {}
+                LocalKind::SSA(def) if def.dominates(location, self.dominators) => {}
                 // Reads from uninitialized variables (e.g., in dead code, after
                 // optimizations) require locals to be in (uninitialized) memory.
                // N.B., there can be uninitialized reads of a local visited after
@@ -237,6 +242,8 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
                     }
                 }
             }
+
+            PlaceContext::MutatingUse(MutatingUseContext::Yield) => bug!(),
         }
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 3d2d8f8b5..a1662f25e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -47,7 +47,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         &self,
         fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
     ) -> Option<&'b Bx::Funclet> {
-        let cleanup_kinds = (&fx.cleanup_kinds).as_ref()?;
+        let cleanup_kinds = fx.cleanup_kinds.as_ref()?;
         let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb)?;
         // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
         // it has to be now. This may not seem necessary, as RPO should lead
@@ -161,7 +161,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
     ) -> MergingSucc {
         // If there is a cleanup block and the function we're calling can unwind, then
         // do an invoke, otherwise do a call.
-        let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+        let fn_ty = bx.fn_decl_backend_type(fn_abi);
 
         let fn_attrs = if bx.tcx().def_kind(fx.instance.def_id()).has_codegen_attrs() {
             Some(bx.tcx().codegen_fn_attrs(fx.instance.def_id()))
@@ -204,9 +204,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             let invokeret = bx.invoke(
                 fn_ty,
                 fn_attrs,
-                Some(&fn_abi),
+                Some(fn_abi),
                 fn_ptr,
-                &llargs,
+                llargs,
                 ret_llbb,
                 unwind_block,
                 self.funclet(fx),
@@ -225,7 +225,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             }
             MergingSucc::False
         } else {
-            let llret = bx.call(fn_ty, fn_attrs, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
+            let llret = bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, llargs, self.funclet(fx));
             if fx.mir[self.bb].is_cleanup {
                 bx.apply_attrs_to_cleanup_callsite(llret);
             }
@@ -273,7 +273,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
 
             bx.codegen_inline_asm(
                 template,
-                &operands,
+                operands,
                 options,
                 line_spans,
                 instance,
@@ -281,7 +281,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             );
             MergingSucc::False
         } else {
-            bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
+            bx.codegen_inline_asm(template, operands, options, line_spans, instance, None);
 
             if let Some(target) = destination {
                 self.funclet_br(fx, bx, target, mergeable_succ)
@@ -318,7 +318,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         discr: &mir::Operand<'tcx>,
         targets: &SwitchTargets,
     ) {
-        let discr = self.codegen_operand(bx, &discr);
+        let discr = self.codegen_operand(bx, discr);
         let switch_ty = discr.layout.ty;
         let mut target_iter = targets.iter();
         if target_iter.len() == 1 {
@@ -498,7 +498,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 args = &args[..1];
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
-                        .get_fn(bx, vtable, ty, &fn_abi),
+                        .get_fn(bx, vtable, ty, fn_abi),
                     fn_abi,
                 )
             }
@@ -540,7 +540,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 debug!("args' = {:?}", args);
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
-                        .get_fn(bx, meta.immediate(), ty, &fn_abi),
+                        .get_fn(bx, meta.immediate(), ty, fn_abi),
                     fn_abi,
                 )
             }
@@ -864,7 +864,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // promotes any complex rvalues to constants.
             if i == 2 && intrinsic == sym::simd_shuffle {
                 if let mir::Operand::Constant(constant) = arg {
-                    let (llval, ty) = self.simd_shuffle_indices(&bx, constant);
+                    let (llval, ty) = self.simd_shuffle_indices(bx, constant);
                     return OperandRef {
                         val: Immediate(llval),
                         layout: bx.layout_of(ty),
@@ -881,7 +881,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 Self::codegen_intrinsic_call(
                     bx,
                     *instance.as_ref().unwrap(),
-                    &fn_abi,
+                    fn_abi,
                     &args,
                     dest,
                     span,
@@ -937,7 +937,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             bx,
                             meta,
                             op.layout.ty,
-                            &fn_abi,
+                            fn_abi,
                         ));
                         llargs.push(data_ptr);
                         continue 'make_args;
@@ -948,7 +948,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx,
                         meta,
                         op.layout.ty,
-                        &fn_abi,
+                        fn_abi,
                     ));
                     llargs.push(data_ptr);
                     continue;
@@ -975,7 +975,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx,
                         meta.immediate(),
                         op.layout.ty,
-                        &fn_abi,
+                        fn_abi,
                     ));
                     llargs.push(data_ptr.llval);
                     continue;
@@ -1587,9 +1587,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
 
         let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, reason.lang_item());
-        let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+        let fn_ty = bx.fn_decl_backend_type(fn_abi);
 
-        let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
+        let llret = bx.call(fn_ty, None, Some(fn_abi), fn_ptr, &[], funclet.as_ref());
         bx.apply_attrs_to_cleanup_callsite(llret);
 
         bx.unreachable();
@@ -1662,10 +1662,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
         } else {
-            self.codegen_place(
-                bx,
-                mir::PlaceRef { local: dest.local, projection: &dest.projection },
-            )
+            self.codegen_place(bx, mir::PlaceRef { local: dest.local, projection: dest.projection })
         };
         if fn_ret.is_indirect() {
             if dest.align < dest.layout.align.abi {
@@ -1696,7 +1693,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match dest {
             Nothing => (),
-            Store(dst) => bx.store_arg(&ret_abi, llval, dst),
+            Store(dst) => bx.store_arg(ret_abi, llval, dst),
             IndirectOperand(tmp, index) => {
                 let op = bx.load_operand(tmp);
                 tmp.storage_dead(bx);
@@ -1708,7 +1705,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let op = if let PassMode::Cast { .. } = ret_abi.mode {
                     let tmp = PlaceRef::alloca(bx, ret_abi.layout);
                     tmp.storage_live(bx);
-                    bx.store_arg(&ret_abi, llval, tmp);
+                    bx.store_arg(ret_abi, llval, tmp);
                     let op = bx.load_operand(tmp);
                     tmp.storage_dead(bx);
                     op
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 0dc30d21c..14915e816 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -398,7 +398,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };
 
         let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
-            calculate_debuginfo_offset(bx, &var.projection, base.layout);
+            calculate_debuginfo_offset(bx, var.projection, base.layout);
 
         // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
         // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
@@ -416,7 +416,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         if should_create_individual_allocas {
             let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
-                calculate_debuginfo_offset(bx, &var.projection, base);
+                calculate_debuginfo_offset(bx, var.projection, base);
 
             // Create a variable which will be a pointer to the actual value
             let ptr_ty = Ty::new_ptr(
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 136d06d56..a5bffc33d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -4,8 +4,8 @@ use super::FunctionCx;
 use crate::common::IntPredicate;
 use crate::errors;
 use crate::errors::InvalidMonomorphization;
-use crate::glue;
 use crate::meth;
+use crate::size_of_val;
 use crate::traits::*;
 use crate::MemFlags;
 
@@ -88,21 +88,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::va_end => bx.va_end(args[0].immediate()),
             sym::size_of_val => {
                 let tp_ty = fn_args.type_at(0);
-                if let OperandValue::Pair(_, meta) = args[0].val {
-                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
-                    llsize
-                } else {
-                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
-                }
+                let meta =
+                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
+                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
+                llsize
             }
             sym::min_align_of_val => {
                 let tp_ty = fn_args.type_at(0);
-                if let OperandValue::Pair(_, meta) = args[0].val {
-                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
-                    llalign
-                } else {
-                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
-                }
+                let meta =
+                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
+                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
+                llalign
             }
             sym::vtable_size | sym::vtable_align => {
                 let vtable = args[0].immediate();
diff --git a/compiler/rustc_codegen_ssa/src/mir/locals.rs b/compiler/rustc_codegen_ssa/src/mir/locals.rs
index 378c54013..7db260c9f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/locals.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/locals.rs
@@ -43,7 +43,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let local = mir::Local::from_usize(local);
                 let expected_ty = self.monomorphize(self.mir.local_decls[local].ty);
                 if expected_ty != op.layout.ty {
-                    warn!("Unexpected initial operand type. See the issues/114858");
+                    warn!(
+                        "Unexpected initial operand type: expected {expected_ty:?}, found {:?}.\
+                        See <https://github.com/rust-lang/rust/issues/114858>.",
+                        op.layout.ty
+                    );
                 }
             }
         }
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index d0b799e08..a6fcf1fd3 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -168,7 +168,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
     debug!("fn_abi: {:?}", fn_abi);
 
-    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
+    let debug_context = cx.create_function_debug_context(instance, fn_abi, llfn, mir);
 
     let start_llbb = Bx::append_block(cx, llfn, "start");
     let mut start_bx = Bx::build(cx, start_llbb);
@@ -180,7 +180,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     }
 
     let cleanup_kinds =
-        base::wants_new_eh_instructions(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
+        base::wants_new_eh_instructions(cx.tcx().sess).then(|| analyze::cleanup_kinds(mir));
 
     let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
         mir.basic_blocks
@@ -261,7 +261,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     drop(start_bx);
 
     // Codegen the body of each block using reverse postorder
-    for (bb, _) in traversal::reverse_postorder(&mir) {
+    for (bb, _) in traversal::reverse_postorder(mir) {
         fx.codegen_block(bb);
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 0ab2b7ecd..794cbd315 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -2,7 +2,7 @@ use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};
 
 use crate::base;
-use crate::glue;
+use crate::size_of_val;
 use crate::traits::*;
 use crate::MemFlags;
@@ -105,7 +105,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                 };
                 let a = Scalar::from_pointer(
-                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data), Size::ZERO),
+                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data).into(), Size::ZERO),
                     &bx.tcx(),
                 );
                 let a_llval = bx.scalar_to_backend(
@@ -132,7 +132,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         offset: Size,
     ) -> Self {
         let alloc_align = alloc.inner().align;
-        assert_eq!(alloc_align, layout.align.abi);
+        assert!(alloc_align >= layout.align.abi);
 
         let read_scalar = |start, size, s: abi::Scalar, ty| {
             match alloc.0.read_scalar(
@@ -155,7 +155,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                let val = read_scalar(offset, size, s, bx.backend_type(layout));
+                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
             Abi::ScalarPair(
@@ -414,6 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                 // value is through `undef`/`poison`, and the store itself is useless.
             }
             OperandValue::Ref(r, None, source_align) => {
+                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
                 if flags.contains(MemFlags::NONTEMPORAL) {
                     // HACK(nox): This is inefficient but there is no nontemporal memcpy.
                    let ty = bx.backend_type(dest.layout);
@@ -465,13 +466,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             .ty;
 
         let OperandValue::Ref(llptr, Some(llextra), _) = self else {
-            bug!("store_unsized called with a sized value")
+            bug!("store_unsized called with a sized value (or with an extern type)")
         };
 
         // Allocate an appropriate region on the stack, and copy the value into it. Since alloca
         // doesn't support dynamic alignment, we allocate an extra align - 1 bytes, and align the
         // pointer manually.
-        let (size, align) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+        let (size, align) = size_of_val::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
         let one = bx.const_usize(1);
         let align_minus_1 = bx.sub(align, one);
         let size_extra = bx.add(size, align_minus_1);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index eb590a45a..c0bb3ac56 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -2,7 +2,7 @@ use super::operand::OperandValue;
 use super::{FunctionCx, LocalRef};
 
 use crate::common::IntPredicate;
-use crate::glue;
+use crate::size_of_val;
 use crate::traits::*;
 
 use rustc_middle::mir;
@@ -99,6 +99,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         let offset = self.layout.fields.offset(ix);
         let effective_field_align = self.align.restrict_for_offset(offset);
 
+        // `simple` is called when we don't need to adjust the offset to
+        // the dynamic alignment of the field.
         let mut simple = || {
             let llval = match self.layout.abi {
                 _ if offset.bytes() == 0 => {
@@ -141,35 +143,21 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         };
 
         // Simple cases, which don't need DST adjustment:
-        //   * no metadata available - just log the case
-        //   * known alignment - sized types, `[T]`, `str` or a foreign type
-        //   * packed struct - there is no alignment padding
+        //   * known alignment - sized types, `[T]`, `str`
+        //   * offset 0 -- rounding up to alignment cannot change the offset
+        // Note that looking at `field.align` is incorrect since that is not necessarily equal
+        // to the dynamic alignment of the type.
         match field.ty.kind() {
-            _ if self.llextra.is_none() => {
-                debug!(
-                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
-                    ix, self.llval
-                );
-                return simple();
-            }
             _ if field.is_sized() => return simple(),
-            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
-            ty::Adt(def, _) => {
-                if def.repr().packed() {
-                    // FIXME(eddyb) generalize the adjustment when we
-                    // start supporting packing to larger alignments.
-                    assert_eq!(self.layout.align.abi.bytes(), 1);
-                    return simple();
-                }
-            }
+            ty::Slice(..) | ty::Str => return simple(),
+            _ if offset.bytes() == 0 => return simple(),
            _ => {}
         }
 
         // We need to get the pointer manually now.
         // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
         // We do this instead of, say, simply adjusting the pointer from the result of a GEP
-        // because the field may have an arbitrary alignment in the LLVM representation
-        // anyway.
+        // because the field may have an arbitrary alignment in the LLVM representation.
         //
         // To demonstrate:
@@ -186,7 +174,16 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         let unaligned_offset = bx.cx().const_usize(offset.bytes());
 
         // Get the alignment of the field
-        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
+        let (_, mut unsized_align) = size_of_val::size_and_align_of_dst(bx, field.ty, meta);
+
+        // For packed types, we need to cap alignment.
+        if let ty::Adt(def, _) = self.layout.ty.kind()
+            && let Some(packed) = def.repr().pack
+        {
+            let packed = bx.const_usize(packed.bytes());
+            let cmp = bx.icmp(IntPredicate::IntULT, unsized_align, packed);
+            unsized_align = bx.select(cmp, unsized_align, packed)
+        }
 
         // Bump the unaligned offset up to the appropriate alignment
         let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);
@@ -474,27 +471,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 cg_base.project_index(bx, llindex)
             }
             mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
-                let lloffset = bx.cx().const_usize(offset as u64);
+                let lloffset = bx.cx().const_usize(offset);
                 cg_base.project_index(bx, lloffset)
             }
             mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
-                let lloffset = bx.cx().const_usize(offset as u64);
+                let lloffset = bx.cx().const_usize(offset);
                 let lllen = cg_base.len(bx.cx());
                 let llindex = bx.sub(lllen, lloffset);
                 cg_base.project_index(bx, llindex)
             }
             mir::ProjectionElem::Subslice { from, to, from_end } => {
-                let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
+                let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from));
                 let projected_ty =
                     PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                 subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));
 
                 if subslice.layout.is_unsized() {
                     assert!(from_end, "slice subslices should be `from_end`");
-                    subslice.llextra = Some(bx.sub(
-                        cg_base.llextra.unwrap(),
-                        bx.cx().const_usize((from as u64) + (to as u64)),
-                    ));
+                    subslice.llextra =
+                        Some(bx.sub(cg_base.llextra.unwrap(), bx.cx().const_usize(from + to)));
                }
 
                 subslice
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 8e5019967..02b51dfe5 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -702,7 +702,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
         let fn_ptr = bx.get_fn_addr(instance);
         let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
-        let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+        let fn_ty = bx.fn_decl_backend_type(fn_abi);
         let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
             Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
         } else {
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 899e41265..a158fc6e2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -3,7 +3,6 @@ use rustc_middle::mir::NonDivergingIntrinsic;
 
 use super::FunctionCx;
 use super::LocalRef;
-use crate::traits::BuilderMethods;
 use crate::traits::*;
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
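A note on the analyze.rs hunks: the analysis walks blocks via `traversal::reverse_postorder` because RPO is a topological sort of the dominance partial order, so every block is visited before the blocks it dominates and a dominating definition is always seen before the uses it dominates. A minimal illustration of RPO over an adjacency-list CFG (a plain-Rust sketch, not rustc's implementation):

    // Tiny reverse-postorder traversal over a CFG given as successor lists.
    fn reverse_postorder(succs: &[Vec<usize>], start: usize) -> Vec<usize> {
        fn dfs(n: usize, succs: &[Vec<usize>], seen: &mut Vec<bool>, post: &mut Vec<usize>) {
            seen[n] = true;
            for &s in &succs[n] {
                if !seen[s] {
                    dfs(s, succs, seen, post);
                }
            }
            post.push(n); // postorder: a node is emitted after all its successors
        }
        let mut seen = vec![false; succs.len()];
        let mut post = Vec::new();
        dfs(start, succs, &mut seen, &mut post);
        post.reverse(); // reversing postorder puts dominators before dominated blocks
        post
    }

    fn main() {
        // Diamond CFG: 0 -> {1, 2} -> 3. The entry block comes first,
        // the join block last, regardless of DFS visit order.
        let cfg = vec![vec![1, 2], vec![3], vec![3], vec![]];
        let rpo = reverse_postorder(&cfg, 0);
        assert_eq!(rpo[0], 0);
        assert_eq!(*rpo.last().unwrap(), 3);
    }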
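The same file's move from `DefLocation::Body` to `DefLocation::Assignment` / `DefLocation::CallReturn { call, target }` matters for the dominance check: a `Call` terminator defines its destination only once control reaches the `target` block, and a diverging call (`target == None`) defines nothing at all. Below is a paraphrased sketch of that rule with stub types; the real definitions live in `rustc_middle::mir`, and the intra-block rule for `Assignment` here is one plausible reading, not the exact rustc code:

    #[derive(Clone, Copy, PartialEq)]
    struct BasicBlock(usize);

    #[derive(Clone, Copy)]
    struct Location { block: BasicBlock, statement_index: usize }

    struct Dominators; // stub standing in for rustc's dominator tree
    impl Dominators {
        fn dominates(&self, _def: BasicBlock, _use: BasicBlock) -> bool {
            true // stub: a real implementation queries the dominator tree
        }
    }

    #[derive(Clone, Copy)]
    #[allow(dead_code)]
    enum DefLocation {
        Argument,
        Assignment(Location),
        CallReturn { call: BasicBlock, target: Option<BasicBlock> },
    }

    impl DefLocation {
        fn dominates(&self, use_loc: Location, doms: &Dominators) -> bool {
            match *self {
                // Arguments are defined on entry, so they dominate every use.
                DefLocation::Argument => true,
                // An assignment dominates later uses in its own block, or any
                // use in a block that the defining block dominates.
                DefLocation::Assignment(def) => {
                    (def.block == use_loc.block && def.statement_index < use_loc.statement_index)
                        || (def.block != use_loc.block && doms.dominates(def.block, use_loc.block))
                }
                // A call's return value exists only once control reaches
                // `target`; a diverging call defines nothing.
                DefLocation::CallReturn { target, .. } => {
                    target.map_or(false, |t| doms.dominates(t, use_loc.block))
                }
            }
        }
    }

    fn main() {
        let entry = BasicBlock(0);
        let def = DefLocation::CallReturn { call: entry, target: None };
        // A diverging call never dominates any use of its destination.
        assert!(!def.dominates(Location { block: entry, statement_index: 0 }, &Dominators));
    }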
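The intrinsic.rs hunks rename `glue::size_and_align_of_dst` to `size_of_val::size_and_align_of_dst` and fold the sized and unsized paths into one call that takes optional pointer metadata. The intrinsic being lowered is the one behind `std::mem::size_of_val` / `align_of_val`: for a sized type the answer is a compile-time constant, while for a DST it must be computed from the fat pointer's metadata. A quick demonstration using the standard library:

    fn main() {
        let xs: [u32; 4] = [1, 2, 3, 4];
        let slice: &[u32] = &xs;
        // Sized type: the size is a compile-time constant.
        assert_eq!(std::mem::size_of_val(&xs), 16);
        // DST: size = metadata (len 4) * element size (4 bytes).
        assert_eq!(std::mem::size_of_val(slice), 16);
        assert_eq!(std::mem::align_of_val(slice), 4);
    }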
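Finally, the place.rs change replaces the old packed-struct special case (which asserted the struct's alignment was 1) with a runtime cap: the field's dynamic alignment is clamped to the `repr(packed(N))` value via an `icmp ult` + `select`, and only then is the offset rounded up. A minimal sketch of the arithmetic in plain Rust, with illustrative helper names (the real rounding helper is `round_up_const_value_to_alignment`, operating on IR values rather than integers):

    // Cap the dynamic alignment at the `packed(N)` bound, if any.
    // The emitted IR expresses this min() as an icmp + select.
    fn effective_align(unsized_align: u64, packed: Option<u64>) -> u64 {
        match packed {
            Some(p) => unsized_align.min(p),
            None => unsized_align,
        }
    }

    // Round an offset up to a power-of-two alignment:
    // (offset + align - 1) & !(align - 1).
    fn round_up_to_alignment(offset: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        (offset + align - 1) & !(align - 1)
    }

    fn main() {
        // A tail field whose type wants align 8, inside a repr(packed(2))
        // struct, starting at static offset 5: the cap keeps the field at
        // offset 6 instead of bumping it to 8.
        let align = effective_align(8, Some(2));
        assert_eq!(round_up_to_alignment(5, align), 6);
    }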