Diffstat (limited to 'compiler/rustc_codegen_cranelift/src')
19 files changed, 492 insertions, 195 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 0497c2570..99059e788 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -465,7 +465,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
             let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
             let sig = fx.bcx.import_signature(sig);

-            (CallTarget::Indirect(sig, method), Some(ptr))
+            (CallTarget::Indirect(sig, method), Some(ptr.get_addr(fx)))
         }

         // Normal call
@@ -560,7 +560,19 @@ pub(crate) fn codegen_drop<'tcx>(
         // we don't actually need to drop anything
     } else {
         match ty.kind() {
-            ty::Dynamic(..) => {
+            ty::Dynamic(_, _, ty::Dyn) => {
+                // IN THIS ARM, WE HAVE:
+                //    ty = *mut (dyn Trait)
+                // which is: exists<T> ( *mut T,    Vtable<T: Trait> )
+                //                       args[0]    args[1]
+                //
+                // args = ( Data, Vtable )
+                //                  |
+                //                  v
+                //                /-------\
+                //                | ...   |
+                //                \-------/
+                //
                 let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
                 let ptr = ptr.get_addr(fx);
                 let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
@@ -578,6 +590,43 @@ pub(crate) fn codegen_drop<'tcx>(
                 let sig = fx.bcx.import_signature(sig);
                 fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
             }
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // IN THIS ARM, WE HAVE:
+                //    ty = *mut (dyn* Trait)
+                // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+                //
+                // args = [ * ]
+                //          |
+                //          v
+                //      ( Data, Vtable )
+                //                |
+                //                v
+                //              /-------\
+                //              | ...   |
+                //              \-------/
+                //
+                //
+                // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+                //
+                // data = &(*args[0]).0    // gives a pointer to Data above (really the same pointer)
+                // vtable = (*args[0]).1   // loads the vtable out
+                // (data, vtable)          // an equivalent Rust `*mut dyn Trait`
+                //
+                // SO THEN WE CAN USE THE ABOVE CODE.
+                let (data, vtable) = drop_place.to_cvalue(fx).dyn_star_force_data_on_stack(fx);
+                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
+
+                let virtual_drop = Instance {
+                    def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+                    substs: drop_instance.substs,
+                };
+                let fn_abi =
+                    RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
+
+                let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+                let sig = fx.bcx.import_signature(sig);
+                fx.bcx.ins().call_indirect(sig, drop_fn, &[data]);
+            }
             _ => {
                 assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index 96e25d3a8..e5ad31eb9 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -193,7 +193,7 @@ pub(super) fn from_casted_value<'tcx>(
         kind: StackSlotKind::ExplicitSlot,
         // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
         // specify stack slot alignment.
-        // Stack slot size may be bigger for for example `[u8; 3]` which is packed into an `i32`.
+        // Stack slot size may be bigger for example `[u8; 3]` which is packed into an `i32`.
        // It may also be smaller for example when the type is a wrapper around an integer with a
        // larger alignment than the integer.
        size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
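The size computation in `from_casted_value` rounds the slot size up to the next multiple of 16 because Cranelift stack slots cannot yet carry an alignment. A tiny standalone sketch of that rounding idiom (function name mine):

    /// Round `size` up to the next multiple of 16 bytes, as the stack
    /// slot computation in the hunk above does.
    fn round_up_to_16(size: u32) -> u32 {
        (size + 15) / 16 * 16
    }

    fn main() {
        assert_eq!(round_up_to_16(3), 16); // e.g. `[u8; 3]` packed into an `i32`
        assert_eq!(round_up_to_16(32), 32); // already a multiple, unchanged
    }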
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
index 6d321c7b2..bad8a87b9 100644
--- a/compiler/rustc_codegen_cranelift/src/allocator.rs
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -78,7 +78,7 @@ fn codegen_inner(
         let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();

         let mut ctx = Context::new();
-        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+        ctx.func.signature = sig.clone();
         {
             let mut func_ctx = FunctionBuilderContext::new();
             let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
@@ -116,7 +116,7 @@ fn codegen_inner(
     let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();

     let mut ctx = Context::new();
-    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+    ctx.func.signature = sig;
     {
         let mut func_ctx = FunctionBuilderContext::new();
         let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
index fb5313f17..f2e3bf16e 100644
--- a/compiler/rustc_codegen_cranelift/src/archive.rs
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -160,6 +160,8 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
                     let err = err.to_string();
                     if err == "Unknown file magic" {
                         // Not an object file; skip it.
+                    } else if object::read::archive::ArchiveFile::parse(&*data).is_ok() {
+                        // Nested archive file; skip it.
                     } else {
                         sess.fatal(&format!(
                             "error parsing `{}` during archive creation: {}",
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 399474d79..1db445027 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -6,6 +6,8 @@
 use rustc_middle::ty::adjustment::PointerCast;
 use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::print::with_no_trimmed_paths;

+use cranelift_codegen::ir::UserFuncName;
+
 use crate::constant::ConstantCx;
 use crate::debuginfo::FunctionDebugContext;
 use crate::prelude::*;
@@ -64,7 +66,7 @@ pub(crate) fn codegen_fn<'tcx>(
     let mut func_ctx = FunctionBuilderContext::new();
     let mut func = cached_func;
     func.clear();
-    func.name = ExternalName::user(0, func_id.as_u32());
+    func.name = UserFuncName::user(0, func_id.as_u32());
     func.signature = sig;
     func.collect_debug_info();
@@ -633,7 +635,12 @@ fn codegen_stmt<'tcx>(
                     lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
                 }
                 Rvalue::Cast(
-                    CastKind::Misc
+                    CastKind::IntToInt
+                    | CastKind::FloatToFloat
+                    | CastKind::FloatToInt
+                    | CastKind::IntToFloat
+                    | CastKind::FnPtrToPtr
+                    | CastKind::PtrToPtr
                     | CastKind::PointerExposeAddress
                     | CastKind::PointerFromExposedAddress,
                     ref operand,
@@ -701,9 +708,9 @@ fn codegen_stmt<'tcx>(
                     let operand = codegen_operand(fx, operand);
                     operand.unsize_value(fx, lval);
                 }
-                Rvalue::Cast(CastKind::DynStar, _, _) => {
-                    // FIXME(dyn-star)
-                    unimplemented!()
+                Rvalue::Cast(CastKind::DynStar, ref operand, _) => {
+                    let operand = codegen_operand(fx, operand);
+                    operand.coerce_dyn_star(fx, lval);
                 }
                 Rvalue::Discriminant(place) => {
                     let place = codegen_place(fx, place);
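The `CastKind::DynStar` arm replaces the old `unimplemented!()`, so `dyn*` casts now lower through `coerce_dyn_star`. Roughly the source-level construct this enables, under the nightly feature gate of the time (illustrative sketch; the exact surface syntax was still in flux):

    #![feature(dyn_star)]
    #![allow(incomplete_features)]

    use std::fmt::Debug;

    fn main() {
        let i = 42usize;
        // Packs the pointer-sized value together with the `Debug` vtable
        // pointer into a two-word `dyn*` value (a scalar pair in cg_clif).
        let _dyn_i: dyn* Debug = i as dyn* Debug;
        // Dropping `_dyn_i` exercises the dyn* drop glue added in abi/mod.rs above.
    }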
@@ -763,11 +770,7 @@ fn codegen_stmt<'tcx>(
                     lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
                 }
                 Rvalue::NullaryOp(null_op, ty) => {
-                    assert!(
-                        lval.layout()
-                            .ty
-                            .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
-                    );
+                    assert!(lval.layout().ty.is_sized(fx.tcx, ParamEnv::reveal_all()));
                     let layout = fx.layout_of(fx.monomorphize(ty));
                     let val = match null_op {
                         NullOp::SizeOf => layout.size.bytes(),
@@ -850,6 +853,7 @@ pub(crate) fn codegen_place<'tcx>(
             PlaceElem::Deref => {
                 cplace = cplace.place_deref(fx);
             }
+            PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
             PlaceElem::Field(field, _ty) => {
                 cplace = cplace.place_field(fx, field);
             }
@@ -916,7 +920,7 @@ pub(crate) fn codegen_operand<'tcx>(
             let cplace = codegen_place(fx, *place);
             cplace.to_cvalue(fx)
         }
-        Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+        Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
     }
 }
diff --git a/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs b/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
index dfde97920..f855e20e0 100644
--- a/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
+++ b/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
@@ -10,6 +10,7 @@ pub(super) struct ConcurrencyLimiter {
     helper_thread: Option<HelperThread>,
     state: Arc<Mutex<state::ConcurrencyLimiterState>>,
     available_token_condvar: Arc<Condvar>,
+    finished: bool,
 }

 impl ConcurrencyLimiter {
@@ -32,6 +33,7 @@ impl ConcurrencyLimiter {
             helper_thread: Some(helper_thread),
             state,
             available_token_condvar: Arc::new(Condvar::new()),
+            finished: false,
         }
     }

@@ -56,16 +58,23 @@ impl ConcurrencyLimiter {
         let mut state = self.state.lock().unwrap();
         state.job_already_done();
     }
-}

-impl Drop for ConcurrencyLimiter {
-    fn drop(&mut self) {
-        //
+    pub(crate) fn finished(mut self) {
         self.helper_thread.take();

         // Assert that all jobs have finished
         let state = Mutex::get_mut(Arc::get_mut(&mut self.state).unwrap()).unwrap();
         state.assert_done();
+
+        self.finished = true;
+    }
+}
+
+impl Drop for ConcurrencyLimiter {
+    fn drop(&mut self) {
+        if !self.finished && !std::thread::panicking() {
+            panic!("Forgot to call finished() on ConcurrencyLimiter");
+        }
     }
 }
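`ConcurrencyLimiter` now follows the explicit-destructor pattern: the owner must call `finished()`, and a plain `Drop` turns a forgotten call into a panic, suppressed while already unwinding so it cannot escalate into an abort. A self-contained sketch of the pattern, with names of my own choosing:

    struct Guard {
        finished: bool,
    }

    impl Guard {
        fn new() -> Self {
            Guard { finished: false }
        }

        // Consuming `self` is the only sanctioned way to dispose of the guard.
        fn finished(mut self) {
            // ... release resources, assert invariants ...
            self.finished = true;
            // The normal drop still runs here, but now sees `finished == true`.
        }
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            // Don't turn an unrelated panic into a double panic (abort).
            if !self.finished && !std::thread::panicking() {
                panic!("Forgot to call finished() on Guard");
            }
        }
    }

    fn main() {
        let g = Guard::new();
        g.finished(); // commenting this out would panic at scope end
    }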
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 6b4ed9b9d..148b66d95 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -5,10 +5,7 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::interpret::{
     read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
 };
-use rustc_middle::ty::ConstKind;
-use rustc_span::DUMMY_SP;

-use cranelift_codegen::ir::GlobalValueData;
 use cranelift_module::*;

 use crate::prelude::*;
@@ -42,15 +39,7 @@ pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
     let mut all_constants_ok = true;
     for constant in &fx.mir.required_consts {
         let unevaluated = match fx.monomorphize(constant.literal) {
-            ConstantKind::Ty(ct) => match ct.kind() {
-                ConstKind::Unevaluated(uv) => uv.expand(),
-                ConstKind::Value(_) => continue,
-                ConstKind::Param(_)
-                | ConstKind::Infer(_)
-                | ConstKind::Bound(_, _)
-                | ConstKind::Placeholder(_)
-                | ConstKind::Error(_) => unreachable!("{:?}", ct),
-            },
+            ConstantKind::Ty(_) => unreachable!(),
             ConstantKind::Unevaluated(uv, _) => uv,
             ConstantKind::Val(..) => continue,
         };
@@ -90,53 +79,46 @@ pub(crate) fn codegen_tls_ref<'tcx>(
     CValue::by_val(tls_ptr, layout)
 }

-fn codegen_static_ref<'tcx>(
-    fx: &mut FunctionCx<'_, '_, 'tcx>,
-    def_id: DefId,
-    layout: TyAndLayout<'tcx>,
-) -> CPlace<'tcx> {
-    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
-    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
-    if fx.clif_comments.enabled() {
-        fx.add_comment(local_data_id, format!("{:?}", def_id));
-    }
-    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
-    assert!(!layout.is_unsized(), "unsized statics aren't supported");
-    assert!(
-        matches!(
-            fx.bcx.func.global_values[local_data_id],
-            GlobalValueData::Symbol { tls: false, .. }
-        ),
-        "tls static referenced without Rvalue::ThreadLocalRef"
-    );
-    CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
-}
-
-pub(crate) fn codegen_constant<'tcx>(
+pub(crate) fn eval_mir_constant<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     constant: &Constant<'tcx>,
-) -> CValue<'tcx> {
-    let (const_val, ty) = match fx.monomorphize(constant.literal) {
-        ConstantKind::Ty(const_) => unreachable!("{:?}", const_),
-        ConstantKind::Unevaluated(ty::Unevaluated { def, substs, promoted }, ty)
+) -> (ConstValue<'tcx>, Ty<'tcx>) {
+    let constant_kind = fx.monomorphize(constant.literal);
+    let uv = match constant_kind {
+        ConstantKind::Ty(const_) => match const_.kind() {
+            ty::ConstKind::Unevaluated(uv) => uv.expand(),
+            ty::ConstKind::Value(val) => {
+                return (fx.tcx.valtree_to_const_val((const_.ty(), val)), const_.ty());
+            }
+            err => span_bug!(
+                constant.span,
+                "encountered bad ConstKind after monomorphizing: {:?}",
+                err
+            ),
+        },
+        ConstantKind::Unevaluated(mir::UnevaluatedConst { def, .. }, _)
             if fx.tcx.is_static(def.did) =>
         {
-            assert!(substs.is_empty());
-            assert!(promoted.is_none());
-
-            return codegen_static_ref(fx, def.did, fx.layout_of(ty)).to_cvalue(fx);
-        }
-        ConstantKind::Unevaluated(unevaluated, ty) => {
-            match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
-                Ok(const_val) => (const_val, ty),
-                Err(_) => {
-                    span_bug!(constant.span, "erroneous constant not captured by required_consts");
-                }
-            }
+            span_bug!(constant.span, "MIR constant refers to static");
         }
-        ConstantKind::Val(val, ty) => (val, ty),
+        ConstantKind::Unevaluated(uv, _) => uv,
+        ConstantKind::Val(val, _) => return (val, constant_kind.ty()),
     };

+    (
+        fx.tcx.const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).unwrap_or_else(|_err| {
+            span_bug!(constant.span, "erroneous constant not captured by required_consts");
+        }),
+        constant_kind.ty(),
+    )
+}
+
+pub(crate) fn codegen_constant_operand<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+    let (const_val, ty) = eval_mir_constant(fx, constant);
+
     codegen_const_value(fx, const_val, ty)
 }

@@ -253,7 +235,7 @@ pub(crate) fn codegen_const_value<'tcx>(
     }
 }

-pub(crate) fn pointer_for_allocation<'tcx>(
+fn pointer_for_allocation<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     alloc: ConstAllocation<'tcx>,
 ) -> crate::pointer::Pointer {
@@ -308,7 +290,7 @@ fn data_id_for_static(
     let is_mutable = if tcx.is_mutable_static(def_id) {
         true
     } else {
-        !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+        !ty.is_freeze(tcx, ParamEnv::reveal_all())
     };
     let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
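The `is_freeze` check above is what keeps interior-mutable statics out of read-only data sections. A plain-Rust illustration of why a non-`mut` static can still need writable memory:

    use std::sync::atomic::{AtomicU32, Ordering};

    // Not `static mut`, yet the data must live in a writable section:
    // `AtomicU32` has interior mutability (it is `!Freeze`), so it is
    // mutated in place through a shared reference.
    static COUNTER: AtomicU32 = AtomicU32::new(0);

    fn bump() -> u32 {
        COUNTER.fetch_add(1, Ordering::Relaxed)
    }

    fn main() {
        bump();
        assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
    }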
@@ -476,14 +458,13 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
     operand: &Operand<'tcx>,
 ) -> Option<ConstValue<'tcx>> {
     match operand {
-        Operand::Constant(const_) => match const_.literal {
-            ConstantKind::Ty(const_) => fx
-                .monomorphize(const_)
-                .eval_for_mir(fx.tcx, ParamEnv::reveal_all())
-                .try_to_value(fx.tcx),
+        Operand::Constant(const_) => match fx.monomorphize(const_.literal) {
+            ConstantKind::Ty(const_) => Some(
+                const_.eval_for_mir(fx.tcx, ParamEnv::reveal_all()).try_to_value(fx.tcx).unwrap(),
+            ),
             ConstantKind::Val(val, _) => Some(val),
             ConstantKind::Unevaluated(uv, _) => {
-                fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), uv, None).ok()
+                Some(fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), uv, None).unwrap())
             }
         },
         // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
@@ -499,7 +480,16 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
                 match &stmt.kind {
                     StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
                         match &local_and_rvalue.1 {
-                            Rvalue::Cast(CastKind::Misc, operand, ty) => {
+                            Rvalue::Cast(
+                                CastKind::IntToInt
+                                | CastKind::FloatToFloat
+                                | CastKind::FloatToInt
+                                | CastKind::IntToFloat
+                                | CastKind::FnPtrToPtr
+                                | CastKind::PtrToPtr,
+                                operand,
+                                ty,
+                            ) => {
                                 if computed_const_val.is_some() {
                                     return None; // local assigned twice
                                 }
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index 8eabe1cbc..f873561c1 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -106,7 +106,7 @@ impl OngoingCodegen {
             }
         }

-        drop(self.concurrency_limiter);
+        self.concurrency_limiter.finished();

         (
             CodegenResults {
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 0e77e4004..6a430b521 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -67,13 +67,12 @@ fn create_jit_module(
     hotswap: bool,
 ) -> (JITModule, CodegenCx) {
     let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
-    let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);

     let isa = crate::build_isa(tcx.sess, backend_config);
     let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
     jit_builder.hotswap(hotswap);
     crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
-    jit_builder.symbols(imported_symbols);
+    jit_builder.symbol_lookup_fn(dep_symbol_lookup_fn(tcx.sess, crate_info));
     jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
     let mut jit_module = JITModule::new(jit_builder);

@@ -286,10 +285,10 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
     })
 }

-fn load_imported_symbols_for_jit(
+fn dep_symbol_lookup_fn(
     sess: &Session,
     crate_info: CrateInfo,
-) -> Vec<(String, *const u8)> {
+) -> Box<dyn Fn(&str) -> Option<*const u8>> {
     use rustc_middle::middle::dependency_format::Linkage;

     let mut dylib_paths = Vec::new();
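`dep_symbol_lookup_fn` (continued in the next hunk) swaps eager symbol harvesting for an on-demand `dlsym`-style lookup handed to `JITBuilder`. A standalone sketch of that shape using `libloading`; the `dylib_paths` parameter here is a stand-in for the session's dependency list:

    // Assumed dependency: libloading = "0.7"
    use libloading::Library;

    fn dep_symbol_lookup_fn(
        dylib_paths: Vec<std::path::PathBuf>,
    ) -> Box<dyn Fn(&str) -> Option<*const u8>> {
        // Leak the libraries so the resolved symbol addresses stay valid
        // for the lifetime of the process, as the hunk below also does.
        let libs = Box::leak(
            dylib_paths
                .into_iter()
                .map(|path| unsafe { Library::new(&path).unwrap() })
                .collect::<Box<[_]>>(),
        );

        Box::new(move |sym_name| {
            for lib in &*libs {
                // `get` performs a dlsym-style lookup; first hit wins.
                if let Ok(sym) = unsafe { lib.get::<*const u8>(sym_name.as_bytes()) } {
                    return Some(*sym);
                }
            }
            None
        })
    }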
@@ -316,39 +315,23 @@ fn load_imported_symbols_for_jit(
         }
     }

-    let mut imported_symbols = Vec::new();
-    for path in dylib_paths {
-        use object::{Object, ObjectSymbol};
-        let lib = libloading::Library::new(&path).unwrap();
-        let obj = std::fs::read(path).unwrap();
-        let obj = object::File::parse(&*obj).unwrap();
-        imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
-            let name = symbol.name().unwrap().to_string();
-            if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
-                return None;
-            }
-            if name.starts_with("rust_metadata_") {
-                // The metadata is part of a section that is not loaded by the dynamic linker in
-                // case of cg_llvm.
-                return None;
-            }
-            let dlsym_name = if cfg!(target_os = "macos") {
-                // On macOS `dlsym` expects the name without leading `_`.
-                assert!(name.starts_with('_'), "{:?}", name);
-                &name[1..]
-            } else {
-                &name
-            };
-            let symbol: libloading::Symbol<'_, *const u8> =
-                unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
-            Some((name, *symbol))
-        }));
-        std::mem::forget(lib)
-    }
+    let imported_dylibs = Box::leak(
+        dylib_paths
+            .into_iter()
+            .map(|path| unsafe { libloading::Library::new(&path).unwrap() })
+            .collect::<Box<[_]>>(),
+    );

     sess.abort_if_errors();

-    imported_symbols
+    Box::new(move |sym_name| {
+        for dylib in &*imported_dylibs {
+            if let Ok(sym) = unsafe { dylib.get::<*const u8>(sym_name.as_bytes()) } {
+                return Some(*sym);
+            }
+        }
+        None
+    })
 }

 fn codegen_shim<'tcx>(
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 8b3d475cb..3fcc84d39 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -27,7 +27,7 @@ pub(crate) fn codegen_inline_asm<'tcx>(
     }

     // Used by stdarch
-    if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
+    if template[0] == InlineAsmTemplatePiece::String("mov ".to_string())
         && matches!(
             template[1],
             InlineAsmTemplatePiece::Placeholder {
@@ -36,24 +36,26 @@ pub(crate) fn codegen_inline_asm<'tcx>(
                 span: _
             }
         )
-        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
-        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
-        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
-        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
+        && template[2] == InlineAsmTemplatePiece::String(", rbx".to_string())
+        && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
+        && template[4] == InlineAsmTemplatePiece::String("cpuid".to_string())
+        && template[5] == InlineAsmTemplatePiece::String("\n".to_string())
+        && template[6] == InlineAsmTemplatePiece::String("xchg ".to_string())
         && matches!(
-            template[6],
+            template[7],
             InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
         )
+        && template[8] == InlineAsmTemplatePiece::String(", rbx".to_string())
     {
         assert_eq!(operands.len(), 4);
         let (leaf, eax_place) = match operands[1] {
             InlineAsmOperand::InOut {
                 reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
-                late: true,
+                late: _,
                 ref in_value,
                 out_place: Some(out_place),
             } => (
@@ -68,7 +70,7 @@ pub(crate) fn codegen_inline_asm<'tcx>(
                 InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                     X86InlineAsmRegClass::reg,
                 )),
-                late: true,
+                late: _,
                 place: Some(place),
             } => crate::base::codegen_place(fx, place),
             _ => unreachable!(),
@@ -76,7 +78,7 @@ pub(crate) fn codegen_inline_asm<'tcx>(
         let (sub_leaf, ecx_place) = match operands[2] {
             InlineAsmOperand::InOut {
                 reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
-                late: true,
+                late: _,
                 ref in_value,
                 out_place: Some(out_place),
             } => (
@@ -88,7 +90,7 @@ pub(crate) fn codegen_inline_asm<'tcx>(
         let edx_place = match operands[3] {
             InlineAsmOperand::Out {
                 reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
-                late: true,
+                late: _,
                 place: Some(place),
             } => crate::base::codegen_place(fx, place),
             _ => unreachable!(),
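The updated matcher recognizes stdarch's Intel-syntax `__cpuid` sequence, which parks `rbx` in a scratch register because LLVM reserves `rbx`. Reconstructed for illustration, the kind of `asm!` invocation that fits the nine template pieces checked above (x86_64 only; operand details approximate, and `late: _` means the matcher no longer cares whether the operands are late outputs):

    #[cfg(target_arch = "x86_64")]
    unsafe fn cpuid(leaf: u32, sub_leaf: u32) -> (u32, u32, u32, u32) {
        use std::arch::asm;
        let (eax, ebx, ecx, edx): (u32, u32, u32, u32);
        asm!(
            "mov {0:r}, rbx",  // template[0] "mov ", [1] placeholder, [2] ", rbx"
            "cpuid",           // template[3] "\n", [4] "cpuid"
            "xchg {0:r}, rbx", // template[5] "\n", [6] "xchg ", [7] placeholder, [8] ", rbx"
            out(reg) ebx,               // operands[0]: ebx via a scratch register
            inout("eax") leaf => eax,   // operands[1]
            inout("ecx") sub_leaf => ecx, // operands[2]
            out("edx") edx,             // operands[3]
            options(nostack, preserves_flags),
        );
        (eax, ebx, ecx, edx)
    }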
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index a799dca93..783d426c3 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -14,6 +14,10 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     target: Option<BasicBlock>,
 ) {
     match intrinsic {
+        "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
+            // Spin loop hint
+        }
+
         // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
         "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
             intrinsic_args!(fx, args => (a); intrinsic);
@@ -25,8 +29,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
             let mut res = fx.bcx.ins().iconst(types::I32, 0);
             for lane in (0..lane_count).rev() {
-                let a_lane =
-                    a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+                let a_lane = a.value_lane(fx, lane).load_scalar(fx);

                 // cast float to int
                 let a_lane = match lane_ty {
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 2e4ca594f..0302b843a 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -84,6 +84,30 @@ fn simd_for_each_lane<'tcx>(
     }
 }

+fn simd_pair_for_each_lane_typed<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    x: CValue<'tcx>,
+    y: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
+) {
+    assert_eq!(x.layout(), y.layout());
+    let layout = x.layout();
+
+    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+    assert_eq!(lane_count, ret_lane_count);
+
+    for lane_idx in 0..lane_count {
+        let x_lane = x.value_lane(fx, lane_idx);
+        let y_lane = y.value_lane(fx, lane_idx);
+
+        let res_lane = f(fx, x_lane, y_lane);
+
+        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+    }
+}
+
 fn simd_pair_for_each_lane<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     x: CValue<'tcx>,
@@ -504,37 +528,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };

-            let signed = type_sign(lhs.layout().ty);
-
-            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
-
-            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
-            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
-
-            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
-
-            let val = match (intrinsic, signed) {
-                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
-                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
-                (sym::saturating_add, true) => {
-                    let rhs = rhs.load_scalar(fx);
-                    let rhs_ge_zero =
-                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
-                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
-                    fx.bcx.ins().select(has_overflow, sat_val, val)
-                }
-                (sym::saturating_sub, true) => {
-                    let rhs = rhs.load_scalar(fx);
-                    let rhs_ge_zero =
-                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
-                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
-                    fx.bcx.ins().select(has_overflow, sat_val, val)
-                }
-                _ => unreachable!(),
-            };
-
-            let res = CValue::by_val(val, lhs.layout());
-
+            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
             ret.write_cvalue(fx, res);
         }
         sym::rotate_left => {
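The saturating intrinsics now delegate to `crate::num::codegen_saturating_int_binop`, added further down in num.rs and reused for the lanewise SIMD case. Its select-on-overflow logic corresponds to this scalar model (signed addition case, illustrative only):

    fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
        // codegen_checked_int_binop: the plain result plus an overflow flag.
        let (val, has_overflow) = lhs.overflowing_add(rhs);
        // Signed case: saturate toward MAX for a non-negative rhs, MIN otherwise.
        let sat_val = if rhs >= 0 { i32::MAX } else { i32::MIN };
        if has_overflow { sat_val } else { val }
    }

    fn main() {
        assert_eq!(saturating_add_i32(i32::MAX, 1), i32::MAX);
        assert_eq!(saturating_add_i32(i32::MIN, -1), i32::MIN);
        assert_eq!(saturating_add_i32(1, 2), 3);
    }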
@@ -819,8 +813,8 @@ fn codegen_regular_intrinsic_call<'tcx>(
         sym::ptr_guaranteed_cmp => {
             intrinsic_args!(fx, args => (a, b); intrinsic);

-            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
-            ret.write_cvalue(fx, val);
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
+            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
         }

         sym::caller_location => {
@@ -1206,7 +1200,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             // FIXME once unwinding is supported, change this to actually catch panics
             let f_sig = fx.bcx.func.import_signature(Signature {
                 call_conv: fx.target_config.default_call_conv,
-                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                 returns: vec![],
             });
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 1f358b1bb..51fce8c85 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -2,6 +2,7 @@

 use rustc_middle::ty::subst::SubstsRef;
 use rustc_span::Symbol;
+use rustc_target::abi::Endian;

 use super::*;
 use crate::prelude::*;
@@ -26,7 +27,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
     span: Span,
 ) {
     match intrinsic {
-        sym::simd_cast => {
+        sym::simd_as | sym::simd_cast => {
             intrinsic_args!(fx, args => (a); intrinsic);

             if !a.layout().ty.is_simd() {
@@ -162,6 +163,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 }
             }
         } else {
+            // FIXME remove this case
             intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
         };

@@ -650,8 +652,128 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             }
         }

-        // simd_saturating_*
-        // simd_bitmask
+        sym::simd_select_bitmask => {
+            intrinsic_args!(fx, args => (m, a, b); intrinsic);
+
+            if !a.layout().ty.is_simd() {
+                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+                return;
+            }
+            assert_eq!(a.layout(), b.layout());
+
+            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+            let lane_layout = fx.layout_of(lane_ty);
+
+            let m = m.load_scalar(fx);
+
+            for lane in 0..lane_count {
+                let m_lane = fx.bcx.ins().ushr_imm(m, u64::from(lane) as i64);
+                let m_lane = fx.bcx.ins().band_imm(m_lane, 1);
+                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+                let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
+                let res_lane =
+                    CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+            }
+        }
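Lanewise, `simd_select_bitmask` picks each result lane from `a` (mask bit set) or `b` (mask bit clear). The loop above computes the equivalent of this scalar model:

    fn select_bitmask_lane(mask: u64, lane: u32, a_lane: i32, b_lane: i32) -> i32 {
        // Bit `lane` of the mask chooses lane `lane` of `a` (set) or `b` (clear).
        if (mask >> lane) & 1 != 0 { a_lane } else { b_lane }
    }

    fn main() {
        // mask 0b01: lane 0 comes from a, lane 1 from b
        assert_eq!(select_bitmask_lane(0b01, 0, 10, 20), 10);
        assert_eq!(select_bitmask_lane(0b01, 1, 11, 21), 21);
    }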
+        sym::simd_bitmask => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
+            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+            let lane_clif_ty = fx.clif_type(lane_ty).unwrap();
+
+            // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+            // vector mask and returns the most significant bit (MSB) of each lane in the form
+            // of either:
+            // * an unsigned integer
+            // * an array of `u8`
+            // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
+            //
+            // The bit order of the result depends on the byte endianness, LSB-first for little
+            // endian and MSB-first for big endian.
+            let expected_int_bits = lane_count.max(8);
+            let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+
+            match lane_ty.kind() {
+                ty::Int(_) | ty::Uint(_) => {}
+                _ => {
+                    fx.tcx.sess.span_fatal(
+                        span,
+                        &format!(
+                            "invalid monomorphization of `simd_bitmask` intrinsic: \
+                             vector argument `{}`'s element type `{}`, expected integer element \
+                             type",
+                            a.layout().ty,
+                            lane_ty
+                        ),
+                    );
+                }
+            }
+
+            let res_type =
+                Type::int_with_byte_size(u16::try_from(expected_bytes).unwrap()).unwrap();
+            let mut res = fx.bcx.ins().iconst(res_type, 0);
+
+            let lanes = match fx.tcx.sess.target.endian {
+                Endian::Big => Box::new(0..lane_count) as Box<dyn Iterator<Item = u64>>,
+                Endian::Little => Box::new((0..lane_count).rev()) as Box<dyn Iterator<Item = u64>>,
+            };
+            for lane in lanes {
+                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+
+                // extract sign bit of an int
+                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_clif_ty.bits() - 1));
+
+                // shift sign bit into result
+                let a_lane_sign = clif_intcast(fx, a_lane_sign, res_type, false);
+                res = fx.bcx.ins().ishl_imm(res, 1);
+                res = fx.bcx.ins().bor(res, a_lane_sign);
+            }
+
+            match ret.layout().ty.kind() {
+                ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {}
+                ty::Array(elem, len)
+                    if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+                        && len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+                            == Some(expected_bytes) => {}
+                _ => {
+                    fx.tcx.sess.span_fatal(
+                        span,
+                        &format!(
+                            "invalid monomorphization of `simd_bitmask` intrinsic: \
+                             cannot return `{}`, expected `u{}` or `[u8; {}]`",
+                            ret.layout().ty,
+                            expected_int_bits,
+                            expected_bytes
+                        ),
+                    );
+                }
+            }
+
+            let res = CValue::by_val(res, ret.layout());
+            ret.write_cvalue(fx, res);
+        }
+
+        sym::simd_saturating_add | sym::simd_saturating_sub => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            let bin_op = match intrinsic {
+                sym::simd_saturating_add => BinOp::Add,
+                sym::simd_saturating_sub => BinOp::Sub,
+                _ => unreachable!(),
+            };
+
+            // FIXME use vector instructions when possible
+            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
+                crate::num::codegen_saturating_int_binop(fx, bin_op, x_lane, y_lane)
+            });
+        }
+
+        // simd_arith_offset
         // simd_scatter
         // simd_gather
         _ => {
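On little-endian targets `simd_bitmask` walks the lanes in reverse while shifting left, so lane 0 lands in bit 0 (LSB-first). A scalar model of the MSB packing (illustrative):

    fn bitmask(lanes: [i8; 8]) -> u8 {
        let mut res = 0u8;
        // Little-endian order: iterate lanes in reverse and shift left,
        // so lane 0 ends up in bit 0, matching the loop above.
        for lane in lanes.iter().rev() {
            let msb = ((*lane as u8) >> 7) & 1;
            res = (res << 1) | msb;
        }
        res
    }

    fn main() {
        // Only lane 0 is "true" (MSB set), so only bit 0 is set.
        assert_eq!(bitmask([-1, 0, 0, 0, 0, 0, 0, 0]), 0b0000_0001);
    }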
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 913414e76..629d79d50 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -96,8 +96,8 @@ mod prelude {
     pub(crate) use cranelift_codegen::ir::function::Function;
     pub(crate) use cranelift_codegen::ir::types;
     pub(crate) use cranelift_codegen::ir::{
-        AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
-        StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+        AbiParam, Block, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc, StackSlot,
+        StackSlotData, StackSlotKind, TrapCode, Type, Value,
     };
     pub(crate) use cranelift_codegen::isa::{self, CallConv};
     pub(crate) use cranelift_codegen::Context;
@@ -251,7 +251,6 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
     let mut flags_builder = settings::builder();
     flags_builder.enable("is_pic").unwrap();
-    flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
     let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
     flags_builder.set("enable_verifier", enable_verifier).unwrap();
     flags_builder.set("regalloc_checker", enable_verifier).unwrap();
@@ -279,6 +278,15 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
         }
     }

+    if target_triple.architecture == target_lexicon::Architecture::X86_64 {
+        // Windows depends on stack probes to grow the committed part of the stack
+        flags_builder.enable("enable_probestack").unwrap();
+        flags_builder.set("probestack_strategy", "inline").unwrap();
+    } else {
+        // __cranelift_probestack is not provided and inline stack probes are only supported on x86_64
+        flags_builder.set("enable_probestack", "false").unwrap();
+    }
+
     let flags = settings::Flags::new(flags_builder);

     let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
index 3c024a84d..cae6312a6 100644
--- a/compiler/rustc_codegen_cranelift/src/main_shim.rs
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -75,7 +75,7 @@ pub(crate) fn maybe_create_entry_wrapper(
         let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();

         let mut ctx = Context::new();
-        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+        ctx.func.signature = cmain_sig;
         {
             let mut func_ctx = FunctionBuilderContext::new();
             let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
index 4ce8adb18..ecbab408d 100644
--- a/compiler/rustc_codegen_cranelift/src/num.rs
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -150,18 +150,12 @@ pub(crate) fn codegen_int_binop<'tcx>(
         BinOp::BitXor => b.bxor(lhs, rhs),
         BinOp::BitAnd => b.band(lhs, rhs),
         BinOp::BitOr => b.bor(lhs, rhs),
-        BinOp::Shl => {
-            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
-            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
-            fx.bcx.ins().ishl(lhs, actual_shift)
-        }
+        BinOp::Shl => b.ishl(lhs, rhs),
         BinOp::Shr => {
-            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
-            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
             if signed {
-                fx.bcx.ins().sshr(lhs, actual_shift)
+                b.sshr(lhs, rhs)
             } else {
-                fx.bcx.ins().ushr(lhs, actual_shift)
+                b.ushr(lhs, rhs)
             }
         }
         // Compare binops handles by `codegen_binop`.
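The manual `band_imm` masking could go because Cranelift's shift instructions now wrap the shift amount by the type width themselves, matching the semantics Rust exposes as `wrapping_shl`/`wrapping_shr` (sketch):

    fn main() {
        let x: u32 = 1;
        // wrapping_shl masks the amount to `bits - 1` (here 31), the same
        // masking the removed `band_imm(rhs, lhs_ty.bits() - 1)` performed.
        assert_eq!(x.wrapping_shl(33), x << (33 & 31)); // both equal 2
    }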
@@ -279,22 +273,15 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
             }
         }
         BinOp::Shl => {
-            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
-            let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
-            let val = fx.bcx.ins().ishl(lhs, masked_shift);
+            let val = fx.bcx.ins().ishl(lhs, rhs);
             let ty = fx.bcx.func.dfg.value_type(val);
             let max_shift = i64::from(ty.bits()) - 1;
             let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
             (val, has_overflow)
         }
         BinOp::Shr => {
-            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
-            let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
-            let val = if !signed {
-                fx.bcx.ins().ushr(lhs, masked_shift)
-            } else {
-                fx.bcx.ins().sshr(lhs, masked_shift)
-            };
+            let val =
+                if !signed { fx.bcx.ins().ushr(lhs, rhs) } else { fx.bcx.ins().sshr(lhs, rhs) };
             let ty = fx.bcx.func.dfg.value_type(val);
             let max_shift = i64::from(ty.bits()) - 1;
             let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
@@ -309,6 +296,42 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
     CValue::by_val_pair(res, has_overflow, out_layout)
 }

+pub(crate) fn codegen_saturating_int_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    lhs: CValue<'tcx>,
+    rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    assert_eq!(lhs.layout().ty, rhs.layout().ty);
+
+    let signed = type_sign(lhs.layout().ty);
+    let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
+    let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
+    let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
+    let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+
+    let val = match (bin_op, signed) {
+        (BinOp::Add, false) => fx.bcx.ins().select(has_overflow, max, val),
+        (BinOp::Sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+        (BinOp::Add, true) => {
+            let rhs = rhs.load_scalar(fx);
+            let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+            let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+            fx.bcx.ins().select(has_overflow, sat_val, val)
+        }
+        (BinOp::Sub, true) => {
+            let rhs = rhs.load_scalar(fx);
+            let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+            let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+            fx.bcx.ins().select(has_overflow, sat_val, val)
+        }
+        _ => unreachable!(),
+    };
+
+    CValue::by_val(val, lhs.layout())
+}
+
 pub(crate) fn codegen_float_binop<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     bin_op: BinOp,
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index dd9d891dd..9c88f7dbc 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -25,7 +25,12 @@ pub(crate) fn unsized_info<'tcx>(
             .bcx
             .ins()
             .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
-        (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+        (
+            &ty::Dynamic(ref data_a, _, src_dyn_kind),
+            &ty::Dynamic(ref data_b, _, target_dyn_kind),
+        ) => {
+            assert_eq!(src_dyn_kind, target_dyn_kind);
+
             let old_info =
                 old_info.expect("unsized_info: missing old info for trait upcasting coercion");
             if data_a.principal_def_id() == data_b.principal_def_id() {
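`unsized_info` produces the pointer metadata for an unsizing coercion: a constant length for arrays, a vtable for trait objects. The source-level coercions it serves look like this (plain illustrative example):

    use std::fmt::Debug;

    fn main() {
        let arr: &[u8; 4] = &[1, 2, 3, 4];
        // Array-to-slice: the metadata is the constant length 4 (the iconst above).
        let slice: &[u8] = arr;
        assert_eq!(slice.len(), 4);

        // Sized-to-dyn: the metadata is the vtable for `u32: Debug`.
        let value: &u32 = &7;
        let dyn_ref: &dyn Debug = value;
        println!("{dyn_ref:?}");
    }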
@@ -101,6 +106,21 @@ fn unsize_ptr<'tcx>(
     }
 }

+/// Coerces `src` to `dst_ty` which is guaranteed to be a `dyn*` type.
+pub(crate) fn cast_to_dyn_star<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    src: Value,
+    src_ty_and_layout: TyAndLayout<'tcx>,
+    dst_ty: Ty<'tcx>,
+    old_info: Option<Value>,
+) -> (Value, Value) {
+    assert!(
+        matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+        "destination type must be a dyn*"
+    );
+    (src, unsized_info(fx, src_ty_and_layout.ty, dst_ty, old_info))
+}
+
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
 pub(crate) fn coerce_unsized_into<'tcx>(
@@ -147,6 +167,24 @@ pub(crate) fn coerce_unsized_into<'tcx>(
     }
 }

+pub(crate) fn coerce_dyn_star<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    src: CValue<'tcx>,
+    dst: CPlace<'tcx>,
+) {
+    let (data, extra) = if let ty::Dynamic(_, _, ty::DynStar) = src.layout().ty.kind() {
+        let (data, vtable) = src.load_scalar_pair(fx);
+        (data, Some(vtable))
+    } else {
+        let data = src.load_scalar(fx);
+        (data, None)
+    };
+
+    let (data, vtable) = cast_to_dyn_star(fx, data, src.layout(), dst.layout().ty, extra);
+
+    dst.write_cvalue(fx, CValue::by_val_pair(data, vtable, dst.layout()));
+}
+
 // Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
 pub(crate) fn size_and_align_of_dst<'tcx>(
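`coerce_dyn_star` builds a two-word scalar pair, one of the three value representations cg_clif tracks. A simplified, self-contained model of the `CValueInner` enum used in the next file (types stubbed out purely for illustration):

    // Stub types standing in for Cranelift's `Value` and cg_clif's `Pointer`.
    type Value = u32;
    type Pointer = u64;

    #[allow(dead_code)]
    enum CValueInner {
        // A single SSA value, e.g. an integer.
        ByVal(Value),
        // Two SSA values, e.g. the (data, vtable) pair of a `dyn*`.
        ByValPair(Value, Value),
        // Stored in memory: a pointer plus optional metadata for unsized values.
        ByRef(Pointer, Option<Value>),
    }

    fn main() {
        let dyn_star = CValueInner::ByValPair(42, 0xdead);
        if let CValueInner::ByValPair(data, vtable) = dyn_star {
            println!("data = {data}, vtable = {vtable:#x}");
        }
    }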
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index cfaadca94..c3dfbd372 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -107,6 +107,50 @@ impl<'tcx> CValue<'tcx> {
         }
     }

+    // FIXME remove
+    // Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
+    // vtable pointer.
+    pub(crate) fn dyn_star_force_data_on_stack(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+    ) -> (Value, Value) {
+        assert!(self.1.ty.is_dyn_star());
+
+        match self.0 {
+            CValueInner::ByRef(ptr, None) => {
+                let (a_scalar, b_scalar) = match self.1.abi {
+                    Abi::ScalarPair(a, b) => (a, b),
+                    _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
+                };
+                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+                (ptr.get_addr(fx), vtable)
+            }
+            CValueInner::ByValPair(data, vtable) => {
+                let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
+                    kind: StackSlotKind::ExplicitSlot,
+                    // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+                    // specify stack slot alignment.
+                    size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
+                        / 16
+                        * 16,
+                });
+                let data_ptr = Pointer::stack_slot(stack_slot);
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                data_ptr.store(fx, data, flags);
+
+                (data_ptr.get_addr(fx), vtable)
+            }
+            CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
+                unreachable!("dyn_star_force_data_on_stack({:?})", self)
+            }
+        }
+    }
+
     pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
         match self.0 {
             CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
@@ -236,6 +280,10 @@ impl<'tcx> CValue<'tcx> {
         crate::unsize::coerce_unsized_into(fx, self, dest);
     }

+    pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+        crate::unsize::coerce_dyn_star(fx, self, dest);
+    }
+
     /// If `ty` is signed, `const_val` must already be sign extended.
     pub(crate) fn const_val(
         fx: &mut FunctionCx<'_, '_, 'tcx>,
@@ -621,6 +669,14 @@ impl<'tcx> CPlace<'tcx> {
         }
     }

+    pub(crate) fn place_opaque_cast(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        ty: Ty<'tcx>,
+    ) -> CPlace<'tcx> {
+        CPlace { inner: self.inner, layout: fx.layout_of(ty) }
+    }
+
     pub(crate) fn place_field(
         self,
         fx: &mut FunctionCx<'_, '_, 'tcx>,
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index 36b3725ef..f04fb82de 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -45,12 +45,26 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     arg: CValue<'tcx>,
     idx: usize,
-) -> (Value, Value) {
-    let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
-        arg.load_scalar_pair(fx)
-    } else {
-        let (ptr, vtable) = arg.try_to_ptr().unwrap();
-        (ptr.get_addr(fx), vtable.unwrap())
+) -> (Pointer, Value) {
+    let (ptr, vtable) = 'block: {
+        if let ty::Ref(_, ty, _) = arg.layout().ty.kind() {
+            if ty.is_dyn_star() {
+                let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
+                let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
+                let ptr = dyn_star.place_field(fx, mir::Field::new(0)).to_ptr();
+                let vtable =
+                    dyn_star.place_field(fx, mir::Field::new(1)).to_cvalue(fx).load_scalar(fx);
+                break 'block (ptr, vtable);
+            }
+        }
+
+        if let Abi::ScalarPair(_, _) = arg.layout().abi {
+            let (ptr, vtable) = arg.load_scalar_pair(fx);
+            (Pointer::new(ptr), vtable)
+        } else {
+            let (ptr, vtable) = arg.try_to_ptr().unwrap();
+            (ptr, vtable.unwrap())
+        }
     };

     let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
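Once the data pointer and vtable pointer are separated, the method itself comes from indexing the vtable as an array of pointer-sized slots, which is what the `usize_size` arithmetic that follows feeds into. A rough standalone model (illustrative only; the slot layout is an internal rustc detail):

    /// Read method slot `idx` out of a vtable laid out as consecutive
    /// usize-sized entries, like the `vtable + idx * usize_size` load
    /// performed in get_ptr_and_method_ref.
    unsafe fn method_ptr(vtable: *const usize, idx: usize) -> *const u8 {
        *vtable.add(idx) as *const u8
    }

    fn main() {
        // Fake "vtable" with three slots; slot 2 holds some address.
        let fake: [usize; 3] = [0, 8, 0x1234];
        let m = unsafe { method_ptr(fake.as_ptr(), 2) };
        assert_eq!(m as usize, 0x1234);
    }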