author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit 698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree   173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_codegen_cranelift/src/abi
parent Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_cranelift/src/abi')
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/comments.rs   135
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/mod.rs        611
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs  299
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/returning.rs  141
4 files changed, 1186 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
new file mode 100644
index 000000000..37d2679c1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -0,0 +1,135 @@
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+use rustc_target::abi::call::PassMode;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::prelude::*;
+
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(
+ "kind  loc.idx   param    pass mode                            ty".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_arg_comment<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ kind: &str,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ params: &[Value],
+ arg_abi_mode: PassMode,
+ arg_layout: TyAndLayout<'tcx>,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+
+ let local = if let Some(local) = local {
+ Cow::Owned(format!("{:?}", local))
+ } else {
+ Cow::Borrowed("???")
+ };
+ let local_field = if let Some(local_field) = local_field {
+ Cow::Owned(format!(".{}", local_field))
+ } else {
+ Cow::Borrowed("")
+ };
+
+ let params = match params {
+ [] => Cow::Borrowed("-"),
+ [param] => Cow::Owned(format!("= {:?}", param)),
+ [param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
+ params => Cow::Owned(format!(
+ "= {}",
+ params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
+ )),
+ };
+
+ let pass_mode = format!("{:?}", arg_abi_mode);
+ fx.add_global_comment(format!(
+ "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+ kind = kind,
+ local = local,
+ local_field = local_field,
+ params = params,
+ pass_mode = pass_mode,
+ ty = arg_layout.ty,
+ ));
+}
+
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(String::new());
+ fx.add_global_comment(
+ "kind  local ty                              size align (abi,pref)".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ local: Local,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+ let TyAndLayout { ty, layout } = place.layout();
+ let rustc_target::abi::LayoutS {
+ size,
+ align,
+ abi: _,
+ variants: _,
+ fields: _,
+ largest_niche: _,
+ } = layout.0.0;
+
+ let (kind, extra) = match *place.inner() {
+ CPlaceInner::Var(place_local, var) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var={}", var.index())))
+ }
+ CPlaceInner::VarPair(place_local, var1, var2) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+ }
+ CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+ CPlaceInner::Addr(ptr, meta) => {
+ let meta = if let Some(meta) = meta {
+ Cow::Owned(format!(",meta={}", meta))
+ } else {
+ Cow::Borrowed("")
+ };
+ match ptr.debug_base_and_offset() {
+ (crate::pointer::PointerBase::Addr(addr), offset) => {
+ ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+ ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Dangling(align), offset) => {
+ ("zst", format!("align={},offset={}", align.bytes(), offset).into())
+ }
+ }
+ }
+ };
+
+ fx.add_global_comment(format!(
+ "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+ kind,
+ format!("{:?}", local),
+ format!("{:?}", ty),
+ size.bytes(),
+ align.abi.bytes(),
+ align.pref.bytes(),
+ if extra.is_empty() { "" } else { " " },
+ extra,
+ ));
+}
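
For reference, a hedged sketch of what the assembled annotations look like, following the header and format strings above (the values and exact column alignment are illustrative, not real output):

    kind  loc.idx   param    pass mode                            ty
    ret   _0        -        Ignore                               ()
    arg   _1        = v0     Direct(ArgAttributes { .. })         i32

    kind  local ty                              size align (abi,pref)
    ssa   _1    i32                              4b 4, 4 ,var=0
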
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
new file mode 100644
index 000000000..815450f68
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -0,0 +1,611 @@
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+mod comments;
+mod pass_mode;
+mod returning;
+
+use cranelift_module::ModuleError;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::{AbiParam, SigRef};
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::codegen_return;
+
+fn clif_sig_from_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ default_call_conv: CallConv,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> Signature {
+ let call_conv = match fn_abi.conv {
+ Conv::Rust | Conv::C => default_call_conv,
+ Conv::RustCold => CallConv::Cold,
+ Conv::X86_64SysV => CallConv::SystemV,
+ Conv::X86_64Win64 => CallConv::WindowsFastcall,
+ Conv::ArmAapcs
+ | Conv::CCmseNonSecureCall
+ | Conv::Msp430Intr
+ | Conv::PtxKernel
+ | Conv::X86Fastcall
+ | Conv::X86Intr
+ | Conv::X86Stdcall
+ | Conv::X86ThisCall
+ | Conv::X86VectorCall
+ | Conv::AmdGpuKernel
+ | Conv::AvrInterrupt
+ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
+ };
+ let inputs = fn_abi.args.iter().flat_map(|arg_abi| arg_abi.get_abi_param(tcx));
+
+ let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
+ // Sometimes the first param is a pointer to the place where the return value needs to be stored.
+ let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
+
+ Signature { params, returns, call_conv }
+}
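
As a rough, standalone illustration of the kind of `Signature` that `clif_sig_from_fn_abi` returns, the sketch below hand-builds one for an `extern "C" fn(u8, u64) -> u64` on x86_64 SysV, assuming (as holds for these scalar types) that all values use `PassMode::Direct` and no return pointer is required:

    use cranelift_codegen::ir::{types, AbiParam, Signature};
    use cranelift_codegen::isa::CallConv;

    fn main() {
        // Hand-built equivalent of the Signature the function above would derive
        // from the FnAbi; illustrative only.
        let mut sig = Signature::new(CallConv::SystemV);
        sig.params.push(AbiParam::new(types::I8)); // u8 argument
        sig.params.push(AbiParam::new(types::I64)); // u64 argument
        sig.returns.push(AbiParam::new(types::I64)); // u64 return value
        println!("{}", sig); // prints something like "(i8, i64) -> i64 system_v"
    }
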
+
+pub(crate) fn get_function_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ triple: &target_lexicon::Triple,
+ inst: Instance<'tcx>,
+) -> Signature {
+ assert!(!inst.substs.needs_infer());
+ clif_sig_from_fn_abi(
+ tcx,
+ CallConv::triple_default(triple),
+ &RevealAllLayoutCx(tcx).fn_abi_of_instance(inst, ty::List::empty()),
+ )
+}
+
+/// Instance must be monomorphized
+pub(crate) fn import_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ inst: Instance<'tcx>,
+) -> FuncId {
+ let name = tcx.symbol_name(inst).name;
+ let sig = get_function_sig(tcx, module.isa().triple(), inst);
+ match module.declare_function(name, Linkage::Import, &sig) {
+ Ok(func_id) => func_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` as function, but it was already declared as static"
+ )),
+ Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` with signature {new_sig:?}, \
+ but it was already declared with signature {prev_sig:?}"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ /// Instance must be monomorphized
+ pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
+ let func_id = import_function(self.tcx, self.module, inst);
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", inst));
+ }
+
+ func_ref
+ }
+
+ pub(crate) fn lib_call(
+ &mut self,
+ name: &str,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
+ args: &[Value],
+ ) -> &[Value] {
+ let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
+ let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", name));
+ }
+ let call_inst = self.bcx.ins().call(func_ref, args);
+ if self.clif_comments.enabled() {
+ self.add_comment(call_inst, format!("easy_call {}", name));
+ }
+ let results = self.bcx.inst_results(call_inst);
+ assert!(results.len() <= 2, "{}", results.len());
+ results
+ }
+
+ pub(crate) fn easy_call(
+ &mut self,
+ name: &str,
+ args: &[CValue<'tcx>],
+ return_ty: Ty<'tcx>,
+ ) -> CValue<'tcx> {
+ let (input_tys, args): (Vec<_>, Vec<_>) = args
+ .iter()
+ .map(|arg| {
+ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
+ })
+ .unzip();
+ let return_layout = self.layout_of(return_ty);
+ let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
+ tup.iter().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
+ } else {
+ vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
+ };
+ let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
+ match *ret_vals {
+ [] => CValue::by_ref(
+ Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
+ return_layout,
+ ),
+ [val] => CValue::by_val(val, return_layout),
+ [val, extra] => CValue::by_val_pair(val, extra, return_layout),
+ _ => unreachable!(),
+ }
+ }
+}
+
+/// Make a [`CPlace`] capable of holding a value of the specified type.
+fn make_local_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ is_ssa: bool,
+) -> CPlace<'tcx> {
+ let place = if is_ssa {
+ if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+ CPlace::new_var_pair(fx, local, layout)
+ } else {
+ CPlace::new_var(fx, local, layout)
+ }
+ } else {
+ CPlace::new_stack_slot(fx, layout)
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ place
+}
+
+pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
+ fx.bcx.append_block_params_for_function_params(start_block);
+
+ fx.bcx.switch_to_block(start_block);
+ fx.bcx.ins().nop();
+
+ let ssa_analyzed = crate::analyze::analyze(fx);
+
+ self::comments::add_args_header_comment(fx);
+
+ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
+ let ret_place =
+ self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
+ assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
+
+ // None means pass_mode == PassMode::Ignore
+ enum ArgKind<'tcx> {
+ Normal(Option<CValue<'tcx>>),
+ Spread(Vec<Option<CValue<'tcx>>>),
+ }
+
+ let fn_abi = fx.fn_abi.take().unwrap();
+
+ // FIXME implement variadics in cranelift
+ if fn_abi.c_variadic {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ let mut arg_abis_iter = fn_abi.args.iter();
+
+ let func_params = fx
+ .mir
+ .args_iter()
+ .map(|local| {
+ let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+
+ // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+ if Some(local) == fx.mir.spread_arg {
+ // This argument (e.g. the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual function arguments.
+
+ let tupled_arg_tys = match arg_ty.kind() {
+ ty::Tuple(ref tys) => tys,
+ _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
+ };
+
+ let mut params = Vec::new();
+ for (i, _arg_ty) in tupled_arg_tys.iter().enumerate() {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
+ params.push(param);
+ }
+
+ (local, ArgKind::Spread(params), arg_ty)
+ } else {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
+ (local, ArgKind::Normal(param), arg_ty)
+ }
+ })
+ .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
+
+ assert!(fx.caller_location.is_none());
+ if fx.instance.def.requires_caller_location(fx.tcx) {
+ // Store caller location for `#[track_caller]`.
+ let arg_abi = arg_abis_iter.next().unwrap();
+ fx.caller_location =
+ Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
+ }
+
+ assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
+ fx.fn_abi = Some(fn_abi);
+ assert!(block_params_iter.next().is_none(), "arg_value left behind");
+
+ self::comments::add_locals_header_comment(fx);
+
+ for (local, arg_kind, ty) in func_params {
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ // While this is normally an optimization to prevent an unnecessary copy when an argument is
+ // not mutated by the current function, this is necessary to support unsized arguments.
+ if let ArgKind::Normal(Some(val)) = arg_kind {
+ if let Some((addr, meta)) = val.try_to_ptr() {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so it is fine to borrow the backing storage of this argument
+ // to prevent a copy.
+
+ let place = if let Some(meta) = meta {
+ CPlace::for_ptr_with_extra(addr, meta, val.layout())
+ } else {
+ CPlace::for_ptr(addr, val.layout())
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ assert_eq!(fx.local_map.push(place), local);
+ continue;
+ }
+ }
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+
+ match arg_kind {
+ ArgKind::Normal(param) => {
+ if let Some(param) = param {
+ place.write_cvalue(fx, param);
+ }
+ }
+ ArgKind::Spread(params) => {
+ for (i, param) in params.into_iter().enumerate() {
+ if let Some(param) = param {
+ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
+ }
+ }
+ }
+ }
+ }
+
+ for local in fx.mir.vars_and_temps_iter() {
+ let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+ }
+
+ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+}
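
The spread-argument path above is easiest to see with a concrete closure. A minimal sketch of the source-level situation it handles (the lowering summary in the comments is an assumption about how rustc lowers closures, not taken verbatim from this code):

    fn main() {
        // A call to this closure reaches MIR as `call(&env, (1i32, 2i64))`; the
        // argument tuple is the `spread_arg`. At the ABI level the tuple is spread
        // into separate parameters, and codegen_fn_prelude reconstructs the tuple
        // local from the individual block parameters.
        let f = |a: i32, b: i64| i64::from(a) + b;
        assert_eq!(f(1, 2), 3);
    }
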
+
+struct CallArgument<'tcx> {
+ value: CValue<'tcx>,
+ is_owned: bool,
+}
+
+// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
+fn codegen_call_argument_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CallArgument<'tcx> {
+ CallArgument {
+ value: codegen_operand(fx, operand),
+ is_owned: matches!(operand, Operand::Move(_)),
+ }
+}
+
+pub(crate) fn codegen_terminator_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ func: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ destination: Place<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+ let ret_place = codegen_place(fx, destination);
+
+ // Handle special calls like intrinsics and empty drop glue.
+ let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+ let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(fx.tcx);
+
+ if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+ crate::intrinsics::codegen_llvm_intrinsic_call(
+ fx,
+ &fx.tcx.symbol_name(instance).name,
+ substs,
+ args,
+ ret_place,
+ target,
+ );
+ return;
+ }
+
+ match instance.def {
+ InstanceDef::Intrinsic(_) => {
+ crate::intrinsics::codegen_intrinsic_call(
+ fx,
+ instance,
+ args,
+ ret_place,
+ target,
+ source_info,
+ );
+ return;
+ }
+ InstanceDef::DropGlue(_, None) => {
+ // empty drop glue - a nop.
+ let dest = target.expect("Non terminating drop_in_place_real???");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+ _ => Some(instance),
+ }
+ } else {
+ None
+ };
+
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = fx
+ .tcx
+ .mk_type_list(extra_args.iter().map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx))));
+ let fn_abi = if let Some(instance) = instance {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(instance, extra_args)
+ } else {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_ty.fn_sig(fx.tcx), extra_args)
+ };
+
+ let is_cold = if fn_sig.abi == Abi::RustCold {
+ true
+ } else {
+ instance
+ .map(|inst| {
+ fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
+ })
+ .unwrap_or(false)
+ };
+ if is_cold {
+ fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
+ if let Some(destination_block) = target {
+ fx.bcx.set_cold_block(fx.get_block(destination_block));
+ }
+ }
+
+ // Unpack arguments tuple for closures
+ let mut args = if fn_sig.abi == Abi::RustCall {
+ assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
+ let self_arg = codegen_call_argument_operand(fx, &args[0]);
+ let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+
+ let tupled_arguments = match pack_arg.value.layout().ty.kind() {
+ ty::Tuple(ref tupled_arguments) => tupled_arguments,
+ _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+ };
+
+ let mut args = Vec::with_capacity(1 + tupled_arguments.len());
+ args.push(self_arg);
+ for i in 0..tupled_arguments.len() {
+ args.push(CallArgument {
+ value: pack_arg.value.value_field(fx, mir::Field::new(i)),
+ is_owned: pack_arg.is_owned,
+ });
+ }
+ args
+ } else {
+ args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
+ };
+
+ // Pass the caller location for `#[track_caller]`.
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ let caller_location = fx.get_caller_location(source_info);
+ args.push(CallArgument { value: caller_location, is_owned: false });
+ }
+
+ let args = args;
+ assert_eq!(fn_abi.args.len(), args.len());
+
+ enum CallTarget {
+ Direct(FuncRef),
+ Indirect(SigRef, Value),
+ }
+
+ let (func_ref, first_arg_override) = match instance {
+ // Trait object call
+ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(
+ nop_inst,
+ format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
+ );
+ }
+
+ let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, method), Some(ptr))
+ }
+
+ // Normal call
+ Some(instance) => {
+ let func_ref = fx.get_function_ref(instance);
+ (CallTarget::Direct(func_ref), None)
+ }
+
+ // Indirect call
+ None => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(nop_inst, "indirect call");
+ }
+
+ let func = codegen_operand(fx, func).load_scalar(fx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, func), None)
+ }
+ };
+
+ self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
+ let call_args = return_ptr
+ .into_iter()
+ .chain(first_arg_override.into_iter())
+ .chain(
+ args.into_iter()
+ .enumerate()
+ .skip(if first_arg_override.is_some() { 1 } else { 0 })
+ .map(|(i, arg)| {
+ adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
+ })
+ .flatten(),
+ )
+ .collect::<Vec<Value>>();
+
+ let call_inst = match func_ref {
+ CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
+ CallTarget::Indirect(sig, func_ptr) => {
+ fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
+ }
+ };
+
+ // FIXME find a cleaner way to support varargs
+ if fn_sig.c_variadic {
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+ );
+ }
+ let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+ let abi_params = call_args
+ .into_iter()
+ .map(|arg| {
+ let ty = fx.bcx.func.dfg.value_type(arg);
+ if !ty.is_int() {
+ // FIXME set %al to the upper bound on float args once floats are supported
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Non int ty {:?} for variadic call", ty),
+ );
+ }
+ AbiParam::new(ty)
+ })
+ .collect::<Vec<AbiParam>>();
+ fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
+ }
+
+ call_inst
+ });
+
+ if let Some(dest) = target {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+}
+
+pub(crate) fn codegen_drop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ drop_place: CPlace<'tcx>,
+) {
+ let ty = drop_place.layout().ty;
+ let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
+ // we don't actually need to drop anything
+ } else {
+ match ty.kind() {
+ ty::Dynamic(..) => {
+ let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+ let ptr = ptr.get_addr(fx);
+ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+ substs: drop_instance.substs,
+ };
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
+
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
+ }
+ _ => {
+ assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
+
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(drop_instance, ty::List::empty());
+
+ let arg_value = drop_place.place_ref(
+ fx,
+ fx.layout_of(fx.tcx.mk_ref(
+ fx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
+ )),
+ );
+ let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
+
+ let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
+
+ if drop_instance.def.requires_caller_location(fx.tcx) {
+ // Pass the caller location for `#[track_caller]`.
+ let caller_location = fx.get_caller_location(source_info);
+ call_args.extend(
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
+ );
+ }
+
+ let func_ref = fx.get_function_ref(drop_instance);
+ fx.bcx.ins().call(func_ref, &call_args);
+ }
+ }
+ }
+}
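
For the `ty::Dynamic` branch of `codegen_drop`, the emitted code loads the `drop_in_place` function pointer out of the vtable (slot 0, matching `InstanceDef::Virtual(_, 0)`) and calls it indirectly. Schematically, the resulting clif ir looks something like this (a hedged sketch; value numbers and exact syntax are illustrative):

    ;; dropping a `dyn Trait` place: data pointer in v0, vtable pointer in v1
    v2 = load.i64 v1            ; drop_in_place fn pointer, vtable slot 0
    call_indirect sig0, v2(v0)
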
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
new file mode 100644
index 000000000..6c10baa53
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -0,0 +1,299 @@
+//! Argument passing
+
+use crate::prelude::*;
+use crate::value_and_place::assert_assignable;
+
+use cranelift_codegen::ir::{ArgumentExtension, ArgumentPurpose};
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttributes, ArgExtension as RustcArgExtension, CastTarget, PassMode, Reg, RegKind,
+};
+use smallvec::{smallvec, SmallVec};
+
+pub(super) trait ArgAbiExt<'tcx> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]>;
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>);
+}
+
+fn reg_to_abi_param(reg: Reg) -> AbiParam {
+ let clif_ty = match (reg.kind, reg.size.bytes()) {
+ (RegKind::Integer, 1) => types::I8,
+ (RegKind::Integer, 2) => types::I16,
+ (RegKind::Integer, 3..=4) => types::I32,
+ (RegKind::Integer, 5..=8) => types::I64,
+ (RegKind::Integer, 9..=16) => types::I128,
+ (RegKind::Float, 4) => types::F32,
+ (RegKind::Float, 8) => types::F64,
+ (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
+ _ => unreachable!("{:?}", reg),
+ };
+ AbiParam::new(clif_ty)
+}
+
+fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -> AbiParam {
+ match arg_attrs.arg_ext {
+ RustcArgExtension::None => {}
+ RustcArgExtension::Zext => param.extension = ArgumentExtension::Uext,
+ RustcArgExtension::Sext => param.extension = ArgumentExtension::Sext,
+ }
+ param
+}
+
+fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
+ let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ cast.rest.total.bytes() / cast.rest.unit.size.bytes(),
+ cast.rest.total.bytes() % cast.rest.unit.size.bytes(),
+ )
+ };
+
+ // Note: Unlike the LLVM equivalent of this code, we don't have separate branches for when there
+ // is no prefix, as a single unit, an array, and a heterogeneous struct are not represented using
+ // different types in Cranelift IR. Instead a single array of primitive types is used.
+
+ // Create list of fields in the main structure
+ let mut args = cast
+ .prefix
+ .iter()
+ .flatten()
+ .map(|&reg| reg_to_abi_param(reg))
+ .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
+ .collect::<SmallVec<_>>();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(cast.rest.unit.kind, RegKind::Integer);
+ args.push(reg_to_abi_param(Reg {
+ kind: RegKind::Integer,
+ size: Size::from_bytes(rem_bytes),
+ }));
+ }
+
+ args
+}
+
+impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
+ match self.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(attrs) => match self.layout.abi {
+ Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::new(scalar_to_clif_type(tcx, scalar)),
+ attrs
+ )],
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ smallvec![AbiParam::new(vector_ty)]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
+ apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
+ ]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => cast_target_to_abi_params(cast),
+ PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
+ if on_stack {
+ // The ABI requires aligning the struct size to the pointer size
+ let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi);
+ let size = u32::try_from(size.bytes()).unwrap();
+ smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size),),
+ attrs
+ )]
+ } else {
+ smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
+ }
+ }
+ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ assert!(!on_stack);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
+ ]
+ }
+ }
+ }
+
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
+ match self.mode {
+ PassMode::Ignore => (None, vec![]),
+ PassMode::Direct(_) => match self.layout.abi {
+ Abi::Scalar(scalar) => {
+ (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
+ }
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ (None, vec![AbiParam::new(vector_ty)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(_, _) => match self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
+ (None, vec![AbiParam::new(a), AbiParam::new(b)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
+ assert!(!on_stack);
+ (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ }
+ }
+}
+
+pub(super) fn to_casted_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ cast: CastTarget,
+) -> SmallVec<[Value; 2]> {
+ let (ptr, meta) = arg.force_stack(fx);
+ assert!(meta.is_none());
+ let mut offset = 0;
+ cast_target_to_abi_params(cast)
+ .into_iter()
+ .map(|param| {
+ let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
+ offset += i64::from(param.value_type.bytes());
+ val
+ })
+ .collect()
+}
+
+pub(super) fn from_casted_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ block_params: &[Value],
+ layout: TyAndLayout<'tcx>,
+ cast: CastTarget,
+) -> CValue<'tcx> {
+ let abi_params = cast_target_to_abi_params(cast);
+ let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
+ let layout_size = u32::try_from(layout.size.bytes()).unwrap();
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ // The stack slot size may be bigger than the layout size, for example for `[u8; 3]`, which is
+ // packed into an `i32`. It may also be smaller, for example when the type is a wrapper around
+ // an integer with a larger alignment than the integer.
+ size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
+ });
+ let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
+ let mut offset = 0;
+ let mut block_params_iter = block_params.iter().copied();
+ for param in abi_params {
+ ptr.offset_i64(fx, offset).store(
+ fx,
+ block_params_iter.next().unwrap(),
+ MemFlags::new(),
+ );
+ offset += i64::from(param.value_type.bytes());
+ }
+ assert_eq!(block_params_iter.next(), None, "Leftover block param");
+ CValue::by_ref(ptr, layout)
+}
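
The stack-slot size computation above rounds up to a multiple of 16. For the `[u8; 3]`-packed-into-`i32` case from the comment, the numbers work out as follows (a minimal check using those sizes):

    fn main() {
        // One I32 abi param (4 bytes) backing a 3-byte layout, rounded up to 16.
        let (abi_param_size, layout_size) = (4u32, 3u32);
        let size = (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16;
        assert_eq!(size, 16);
    }
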
+
+/// Get a set of values to be passed as function arguments.
+pub(super) fn adjust_arg_for_abi<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ is_owned: bool,
+) -> SmallVec<[Value; 2]> {
+ assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty, 16);
+ match arg_abi.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],
+ PassMode::Pair(_, _) => {
+ let (a, b) = arg.load_scalar_pair(fx);
+ smallvec![a, b]
+ }
+ PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
+ PassMode::Indirect { .. } => {
+ if is_owned {
+ match arg.force_stack(fx) {
+ (ptr, None) => smallvec![ptr.get_addr(fx)],
+ (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
+ }
+ } else {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so we must make a copy of the argument unless the argument
+ // local is moved.
+ let place = CPlace::new_stack_slot(fx, arg.layout());
+ place.write_cvalue(fx, arg);
+ smallvec![place.to_ptr().get_addr(fx)]
+ }
+ }
+ }
+}
+
+/// Create a [`CValue`] containing the value of a function parameter, adding clif function
+/// parameters as necessary.
+pub(super) fn cvalue_for_param<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> Option<CValue<'tcx>> {
+ let block_params = arg_abi
+ .get_abi_param(fx.tcx)
+ .into_iter()
+ .map(|abi_param| {
+ let block_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
+ block_param
+ })
+ .collect::<SmallVec<[_; 2]>>();
+
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "arg",
+ local,
+ local_field,
+ &block_params,
+ arg_abi.mode,
+ arg_abi.layout,
+ );
+
+ match arg_abi.mode {
+ PassMode::Ignore => None,
+ PassMode::Direct(_) => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_val(block_params[0], arg_abi.layout))
+ }
+ PassMode::Pair(_, _) => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
+ }
+ PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_ref_unsized(
+ Pointer::new(block_params[0]),
+ block_params[1],
+ arg_abi.layout,
+ ))
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
new file mode 100644
index 000000000..ff3bb2dfd
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -0,0 +1,141 @@
+//! Return value handling
+
+use crate::prelude::*;
+
+use rustc_target::abi::call::{ArgAbi, PassMode};
+use smallvec::{smallvec, SmallVec};
+
+/// Return a place where the return value of the current function can be written to. If necessary
+/// this adds an extra parameter pointing to where the return value needs to be stored.
+pub(super) fn codegen_return_param<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> CPlace<'tcx> {
+ let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
+ let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
+ (
+ super::make_local_place(
+ fx,
+ RETURN_PLACE,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ is_ssa,
+ ),
+ smallvec![],
+ )
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ let ret_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
+ (
+ CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
+ smallvec![ret_param],
+ )
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ };
+
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "ret",
+ Some(RETURN_PLACE),
+ None,
+ &ret_param,
+ fx.fn_abi.as_ref().unwrap().ret.mode,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ );
+
+ ret_place
+}
+
+/// Invokes the closure, passing a value representing the return pointer if necessary. When the
+/// closure returns, the call's return value(s), if any, are written to the correct place.
+pub(super) fn codegen_with_call_return_arg<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ret_place: CPlace<'tcx>,
+ f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> Inst,
+) {
+ let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
+ PassMode::Ignore => (None, None),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) {
+ // This is an optimization to prevent unnecessary copies of the return value when
+ // the return place is already a memory place, as opposed to a register.
+ // It is only an optimization; this match arm could be removed without affecting correctness.
+ (None, Some(ret_place.to_ptr().get_addr(fx)))
+ } else {
+ let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
+ (Some(place), Some(place.to_ptr().get_addr(fx)))
+ }
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => (None, None),
+ };
+
+ let call_inst = f(fx, return_ptr);
+
+ match ret_arg_abi.mode {
+ PassMode::Ignore => {}
+ PassMode::Direct(_) => {
+ let ret_val = fx.bcx.inst_results(call_inst)[0];
+ ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
+ }
+ PassMode::Pair(_, _) => {
+ let ret_val_a = fx.bcx.inst_results(call_inst)[0];
+ let ret_val_b = fx.bcx.inst_results(call_inst)[1];
+ ret_place
+ .write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
+ }
+ PassMode::Cast(cast) => {
+ let results =
+ fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
+ let result =
+ super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
+ ret_place.write_cvalue(fx, result);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if let Some(ret_temp_place) = ret_temp_place {
+ // If ret_temp_place is None, it is not necessary to copy the return value.
+ let ret_temp_value = ret_temp_place.to_cvalue(fx);
+ ret_place.write_cvalue(fx, ret_temp_value);
+ }
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ }
+}
+
+/// Codegen a return instruction with the right return value(s) if any.
+pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
+ match fx.fn_abi.as_ref().unwrap().ret.mode {
+ PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ fx.bcx.ins().return_(&[]);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ PassMode::Direct(_) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx).load_scalar(fx);
+ fx.bcx.ins().return_(&[ret_val]);
+ }
+ PassMode::Pair(_, _) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
+ fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
+ }
+ PassMode::Cast(cast) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx);
+ let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
+ fx.bcx.ins().return_(&ret_vals);
+ }
+ }
+}