Diffstat (limited to 'compiler/rustc_codegen_cranelift/src')
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/comments.rs | 135
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/mod.rs | 611
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs | 299
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/returning.rs | 141
-rw-r--r--  compiler/rustc_codegen_cranelift/src/allocator.rs | 147
-rw-r--r--  compiler/rustc_codegen_cranelift/src/analyze.rs | 48
-rw-r--r--  compiler/rustc_codegen_cranelift/src/archive.rs | 236
-rw-r--r--  compiler/rustc_codegen_cranelift/src/base.rs | 955
-rw-r--r--  compiler/rustc_codegen_cranelift/src/cast.rs | 164
-rw-r--r--  compiler/rustc_codegen_cranelift/src/codegen_i128.rs | 153
-rw-r--r--  compiler/rustc_codegen_cranelift/src/common.rs | 475
-rw-r--r--  compiler/rustc_codegen_cranelift/src/compiler_builtins.rs | 43
-rw-r--r--  compiler/rustc_codegen_cranelift/src/config.rs | 107
-rw-r--r--  compiler/rustc_codegen_cranelift/src/constant.rs | 580
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs | 190
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs | 218
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs | 357
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/object.rs | 83
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs | 136
-rw-r--r--  compiler/rustc_codegen_cranelift/src/discriminant.rs | 176
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/aot.rs | 436
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/jit.rs | 385
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/mod.rs | 53
-rw-r--r--  compiler/rustc_codegen_cranelift/src/inline_asm.rs | 677
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs | 74
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs | 192
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 1292
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs | 659
-rw-r--r--  compiler/rustc_codegen_cranelift/src/lib.rs | 316
-rw-r--r--  compiler/rustc_codegen_cranelift/src/linkage.rs | 36
-rw-r--r--  compiler/rustc_codegen_cranelift/src/main_shim.rs | 161
-rw-r--r--  compiler/rustc_codegen_cranelift/src/num.rs | 440
-rw-r--r--  compiler/rustc_codegen_cranelift/src/optimize/mod.rs | 20
-rw-r--r--  compiler/rustc_codegen_cranelift/src/optimize/peephole.rs | 67
-rw-r--r--  compiler/rustc_codegen_cranelift/src/pointer.rs | 134
-rw-r--r--  compiler/rustc_codegen_cranelift/src/pretty_clif.rs | 278
-rw-r--r--  compiler/rustc_codegen_cranelift/src/toolchain.rs | 31
-rw-r--r--  compiler/rustc_codegen_cranelift/src/trap.rs | 57
-rw-r--r--  compiler/rustc_codegen_cranelift/src/unsize.rs | 227
-rw-r--r--  compiler/rustc_codegen_cranelift/src/value_and_place.rs | 883
-rw-r--r--  compiler/rustc_codegen_cranelift/src/vtable.rs | 79
41 files changed, 11751 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
new file mode 100644
index 000000000..37d2679c1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -0,0 +1,135 @@
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
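+//!
+//! For illustration, the generated comments look roughly like this (hypothetical locals and
+//! values; the exact layout is produced by the format strings below):
+//!
+//! ```text
+//! kind  loc.idx  param  pass mode    ty
+//! ret   _0     = v0     Direct(...)  u64
+//! arg   _1     = v1     Direct(...)  u64
+//! ```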
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+use rustc_target::abi::call::PassMode;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::prelude::*;
+
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(
+ "kind loc.idx param pass mode ty".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_arg_comment<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ kind: &str,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ params: &[Value],
+ arg_abi_mode: PassMode,
+ arg_layout: TyAndLayout<'tcx>,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+
+ let local = if let Some(local) = local {
+ Cow::Owned(format!("{:?}", local))
+ } else {
+ Cow::Borrowed("???")
+ };
+ let local_field = if let Some(local_field) = local_field {
+ Cow::Owned(format!(".{}", local_field))
+ } else {
+ Cow::Borrowed("")
+ };
+
+ let params = match params {
+ [] => Cow::Borrowed("-"),
+ [param] => Cow::Owned(format!("= {:?}", param)),
+ [param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
+ params => Cow::Owned(format!(
+ "= {}",
+ params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
+ )),
+ };
+
+ let pass_mode = format!("{:?}", arg_abi_mode);
+ fx.add_global_comment(format!(
+ "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+ kind = kind,
+ local = local,
+ local_field = local_field,
+ params = params,
+ pass_mode = pass_mode,
+ ty = arg_layout.ty,
+ ));
+}
+
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(String::new());
+ fx.add_global_comment(
+ "kind local ty size align (abi,pref)".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ local: Local,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+ let TyAndLayout { ty, layout } = place.layout();
+ let rustc_target::abi::LayoutS {
+ size,
+ align,
+ abi: _,
+ variants: _,
+ fields: _,
+ largest_niche: _,
+ } = layout.0.0;
+
+ let (kind, extra) = match *place.inner() {
+ CPlaceInner::Var(place_local, var) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var={}", var.index())))
+ }
+ CPlaceInner::VarPair(place_local, var1, var2) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+ }
+ CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+ CPlaceInner::Addr(ptr, meta) => {
+ let meta = if let Some(meta) = meta {
+ Cow::Owned(format!(",meta={}", meta))
+ } else {
+ Cow::Borrowed("")
+ };
+ match ptr.debug_base_and_offset() {
+ (crate::pointer::PointerBase::Addr(addr), offset) => {
+ ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+ ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Dangling(align), offset) => {
+ ("zst", format!("align={},offset={}", align.bytes(), offset).into())
+ }
+ }
+ }
+ };
+
+ fx.add_global_comment(format!(
+ "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+ kind,
+ format!("{:?}", local),
+ format!("{:?}", ty),
+ size.bytes(),
+ align.abi.bytes(),
+ align.pref.bytes(),
+ if extra.is_empty() { "" } else { " " },
+ extra,
+ ));
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
new file mode 100644
index 000000000..815450f68
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -0,0 +1,611 @@
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+mod comments;
+mod pass_mode;
+mod returning;
+
+use cranelift_module::ModuleError;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::{AbiParam, SigRef};
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::codegen_return;
+
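+/// Build a Cranelift [`Signature`] from a rustc [`FnAbi`].
+///
+/// Rough sketch (hypothetical example, assuming the default call convention and `Direct` passing
+/// of all values): a Rust `fn(i32, i32) -> i64` maps to a signature with params `[i32, i32]`,
+/// returns `[i64]` and no return pointer parameter.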
+fn clif_sig_from_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ default_call_conv: CallConv,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> Signature {
+ let call_conv = match fn_abi.conv {
+ Conv::Rust | Conv::C => default_call_conv,
+ Conv::RustCold => CallConv::Cold,
+ Conv::X86_64SysV => CallConv::SystemV,
+ Conv::X86_64Win64 => CallConv::WindowsFastcall,
+ Conv::ArmAapcs
+ | Conv::CCmseNonSecureCall
+ | Conv::Msp430Intr
+ | Conv::PtxKernel
+ | Conv::X86Fastcall
+ | Conv::X86Intr
+ | Conv::X86Stdcall
+ | Conv::X86ThisCall
+ | Conv::X86VectorCall
+ | Conv::AmdGpuKernel
+ | Conv::AvrInterrupt
+ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
+ };
+ let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
+
+ let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
+ // Sometimes the first param is a pointer to the place where the return value needs to be stored.
+ let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
+
+ Signature { params, returns, call_conv }
+}
+
+pub(crate) fn get_function_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ triple: &target_lexicon::Triple,
+ inst: Instance<'tcx>,
+) -> Signature {
+ assert!(!inst.substs.needs_infer());
+ clif_sig_from_fn_abi(
+ tcx,
+ CallConv::triple_default(triple),
+ &RevealAllLayoutCx(tcx).fn_abi_of_instance(inst, ty::List::empty()),
+ )
+}
+
+/// Instance must be monomorphized
+pub(crate) fn import_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ inst: Instance<'tcx>,
+) -> FuncId {
+ let name = tcx.symbol_name(inst).name;
+ let sig = get_function_sig(tcx, module.isa().triple(), inst);
+ match module.declare_function(name, Linkage::Import, &sig) {
+ Ok(func_id) => func_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` as function, but it was already declared as static"
+ )),
+ Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` with signature {new_sig:?}, \
+ but it was already declared with signature {prev_sig:?}"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ /// Instance must be monomorphized
+ pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
+ let func_id = import_function(self.tcx, self.module, inst);
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", inst));
+ }
+
+ func_ref
+ }
+
+ pub(crate) fn lib_call(
+ &mut self,
+ name: &str,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
+ args: &[Value],
+ ) -> &[Value] {
+ let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
+ let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", name));
+ }
+ let call_inst = self.bcx.ins().call(func_ref, args);
+ if self.clif_comments.enabled() {
+ self.add_comment(call_inst, format!("easy_call {}", name));
+ }
+ let results = self.bcx.inst_results(call_inst);
+ assert!(results.len() <= 2, "{}", results.len());
+ results
+ }
+
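+ /// Thin wrapper around [`Self::lib_call`] for scalar arguments and return values. As a
+ /// hypothetical example, `fx.easy_call("fmodf", &[a, b], fx.tcx.types.f32)` declares `fmodf`,
+ /// passes the two scalars and wraps the returned scalar in a [`CValue`].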
+ pub(crate) fn easy_call(
+ &mut self,
+ name: &str,
+ args: &[CValue<'tcx>],
+ return_ty: Ty<'tcx>,
+ ) -> CValue<'tcx> {
+ let (input_tys, args): (Vec<_>, Vec<_>) = args
+ .iter()
+ .map(|arg| {
+ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
+ })
+ .unzip();
+ let return_layout = self.layout_of(return_ty);
+ let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
+ tup.iter().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
+ } else {
+ vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
+ };
+ let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
+ match *ret_vals {
+ [] => CValue::by_ref(
+ Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
+ return_layout,
+ ),
+ [val] => CValue::by_val(val, return_layout),
+ [val, extra] => CValue::by_val_pair(val, extra, return_layout),
+ _ => unreachable!(),
+ }
+ }
+}
+
+/// Make a [`CPlace`] capable of holding a value of the specified type.
+fn make_local_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ is_ssa: bool,
+) -> CPlace<'tcx> {
+ let place = if is_ssa {
+ if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+ CPlace::new_var_pair(fx, local, layout)
+ } else {
+ CPlace::new_var(fx, local, layout)
+ }
+ } else {
+ CPlace::new_stack_slot(fx, layout)
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ place
+}
+
+pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
+ fx.bcx.append_block_params_for_function_params(start_block);
+
+ fx.bcx.switch_to_block(start_block);
+ fx.bcx.ins().nop();
+
+ let ssa_analyzed = crate::analyze::analyze(fx);
+
+ self::comments::add_args_header_comment(fx);
+
+ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
+ let ret_place =
+ self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
+ assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
+
+ // None means the argument is ignored (pass_mode == PassMode::Ignore)
+ enum ArgKind<'tcx> {
+ Normal(Option<CValue<'tcx>>),
+ Spread(Vec<Option<CValue<'tcx>>>),
+ }
+
+ let fn_abi = fx.fn_abi.take().unwrap();
+
+ // FIXME implement variadics in cranelift
+ if fn_abi.c_variadic {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ let mut arg_abis_iter = fn_abi.args.iter();
+
+ let func_params = fx
+ .mir
+ .args_iter()
+ .map(|local| {
+ let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+
+ // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+ if Some(local) == fx.mir.spread_arg {
+ // This argument (e.g. the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual function arguments.
+
+ let tupled_arg_tys = match arg_ty.kind() {
+ ty::Tuple(ref tys) => tys,
+ _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
+ };
+
+ let mut params = Vec::new();
+ for (i, _arg_ty) in tupled_arg_tys.iter().enumerate() {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
+ params.push(param);
+ }
+
+ (local, ArgKind::Spread(params), arg_ty)
+ } else {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
+ (local, ArgKind::Normal(param), arg_ty)
+ }
+ })
+ .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
+
+ assert!(fx.caller_location.is_none());
+ if fx.instance.def.requires_caller_location(fx.tcx) {
+ // Store caller location for `#[track_caller]`.
+ let arg_abi = arg_abis_iter.next().unwrap();
+ fx.caller_location =
+ Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
+ }
+
+ assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
+ fx.fn_abi = Some(fn_abi);
+ assert!(block_params_iter.next().is_none(), "arg_value left behind");
+
+ self::comments::add_locals_header_comment(fx);
+
+ for (local, arg_kind, ty) in func_params {
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ // While this is normally an optimization to prevent an unnecessary copy when an argument is
+ // not mutated by the current function, this is necessary to support unsized arguments.
+ if let ArgKind::Normal(Some(val)) = arg_kind {
+ if let Some((addr, meta)) = val.try_to_ptr() {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so it is fine to borrow the backing storage of this argument
+ // to prevent a copy.
+
+ let place = if let Some(meta) = meta {
+ CPlace::for_ptr_with_extra(addr, meta, val.layout())
+ } else {
+ CPlace::for_ptr(addr, val.layout())
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ assert_eq!(fx.local_map.push(place), local);
+ continue;
+ }
+ }
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+
+ match arg_kind {
+ ArgKind::Normal(param) => {
+ if let Some(param) = param {
+ place.write_cvalue(fx, param);
+ }
+ }
+ ArgKind::Spread(params) => {
+ for (i, param) in params.into_iter().enumerate() {
+ if let Some(param) = param {
+ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
+ }
+ }
+ }
+ }
+ }
+
+ for local in fx.mir.vars_and_temps_iter() {
+ let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+ }
+
+ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+}
+
+struct CallArgument<'tcx> {
+ value: CValue<'tcx>,
+ is_owned: bool,
+}
+
+// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
+fn codegen_call_argument_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CallArgument<'tcx> {
+ CallArgument {
+ value: codegen_operand(fx, operand),
+ is_owned: matches!(operand, Operand::Move(_)),
+ }
+}
+
+pub(crate) fn codegen_terminator_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ func: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ destination: Place<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+ let ret_place = codegen_place(fx, destination);
+
+ // Handle special calls like intrinsics and empty drop glue.
+ let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+ let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(fx.tcx);
+
+ if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+ crate::intrinsics::codegen_llvm_intrinsic_call(
+ fx,
+ &fx.tcx.symbol_name(instance).name,
+ substs,
+ args,
+ ret_place,
+ target,
+ );
+ return;
+ }
+
+ match instance.def {
+ InstanceDef::Intrinsic(_) => {
+ crate::intrinsics::codegen_intrinsic_call(
+ fx,
+ instance,
+ args,
+ ret_place,
+ target,
+ source_info,
+ );
+ return;
+ }
+ InstanceDef::DropGlue(_, None) => {
+ // empty drop glue - a nop.
+ let dest = target.expect("Non terminating drop_in_place_real???");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+ _ => Some(instance),
+ }
+ } else {
+ None
+ };
+
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = fx
+ .tcx
+ .mk_type_list(extra_args.iter().map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx))));
+ let fn_abi = if let Some(instance) = instance {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(instance, extra_args)
+ } else {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_ty.fn_sig(fx.tcx), extra_args)
+ };
+
+ let is_cold = if fn_sig.abi == Abi::RustCold {
+ true
+ } else {
+ instance
+ .map(|inst| {
+ fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
+ })
+ .unwrap_or(false)
+ };
+ if is_cold {
+ fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
+ if let Some(destination_block) = target {
+ fx.bcx.set_cold_block(fx.get_block(destination_block));
+ }
+ }
+
+ // Unpack arguments tuple for closures
+ let mut args = if fn_sig.abi == Abi::RustCall {
+ assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
+ let self_arg = codegen_call_argument_operand(fx, &args[0]);
+ let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+
+ let tupled_arguments = match pack_arg.value.layout().ty.kind() {
+ ty::Tuple(ref tupled_arguments) => tupled_arguments,
+ _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+ };
+
+ let mut args = Vec::with_capacity(1 + tupled_arguments.len());
+ args.push(self_arg);
+ for i in 0..tupled_arguments.len() {
+ args.push(CallArgument {
+ value: pack_arg.value.value_field(fx, mir::Field::new(i)),
+ is_owned: pack_arg.is_owned,
+ });
+ }
+ args
+ } else {
+ args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
+ };
+
+ // Pass the caller location for `#[track_caller]`.
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ let caller_location = fx.get_caller_location(source_info);
+ args.push(CallArgument { value: caller_location, is_owned: false });
+ }
+
+ let args = args;
+ assert_eq!(fn_abi.args.len(), args.len());
+
+ enum CallTarget {
+ Direct(FuncRef),
+ Indirect(SigRef, Value),
+ }
+
+ let (func_ref, first_arg_override) = match instance {
+ // Trait object call
+ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(
+ nop_inst,
+ format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
+ );
+ }
+
+ let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, method), Some(ptr))
+ }
+
+ // Normal call
+ Some(instance) => {
+ let func_ref = fx.get_function_ref(instance);
+ (CallTarget::Direct(func_ref), None)
+ }
+
+ // Indirect call
+ None => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(nop_inst, "indirect call");
+ }
+
+ let func = codegen_operand(fx, func).load_scalar(fx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, func), None)
+ }
+ };
+
+ self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
+ let call_args = return_ptr
+ .into_iter()
+ .chain(first_arg_override.into_iter())
+ .chain(
+ args.into_iter()
+ .enumerate()
+ .skip(if first_arg_override.is_some() { 1 } else { 0 })
+ .map(|(i, arg)| {
+ adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
+ })
+ .flatten(),
+ )
+ .collect::<Vec<Value>>();
+
+ let call_inst = match func_ref {
+ CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
+ CallTarget::Indirect(sig, func_ptr) => {
+ fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
+ }
+ };
+
+ // FIXME find a cleaner way to support varargs
+ if fn_sig.c_variadic {
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+ );
+ }
+ let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+ let abi_params = call_args
+ .into_iter()
+ .map(|arg| {
+ let ty = fx.bcx.func.dfg.value_type(arg);
+ if !ty.is_int() {
+ // FIXME set %al to upperbound on float args once floats are supported
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Non int ty {:?} for variadic call", ty),
+ );
+ }
+ AbiParam::new(ty)
+ })
+ .collect::<Vec<AbiParam>>();
+ fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
+ }
+
+ call_inst
+ });
+
+ if let Some(dest) = target {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+}
+
+pub(crate) fn codegen_drop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ drop_place: CPlace<'tcx>,
+) {
+ let ty = drop_place.layout().ty;
+ let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
+ // we don't actually need to drop anything
+ } else {
+ match ty.kind() {
+ ty::Dynamic(..) => {
+ let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+ let ptr = ptr.get_addr(fx);
+ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+ substs: drop_instance.substs,
+ };
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
+
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
+ }
+ _ => {
+ assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
+
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(drop_instance, ty::List::empty());
+
+ let arg_value = drop_place.place_ref(
+ fx,
+ fx.layout_of(fx.tcx.mk_ref(
+ fx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
+ )),
+ );
+ let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
+
+ let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
+
+ if drop_instance.def.requires_caller_location(fx.tcx) {
+ // Pass the caller location for `#[track_caller]`.
+ let caller_location = fx.get_caller_location(source_info);
+ call_args.extend(
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
+ );
+ }
+
+ let func_ref = fx.get_function_ref(drop_instance);
+ fx.bcx.ins().call(func_ref, &call_args);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
new file mode 100644
index 000000000..6c10baa53
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -0,0 +1,299 @@
+//! Argument passing
+
+use crate::prelude::*;
+use crate::value_and_place::assert_assignable;
+
+use cranelift_codegen::ir::{ArgumentExtension, ArgumentPurpose};
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttributes, ArgExtension as RustcArgExtension, CastTarget, PassMode, Reg, RegKind,
+};
+use smallvec::{smallvec, SmallVec};
+
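+/// Maps a rustc [`ArgAbi`] to the Cranelift [`AbiParam`]s used for it when building function
+/// signatures (see `clif_sig_from_fn_abi` in `abi/mod.rs`).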
+pub(super) trait ArgAbiExt<'tcx> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]>;
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>);
+}
+
+fn reg_to_abi_param(reg: Reg) -> AbiParam {
+ let clif_ty = match (reg.kind, reg.size.bytes()) {
+ (RegKind::Integer, 1) => types::I8,
+ (RegKind::Integer, 2) => types::I16,
+ (RegKind::Integer, 3..=4) => types::I32,
+ (RegKind::Integer, 5..=8) => types::I64,
+ (RegKind::Integer, 9..=16) => types::I128,
+ (RegKind::Float, 4) => types::F32,
+ (RegKind::Float, 8) => types::F64,
+ (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
+ _ => unreachable!("{:?}", reg),
+ };
+ AbiParam::new(clif_ty)
+}
+
+fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -> AbiParam {
+ match arg_attrs.arg_ext {
+ RustcArgExtension::None => {}
+ RustcArgExtension::Zext => param.extension = ArgumentExtension::Uext,
+ RustcArgExtension::Sext => param.extension = ArgumentExtension::Sext,
+ }
+ param
+}
+
+fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
+ let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ cast.rest.total.bytes() / cast.rest.unit.size.bytes(),
+ cast.rest.total.bytes() % cast.rest.unit.size.bytes(),
+ )
+ };
+
+ // Note: Unlike the LLVM equivalent of this code we don't have separate branches for when there
+ // is no prefix, as a single unit, an array and a heterogeneous struct are not represented using
+ // different types in Cranelift IR. Instead a single array of primitive types is used.
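+ // As a hypothetical example: an empty prefix with an 8 byte integer `rest.unit` and a
+ // `rest.total` of 20 bytes gives `rest_count` = 2 and `rem_bytes` = 4, i.e. the abi params
+ // [types::I64, types::I64, types::I32].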
+
+ // Create list of fields in the main structure
+ let mut args = cast
+ .prefix
+ .iter()
+ .flatten()
+ .map(|&reg| reg_to_abi_param(reg))
+ .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
+ .collect::<SmallVec<_>>();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(cast.rest.unit.kind, RegKind::Integer);
+ args.push(reg_to_abi_param(Reg {
+ kind: RegKind::Integer,
+ size: Size::from_bytes(rem_bytes),
+ }));
+ }
+
+ args
+}
+
+impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
+ match self.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(attrs) => match self.layout.abi {
+ Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::new(scalar_to_clif_type(tcx, scalar)),
+ attrs
+ )],
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ smallvec![AbiParam::new(vector_ty)]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
+ apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
+ ]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => cast_target_to_abi_params(cast),
+ PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
+ if on_stack {
+ // Abi requires aligning struct size to pointer size
+ let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi);
+ let size = u32::try_from(size.bytes()).unwrap();
+ smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size),),
+ attrs
+ )]
+ } else {
+ smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
+ }
+ }
+ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ assert!(!on_stack);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
+ ]
+ }
+ }
+ }
+
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
+ match self.mode {
+ PassMode::Ignore => (None, vec![]),
+ PassMode::Direct(_) => match self.layout.abi {
+ Abi::Scalar(scalar) => {
+ (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
+ }
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ (None, vec![AbiParam::new(vector_ty)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(_, _) => match self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
+ (None, vec![AbiParam::new(a), AbiParam::new(b)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
+ assert!(!on_stack);
+ (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ }
+ }
+}
+
+pub(super) fn to_casted_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ cast: CastTarget,
+) -> SmallVec<[Value; 2]> {
+ let (ptr, meta) = arg.force_stack(fx);
+ assert!(meta.is_none());
+ let mut offset = 0;
+ cast_target_to_abi_params(cast)
+ .into_iter()
+ .map(|param| {
+ let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
+ offset += i64::from(param.value_type.bytes());
+ val
+ })
+ .collect()
+}
+
+pub(super) fn from_casted_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ block_params: &[Value],
+ layout: TyAndLayout<'tcx>,
+ cast: CastTarget,
+) -> CValue<'tcx> {
+ let abi_params = cast_target_to_abi_params(cast);
+ let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
+ let layout_size = u32::try_from(layout.size.bytes()).unwrap();
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ // Stack slot size may be bigger, for example for `[u8; 3]`, which is packed into an `i32`.
+ // It may also be smaller for example when the type is a wrapper around an integer with a
+ // larger alignment than the integer.
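+ // As a hypothetical example: abi params summing to 24 bytes and a 20 byte layout round up to
+ // a 32 byte slot here.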
+ size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
+ });
+ let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
+ let mut offset = 0;
+ let mut block_params_iter = block_params.iter().copied();
+ for param in abi_params {
+ let val = ptr.offset_i64(fx, offset).store(
+ fx,
+ block_params_iter.next().unwrap(),
+ MemFlags::new(),
+ );
+ offset += i64::from(param.value_type.bytes());
+ val
+ }
+ assert_eq!(block_params_iter.next(), None, "Leftover block param");
+ CValue::by_ref(ptr, layout)
+}
+
+/// Get a set of values to be passed as function arguments.
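+///
+/// Sketch of the mapping done below: `Direct` yields one scalar, `Pair` two, `Cast` spills the
+/// value to the stack and reloads it piecewise, and `Indirect` passes an address (copying into a
+/// fresh stack slot first unless the caller gave up ownership of the value).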
+pub(super) fn adjust_arg_for_abi<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ is_owned: bool,
+) -> SmallVec<[Value; 2]> {
+ assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty, 16);
+ match arg_abi.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],
+ PassMode::Pair(_, _) => {
+ let (a, b) = arg.load_scalar_pair(fx);
+ smallvec![a, b]
+ }
+ PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
+ PassMode::Indirect { .. } => {
+ if is_owned {
+ match arg.force_stack(fx) {
+ (ptr, None) => smallvec![ptr.get_addr(fx)],
+ (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
+ }
+ } else {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so we must make a copy of the argument unless the argument
+ // local is moved.
+ let place = CPlace::new_stack_slot(fx, arg.layout());
+ place.write_cvalue(fx, arg);
+ smallvec![place.to_ptr().get_addr(fx)]
+ }
+ }
+ }
+}
+
+/// Create a [`CValue`] containing the value of a function parameter, adding clif function parameters
+/// as necessary.
+pub(super) fn cvalue_for_param<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> Option<CValue<'tcx>> {
+ let block_params = arg_abi
+ .get_abi_param(fx.tcx)
+ .into_iter()
+ .map(|abi_param| {
+ let block_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
+ block_param
+ })
+ .collect::<SmallVec<[_; 2]>>();
+
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "arg",
+ local,
+ local_field,
+ &block_params,
+ arg_abi.mode,
+ arg_abi.layout,
+ );
+
+ match arg_abi.mode {
+ PassMode::Ignore => None,
+ PassMode::Direct(_) => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_val(block_params[0], arg_abi.layout))
+ }
+ PassMode::Pair(_, _) => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
+ }
+ PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_ref_unsized(
+ Pointer::new(block_params[0]),
+ block_params[1],
+ arg_abi.layout,
+ ))
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
new file mode 100644
index 000000000..ff3bb2dfd
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -0,0 +1,141 @@
+//! Return value handling
+
+use crate::prelude::*;
+
+use rustc_target::abi::call::{ArgAbi, PassMode};
+use smallvec::{smallvec, SmallVec};
+
+/// Return a place where the return value of the current function can be written to. If necessary
+/// this adds an extra parameter pointing to where the return value needs to be stored.
+pub(super) fn codegen_return_param<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> CPlace<'tcx> {
+ let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
+ let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
+ (
+ super::make_local_place(
+ fx,
+ RETURN_PLACE,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ is_ssa,
+ ),
+ smallvec![],
+ )
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ let ret_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
+ (
+ CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
+ smallvec![ret_param],
+ )
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ };
+
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "ret",
+ Some(RETURN_PLACE),
+ None,
+ &ret_param,
+ fx.fn_abi.as_ref().unwrap().ret.mode,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ );
+
+ ret_place
+}
+
+/// Invokes the closure, passing a value representing the return pointer if one is necessary. When
+/// the closure returns, the call's return value(s), if any, are written to the correct place.
+pub(super) fn codegen_with_call_return_arg<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ret_place: CPlace<'tcx>,
+ f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> Inst,
+) {
+ let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
+ PassMode::Ignore => (None, None),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) {
+ // This is an optimization to prevent unnecessary copies of the return value when
+ // the return place is already a memory place as opposed to a register.
+ // Since this is only an optimization, this match arm could be removed without affecting correctness.
+ (None, Some(ret_place.to_ptr().get_addr(fx)))
+ } else {
+ let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
+ (Some(place), Some(place.to_ptr().get_addr(fx)))
+ }
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => (None, None),
+ };
+
+ let call_inst = f(fx, return_ptr);
+
+ match ret_arg_abi.mode {
+ PassMode::Ignore => {}
+ PassMode::Direct(_) => {
+ let ret_val = fx.bcx.inst_results(call_inst)[0];
+ ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
+ }
+ PassMode::Pair(_, _) => {
+ let ret_val_a = fx.bcx.inst_results(call_inst)[0];
+ let ret_val_b = fx.bcx.inst_results(call_inst)[1];
+ ret_place
+ .write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
+ }
+ PassMode::Cast(cast) => {
+ let results =
+ fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
+ let result =
+ super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
+ ret_place.write_cvalue(fx, result);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if let Some(ret_temp_place) = ret_temp_place {
+ // If ret_temp_place is None, it is not necessary to copy the return value.
+ let ret_temp_value = ret_temp_place.to_cvalue(fx);
+ ret_place.write_cvalue(fx, ret_temp_value);
+ }
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ }
+}
+
+/// Codegen a return instruction with the right return value(s) if any.
+pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
+ match fx.fn_abi.as_ref().unwrap().ret.mode {
+ PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ fx.bcx.ins().return_(&[]);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ PassMode::Direct(_) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx).load_scalar(fx);
+ fx.bcx.ins().return_(&[ret_val]);
+ }
+ PassMode::Pair(_, _) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
+ fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
+ }
+ PassMode::Cast(cast) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx);
+ let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
+ fx.bcx.ins().return_(&ret_vals);
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
new file mode 100644
index 000000000..6d321c7b2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -0,0 +1,147 @@
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_session::config::OomStrategy;
+
+/// Returns whether an allocator shim was created
+pub(crate) fn codegen(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+) -> bool {
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ false
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ codegen_inner(
+ module,
+ unwind_context,
+ kind,
+ tcx.lang_items().oom().is_some(),
+ tcx.sess.opts.unstable_opts.oom,
+ );
+ true
+ } else {
+ false
+ }
+}
+
+fn codegen_inner(
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ oom_strategy: OomStrategy,
+) {
+ let usize_ty = module.target_config().pointer_type();
+
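+ // For each allocator method this emits a `__rust_*` shim (e.g. `__rust_alloc`) that simply
+ // forwards its arguments to the selected allocator implementation and returns the result.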
+ for method in ALLOCATOR_METHODS {
+ let mut arg_tys = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ arg_tys.push(usize_ty); // size
+ arg_tys.push(usize_ty); // align
+ }
+ AllocatorTy::Ptr => arg_tys.push(usize_ty),
+ AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(usize_ty),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+ returns: output.into_iter().map(AbiParam::new).collect(),
+ };
+
+ let caller_name = format!("__rust_{}", method.name);
+ let callee_name = kind.fn_name(method.name);
+
+ let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = arg_tys
+ .into_iter()
+ .map(|ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(callee_func_ref, &args);
+ let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
+
+ bcx.ins().return_(&results);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+ }
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+ returns: vec![],
+ };
+
+ let callee_name = if has_alloc_error_handler { "__rg_oom" } else { "__rdl_oom" };
+
+ let func_id =
+ module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = (&[usize_ty, usize_ty])
+ .iter()
+ .map(|&ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ bcx.ins().call(callee_func_ref, &args);
+
+ bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+
+ let data_id = module.declare_data(OomStrategy::SYMBOL, Linkage::Export, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(1);
+ let val = oom_strategy.should_panic();
+ data_ctx.define(Box::new([val]));
+ module.define_data(data_id, &data_ctx).unwrap();
+}
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
new file mode 100644
index 000000000..35b89358b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -0,0 +1,48 @@
+//! SSA analysis
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::StatementKind::*;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum SsaKind {
+ NotSsa,
+ Ssa,
+}
+
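+/// Determine for every local whether it can be kept in SSA form.
+///
+/// A local starts out as `Ssa` when it has a Cranelift scalar or scalar pair type; taking its
+/// address via `Rvalue::Ref` or `Rvalue::AddressOf` demotes it to `NotSsa`, since it then needs
+/// backing memory.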
+pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
+ let mut flag_map = fx
+ .mir
+ .local_decls
+ .iter()
+ .map(|local_decl| {
+ let ty = fx.monomorphize(local_decl.ty);
+ if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
+ SsaKind::Ssa
+ } else {
+ SsaKind::NotSsa
+ }
+ })
+ .collect::<IndexVec<Local, SsaKind>>();
+
+ for bb in fx.mir.basic_blocks().iter() {
+ for stmt in bb.statements.iter() {
+ match &stmt.kind {
+ Assign(place_and_rval) => match &place_and_rval.1 {
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ not_ssa(&mut flag_map, place.local)
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ }
+ }
+
+ flag_map
+}
+
+fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
+ flag_map[local] = SsaKind::NotSsa;
+}
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
new file mode 100644
index 000000000..b4c790961
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -0,0 +1,236 @@
+//! Creation of ar archives, e.g. for the lib and staticlib crate types
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::{self, Read, Seek};
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::Session;
+
+use object::read::archive::ArchiveFile;
+use object::{Object, ObjectSymbol, ReadCache};
+
+#[derive(Debug)]
+enum ArchiveEntry {
+ FromArchive { archive_index: usize, file_range: (u64, u64) },
+ File(PathBuf),
+}
+
+pub(crate) struct ArArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ Box::new(ArArchiveBuilder {
+ sess,
+ use_gnu_style_archive: sess.target.archive_format == "gnu",
+ // FIXME fix builtin ranlib on macOS
+ no_builtin_ranlib: sess.target.is_like_osx,
+
+ src_archives: vec![],
+ entries: vec![],
+ })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ _sess: &Session,
+ _lib_name: &str,
+ _dll_imports: &[rustc_session::cstore::DllImport],
+ _tmpdir: &Path,
+ ) -> PathBuf {
+ bug!("creating dll imports is not supported");
+ }
+}
+
+pub(crate) struct ArArchiveBuilder<'a> {
+ sess: &'a Session,
+ use_gnu_style_archive: bool,
+ no_builtin_ranlib: bool,
+
+ src_archives: Vec<File>,
+ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+ // the end of an archive for linkers to not get confused.
+ entries: Vec<(Vec<u8>, ArchiveEntry)>,
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+ fn add_file(&mut self, file: &Path) {
+ self.entries.push((
+ file.file_name().unwrap().to_str().unwrap().to_string().into_bytes(),
+ ArchiveEntry::File(file.to_owned()),
+ ));
+ }
+
+ fn add_archive(
+ &mut self,
+ archive_path: &Path,
+ mut skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> std::io::Result<()> {
+ let read_cache = ReadCache::new(std::fs::File::open(&archive_path)?);
+ let archive = ArchiveFile::parse(&read_cache).unwrap();
+ let archive_index = self.src_archives.len();
+
+ for entry in archive.members() {
+ let entry = entry.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
+ let file_name = String::from_utf8(entry.name().to_vec())
+ .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
+ if !skip(&file_name) {
+ self.entries.push((
+ file_name.into_bytes(),
+ ArchiveEntry::FromArchive { archive_index, file_range: entry.file_range() },
+ ));
+ }
+ }
+
+ self.src_archives.push(read_cache.into_inner());
+ Ok(())
+ }
+
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ enum BuilderKind {
+ Bsd(ar::Builder<File>),
+ Gnu(ar::GnuBuilder<File>),
+ }
+
+ let sess = self.sess;
+
+ let mut symbol_table = BTreeMap::new();
+
+ let mut entries = Vec::new();
+
+ for (mut entry_name, entry) in self.entries {
+ // FIXME only read the symbol table of the object files to avoid having to keep all
+ // object files in memory at once, or read them twice.
+ let data = match entry {
+ ArchiveEntry::FromArchive { archive_index, file_range } => {
+ // FIXME read symbols from symtab
+ let src_read_cache = &mut self.src_archives[archive_index];
+
+ src_read_cache.seek(io::SeekFrom::Start(file_range.0)).unwrap();
+ let mut data = std::vec::from_elem(0, usize::try_from(file_range.1).unwrap());
+ src_read_cache.read_exact(&mut data).unwrap();
+
+ data
+ }
+ ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error while reading object file during archive building: {}",
+ err
+ ));
+ }),
+ };
+
+ if !self.no_builtin_ranlib {
+ if symbol_table.contains_key(&entry_name) {
+ // The ar crate can't handle creating a symbol table in case of multiple archive
+ // members with the same name. Work around this by prepending a number until we
+ // get a unique name.
+ for i in 1.. {
+ let new_name = format!("{}_", i)
+ .into_bytes()
+ .into_iter()
+ .chain(entry_name.iter().copied())
+ .collect::<Vec<_>>();
+ if !symbol_table.contains_key(&new_name) {
+ entry_name = new_name;
+ break;
+ }
+ }
+ }
+
+ match object::File::parse(&*data) {
+ Ok(object) => {
+ symbol_table.insert(
+ entry_name.to_vec(),
+ object
+ .symbols()
+ .filter_map(|symbol| {
+ if symbol.is_undefined() || symbol.is_local() {
+ None
+ } else {
+ symbol.name().map(|name| name.as_bytes().to_vec()).ok()
+ }
+ })
+ .collect::<Vec<_>>(),
+ );
+ }
+ Err(err) => {
+ let err = err.to_string();
+ if err == "Unknown file magic" {
+ // Not an object file; skip it.
+ } else {
+ sess.fatal(&format!(
+ "error parsing `{}` during archive creation: {}",
+ String::from_utf8_lossy(&entry_name),
+ err
+ ));
+ }
+ }
+ }
+ }
+
+ entries.push((entry_name, data));
+ }
+
+ let mut builder = if self.use_gnu_style_archive {
+ BuilderKind::Gnu(
+ ar::GnuBuilder::new(
+ File::create(output).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ entries.iter().map(|(name, _)| name.clone()).collect(),
+ ar::GnuSymbolTableFormat::Size32,
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ } else {
+ BuilderKind::Bsd(
+ ar::Builder::new(
+ File::create(output).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ };
+
+ let any_members = !entries.is_empty();
+
+ // Add all files
+ for (entry_name, data) in entries.into_iter() {
+ let header = ar::Header::new(entry_name, data.len() as u64);
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ }
+ }
+
+ // Finalize archive
+ std::mem::drop(builder);
+
+ if self.no_builtin_ranlib {
+ let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
+
+ // Run ranlib to be able to link the archive
+ let status = std::process::Command::new(ranlib)
+ .arg(output)
+ .status()
+ .expect("Couldn't run ranlib");
+
+ if !status.success() {
+ self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ }
+ }
+
+ any_members
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
new file mode 100644
index 000000000..122e103ff
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -0,0 +1,955 @@
+//! Codegen of a single function
+
+use rustc_ast::InlineAsmOptions;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+use indexmap::IndexSet;
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+use crate::pretty_clif::CommentWriter;
+
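+/// Codegen a single monomorphized function instance: declare it in the module, lower its MIR to
+/// Cranelift IR and hand the result to `compile_fn` (a summary of the steps below).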
+pub(crate) fn codegen_fn<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx>,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) {
+ let tcx = cx.tcx;
+
+ let _inst_guard =
+ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+ debug_assert!(!instance.substs.needs_infer());
+
+ let mir = tcx.instance_mir(instance.def);
+ let _mir_guard = crate::PrintOnPanic(|| {
+ let mut buf = Vec::new();
+ with_no_trimmed_paths!({
+ rustc_middle::mir::pretty::write_mir_fn(tcx, mir, &mut |_, _| Ok(()), &mut buf)
+ .unwrap();
+ });
+ String::from_utf8_lossy(&buf).into_owned()
+ });
+
+ // Declare function
+ let symbol_name = tcx.symbol_name(instance);
+ let sig = get_function_sig(tcx, module.isa().triple(), instance);
+ let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
+
+ cx.cached_context.clear();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+ func.name = ExternalName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
+ let block_map: IndexVec<BasicBlock, Block> =
+ (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+
+ // Make FunctionCx
+ let target_config = module.target_config();
+ let pointer_type = target_config.pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let mut fx = FunctionCx {
+ cx,
+ module,
+ tcx,
+ target_config,
+ pointer_type,
+ constants_cx: ConstantCx::new(),
+
+ instance,
+ symbol_name,
+ mir,
+ fn_abi: Some(RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())),
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+
+ clif_comments,
+ source_info_set: indexmap::IndexSet::new(),
+ next_ssa_var: 0,
+ };
+
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+
+ if !crate::constant::check_constants(&mut fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
+ } else if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ } else {
+ tcx.sess.time("codegen clif ir", || {
+ tcx.sess
+ .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
+ codegen_fn_content(&mut fx);
+ });
+ }
+
+ // Recover all necessary data from fx, as accessing `func` afterwards prevents any further use of fx (its builder mutably borrows `func`).
+ let instance = fx.instance;
+ let clif_comments = fx.clif_comments;
+ let source_info_set = fx.source_info_set;
+ let local_map = fx.local_map;
+
+ fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
+
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "unopt",
+ module.isa(),
+ instance,
+ &func,
+ &clif_comments,
+ );
+
+ // Verify function
+ verify_func(tcx, &clif_comments, &func);
+
+ compile_fn(
+ cx,
+ module,
+ instance,
+ symbol_name.name,
+ func_id,
+ func,
+ clif_comments,
+ source_info_set,
+ local_map,
+ );
+}
+
+fn compile_fn<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx>,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+ symbol_name: &str,
+ func_id: FuncId,
+ func: Function,
+ mut clif_comments: CommentWriter,
+ source_info_set: IndexSet<SourceInfo>,
+ local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+) {
+ let tcx = cx.tcx;
+
+ // Store function in context
+ let context = &mut cx.cached_context;
+ context.clear();
+ context.func = func;
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(module.isa()).unwrap();
+ context.dce(module.isa()).unwrap();
+ // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+ // invalidate it when it would change.
+ context.domtree.clear();
+
+ // Perform rust specific optimizations
+ tcx.sess.time("optimize clif ir", || {
+ crate::optimize::optimize_function(
+ tcx,
+ module.isa(),
+ instance,
+ context,
+ &mut clif_comments,
+ );
+ });
+
+ #[cfg(any())] // This is never true
+ let _clif_guard = {
+ use std::fmt::Write;
+
+ let func_clone = context.func.clone();
+ let clif_comments_clone = clif_comments.clone();
+ let mut clif = String::new();
+ for flag in module.isa().flags().iter() {
+ writeln!(clif, "set {}", flag).unwrap();
+ }
+ write!(clif, "target {}", module.isa().triple().architecture.to_string()).unwrap();
+ for isa_flag in module.isa().isa_flags().iter() {
+ write!(clif, " {}", isa_flag).unwrap();
+ }
+ writeln!(clif, "\n").unwrap();
+ crate::PrintOnPanic(move || {
+ let mut clif = clif.clone();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &clif_comments_clone,
+ &mut clif,
+ &func_clone,
+ )
+ .unwrap();
+ clif
+ })
+ };
+
+ // Define function
+ tcx.sess.time("define function", || {
+ context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
+ module.define_function(func_id, context).unwrap();
+ });
+
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "opt",
+ module.isa(),
+ instance,
+ &context.func,
+ &clif_comments,
+ );
+
+ if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ tcx,
+ || format!("{}.vcode", tcx.symbol_name(instance).name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
+ }
+
+ // Define debuginfo for function
+ let isa = module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
+ tcx.sess.time("generate debug info", || {
+ if let Some(debug_context) = debug_context {
+ debug_context.define_function(
+ instance,
+ func_id,
+ symbol_name,
+ isa,
+ context,
+ &source_info_set,
+ local_map,
+ );
+ }
+ unwind_context.add_function(func_id, &context, isa);
+ });
+}
+
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
+ tcx.sess.time("verify clif ir", || {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ Some(Box::new(writer)),
+ err,
+ );
+ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
+ for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
+ // so for cleanup blocks.
+ }
+
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ if fx.clif_comments.enabled() {
+ let mut terminator_head = "\n".to_string();
+ with_no_trimmed_paths!({
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ });
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ let source_info = bb_data.terminator().source_info;
+ fx.set_debug_loc(source_info);
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
+ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ fx.bcx.set_cold_block(failure);
+
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, source_info);
+ }
+ }
+ }
+
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ let discr = codegen_operand(fx, discr).load_scalar(fx);
+
+ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
+ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+ &fx.bcx, discr, test_zero,
+ ) {
+ if taken {
+ fx.bcx.ins().jump(then_block, &[]);
+ } else {
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ } else {
+ if test_zero {
+ fx.bcx.ins().brz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ } else {
+ fx.bcx.ins().brnz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ }
+ } else {
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(
+ fx,
+ mir::SourceInfo { span: *fn_span, ..source_info },
+ func,
+ args,
+ *destination,
+ *target,
+ )
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ cleanup: _,
+ } => {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "cranelift doesn't support unwinding from inline assembly.",
+ );
+ }
+
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ source_info.span,
+ template,
+ operands,
+ *options,
+ );
+
+ match *destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ // FIXME implement unwinding
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Unreachable => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
+ TerminatorKind::Drop { place, target, unwind: _ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, source_info, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+}
+
+fn codegen_stmt<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ #[cfg(any())] // This is never true
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ if fx.clif_comments.enabled() {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+ }
+
+ match &stmt.kind {
+ StatementKind::SetDiscriminant { place, variant_index } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::CopyForDeref(place) => {
+ let cplace = codegen_place(fx, place);
+ let val = cplace.to_cvalue(fx);
+ lval.write_cvalue(fx, val)
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(IntTy::I128) => {
+ // FIXME remove this case once ineg.i128 works
+ let zero =
+ CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+ }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(
+ CastKind::Misc
+ | CastKind::PointerExposeAddress
+ | CastKind::PointerFromExposedAddress,
+ ref operand,
+ to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
+ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.builtin_deref(true)
+ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ })
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else {
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ let discr =
+ crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+ lval.write_cvalue(fx, discr);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .kind()
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if operand.layout().size.bytes() == 0 {
+                        // Do nothing for ZSTs
+ } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
+ fx.bcx.call_memset(fx.target_config, addr, val, times);
+ } else {
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let content_ty = fx.monomorphize(content_ty);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+ let operand = codegen_operand(fx, operand);
+ let operand = operand.load_scalar(fx);
+ lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
+ }
+ Rvalue::NullaryOp(null_op, ty) => {
+ assert!(
+ lval.layout()
+ .ty
+ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
+ );
+ let layout = fx.layout_of(fx.monomorphize(ty));
+ let val = match null_op {
+ NullOp::SizeOf => layout.size.bytes(),
+ NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+ AggregateKind::Array(_ty) => {
+ for (i, operand) in operands.iter().enumerate() {
+ let operand = codegen_operand(fx, operand);
+ let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ }
+ }
+ _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+ },
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ StatementKind::CopyNonOverlapping(inner) => {
+ let dst = codegen_operand(fx, &inner.dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, &inner.src).load_scalar(fx);
+ let count = codegen_operand(fx, &inner.count).load_scalar(fx);
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+ fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
+ }
+ }
+}
+
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
+ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
+ ty::Slice(_elem_ty) => {
+ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+ }
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
+
+pub(crate) fn codegen_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+ let offset: u64 = offset;
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(*elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
+
+pub(crate) fn codegen_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
+ Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+ }
+}
+
+pub(crate) fn codegen_panic<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ msg_str: &str,
+ source_info: mir::SourceInfo,
+) {
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ let msg_ptr = fx.anonymous_str(msg_str);
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, source_info.span);
+}
+
+pub(crate) fn codegen_panic_inner<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
+ let def_id =
+ fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ fx.lib_call(
+ &*symbol_name,
+ vec![
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ args,
+ );
+
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
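The boolean `SwitchInt` lowering in base.rs above chooses between a branch-if-zero (`brz`) and a branch-if-non-zero (`brnz`) instruction based on the single explicit target value, and flips that choice when the peephole pass unwrapped a `bnot` of the discriminant. A minimal sketch of that selection logic, using hypothetical plain-Rust types rather than the backend's MIR and CLIF machinery:

    #[derive(Debug, PartialEq)]
    enum BranchKind {
        BranchIfZero,    // lowered as brz(discr, then_block); jump(else_block)
        BranchIfNonZero, // lowered as brnz(discr, then_block); jump(else_block)
    }

    fn pick_branch(then_value: u128, not_was_unwrapped: bool) -> BranchKind {
        // A `then_value` of 0 means "take the then-block when the discriminant is zero".
        let test_zero = match then_value {
            0 => true,
            1 => false,
            other => panic!("unexpected switch value {}", other),
        };
        // Unwrapping a `bnot` inverts which value reaches the branch, so flip the test.
        let test_zero = if not_was_unwrapped { !test_zero } else { test_zero };
        if test_zero { BranchKind::BranchIfZero } else { BranchKind::BranchIfNonZero }
    }

    fn main() {
        assert_eq!(pick_branch(0, false), BranchKind::BranchIfZero);
        assert_eq!(pick_branch(1, false), BranchKind::BranchIfNonZero);
        assert_eq!(pick_branch(1, true), BranchKind::BranchIfZero);
    }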
diff --git a/compiler/rustc_codegen_cranelift/src/cast.rs b/compiler/rustc_codegen_cranelift/src/cast.rs
new file mode 100644
index 000000000..bad5d1f08
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/cast.rs
@@ -0,0 +1,164 @@
+//! Various number casting functions
+
+use crate::prelude::*;
+
+pub(crate) fn clif_intcast(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ val: Value,
+ to: Type,
+ signed: bool,
+) -> Value {
+ let from = fx.bcx.func.dfg.value_type(val);
+ match (from, to) {
+ // equal
+ (_, _) if from == to => val,
+
+ // extend
+ (_, _) if to.wider_or_equal(from) => {
+ if signed {
+ fx.bcx.ins().sextend(to, val)
+ } else {
+ fx.bcx.ins().uextend(to, val)
+ }
+ }
+
+ // reduce
+ (_, _) => fx.bcx.ins().ireduce(to, val),
+ }
+}
+
+pub(crate) fn clif_int_or_float_cast(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ from: Value,
+ from_signed: bool,
+ to_ty: Type,
+ to_signed: bool,
+) -> Value {
+ let from_ty = fx.bcx.func.dfg.value_type(from);
+
+ if from_ty.is_int() && to_ty.is_int() {
+ // int-like -> int-like
+ clif_intcast(
+ fx,
+ from,
+ to_ty,
+ // This is correct as either from_signed == to_signed (=> this is trivially correct)
+            // or from_ty == to_ty, which means this cast is a no-op.
+ from_signed,
+ )
+ } else if from_ty.is_int() && to_ty.is_float() {
+ if from_ty == types::I128 {
+ // _______ss__f_
+ // __float tisf: i128 -> f32
+ // __float tidf: i128 -> f64
+ // __floatuntisf: u128 -> f32
+ // __floatuntidf: u128 -> f64
+
+ let name = format!(
+ "__float{sign}ti{flt}f",
+ sign = if from_signed { "" } else { "un" },
+ flt = match to_ty {
+ types::F32 => "s",
+ types::F64 => "d",
+ _ => unreachable!("{:?}", to_ty),
+ },
+ );
+
+ let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+ let to_rust_ty = match to_ty {
+ types::F32 => fx.tcx.types.f32,
+ types::F64 => fx.tcx.types.f64,
+ _ => unreachable!(),
+ };
+
+ return fx
+ .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+ .load_scalar(fx);
+ }
+
+ // int-like -> float
+ if from_signed {
+ fx.bcx.ins().fcvt_from_sint(to_ty, from)
+ } else {
+ fx.bcx.ins().fcvt_from_uint(to_ty, from)
+ }
+ } else if from_ty.is_float() && to_ty.is_int() {
+ let val = if to_ty == types::I128 {
+ // _____sssf___
+ // __fix sfti: f32 -> i128
+ // __fix dfti: f64 -> i128
+ // __fixunssfti: f32 -> u128
+ // __fixunsdfti: f64 -> u128
+
+ let name = format!(
+ "__fix{sign}{flt}fti",
+ sign = if to_signed { "" } else { "uns" },
+ flt = match from_ty {
+ types::F32 => "s",
+ types::F64 => "d",
+                    _ => unreachable!("{:?}", from_ty),
+ },
+ );
+
+ let from_rust_ty = match from_ty {
+ types::F32 => fx.tcx.types.f32,
+ types::F64 => fx.tcx.types.f64,
+ _ => unreachable!(),
+ };
+
+ let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+ fx.easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+ .load_scalar(fx)
+ } else if to_ty == types::I8 || to_ty == types::I16 {
+ // FIXME implement fcvt_to_*int_sat.i8/i16
+ let val = if to_signed {
+ fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
+ } else {
+ fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
+ };
+ let (min, max) = match (to_ty, to_signed) {
+ (types::I8, false) => (0, i64::from(u8::MAX)),
+ (types::I16, false) => (0, i64::from(u16::MAX)),
+ (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
+ (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
+ _ => unreachable!(),
+ };
+ let min_val = fx.bcx.ins().iconst(types::I32, min);
+ let max_val = fx.bcx.ins().iconst(types::I32, max);
+
+ let val = if to_signed {
+ let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
+ let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
+ fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
+ } else {
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
+ fx.bcx.ins().select(has_overflow, max_val, val)
+ };
+ fx.bcx.ins().ireduce(to_ty, val)
+ } else if to_signed {
+ fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
+ } else {
+ fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
+ };
+
+ if let Some(false) = fx.tcx.sess.opts.unstable_opts.saturating_float_casts {
+ return val;
+ }
+
+ let is_not_nan = fx.bcx.ins().fcmp(FloatCC::Equal, from, from);
+ let zero = fx.bcx.ins().iconst(to_ty, 0);
+ fx.bcx.ins().select(is_not_nan, val, zero)
+ } else if from_ty.is_float() && to_ty.is_float() {
+ // float -> float
+ match (from_ty, to_ty) {
+ (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
+ (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
+ _ => from,
+ }
+ } else {
+ unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
+ }
+}
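The i8/i16 float-to-int workaround in cast.rs converts through `fcvt_to_*int_sat.i32` and then clamps the result with two `select`s, finally zeroing it for NaN inputs. A plain-Rust sketch of the value that instruction chain computes (illustrative only; the backend emits CLIF instructions, not a helper function like this):

    fn f32_to_i8_saturating(from: f32) -> i8 {
        // Mirrors the final `select(is_not_nan, val, zero)`: NaN maps to 0.
        if from.is_nan() {
            return 0;
        }
        // `as i32` saturates in Rust, standing in for fcvt_to_sint_sat.i32.
        let wide = from as i32;
        // The two selects cap the wide result to the narrow type's range.
        wide.clamp(i8::MIN as i32, i8::MAX as i32) as i8
    }

    fn main() {
        assert_eq!(f32_to_i8_saturating(300.0), i8::MAX);
        assert_eq!(f32_to_i8_saturating(-300.0), i8::MIN);
        assert_eq!(f32_to_i8_saturating(f32::NAN), 0);
        assert_eq!(f32_to_i8_saturating(42.7), 42);
    }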
diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
new file mode 100644
index 000000000..638b2d573
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
@@ -0,0 +1,153 @@
+//! Replaces 128-bit operators with lang item calls where necessary
+
+use cranelift_codegen::ir::ArgumentPurpose;
+
+use crate::prelude::*;
+
+pub(crate) fn maybe_codegen<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ checked: bool,
+ lhs: CValue<'tcx>,
+ rhs: CValue<'tcx>,
+) -> Option<CValue<'tcx>> {
+ if lhs.layout().ty != fx.tcx.types.u128
+ && lhs.layout().ty != fx.tcx.types.i128
+ && rhs.layout().ty != fx.tcx.types.u128
+ && rhs.layout().ty != fx.tcx.types.i128
+ {
+ return None;
+ }
+
+ let is_signed = type_sign(lhs.layout().ty);
+
+ match bin_op {
+ BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
+ assert!(!checked);
+ None
+ }
+ BinOp::Add | BinOp::Sub if !checked => None,
+ BinOp::Mul if !checked || is_signed => {
+ if !checked {
+ let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+ if fx.tcx.sess.target.is_like_windows {
+ let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ let args = [
+ ret_place.to_ptr().get_addr(fx),
+ lhs_ptr.get_addr(fx),
+ rhs_ptr.get_addr(fx),
+ ];
+ fx.lib_call(
+ "__multi3",
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ &args,
+ );
+ Some(ret_place.to_cvalue(fx))
+ } else {
+ Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
+ }
+ } else {
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+ let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
+ let lhs = lhs.load_scalar(fx);
+ let rhs = rhs.load_scalar(fx);
+ let oflow_ptr = oflow.to_ptr().get_addr(fx);
+ let res = fx.lib_call(
+ "__muloti4",
+ vec![
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![AbiParam::new(types::I128)],
+ &[lhs, rhs, oflow_ptr],
+ )[0];
+ let oflow = oflow.to_cvalue(fx).load_scalar(fx);
+ let oflow = fx.bcx.ins().ireduce(types::I8, oflow);
+ Some(CValue::by_val_pair(res, oflow, fx.layout_of(out_ty)))
+ }
+ }
+ BinOp::Add | BinOp::Sub | BinOp::Mul => {
+ assert!(checked);
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+ let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
+ let (param_types, args) = if fx.tcx.sess.target.is_like_windows {
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ (
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ [out_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)],
+ )
+ } else {
+ (
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ ],
+ [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)],
+ )
+ };
+ let name = match (bin_op, is_signed) {
+ (BinOp::Add, false) => "__rust_u128_addo",
+ (BinOp::Add, true) => "__rust_i128_addo",
+ (BinOp::Sub, false) => "__rust_u128_subo",
+ (BinOp::Sub, true) => "__rust_i128_subo",
+ (BinOp::Mul, false) => "__rust_u128_mulo",
+ _ => unreachable!(),
+ };
+ fx.lib_call(name, param_types, vec![], &args);
+ Some(out_place.to_cvalue(fx))
+ }
+ BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
+ BinOp::Div | BinOp::Rem => {
+ assert!(!checked);
+ let name = match (bin_op, is_signed) {
+ (BinOp::Div, false) => "__udivti3",
+ (BinOp::Div, true) => "__divti3",
+ (BinOp::Rem, false) => "__umodti3",
+ (BinOp::Rem, true) => "__modti3",
+ _ => unreachable!(),
+ };
+ if fx.tcx.sess.target.is_like_windows {
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ let args = [lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
+ let ret = fx.lib_call(
+ name,
+ vec![AbiParam::new(fx.pointer_type), AbiParam::new(fx.pointer_type)],
+ vec![AbiParam::new(types::I64X2)],
+ &args,
+ )[0];
+ // FIXME use bitcast instead of store to get from i64x2 to i128
+ let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+ ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
+ Some(ret_place.to_cvalue(fx))
+ } else {
+ Some(fx.easy_call(name, &[lhs, rhs], lhs.layout().ty))
+ }
+ }
+ BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
+ assert!(!checked);
+ None
+ }
+ BinOp::Shl | BinOp::Shr => None,
+ }
+}
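For 128-bit division and remainder the code above dispatches to compiler-builtins symbols rather than emitting inline CLIF. A small sketch of the name selection (the enum is a stand-in for rustc's `BinOp`; only the cases `maybe_codegen` handles this way are listed):

    #[derive(Clone, Copy)]
    enum DivRem {
        Div,
        Rem,
    }

    // Maps (operation, signedness) to the libcall the backend emits a call to.
    fn libcall_name(op: DivRem, signed: bool) -> &'static str {
        match (op, signed) {
            (DivRem::Div, false) => "__udivti3",
            (DivRem::Div, true) => "__divti3",
            (DivRem::Rem, false) => "__umodti3",
            (DivRem::Rem, true) => "__modti3",
        }
    }

    fn main() {
        assert_eq!(libcall_name(DivRem::Div, true), "__divti3");
        assert_eq!(libcall_name(DivRem::Rem, false), "__umodti3");
    }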
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
new file mode 100644
index 000000000..f9dc1b516
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -0,0 +1,475 @@
+use cranelift_codegen::isa::TargetFrontendConfig;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers,
+};
+use rustc_middle::ty::SymbolName;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::{Integer, Primitive};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
+ match tcx.data_layout.pointer_size.bits() {
+ 16 => types::I16,
+ 32 => types::I32,
+ 64 => types::I64,
+ bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ }
+}
+
+pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
+ match scalar.primitive() {
+ Primitive::Int(int, _sign) => match int {
+ Integer::I8 => types::I8,
+ Integer::I16 => types::I16,
+ Integer::I32 => types::I32,
+ Integer::I64 => types::I64,
+ Integer::I128 => types::I128,
+ },
+ Primitive::F32 => types::F32,
+ Primitive::F64 => types::F64,
+ Primitive::Pointer => pointer_ty(tcx),
+ }
+}
+
+fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
+ Some(match ty.kind() {
+ ty::Bool => types::I8,
+ ty::Uint(size) => match size {
+ UintTy::U8 => types::I8,
+ UintTy::U16 => types::I16,
+ UintTy::U32 => types::I32,
+ UintTy::U64 => types::I64,
+ UintTy::U128 => types::I128,
+ UintTy::Usize => pointer_ty(tcx),
+ },
+ ty::Int(size) => match size {
+ IntTy::I8 => types::I8,
+ IntTy::I16 => types::I16,
+ IntTy::I32 => types::I32,
+ IntTy::I64 => types::I64,
+ IntTy::I128 => types::I128,
+ IntTy::Isize => pointer_ty(tcx),
+ },
+ ty::Char => types::I32,
+ ty::Float(size) => match size {
+ FloatTy::F32 => types::F32,
+ FloatTy::F64 => types::F64,
+ },
+ ty::FnPtr(_) => pointer_ty(tcx),
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, *pointee_ty) {
+ return None;
+ } else {
+ pointer_ty(tcx)
+ }
+ }
+ ty::Adt(adt_def, _) if adt_def.repr().simd() => {
+ let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+ {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
+ _ => return None,
+ }
+ }
+ ty::Param(_) => bug!("ty param {:?}", ty),
+ _ => return None,
+ })
+}
+
+fn clif_pair_type_from_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
+ Some(match ty.kind() {
+ ty::Tuple(types) if types.len() == 2 => {
+ let a = clif_type_from_ty(tcx, types[0])?;
+ let b = clif_type_from_ty(tcx, types[1])?;
+ if a.is_vector() || b.is_vector() {
+ return None;
+ }
+ (a, b)
+ }
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, *pointee_ty) {
+ (pointer_ty(tcx), pointer_ty(tcx))
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ })
+}
+
+/// Is a pointer to this type a fat ptr?
+pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+ let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
+ match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
+ Abi::Scalar(_) => false,
+ Abi::ScalarPair(_, _) => true,
+ abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+ }
+}
+
+pub(crate) fn codegen_icmp_imm(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intcc: IntCC,
+ lhs: Value,
+ rhs: i128,
+) -> Value {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ if lhs_ty == types::I128 {
+ // FIXME legalize `icmp_imm.i128` in Cranelift
+
+ let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
+ let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
+
+ match intcc {
+ IntCC::Equal => {
+ let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ fx.bcx.ins().band(lsb_eq, msb_eq)
+ }
+ IntCC::NotEqual => {
+ let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
+ let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
+ fx.bcx.ins().bor(lsb_ne, msb_ne)
+ }
+ _ => {
+ // if msb_eq {
+ // lsb_cc
+ // } else {
+ // msb_cc
+ // }
+
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
+ let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
+
+ fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
+ }
+ }
+ } else {
+ let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
+ fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
+ }
+}
+
+pub(crate) fn type_min_max_value(
+ bcx: &mut FunctionBuilder<'_>,
+ ty: Type,
+ signed: bool,
+) -> (Value, Value) {
+ assert!(ty.is_int());
+
+ if ty == types::I128 {
+ if signed {
+ let min = i128::MIN as u128;
+ let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
+ let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
+ let min = bcx.ins().iconcat(min_lsb, min_msb);
+
+ let max = i128::MAX as u128;
+ let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
+ let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
+ let max = bcx.ins().iconcat(max_lsb, max_msb);
+
+ return (min, max);
+ } else {
+ let min_half = bcx.ins().iconst(types::I64, 0);
+ let min = bcx.ins().iconcat(min_half, min_half);
+
+ let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
+ let max = bcx.ins().iconcat(max_half, max_half);
+
+ return (min, max);
+ }
+ }
+
+ let min = match (ty, signed) {
+ (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
+ 0i64
+ }
+ (types::I8, true) => i64::from(i8::MIN),
+ (types::I16, true) => i64::from(i16::MIN),
+ (types::I32, true) => i64::from(i32::MIN),
+ (types::I64, true) => i64::MIN,
+ _ => unreachable!(),
+ };
+
+ let max = match (ty, signed) {
+ (types::I8, false) => i64::from(u8::MAX),
+ (types::I16, false) => i64::from(u16::MAX),
+ (types::I32, false) => i64::from(u32::MAX),
+ (types::I64, false) => u64::MAX as i64,
+ (types::I8, true) => i64::from(i8::MAX),
+ (types::I16, true) => i64::from(i16::MAX),
+ (types::I32, true) => i64::from(i32::MAX),
+ (types::I64, true) => i64::MAX,
+ _ => unreachable!(),
+ };
+
+ let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
+
+ (min, max)
+}
+
+pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
+ ty::Int(..) => true,
+ ty::Float(..) => false, // `signed` is unused for floats
+ _ => panic!("{}", ty),
+ }
+}
+
+pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
+ pub(crate) cx: &'clif mut crate::CodegenCx<'tcx>,
+ pub(crate) module: &'m mut dyn Module,
+ pub(crate) tcx: TyCtxt<'tcx>,
+ pub(crate) target_config: TargetFrontendConfig, // Cached from module
+ pub(crate) pointer_type: Type, // Cached from module
+ pub(crate) constants_cx: ConstantCx,
+
+ pub(crate) instance: Instance<'tcx>,
+ pub(crate) symbol_name: SymbolName<'tcx>,
+ pub(crate) mir: &'tcx Body<'tcx>,
+ pub(crate) fn_abi: Option<&'tcx FnAbi<'tcx, Ty<'tcx>>>,
+
+ pub(crate) bcx: FunctionBuilder<'clif>,
+ pub(crate) block_map: IndexVec<BasicBlock, Block>,
+ pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
+
+ /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
+ pub(crate) caller_location: Option<CValue<'tcx>>,
+
+ pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
+ pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
+
+ /// This should only be accessed by `CPlace::new_var`.
+ pub(crate) next_ssa_var: u32,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ RevealAllLayoutCx(self.tcx).handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ RevealAllLayoutCx(self.tcx).handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ pub(crate) fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+
+ pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
+ clif_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
+ clif_pair_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
+ *self.block_map.get(bb).unwrap()
+ }
+
+ pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
+ *self.local_map.get(local).unwrap_or_else(|| {
+ panic!("Local {:?} doesn't exist", local);
+ })
+ }
+
+ pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
+ let (index, _) = self.source_info_set.insert_full(source_info);
+ self.bcx.set_srcloc(SourceLoc::new(index as u32));
+ }
+
+ // Note: must be kept in sync with get_caller_location from cg_ssa
+ pub(crate) fn get_caller_location(&mut self, mut source_info: mir::SourceInfo) -> CValue<'tcx> {
+ let span_to_caller_location = |fx: &mut FunctionCx<'_, '_, 'tcx>, span: Span| {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = fx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = fx.tcx.const_caller_location((
+ rustc_span::symbol::Symbol::intern(
+ &caller.file.name.prefer_remapped().to_string_lossy(),
+ ),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ crate::constant::codegen_const_value(fx, const_loc, fx.tcx.caller_location_ty())
+ };
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &self.mir.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(self.tcx) {
+ return span_to_caller_location(self, source_info.span);
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+ self.caller_location.unwrap_or_else(|| span_to_caller_location(self, source_info.span))
+ }
+
+ pub(crate) fn anonymous_str(&mut self, msg: &str) -> Value {
+ let mut data_ctx = DataContext::new();
+ data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
+ let msg_id = self.module.declare_anonymous_data(false, false).unwrap();
+
+ // Ignore DuplicateDefinition error, as the data will be the same
+ let _ = self.module.define_data(msg_id, &data_ctx);
+
+ let local_msg_id = self.module.declare_data_in_func(msg_id, self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(local_msg_id, msg);
+ }
+ self.bcx.ins().global_value(self.pointer_type, local_msg_id)
+ }
+}
+
+pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
+
+impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let layout::LayoutError::SizeOverflow(_) = err {
+ self.0.sess.span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.0.sess.span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.0
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.0.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.0.sess.target
+ }
+}
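`codegen_icmp_imm` above splits an `i128` comparison into two 64-bit halves: equality and inequality combine the half-wise results, while ordered comparisons use the low halves only when the high halves are equal. A pure-Rust model of the unsigned ordered case (the backend emits `isplit`, `icmp_imm`, and `select` instead of calling a helper):

    fn u128_lt_via_halves(lhs: u128, rhs: u128) -> bool {
        let (lhs_lsb, lhs_msb) = (lhs as u64, (lhs >> 64) as u64);
        let (rhs_lsb, rhs_msb) = (rhs as u64, (rhs >> 64) as u64);
        // Corresponds to select(msb_eq, lsb_cc, msb_cc) in the CLIF lowering above.
        if lhs_msb == rhs_msb { lhs_lsb < rhs_lsb } else { lhs_msb < rhs_msb }
    }

    fn main() {
        let cases = [(1u128, 2u128), (u128::MAX, 5), (1u128 << 70, 1u128 << 69), (7, 7)];
        for (a, b) in cases {
            assert_eq!(u128_lt_via_halves(a, b), a < b);
        }
    }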
diff --git a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
new file mode 100644
index 000000000..c6a247cf5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
@@ -0,0 +1,43 @@
+macro_rules! builtin_functions {
+ ($register:ident; $(fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;)*) => {
+ #[cfg(feature = "jit")]
+ #[allow(improper_ctypes)]
+ extern "C" {
+ $(fn $name($($arg_name: $arg_ty),*) -> $ret_ty;)*
+ }
+
+ #[cfg(feature = "jit")]
+ pub(crate) fn $register(builder: &mut cranelift_jit::JITBuilder) {
+ for (name, val) in [$((stringify!($name), $name as *const u8)),*] {
+ builder.symbol(name, val);
+ }
+ }
+ };
+}
+
+builtin_functions! {
+ register_functions_for_jit;
+
+ // integers
+ fn __multi3(a: i128, b: i128) -> i128;
+ fn __udivti3(n: u128, d: u128) -> u128;
+ fn __divti3(n: i128, d: i128) -> i128;
+ fn __umodti3(n: u128, d: u128) -> u128;
+ fn __modti3(n: i128, d: i128) -> i128;
+ fn __rust_u128_addo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_addo(a: i128, b: i128) -> (i128, bool);
+ fn __rust_u128_subo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_subo(a: i128, b: i128) -> (i128, bool);
+ fn __rust_u128_mulo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_mulo(a: i128, b: i128) -> (i128, bool);
+
+ // floats
+ fn __floattisf(i: i128) -> f32;
+ fn __floattidf(i: i128) -> f64;
+ fn __floatuntisf(i: u128) -> f32;
+ fn __floatuntidf(i: u128) -> f64;
+ fn __fixsfti(f: f32) -> i128;
+ fn __fixdfti(f: f64) -> i128;
+ fn __fixunssfti(f: f32) -> u128;
+ fn __fixunsdfti(f: f64) -> u128;
+}
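The `builtin_functions!` macro above registers each listed compiler-builtins symbol with the JIT so that calls to them resolve at runtime. A self-contained sketch of that idea, with `JITBuilder::symbol` modeled by a plain map and a stand-in function body (the real code hands the name/address pairs to cranelift_jit instead):

    use std::collections::HashMap;

    // Stand-in for the real __multi3 provided by compiler-builtins.
    #[allow(improper_ctypes_definitions)]
    extern "C" fn fake_multi3(a: i128, b: i128) -> i128 {
        a.wrapping_mul(b)
    }

    fn main() {
        // name -> address table; cranelift_jit keeps an equivalent table internally.
        let mut symbols: HashMap<&'static str, *const u8> = HashMap::new();
        symbols.insert("__multi3", fake_multi3 as *const u8);
        assert!(symbols.contains_key("__multi3"));
        println!("registered {} JIT symbol(s)", symbols.len());
    }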
diff --git a/compiler/rustc_codegen_cranelift/src/config.rs b/compiler/rustc_codegen_cranelift/src/config.rs
new file mode 100644
index 000000000..e59a0cb0a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/config.rs
@@ -0,0 +1,107 @@
+use std::env;
+use std::str::FromStr;
+
+fn bool_env_var(key: &str) -> bool {
+ env::var(key).as_ref().map(|val| &**val) == Ok("1")
+}
+
+/// The mode to use for compilation.
+#[derive(Copy, Clone, Debug)]
+pub enum CodegenMode {
+ /// AOT compile the crate. This is the default.
+ Aot,
+ /// JIT compile and execute the crate.
+ Jit,
+ /// JIT compile and execute the crate, but only compile functions the first time they are used.
+ JitLazy,
+}
+
+impl FromStr for CodegenMode {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "aot" => Ok(CodegenMode::Aot),
+ "jit" => Ok(CodegenMode::Jit),
+ "jit-lazy" => Ok(CodegenMode::JitLazy),
+ _ => Err(format!("Unknown codegen mode `{}`", s)),
+ }
+ }
+}
+
+/// Configuration of cg_clif as passed in through `-Cllvm-args` and various env vars.
+#[derive(Clone, Debug)]
+pub struct BackendConfig {
+ /// Should the crate be AOT compiled or JIT executed.
+ ///
+ /// Defaults to AOT compilation. Can be set using `-Cllvm-args=mode=...`.
+ pub codegen_mode: CodegenMode,
+
+    /// When JIT mode is enabled, pass these arguments to the program.
+ ///
+ /// Defaults to the value of `CG_CLIF_JIT_ARGS`.
+ pub jit_args: Vec<String>,
+
+ /// Display the time it took to perform codegen for a crate.
+ ///
+ /// Defaults to true when the `CG_CLIF_DISPLAY_CG_TIME` env var is set to 1 or false otherwise.
+ /// Can be set using `-Cllvm-args=display_cg_time=...`.
+ pub display_cg_time: bool,
+
+ /// Enable the Cranelift ir verifier for all compilation passes. If not set it will only run
+ /// once before passing the clif ir to Cranelift for compilation.
+ ///
+ /// Defaults to true when the `CG_CLIF_ENABLE_VERIFIER` env var is set to 1 or when cg_clif is
+ /// compiled with debug assertions enabled or false otherwise. Can be set using
+ /// `-Cllvm-args=enable_verifier=...`.
+ pub enable_verifier: bool,
+
+ /// Don't cache object files in the incremental cache. Useful during development of cg_clif
+ /// to make it possible to use incremental mode for all analyses performed by rustc without
+    /// caching object files whose content would have changed because of a change to cg_clif.
+ ///
+ /// Defaults to true when the `CG_CLIF_DISABLE_INCR_CACHE` env var is set to 1 or false
+ /// otherwise. Can be set using `-Cllvm-args=disable_incr_cache=...`.
+ pub disable_incr_cache: bool,
+}
+
+impl Default for BackendConfig {
+ fn default() -> Self {
+ BackendConfig {
+ codegen_mode: CodegenMode::Aot,
+ jit_args: {
+ let args = std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+ args.split(' ').map(|arg| arg.to_string()).collect()
+ },
+ display_cg_time: bool_env_var("CG_CLIF_DISPLAY_CG_TIME"),
+ enable_verifier: cfg!(debug_assertions) || bool_env_var("CG_CLIF_ENABLE_VERIFIER"),
+ disable_incr_cache: bool_env_var("CG_CLIF_DISABLE_INCR_CACHE"),
+ }
+ }
+}
+
+impl BackendConfig {
+ /// Parse the configuration passed in using `-Cllvm-args`.
+ pub fn from_opts(opts: &[String]) -> Result<Self, String> {
+ fn parse_bool(name: &str, value: &str) -> Result<bool, String> {
+ value.parse().map_err(|_| format!("failed to parse value `{}` for {}", value, name))
+ }
+
+ let mut config = BackendConfig::default();
+ for opt in opts {
+ if let Some((name, value)) = opt.split_once('=') {
+ match name {
+ "mode" => config.codegen_mode = value.parse()?,
+ "display_cg_time" => config.display_cg_time = parse_bool(name, value)?,
+ "enable_verifier" => config.enable_verifier = parse_bool(name, value)?,
+ "disable_incr_cache" => config.disable_incr_cache = parse_bool(name, value)?,
+ _ => return Err(format!("Unknown option `{}`", name)),
+ }
+ } else {
+ return Err(format!("Invalid option `{}`", opt));
+ }
+ }
+
+ Ok(config)
+ }
+}
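The option strings handled by `from_opts` arrive through `-Cllvm-args`, one `name=value` pair per option. A short usage sketch, assuming the `BackendConfig` type from the module above is in scope:

    fn main() {
        let opts = vec!["mode=jit-lazy".to_string(), "display_cg_time=true".to_string()];
        // Unknown names or options without `=` are rejected with an error string.
        let config = BackendConfig::from_opts(&opts).expect("valid cg_clif options");
        println!("{:?}", config);
    }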
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
new file mode 100644
index 000000000..7f7fd0e9c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -0,0 +1,580 @@
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
+};
+use rustc_middle::ty::ConstKind;
+use rustc_span::DUMMY_SP;
+
+use cranelift_codegen::ir::GlobalValueData;
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+pub(crate) struct ConstantCx {
+ todo: Vec<TodoItem>,
+ done: FxHashSet<DataId>,
+ anon_allocs: FxHashMap<AllocId, DataId>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+ Alloc(AllocId),
+ Static(DefId),
+}
+
+impl ConstantCx {
+ pub(crate) fn new() -> Self {
+ ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
+ }
+
+ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
+ //println!("todo {:?}", self.todo);
+ define_all_allocs(tcx, module, &mut self);
+ //println!("done {:?}", self.done);
+ self.done.clear();
+ }
+}
+
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
+ let mut all_constants_ok = true;
+ for constant in &fx.mir.required_consts {
+ let const_ = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(ct) => ct,
+ ConstantKind::Val(..) => continue,
+ };
+ match const_.kind() {
+ ConstKind::Value(_) => {}
+ ConstKind::Unevaluated(unevaluated) => {
+ if let Err(err) =
+ fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None)
+ {
+ all_constants_ok = false;
+ match err {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
+ fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ span_bug!(
+ constant.span,
+                                "codegen encountered polymorphic constant: {:?}",
+ err
+ );
+ }
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ }
+ }
+ all_constants_ok
+}
+
+pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
+ let mut constants_cx = ConstantCx::new();
+ constants_cx.todo.push(TodoItem::Static(def_id));
+ constants_cx.finalize(tcx, module);
+}
+
+pub(crate) fn codegen_tls_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+ }
+ let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+ CValue::by_val(tls_ptr, layout)
+}
+
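+/// Get a `CPlace` pointing at the data of a non-thread-local static.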
+fn codegen_static_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CPlace<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ }
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ assert!(!layout.is_unsized(), "unsized statics aren't supported");
+ assert!(
+ matches!(
+ fx.bcx.func.global_values[local_data_id],
+ GlobalValueData::Symbol { tls: false, .. }
+ ),
+ "tls static referenced without Rvalue::ThreadLocalRef"
+ );
+ CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
+}
+
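+/// Codegen the value of a MIR constant, evaluating it first if necessary.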
+pub(crate) fn codegen_constant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+ let const_ = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(ct) => ct,
+ ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
+ };
+ let const_val = match const_.kind() {
+ ConstKind::Value(valtree) => fx.tcx.valtree_to_const_val((const_.ty(), valtree)),
+ ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
+ if fx.tcx.is_static(def.did) =>
+ {
+ assert!(substs.is_empty());
+ assert!(promoted.is_none());
+
+ return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty())).to_cvalue(fx);
+ }
+ ConstKind::Unevaluated(unevaluated) => {
+ match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
+ Ok(const_val) => const_val,
+ Err(_) => {
+ span_bug!(constant.span, "erroneous constant not captured by required_consts");
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ };
+
+ codegen_const_value(fx, const_val, const_.ty())
+}
+
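+/// Turn an already evaluated `ConstValue` into a `CValue` of the given type.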
+pub(crate) fn codegen_const_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ const_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+ let layout = fx.layout_of(ty);
+ assert!(!layout.is_unsized(), "unsized const value");
+
+ if layout.is_zst() {
+ return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
+ }
+
+ match const_val {
+ ConstValue::ZeroSized => unreachable!(), // we already handled ZSTs above
+ ConstValue::Scalar(x) => match x {
+ Scalar::Int(int) => {
+ if fx.clif_type(layout.ty).is_some() {
+ return CValue::const_val(fx, layout, int);
+ } else {
+ let raw_val = int.to_bits(int.size()).unwrap();
+ let val = match int.size().bytes() {
+ 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
+ 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
+ 4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
+ 8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
+ 16 => {
+ let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
+ let msb =
+ fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ _ => unreachable!(),
+ };
+
+ let place = CPlace::new_stack_slot(fx, layout);
+ place.to_ptr().store(fx, val, MemFlags::trusted());
+ place.to_cvalue(fx)
+ }
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
+ let base_addr = match fx.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ GlobalAlloc::Function(instance) => {
+ let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
+ let local_func_id =
+ fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
+ let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
+ // FIXME: factor this common code with the `Memory` arm into a function?
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(fx.tcx.is_static(def_id));
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ };
+ let val = if offset.bytes() != 0 {
+ fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
+ } else {
+ base_addr
+ };
+ CValue::by_val(val, layout)
+ }
+ },
+ ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc)
+ .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+ layout,
+ ),
+ ConstValue::Slice { data, start, end } => {
+ let ptr = pointer_for_allocation(fx, data)
+ .offset_i64(fx, i64::try_from(start).unwrap())
+ .get_addr(fx);
+ let len = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+ CValue::by_val_pair(ptr, len, layout)
+ }
+ }
+}
+
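+/// Get a pointer to the start of the given constant allocation, declaring a backing data object
+/// for it if necessary.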
+pub(crate) fn pointer_for_allocation<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ alloc: ConstAllocation<'tcx>,
+) -> crate::pointer::Pointer {
+ let alloc_id = fx.tcx.create_memory_alloc(alloc);
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ &mut *fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ }
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ crate::pointer::Pointer::new(global_ptr)
+}
+
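+/// Get the `DataId` for an anonymous allocation, declaring it on first use and queueing it for
+/// definition.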
+pub(crate) fn data_id_for_alloc_id(
+ cx: &mut ConstantCx,
+ module: &mut dyn Module,
+ alloc_id: AllocId,
+ mutability: rustc_hir::Mutability,
+) -> DataId {
+ cx.todo.push(TodoItem::Alloc(alloc_id));
+ *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+ module.declare_anonymous_data(mutability == rustc_hir::Mutability::Mut, false).unwrap()
+ })
+}
+
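+/// Get the `DataId` for a static, either as a definition or as an import from another crate.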
+fn data_id_for_static(
+ tcx: TyCtxt<'_>,
+ module: &mut dyn Module,
+ def_id: DefId,
+ definition: bool,
+) -> DataId {
+ let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+ let linkage = if definition {
+ crate::linkage::get_static_linkage(tcx, def_id)
+ } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+ || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+ {
+ Linkage::Preemptible
+ } else {
+ Linkage::Import
+ };
+
+ let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+ let symbol_name = tcx.symbol_name(instance).name;
+ let ty = instance.ty(tcx, ParamEnv::reveal_all());
+ let is_mutable = if tcx.is_mutable_static(def_id) {
+ true
+ } else {
+ !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+ };
+ let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
+
+ let attrs = tcx.codegen_fn_attrs(def_id);
+
+ let data_id = match module.declare_data(
+ &*symbol_name,
+ linkage,
+ is_mutable,
+ attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+ ) {
+ Ok(data_id) => data_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{symbol_name}` as static, but it was already declared as function"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ };
+
+ if rlinkage.is_some() {
+ // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+
+ let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
+ let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(align);
+ let data = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
+ data_ctx.write_data_addr(0, data, 0);
+ match module.define_data(ref_data_id, &data_ctx) {
+ // Every time the static is referenced there will be another definition of this global,
+ // so duplicate definitions are expected and allowed.
+ Err(ModuleError::DuplicateDefinition(_)) => {}
+ res => res.unwrap(),
+ }
+ ref_data_id
+ } else {
+ data_id
+ }
+}
+
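+/// Define all allocations queued in `cx.todo`, including any allocations they transitively
+/// reference through relocations.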
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
+ while let Some(todo_item) = cx.todo.pop() {
+ let (data_id, alloc, section_name) = match todo_item {
+ TodoItem::Alloc(alloc_id) => {
+ let alloc = match tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => alloc,
+ GlobalAlloc::Function(_) | GlobalAlloc::Static(_) | GlobalAlloc::VTable(..) => {
+ unreachable!()
+ }
+ };
+ let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+ module
+ .declare_anonymous_data(
+ alloc.inner().mutability == rustc_hir::Mutability::Mut,
+ false,
+ )
+ .unwrap()
+ });
+ (data_id, alloc, None)
+ }
+ TodoItem::Static(def_id) => {
+ //println!("static {:?}", def_id);
+
+ let section_name = tcx.codegen_fn_attrs(def_id).link_section;
+
+ let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+ let data_id = data_id_for_static(tcx, module, def_id, true);
+ (data_id, alloc, section_name)
+ }
+ };
+
+ //("data_id {}", data_id);
+ if cx.done.contains(&data_id) {
+ continue;
+ }
+
+ let mut data_ctx = DataContext::new();
+ let alloc = alloc.inner();
+ data_ctx.set_align(alloc.align.bytes());
+
+ if let Some(section_name) = section_name {
+ let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
+ let section_name = section_name.as_str();
+ if let Some(names) = section_name.split_once(',') {
+ names
+ } else {
+ tcx.sess.fatal(&format!(
+ "#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
+ section_name
+ ));
+ }
+ } else {
+ ("", section_name.as_str())
+ };
+ data_ctx.set_segment_section(segment_name, section_name);
+ }
+
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
+ data_ctx.define(bytes.into_boxed_slice());
+
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let addend = {
+ let endianness = tcx.data_layout.endian;
+ let offset = offset.bytes() as usize;
+ let ptr_size = tcx.data_layout.pointer_size;
+ let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+ offset..offset + ptr_size.bytes() as usize,
+ );
+ read_target_uint(endianness, bytes).unwrap()
+ };
+
+ let reloc_target_alloc = tcx.global_alloc(alloc_id);
+ let data_id = match reloc_target_alloc {
+ GlobalAlloc::Function(instance) => {
+ assert_eq!(addend, 0);
+ let func_id =
+ crate::abi::import_function(tcx, module, instance.polymorphize(tcx));
+ let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+ continue;
+ }
+ GlobalAlloc::Memory(target_alloc) => {
+ data_id_for_alloc_id(cx, module, alloc_id, target_alloc.inner().mutability)
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = tcx.vtable_allocation((ty, trait_ref));
+ data_id_for_alloc_id(cx, module, alloc_id, Mutability::Not)
+ }
+ GlobalAlloc::Static(def_id) => {
+ if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ {
+ tcx.sess.fatal(&format!(
+ "Allocation {:?} contains reference to TLS value {:?}",
+ alloc, def_id
+ ));
+ }
+
+ // Don't push a `TodoItem::Static` here, as it will cause statics used by
+ // multiple crates to be duplicated between them. It isn't needed anyway,
+ // as `codegen_static` will push it when necessary.
+ data_id_for_static(tcx, module, def_id, false)
+ }
+ };
+
+ let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+ }
+
+ module.define_data(data_id, &data_ctx).unwrap();
+ cx.done.insert(data_id);
+ }
+
+ assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
+
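+/// Try to extract a compile-time constant from a MIR operand. For locals this scans the MIR for a
+/// single constant assignment to the local.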
+pub(crate) fn mir_operand_get_const_val<'tcx>(
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> Option<ConstValue<'tcx>> {
+ match operand {
+ Operand::Constant(const_) => match const_.literal {
+ ConstantKind::Ty(const_) => fx
+ .monomorphize(const_)
+ .eval_for_mir(fx.tcx, ParamEnv::reveal_all())
+ .try_to_value(fx.tcx),
+ ConstantKind::Val(val, _) => Some(val),
+ },
+ // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
+ // inside a temporary before being passed to the intrinsic requiring the const argument.
+ // This code tries to find a single constant-defining assignment of the referenced local.
+ Operand::Copy(place) | Operand::Move(place) => {
+ if !place.projection.is_empty() {
+ return None;
+ }
+ let mut computed_const_val = None;
+ for bb_data in fx.mir.basic_blocks() {
+ for stmt in &bb_data.statements {
+ match &stmt.kind {
+ StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
+ match &local_and_rvalue.1 {
+ Rvalue::Cast(CastKind::Misc, operand, ty) => {
+ if computed_const_val.is_some() {
+ return None; // local assigned twice
+ }
+ if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
+ return None;
+ }
+ let const_val = mir_operand_get_const_val(fx, operand)?;
+ if fx.layout_of(*ty).size
+ != const_val.try_to_scalar_int()?.size()
+ {
+ return None;
+ }
+ computed_const_val = Some(const_val);
+ }
+ Rvalue::Use(operand) => {
+ computed_const_val = mir_operand_get_const_val(fx, operand)
+ }
+ _ => return None,
+ }
+ }
+ StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
+ if &**stmt_place == place =>
+ {
+ return None;
+ }
+ StatementKind::CopyNonOverlapping(_) => {
+ return None;
+ } // conservative handling
+ StatementKind::Assign(_)
+ | StatementKind::FakeRead(_)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag(_, _)
+ | StatementKind::AscribeUserType(_, _)
+ | StatementKind::Coverage(_)
+ | StatementKind::Nop => {}
+ }
+ }
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Assert { .. } => {}
+ TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => unreachable!(),
+ TerminatorKind::InlineAsm { .. } => return None,
+ TerminatorKind::Call { destination, target: Some(_), .. }
+ if destination == place =>
+ {
+ return None;
+ }
+ TerminatorKind::Call { .. } => {}
+ }
+ }
+ computed_const_val
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
new file mode 100644
index 000000000..589910ede
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -0,0 +1,190 @@
+//! Write the debuginfo into an object file.
+
+use cranelift_object::ObjectProduct;
+use rustc_data_structures::fx::FxHashMap;
+
+use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
+use gimli::{RunTimeEndian, SectionId};
+
+use super::object::WriteDebugInfo;
+use super::DebugContext;
+
+impl DebugContext<'_> {
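+ /// Write all non-empty debuginfo sections into the given object file and add their relocations.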
+ pub(crate) fn emit(&mut self, product: &mut ObjectProduct) {
+ let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
+ let root = self.dwarf.unit.root();
+ let root = self.dwarf.unit.get_mut(root);
+ root.set(gimli::DW_AT_ranges, AttributeValue::RangeListRef(unit_range_list_id));
+
+ let mut sections = Sections::new(WriterRelocate::new(self.endian));
+ self.dwarf.write(&mut sections).unwrap();
+
+ let mut section_map = FxHashMap::default();
+ let _: Result<()> = sections.for_each_mut(|id, section| {
+ if !section.writer.slice().is_empty() {
+ let section_id = product.add_debug_section(id, section.writer.take());
+ section_map.insert(id, section_id);
+ }
+ Ok(())
+ });
+
+ let _: Result<()> = sections.for_each(|id, section| {
+ if let Some(section_id) = section_map.get(&id) {
+ for reloc in &section.relocs {
+ product.add_debug_reloc(&section_map, section_id, reloc);
+ }
+ }
+ Ok(())
+ });
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct DebugReloc {
+ pub(crate) offset: u32,
+ pub(crate) size: u8,
+ pub(crate) name: DebugRelocName,
+ pub(crate) addend: i64,
+ pub(crate) kind: object::RelocationKind,
+}
+
+#[derive(Clone)]
+pub(crate) enum DebugRelocName {
+ Section(SectionId),
+ Symbol(usize),
+}
+
+/// A [`Writer`] that collects all necessary relocations.
+#[derive(Clone)]
+pub(super) struct WriterRelocate {
+ pub(super) relocs: Vec<DebugReloc>,
+ pub(super) writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+ pub(super) fn new(endian: RunTimeEndian) -> Self {
+ WriterRelocate { relocs: Vec::new(), writer: EndianVec::new(endian) }
+ }
+
+ /// Apply the collected relocations so that the data is usable for JIT execution.
+ #[cfg(all(feature = "jit", not(windows)))]
+ pub(super) fn relocate_for_jit(mut self, jit_module: &cranelift_jit::JITModule) -> Vec<u8> {
+ for reloc in self.relocs.drain(..) {
+ match reloc.name {
+ super::DebugRelocName::Section(_) => unreachable!(),
+ super::DebugRelocName::Symbol(sym) => {
+ let addr = jit_module.get_finalized_function(
+ cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
+ );
+ let val = (addr as u64 as i64 + reloc.addend) as u64;
+ self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
+ }
+ }
+ }
+ self.writer.into_vec()
+ }
+}
+
+impl Writer for WriterRelocate {
+ type Endian = RunTimeEndian;
+
+ fn endian(&self) -> Self::Endian {
+ self.writer.endian()
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+
+ fn write(&mut self, bytes: &[u8]) -> Result<()> {
+ self.writer.write(bytes)
+ }
+
+ fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+ self.writer.write_at(offset, bytes)
+ }
+
+ fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+ match address {
+ Address::Constant(val) => self.write_udata(val, size),
+ Address::Symbol { symbol, addend } => {
+ let offset = self.len() as u64;
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend: addend as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+ }
+ }
+
+ fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
+ let offset = self.len() as u32;
+ self.relocs.push(DebugReloc {
+ offset,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+
+ fn write_offset_at(
+ &mut self,
+ offset: usize,
+ val: usize,
+ section: SectionId,
+ size: u8,
+ ) -> Result<()> {
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata_at(offset, 0, size)
+ }
+
+ fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
+ match address {
+ // Address::Constant arm copied from gimli
+ Address::Constant(val) => {
+ // Indirect doesn't matter here.
+ let val = match eh_pe.application() {
+ gimli::DW_EH_PE_absptr => val,
+ gimli::DW_EH_PE_pcrel => {
+ // FIXME better handling of sign
+ let offset = self.len() as u64;
+ offset.wrapping_sub(val)
+ }
+ _ => {
+ return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
+ }
+ };
+ self.write_eh_pointer_data(val, eh_pe.format(), size)
+ }
+ Address::Symbol { symbol, addend } => match eh_pe.application() {
+ gimli::DW_EH_PE_pcrel => {
+ let size = match eh_pe.format() {
+ gimli::DW_EH_PE_sdata4 => 4,
+ _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ };
+ self.relocs.push(DebugReloc {
+ offset: self.len() as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend,
+ kind: object::RelocationKind::Relative,
+ });
+ self.write_udata(0, size)
+ }
+ _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ },
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
new file mode 100644
index 000000000..bbcb95913
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -0,0 +1,218 @@
+//! Line info generation (`.debug_line`)
+
+use std::ffi::OsStr;
+use std::path::{Component, Path};
+
+use crate::prelude::*;
+
+use rustc_span::{
+ FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
+};
+
+use cranelift_codegen::binemit::CodeOffset;
+use cranelift_codegen::MachSrcLoc;
+
+use gimli::write::{
+ Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
+ UnitEntryId,
+};
+
+// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
+fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
+ let mut iter = path.components();
+ let file_name = match iter.next_back() {
+ Some(Component::Normal(p)) => p,
+ component => {
+ panic!(
+ "Path component {:?} of path {} is an invalid filename",
+ component,
+ path.display()
+ );
+ }
+ };
+ let parent = iter.as_path();
+ (parent, file_name)
+}
+
+// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
+fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
+ #[cfg(unix)]
+ {
+ use std::os::unix::ffi::OsStrExt;
+ path.as_bytes()
+ }
+ #[cfg(not(unix))]
+ {
+ path.to_str().unwrap().as_bytes()
+ }
+}
+
+pub(crate) const MD5_LEN: usize = 16;
+
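+/// Create DWARF file info from a source file hash, if it is an MD5 hash.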
+pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
+ if hash.kind == SourceFileHashAlgorithm::Md5 {
+ let mut buf = [0u8; MD5_LEN];
+ buf.copy_from_slice(hash.hash_bytes());
+ Some(FileInfo { timestamp: 0, size: 0, md5: buf })
+ } else {
+ None
+ }
+}
+
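+/// Add a source file to the line program, splitting real paths into a directory and a file name.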
+fn line_program_add_file(
+ line_program: &mut LineProgram,
+ line_strings: &mut LineStringTable,
+ file: &SourceFile,
+) -> FileId {
+ match &file.name {
+ FileName::Real(path) => {
+ let (dir_path, file_name) = split_path_dir_and_file(path.remapped_path_if_available());
+ let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
+ let file_name = osstr_as_utf8_bytes(file_name);
+
+ let dir_id = if !dir_name.is_empty() {
+ let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
+ line_program.add_directory(dir_name)
+ } else {
+ line_program.default_directory()
+ };
+ let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+
+ let info = make_file_info(file.src_hash);
+
+ line_program.file_has_md5 &= info.is_some();
+ line_program.add_file(file_name, dir_id, info)
+ }
+ // FIXME give more appropriate file names
+ filename => {
+ let dir_id = line_program.default_directory();
+ let dummy_file_name = LineString::new(
+ filename.prefer_remapped().to_string().into_bytes(),
+ line_program.encoding(),
+ line_strings,
+ );
+ line_program.add_file(dummy_file_name, dir_id, None)
+ }
+ }
+}
+
+impl<'tcx> DebugContext<'tcx> {
+ pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
+ let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
+
+ let file_id = line_program_add_file(
+ &mut self.dwarf.unit.line_program,
+ &mut self.dwarf.line_strings,
+ &loc.file,
+ );
+
+ let entry = self.dwarf.unit.get_mut(entry_id);
+
+ entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
+ entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
+ entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
+ }
+
+ pub(super) fn create_debug_lines(
+ &mut self,
+ symbol: usize,
+ entry_id: UnitEntryId,
+ context: &Context,
+ function_span: Span,
+ source_info_set: &indexmap::IndexSet<SourceInfo>,
+ ) -> CodeOffset {
+ let tcx = self.tcx;
+ let line_program = &mut self.dwarf.unit.line_program;
+
+ let line_strings = &mut self.dwarf.line_strings;
+ let mut last_span = None;
+ let mut last_file = None;
+ let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
+ if let Some(last_span) = last_span {
+ if span == last_span {
+ line_program.generate_row();
+ return;
+ }
+ }
+ last_span = Some(span);
+
+ // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
+ // In order to have good line stepping behavior in the debugger, we overwrite debug
+ // locations of macro expansions with that of the outermost expansion site
+ // (unless the crate is being compiled with `-Z debug-macros`).
+ let span = if !span.from_expansion() || tcx.sess.opts.unstable_opts.debug_macros {
+ span
+ } else {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+ };
+
+ let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(span.lo());
+
+ (
+ file,
+ u64::try_from(line).unwrap() + 1,
+ u64::from((span.lo() - line_pos).to_u32()) + 1,
+ )
+ }
+ Err(file) => (file, 0, 0),
+ };
+
+ // line_program_add_file is very slow.
+ // Optimize for the common case of the current file not being changed.
+ let current_file_changed = if let Some(last_file) = &last_file {
+ // If the allocations are not equal, then the files may still be equal, but that
+ // is not a problem, as this is just an optimization.
+ !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
+ } else {
+ true
+ };
+ if current_file_changed {
+ let file_id = line_program_add_file(line_program, line_strings, &file);
+ line_program.row().file = file_id;
+ last_file = Some(file);
+ }
+
+ line_program.row().line = line;
+ line_program.row().column = col;
+ line_program.generate_row();
+ };
+
+ line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
+
+ let mut func_end = 0;
+
+ let mcr = context.mach_compile_result.as_ref().unwrap();
+ for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
+ line_program.row().address_offset = u64::from(start);
+ if !loc.is_default() {
+ let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
+ create_row_for_span(line_program, source_info.span);
+ } else {
+ create_row_for_span(line_program, function_span);
+ }
+ func_end = end;
+ }
+
+ line_program.end_sequence(u64::from(func_end));
+
+ let func_end = mcr.buffer.total_size();
+
+ assert_ne!(func_end, 0);
+
+ let entry = self.dwarf.unit.get_mut(entry_id);
+ entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
+ entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
+
+ self.emit_location(entry_id, function_span);
+
+ func_end
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
new file mode 100644
index 000000000..693092ba5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -0,0 +1,357 @@
+//! Handling of everything related to debuginfo.
+
+mod emit;
+mod line_info;
+mod object;
+mod unwind;
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::{Endianness, LabelValueLoc, ValueLabel};
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::ValueLocRange;
+
+use gimli::write::{
+ Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
+ LocationList, Range, RangeList, UnitEntryId,
+};
+use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
+
+pub(crate) use emit::{DebugReloc, DebugRelocName};
+pub(crate) use unwind::UnwindContext;
+
+pub(crate) struct DebugContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ endian: RunTimeEndian,
+
+ dwarf: DwarfUnit,
+ unit_range_list: RangeList,
+
+ types: FxHashMap<Ty<'tcx>, UnitEntryId>,
+}
+
+impl<'tcx> DebugContext<'tcx> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+ let encoding = Encoding {
+ format: Format::Dwarf32,
+ // FIXME this should be configurable
+ // macOS doesn't seem to support DWARF > 3
+ // DWARF version 5 is required for the md5 file hash
+ version: if tcx.sess.target.is_like_osx {
+ 3
+ } else {
+ // FIXME change to version 5 once the gdb and lldb versions shipping with the latest
+ // Debian support it.
+ 4
+ },
+ address_size: isa.frontend_config().pointer_bytes(),
+ };
+
+ let endian = match isa.endianness() {
+ Endianness::Little => RunTimeEndian::Little,
+ Endianness::Big => RunTimeEndian::Big,
+ };
+
+ let mut dwarf = DwarfUnit::new(encoding);
+
+ let producer = format!(
+ "cg_clif (rustc {}, cranelift {})",
+ rustc_interface::util::version_str().unwrap_or("unknown version"),
+ cranelift_codegen::VERSION,
+ );
+ let comp_dir = tcx
+ .sess
+ .opts
+ .working_dir
+ .to_string_lossy(FileNameDisplayPreference::Remapped)
+ .into_owned();
+ let (name, file_info) = match tcx.sess.local_crate_source_file.clone() {
+ Some(path) => {
+ let name = path.to_string_lossy().into_owned();
+ (name, None)
+ }
+ None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
+ };
+
+ let mut line_program = LineProgram::new(
+ encoding,
+ LineEncoding::default(),
+ LineString::new(comp_dir.as_bytes(), encoding, &mut dwarf.line_strings),
+ LineString::new(name.as_bytes(), encoding, &mut dwarf.line_strings),
+ file_info,
+ );
+ line_program.file_has_md5 = file_info.is_some();
+
+ dwarf.unit.line_program = line_program;
+
+ {
+ let name = dwarf.strings.add(name);
+ let comp_dir = dwarf.strings.add(comp_dir);
+
+ let root = dwarf.unit.root();
+ let root = dwarf.unit.get_mut(root);
+ root.set(gimli::DW_AT_producer, AttributeValue::StringRef(dwarf.strings.add(producer)));
+ root.set(gimli::DW_AT_language, AttributeValue::Language(gimli::DW_LANG_Rust));
+ root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
+ root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
+ root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
+ }
+
+ DebugContext {
+ tcx,
+
+ endian,
+
+ dwarf,
+ unit_range_list: RangeList(Vec::new()),
+
+ types: FxHashMap::default(),
+ }
+ }
+
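+ /// Get or create the debuginfo type entry for `ty`, caching the result.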
+ fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
+ if let Some(type_id) = self.types.get(&ty) {
+ return *type_id;
+ }
+
+ let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
+
+ let primitive = |dwarf: &mut DwarfUnit, ate| {
+ let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
+ let type_entry = dwarf.unit.get_mut(type_id);
+ type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
+ type_id
+ };
+
+ let name = format!("{}", ty);
+ let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+
+ let type_id = match ty.kind() {
+ ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
+ ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
+ ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
+ ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
+ ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
+ ty::Ref(_, pointee_ty, _mutbl)
+ | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
+ let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
+
+ // Ensure that type is inserted before recursing to avoid duplicates
+ self.types.insert(ty, type_id);
+
+ let pointee = self.dwarf_ty(*pointee_ty);
+
+ let type_entry = self.dwarf.unit.get_mut(type_id);
+
+ //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
+ type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
+
+ type_id
+ }
+ ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
+ let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
+
+ // Ensure that type is inserted before recursing to avoid duplicates
+ self.types.insert(ty, type_id);
+
+ let variant = adt_def.non_enum_variant();
+
+ for (field_idx, field_def) in variant.fields.iter().enumerate() {
+ let field_offset = layout.fields.offset(field_idx);
+ let field_layout = layout.field(
+ &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
+ field_idx,
+ );
+
+ let field_type = self.dwarf_ty(field_layout.ty);
+
+ let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
+ let field_entry = self.dwarf.unit.get_mut(field_id);
+
+ field_entry.set(
+ gimli::DW_AT_name,
+ AttributeValue::String(field_def.name.as_str().to_string().into_bytes()),
+ );
+ field_entry.set(
+ gimli::DW_AT_data_member_location,
+ AttributeValue::Udata(field_offset.bytes()),
+ );
+ field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
+ }
+
+ type_id
+ }
+ _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
+ };
+
+ let type_entry = self.dwarf.unit.get_mut(type_id);
+
+ type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+ type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
+
+ self.types.insert(ty, type_id);
+
+ type_id
+ }
+
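+ /// Add a `DW_TAG_variable` entry for a local with the given name and type under `scope`.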
+ fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
+ let dw_ty = self.dwarf_ty(ty);
+
+ let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
+ let var_entry = self.dwarf.unit.get_mut(var_id);
+
+ var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+ var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
+
+ var_id
+ }
+
+ pub(crate) fn define_function(
+ &mut self,
+ instance: Instance<'tcx>,
+ func_id: FuncId,
+ name: &str,
+ isa: &dyn TargetIsa,
+ context: &Context,
+ source_info_set: &indexmap::IndexSet<SourceInfo>,
+ local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+ ) {
+ let symbol = func_id.as_u32() as usize;
+ let mir = self.tcx.instance_mir(instance.def);
+
+ // FIXME: add to appropriate scope instead of root
+ let scope = self.dwarf.unit.root();
+
+ let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
+ let entry = self.dwarf.unit.get_mut(entry_id);
+ let name_id = self.dwarf.strings.add(name);
+ // Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
+ entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
+ entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
+
+ let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
+
+ self.unit_range_list.0.push(Range::StartLength {
+ begin: Address::Symbol { symbol, addend: 0 },
+ length: u64::from(end),
+ });
+
+ let func_entry = self.dwarf.unit.get_mut(entry_id);
+ // Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
+ func_entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
+ // Using Udata for DW_AT_high_pc requires at least DWARF4
+ func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
+
+ // FIXME make it more reliable and implement scopes before re-enabling this.
+ if false {
+ let value_labels_ranges = std::collections::HashMap::new(); // FIXME
+
+ for (local, _local_decl) in mir.local_decls.iter_enumerated() {
+ let ty = self.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ mir.local_decls[local].ty,
+ );
+ let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
+
+ let location = place_location(
+ self,
+ isa,
+ symbol,
+ &local_map,
+ &value_labels_ranges,
+ Place { local, projection: ty::List::empty() },
+ );
+
+ let var_entry = self.dwarf.unit.get_mut(var_id);
+ var_entry.set(gimli::DW_AT_location, location);
+ }
+ }
+
+ // FIXME create locals for all entries in mir.var_debug_info
+ }
+}
+
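+/// Compute the DWARF location attribute for a place. Cases that aren't implemented yet fall back
+/// to an empty expression.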
+fn place_location<'tcx>(
+ debug_context: &mut DebugContext<'tcx>,
+ isa: &dyn TargetIsa,
+ symbol: usize,
+ local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
+ #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
+ ValueLabel,
+ Vec<ValueLocRange>,
+ >,
+ place: Place<'tcx>,
+) -> AttributeValue {
+ assert!(place.projection.is_empty()); // FIXME implement them
+
+ match local_map[place.local].inner() {
+ CPlaceInner::Var(_local, var) => {
+ let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
+ if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
+ let loc_list = LocationList(
+ value_loc_ranges
+ .iter()
+ .map(|value_loc_range| Location::StartEnd {
+ begin: Address::Symbol {
+ symbol,
+ addend: i64::from(value_loc_range.start),
+ },
+ end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
+ data: translate_loc(isa, value_loc_range.loc).unwrap(),
+ })
+ .collect(),
+ );
+ let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
+
+ AttributeValue::LocationListRef(loc_list_id)
+ } else {
+ // FIXME set value labels for unused locals
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => {
+ // FIXME implement this
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ CPlaceInner::VarLane(_, _, _) => {
+ // FIXME implement this
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ CPlaceInner::Addr(_, _) => {
+ // FIXME implement this (used by arguments and returns)
+
+ AttributeValue::Exprloc(Expression::new())
+
+ // For PointerBase::Stack:
+ //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot)).unwrap())
+ }
+ }
+}
+
+// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
+fn translate_loc(isa: &dyn TargetIsa, loc: LabelValueLoc) -> Option<Expression> {
+ match loc {
+ LabelValueLoc::Reg(reg) => {
+ let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
+ let mut expr = Expression::new();
+ expr.op_reg(gimli::Register(machine_reg));
+ Some(expr)
+ }
+ LabelValueLoc::SPOffset(offset) => {
+ let mut expr = Expression::new();
+ expr.op_breg(X86_64::RSP, offset);
+ Some(expr)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
new file mode 100644
index 000000000..9dc9b2cf9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
@@ -0,0 +1,83 @@
+use rustc_data_structures::fx::FxHashMap;
+
+use cranelift_module::FuncId;
+use cranelift_object::ObjectProduct;
+
+use object::write::{Relocation, StandardSegment};
+use object::{RelocationEncoding, SectionKind};
+
+use gimli::SectionId;
+
+use crate::debuginfo::{DebugReloc, DebugRelocName};
+
+pub(super) trait WriteDebugInfo {
+ type SectionId: Copy;
+
+ fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ );
+}
+
+impl WriteDebugInfo for ObjectProduct {
+ type SectionId = (object::write::SectionId, object::write::SymbolId);
+
+ fn add_debug_section(
+ &mut self,
+ id: SectionId,
+ data: Vec<u8>,
+ ) -> (object::write::SectionId, object::write::SymbolId) {
+ let name = if self.object.format() == object::BinaryFormat::MachO {
+ id.name().replace('.', "__") // Mach-O expects __debug_info instead of .debug_info
+ } else {
+ id.name().to_string()
+ }
+ .into_bytes();
+
+ let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
+ // FIXME use SHT_X86_64_UNWIND for .eh_frame
+ let section_id = self.object.add_section(
+ segment,
+ name,
+ if id == SectionId::EhFrame { SectionKind::ReadOnlyData } else { SectionKind::Debug },
+ );
+ self.object
+ .section_mut(section_id)
+ .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
+ let symbol_id = self.object.section_symbol(section_id);
+ (section_id, symbol_id)
+ }
+
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ ) {
+ let (symbol, symbol_offset) = match reloc.name {
+ DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
+ DebugRelocName::Symbol(id) => {
+ let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
+ self.object
+ .symbol_section_and_offset(symbol_id)
+ .expect("Debug reloc for undef sym???")
+ }
+ };
+ self.object
+ .add_relocation(
+ from.0,
+ Relocation {
+ offset: u64::from(reloc.offset),
+ symbol,
+ kind: reloc.kind,
+ encoding: RelocationEncoding::Generic,
+ size: reloc.size * 8,
+ addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
+ },
+ )
+ .unwrap();
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
new file mode 100644
index 000000000..d26392c49
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
@@ -0,0 +1,136 @@
+//! Unwind info generation (`.eh_frame`)
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::Endianness;
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
+
+use cranelift_object::ObjectProduct;
+use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
+use gimli::RunTimeEndian;
+
+use super::object::WriteDebugInfo;
+
+pub(crate) struct UnwindContext {
+ endian: RunTimeEndian,
+ frame_table: FrameTable,
+ cie_id: Option<CieId>,
+}
+
+impl UnwindContext {
+ pub(crate) fn new(isa: &dyn TargetIsa, pic_eh_frame: bool) -> Self {
+ let endian = match isa.endianness() {
+ Endianness::Little => RunTimeEndian::Little,
+ Endianness::Big => RunTimeEndian::Big,
+ };
+ let mut frame_table = FrameTable::default();
+
+ let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
+ if pic_eh_frame {
+ cie.fde_address_encoding =
+ gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
+ }
+ Some(frame_table.add_cie(cie))
+ } else {
+ None
+ };
+
+ UnwindContext { endian, frame_table, cie_id }
+ }
+
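+ /// Record the unwind info for a single function in the frame table, if it has any. Only
+ /// SystemV unwind info is handled at the moment.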
+ pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
+ let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
+ unwind_info
+ } else {
+ return;
+ };
+
+ match unwind_info {
+ UnwindInfo::SystemV(unwind_info) => {
+ self.frame_table.add_fde(
+ self.cie_id.unwrap(),
+ unwind_info
+ .to_fde(Address::Symbol { symbol: func_id.as_u32() as usize, addend: 0 }),
+ );
+ }
+ UnwindInfo::WindowsX64(_) => {
+ // FIXME implement this
+ }
+ unwind_info => unimplemented!("{:?}", unwind_info),
+ }
+ }
+
+ pub(crate) fn emit(self, product: &mut ObjectProduct) {
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if !eh_frame.0.writer.slice().is_empty() {
+ let id = eh_frame.id();
+ let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
+ let mut section_map = FxHashMap::default();
+ section_map.insert(id, section_id);
+
+ for reloc in &eh_frame.0.relocs {
+ product.add_debug_reloc(&section_map, &section_id, reloc);
+ }
+ }
+ }
+
+ #[cfg(all(feature = "jit", windows))]
+ pub(crate) unsafe fn register_jit(self, _jit_module: &cranelift_jit::JITModule) {}
+
+ #[cfg(all(feature = "jit", not(windows)))]
+ pub(crate) unsafe fn register_jit(self, jit_module: &cranelift_jit::JITModule) {
+ use std::mem::ManuallyDrop;
+
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if eh_frame.0.writer.slice().is_empty() {
+ return;
+ }
+
+ let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
+
+ // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+ eh_frame.extend(&[0, 0, 0, 0]);
+
+ // FIXME support unregistering unwind tables once cranelift-jit supports deallocating
+ // individual functions
+ let eh_frame = ManuallyDrop::new(eh_frame);
+
+ // =======================================================================
+ // Everything after this line up to the end of the file is loosely based on
+ // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
+ #[cfg(target_os = "macos")]
+ {
+ // On macOS, `__register_frame` takes a pointer to a single FDE
+ let start = eh_frame.as_ptr();
+ let end = start.add(eh_frame.len());
+ let mut current = start;
+
+ // Walk all of the entries in the frame table and register them
+ while current < end {
+ let len = std::ptr::read::<u32>(current as *const u32) as usize;
+
+ // Skip over the CIE
+ if current != start {
+ __register_frame(current);
+ }
+
+ // Move to the next table entry (+4 because the length itself is not inclusive)
+ current = current.add(len + 4);
+ }
+ }
+ #[cfg(not(target_os = "macos"))]
+ {
+ // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
+ __register_frame(eh_frame.as_ptr());
+ }
+ }
+}
+
+extern "C" {
+ // libunwind import
+ fn __register_frame(fde: *const u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
new file mode 100644
index 000000000..f619bb5ed
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -0,0 +1,176 @@
+//! Handling of enum discriminants
+//!
+//! Adapted from <https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs>
+
+use rustc_target::abi::{Int, TagEncoding, Variants};
+
+use crate::prelude::*;
+
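+/// Write the discriminant for `variant_index` to the given place, taking niche encoding into
+/// account.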
+pub(crate) fn codegen_set_discriminant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ variant_index: VariantIdx,
+) {
+ let layout = place.layout();
+ if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+ return;
+ }
+ match layout.variants {
+ Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ Variants::Multiple {
+ tag: _,
+ tag_field,
+ tag_encoding: TagEncoding::Direct,
+ variants: _,
+ } => {
+ let ptr = place.place_field(fx, mir::Field::new(tag_field));
+ let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
+ let to = if ptr.layout().abi.is_signed() {
+ ty::ScalarInt::try_from_int(
+ ptr.layout().size.sign_extend(to) as i128,
+ ptr.layout().size,
+ )
+ .unwrap()
+ } else {
+ ty::ScalarInt::try_from_uint(to, ptr.layout().size).unwrap()
+ };
+ let discr = CValue::const_val(fx, ptr.layout(), to);
+ ptr.write_cvalue(fx, discr);
+ }
+ Variants::Multiple {
+ tag: _,
+ tag_field,
+ tag_encoding: TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ variants: _,
+ } => {
+ if variant_index != dataful_variant {
+ let niche = place.place_field(fx, mir::Field::new(tag_field));
+ let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+ let niche_value = ty::ScalarInt::try_from_uint(
+ u128::from(niche_value).wrapping_add(niche_start),
+ niche.layout().size,
+ )
+ .unwrap();
+ let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
+ niche.write_cvalue(fx, niche_llval);
+ }
+ }
+ }
+}
+
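+/// Read the discriminant of the given value, decoding niche encodings as necessary.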
+pub(crate) fn codegen_get_discriminant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ value: CValue<'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let layout = value.layout();
+
+ if layout.abi == Abi::Uninhabited {
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
+ // Return a dummy value
+ return CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout);
+ }
+
+ let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
+ Variants::Single { index } => {
+ let discr_val = layout
+ .ty
+ .discriminant_for_variant(fx.tcx, *index)
+ .map_or(u128::from(index.as_u32()), |discr| discr.val);
+ let discr_val = if dest_layout.abi.is_signed() {
+ ty::ScalarInt::try_from_int(
+ dest_layout.size.sign_extend(discr_val) as i128,
+ dest_layout.size,
+ )
+ .unwrap()
+ } else {
+ ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
+ };
+ return CValue::const_val(fx, dest_layout, discr_val);
+ }
+ Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
+ (tag, *tag_field, tag_encoding)
+ }
+ };
+
+ let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let tag = value.value_field(fx, mir::Field::new(tag_field));
+ let tag = tag.load_scalar(fx);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ match *tag_encoding {
+ TagEncoding::Direct => {
+ let signed = match tag_scalar.primitive() {
+ Int(_, signed) => signed,
+ _ => false,
+ };
+ let val = clif_intcast(fx, tag, cast_to, signed);
+ CValue::by_val(val, dest_layout)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
+ let relative_discr = if niche_start == 0 {
+ tag
+ } else {
+ let niche_start = match fx.bcx.func.dfg.value_type(tag) {
+ types::I128 => {
+ let lsb = fx.bcx.ins().iconst(types::I64, niche_start as u64 as i64);
+ let msb =
+ fx.bcx.ins().iconst(types::I64, (niche_start >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ ty => fx.bcx.ins().iconst(ty, niche_start as i64),
+ };
+ fx.bcx.ins().isub(tag, niche_start)
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = {
+ codegen_icmp_imm(
+ fx,
+ IntCC::UnsignedLessThanOrEqual,
+ relative_discr,
+ i128::from(relative_max),
+ )
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ fx.bcx.ins().iconst(cast_to, 0)
+ } else {
+ clif_intcast(fx, relative_discr, cast_to, false)
+ };
+ fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
+ };
+
+ let dataful_variant = fx.bcx.ins().iconst(cast_to, i64::from(dataful_variant.as_u32()));
+ let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
+ CValue::by_val(discr, dest_layout)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
new file mode 100644
index 000000000..3cd1ef563
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -0,0 +1,436 @@
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::back::metadata::create_compressed_metadata_file;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+use rustc_session::Session;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_object::{ObjectBuilder, ObjectModule};
+
+use crate::{prelude::*, BackendConfig};
+
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+fn make_module(sess: &Session, isa: Box<dyn TargetIsa>, name: String) -> ObjectModule {
+ let mut builder =
+ ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
+ // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm, binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.unstable_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
+
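+/// Finish the given module, emit debuginfo and unwind info into the resulting object and write it
+/// to an object file, optionally recording a work product for the incremental cache.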
+fn emit_module(
+ tcx: TyCtxt<'_>,
+ backend_config: &BackendConfig,
+ name: String,
+ kind: ModuleKind,
+ module: ObjectModule,
+ debug: Option<DebugContext<'_>>,
+ unwind_context: UnwindContext,
+) -> ModuleCodegenResult {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
+ let tmp_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(&name));
+ let obj = product.object.write().unwrap();
+
+ tcx.sess.prof.artifact_size("object_file", name.clone(), obj.len().try_into().unwrap());
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing object file: {}", err));
+ }
+
+ let work_product = if backend_config.disable_incr_cache {
+ None
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ tcx.sess,
+ &name,
+ &[("o", &tmp_file)],
+ )
+ };
+
+ ModuleCodegenResult(
+ CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
+ work_product,
+ )
+}
+
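+/// Copy the object file produced for this CGU in a previous compilation session instead of
+/// recompiling it.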
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+ work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+ let work_product = cgu.previous_work_product(tcx);
+ let obj_out = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
+ let source_file = rustc_incremental::in_incr_comp_dir_sess(
+ &tcx.sess,
+ &work_product.saved_files.get("o").expect("no saved object file in work product"),
+ );
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+ tcx.sess.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ obj_out.display(),
+ err
+ ));
+ }
+
+ work_products.insert(cgu.work_product_id(), work_product);
+
+ CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out),
+ dwarf_object: None,
+ bytecode: None,
+ }
+}
+
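+/// Codegen all mono items of a single codegen unit and write the resulting object file.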
+fn module_codegen(
+ tcx: TyCtxt<'_>,
+ (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
+) -> ModuleCodegenResult {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let isa = crate::build_isa(tcx.sess, &backend_config);
+ let mut module = make_module(tcx.sess, isa, cgu_name.as_str().to_string());
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ module.isa(),
+ tcx.sess.opts.debuginfo != DebugInfo::None,
+ cgu_name,
+ );
+ super::predefine_mono_items(tcx, &mut module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => {
+ cx.tcx
+ .sess
+ .time("codegen fn", || crate::base::codegen_fn(&mut cx, &mut module, inst));
+ }
+ MonoItem::Static(def_id) => crate::constant::codegen_static(tcx, &mut module, def_id),
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
+ if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
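+ // The external assembler used by codegen_global_asm defaults to AT&T syntax, so Intel-syntax
+ // blocks are wrapped in `.intel_syntax noprefix` and the syntax is reset to AT&T afterwards.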
+ if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ cx.global_asm.push_str("\n.intel_syntax noprefix\n");
+ } else {
+ cx.global_asm.push_str("\n.att_syntax\n");
+ }
+ for piece in asm.template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => cx.global_asm.push_str(s),
+ InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
+ }
+ }
+ cx.global_asm.push_str("\n.att_syntax\n\n");
+ } else {
+ bug!("Expected GlobalAsm found {:?}", item);
+ }
+ }
+ }
+ }
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut module,
+ &mut cx.unwind_context,
+ false,
+ cgu.is_primary(),
+ );
+
+ let debug_context = cx.debug_context;
+ let unwind_context = cx.unwind_context;
+ let codegen_result = tcx.sess.time("write object file", || {
+ emit_module(
+ tcx,
+ &backend_config,
+ cgu.name().as_str().to_string(),
+ ModuleKind::Regular,
+ module,
+ debug_context,
+ unwind_context,
+ )
+ });
+
+ codegen_global_asm(tcx, cgu.name().as_str(), &cx.global_asm);
+
+ codegen_result
+}
+
+pub(crate) fn run_aot(
+ tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+ let mut work_products = FxHashMap::default();
+
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(()).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
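+ // Reuse the object file from the incremental cache when possible (PreLto); otherwise fall
+ // through and re-codegen the CGU below.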
+ match cgu_reuse {
+ _ if backend_config.disable_incr_cache => {}
+ CguReuse::No => {}
+ CguReuse::PreLto => {
+ return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ }
+ CguReuse::PostLto => unreachable!(),
+ }
+
+ let dep_node = cgu.codegen_dep_node(tcx);
+ let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ (backend_config.clone(), cgu.name()),
+ module_codegen,
+ Some(rustc_middle::dep_graph::hash_result),
+ );
+
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+
+ module
+ })
+ .collect::<Vec<_>>()
+ });
+
+ tcx.sess.abort_if_errors();
+
+ let isa = crate::build_isa(tcx.sess, &backend_config);
+ let mut allocator_module = make_module(tcx.sess, isa, "allocator_shim".to_string());
+ assert_eq!(pointer_ty(tcx), allocator_module.target_config().pointer_type());
+ let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let ModuleCodegenResult(module, work_product) = emit_module(
+ tcx,
+ &backend_config,
+ "allocator_shim".to_string(),
+ ModuleKind::Allocator,
+ allocator_module,
+ None,
+ allocator_unwind_context,
+ );
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+ Some(module)
+ } else {
+ None
+ };
+
+ let metadata_module = if need_metadata_module {
+ let _timer = tcx.prof.generic_activity("codegen crate metadata");
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let symbol_name = rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx);
+ let obj = create_compressed_metadata_file(tcx.sess, &metadata, &symbol_name);
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ // FIXME handle `-Ctarget-cpu=native`
+ let target_cpu = match tcx.sess.opts.cg.target_cpu {
+ Some(ref name) => name,
+ None => tcx.sess.target.cpu.as_ref(),
+ }
+ .to_owned();
+
+ Box::new((
+ CodegenResults {
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ crate_info: CrateInfo::new(tcx, target_cpu),
+ },
+ work_products,
+ ))
+}
+
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+ use std::io::Write;
+ use std::process::{Command, Stdio};
+
+ if global_asm.is_empty() {
+ return;
+ }
+
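+ // Cranelift has no integrated assembler, so the collected global_asm is piped through the
+ // system assembler and merged into the CGU's object file with `ld -r` further down.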
+ if cfg!(not(feature = "inline_asm"))
+ || tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ {
+ if global_asm.contains("__rust_probestack") {
+ return;
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ tcx.sess.fatal(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+ );
+ } else {
+ tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ }
+ }
+
+ let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+ let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
+
+ // Remove all LLVM-style comments
+ let global_asm = global_asm
+ .lines()
+ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ let output_object_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
+ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
+ tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
+ }
+
+ // Link the global asm and main object file together
+ let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+ std::fs::rename(&output_object_file, &main_object_file).unwrap();
+ let status = Command::new(linker)
+ .arg("-r") // Create a new object file
+ .arg("-o")
+ .arg(output_object_file)
+ .arg(&main_object_file)
+ .arg(&global_asm_object_file)
+ .status()
+ .unwrap();
+ if !status.success() {
+ tcx.sess.fatal(&format!(
+ "Failed to link `{}` and `{}` together",
+ main_object_file.display(),
+ global_asm_object_file.display(),
+ ));
+ }
+
+ std::fs::remove_file(global_asm_object_file).unwrap();
+ std::fs::remove_file(main_object_file).unwrap();
+}
+
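+// Appends `postfix` to the file stem while keeping the extension, e.g. `foo.o` with postfix
+// `.asm` becomes `foo.asm.o`.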
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+ // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
new file mode 100644
index 000000000..a56a91000
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -0,0 +1,385 @@
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+use std::sync::{mpsc, Mutex};
+
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
+// FIXME use std::sync::OnceLock once it stabilizes
+use once_cell::sync::OnceCell;
+
+use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
+struct JitState {
+ backend_config: BackendConfig,
+ jit_module: JITModule,
+}
+
+thread_local! {
+ static LAZY_JIT_STATE: RefCell<Option<JitState>> = const { RefCell::new(None) };
+}
+
+/// The Sender owned by the rustc thread
+static GLOBAL_MESSAGE_SENDER: OnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> = OnceCell::new();
+
+/// A message that is sent from the jitted runtime to the rustc thread.
+/// Senders are responsible for upholding `Send` semantics.
+enum UnsafeMessage {
+ /// Request that the specified `Instance` be lazily jitted.
+ ///
+ /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
+ /// this message is sent.
+ JitFn {
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+ tx: mpsc::Sender<*const u8>,
+ },
+}
+unsafe impl Send for UnsafeMessage {}
+
+impl UnsafeMessage {
+ /// Send the message.
+ fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
+ thread_local! {
+ /// The Sender owned by the local thread
+ static LOCAL_MESSAGE_SENDER: mpsc::Sender<UnsafeMessage> =
+ GLOBAL_MESSAGE_SENDER
+ .get().unwrap()
+ .lock().unwrap()
+ .clone();
+ }
+ LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
+ }
+}
+
+fn create_jit_module<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ backend_config: &BackendConfig,
+ hotswap: bool,
+) -> (JITModule, CodegenCx<'tcx>) {
+ let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
+ let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);
+
+ let isa = crate::build_isa(tcx.sess, backend_config);
+ let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
+ jit_builder.hotswap(hotswap);
+ crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
+ jit_builder.symbols(imported_symbols);
+ jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
+ let mut jit_module = JITModule::new(jit_builder);
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+
+ crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut jit_module,
+ &mut cx.unwind_context,
+ true,
+ true,
+ );
+
+ (jit_module, cx)
+}
+
+pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
+ }
+
+ if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ tcx.sess.fatal("can't jit non-executable crate");
+ }
+
+ let (mut jit_module, mut cx) = create_jit_module(
+ tcx,
+ &backend_config,
+ matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
+ );
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
+ let mono_items = cgus
+ .iter()
+ .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+ .flatten()
+ .collect::<FxHashMap<_, (_, _)>>()
+ .into_iter()
+ .collect::<Vec<(_, (_, _))>>();
+
+ super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => match backend_config.codegen_mode {
+ CodegenMode::Aot => unreachable!(),
+ CodegenMode::Jit => {
+ cx.tcx.sess.time("codegen fn", || {
+ crate::base::codegen_fn(&mut cx, &mut jit_module, inst)
+ });
+ }
+ CodegenMode::JitLazy => codegen_shim(&mut cx, &mut jit_module, inst),
+ },
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut jit_module, def_id);
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+ }
+ }
+ }
+ });
+
+ if !cx.global_asm.is_empty() {
+ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+
+ println!(
+ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+ );
+
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(backend_config.jit_args.iter().map(|arg| &**arg))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+
+ let start_sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+ call_conv: jit_module.target_config().default_call_conv,
+ };
+ let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
+ let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ assert!(lazy_jit_state.is_none());
+ *lazy_jit_state = Some(JitState { backend_config, jit_module });
+ });
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_start) };
+
+ let (tx, rx) = mpsc::channel();
+ GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
+
+ // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
+ // (e.g. to lazily JIT further functions as required)
+ std::thread::spawn(move || {
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
+ let ret = f(args.len() as c_int, argv.as_ptr());
+ std::process::exit(ret);
+ });
+
+ // Handle messages
+ loop {
+ match rx.recv().unwrap() {
+ // lazy JIT compilation request - compile requested instance and return pointer to result
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
+ tx.send(jit_fn(instance_ptr, trampoline_ptr))
+ .expect("jitted runtime hung up before response to lazy JIT request was sent");
+ }
+ }
+ }
+}
+
+extern "C" fn clif_jit_fn(
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+) -> *const u8 {
+ // send the JIT request to the rustc thread, with a channel for the response
+ let (tx, rx) = mpsc::channel();
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
+ .send()
+ .expect("rustc thread hung up before lazy JIT request was sent");
+
+ // block on JIT compilation result
+ rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
+}
+
+fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
+ rustc_middle::ty::tls::with(|tcx| {
+ // lift is used to ensure the correct lifetime for instance.
+ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
+ let jit_module = &mut lazy_jit_state.jit_module;
+ let backend_config = lazy_jit_state.backend_config.clone();
+
+ let name = tcx.symbol_name(instance).name;
+ let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
+ let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let current_ptr = jit_module.read_got_entry(func_id);
+
+ // If the function's GOT entry has already been updated to point at something other
+ // than the shim trampoline, don't re-jit but just return the new pointer instead.
+ // This does not need synchronization as this code is only executed by the rustc
+ // thread.
+ if current_ptr != trampoline_ptr {
+ return current_ptr;
+ }
+
+ jit_module.prepare_for_function_redefine(func_id).unwrap();
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config,
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+ tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, jit_module, instance));
+
+ assert!(cx.global_asm.is_empty());
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+ jit_module.get_finalized_function(func_id)
+ })
+ })
+}
+
+fn load_imported_symbols_for_jit(
+ sess: &Session,
+ crate_info: CrateInfo,
+) -> Vec<(String, *const u8)> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let data = &crate_info
+ .dependency_formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &cnum in &crate_info.used_crates {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = crate_info.crate_name[&cnum];
+ let mut err = sess.struct_err(&format!("Can't load static lib {}", name));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
+ let mut imported_symbols = Vec::new();
+ for path in dylib_paths {
+ use object::{Object, ObjectSymbol};
+ let lib = libloading::Library::new(&path).unwrap();
+ let obj = std::fs::read(path).unwrap();
+ let obj = object::File::parse(&*obj).unwrap();
+ imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
+ let name = symbol.name().unwrap().to_string();
+ if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+ return None;
+ }
+ if name.starts_with("rust_metadata_") {
+ // The metadata is part of a section that is not loaded by the dynamic linker in
+ // the case of cg_llvm.
+ return None;
+ }
+ let dlsym_name = if cfg!(target_os = "macos") {
+ // On macOS `dlsym` expects the name without leading `_`.
+ assert!(name.starts_with('_'), "{:?}", name);
+ &name[1..]
+ } else {
+ &name
+ };
+ let symbol: libloading::Symbol<'_, *const u8> =
+ unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+ Some((name, *symbol))
+ }));
+ std::mem::forget(lib)
+ }
+
+ sess.abort_if_errors();
+
+ imported_symbols
+}
+
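+// Codegen a trampoline for lazy JIT: when called it asks the rustc thread (via `__clif_jit_fn`)
+// to JIT the real function, then forwards its own arguments to the returned function pointer and
+// returns the result.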
+fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: Instance<'tcx>) {
+ let tcx = cx.tcx;
+
+ let pointer_type = module.target_config().pointer_type();
+
+ let name = tcx.symbol_name(inst).name;
+ let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
+ let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let instance_ptr = Box::into_raw(Box::new(inst));
+
+ let jit_fn = module
+ .declare_function(
+ "__clif_jit_fn",
+ Linkage::Import,
+ &Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
+ returns: vec![AbiParam::new(pointer_type)],
+ },
+ )
+ .unwrap();
+
+ cx.cached_context.clear();
+ let trampoline = &mut cx.cached_context.func;
+ trampoline.signature = sig.clone();
+
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
+
+ let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
+ let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
+ let sig_ref = trampoline_builder.func.import_signature(sig);
+
+ let entry_block = trampoline_builder.create_block();
+ trampoline_builder.append_block_params_for_function_params(entry_block);
+ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
+ trampoline_builder.switch_to_block(entry_block);
+ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+ let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
+ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
+ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
+ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+ trampoline_builder.ins().return_(&ret_vals);
+
+ module.define_function(func_id, &mut cx.cached_context).unwrap();
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
new file mode 100644
index 000000000..8f5714ecb
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -0,0 +1,53 @@
+//! Drivers are responsible for calling [`codegen_fn`] or [`codegen_static`] for each mono item and
+//! performing any further actions like JIT executing or writing object files.
+//!
+//! [`codegen_fn`]: crate::base::codegen_fn
+//! [`codegen_static`]: crate::constant::codegen_static
+
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) mod aot;
+#[cfg(feature = "jit")]
+pub(crate) mod jit;
+
+fn predefine_mono_items<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+) {
+ tcx.sess.time("predefine functions", || {
+ let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
+ for &(mono_item, (linkage, visibility)) in mono_items {
+ match mono_item {
+ MonoItem::Fn(instance) => {
+ let name = tcx.symbol_name(instance).name;
+ let _inst_guard = crate::PrintOnPanic(|| format!("{:?} {}", instance, name));
+ let sig = get_function_sig(tcx, module.isa().triple(), instance);
+ let linkage = crate::linkage::get_clif_linkage(
+ mono_item,
+ linkage,
+ visibility,
+ is_compiler_builtins,
+ );
+ module.declare_function(name, linkage, &sig).unwrap();
+ }
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+ }
+ }
+ });
+}
+
+fn time<R>(tcx: TyCtxt<'_>, display: bool, name: &'static str, f: impl FnOnce() -> R) -> R {
+ if display {
+ println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
+ let before = std::time::Instant::now();
+ let res = tcx.sess.time(name, f);
+ let after = std::time::Instant::now();
+ println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
+ res
+ } else {
+ tcx.sess.time(name, f)
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
new file mode 100644
index 000000000..241de5e36
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -0,0 +1,677 @@
+//! Codegen of `asm!` invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_span::sym;
+use rustc_target::asm::*;
+
+pub(crate) fn codegen_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ _span: Span,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+) {
+ // FIXME add .eh_frame unwind info directives
+
+ if !template.is_empty() {
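+ // Special-case a few known asm! templates: `int 0x29` is lowered to a trap, the
+ // `movq %rbx`/`cpuid`/`xchgq %rbx` sequence used by `__cpuid_count` is lowered to the cpuid
+ // emulation in crate::intrinsics, and stack probe / `__alloca` shims trap as unimplemented.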
+ if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::User(1));
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
+ && matches!(
+ template[6],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ {
+ assert_eq!(operands.len(), 4);
+ let (leaf, eax_place) = match operands[1] {
+ InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
+ );
+ (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place.unwrap()),
+ )
+ }
+ _ => unreachable!(),
+ };
+ let ebx_place = match operands[0] {
+ InlineAsmOperand::Out { reg, late: true, place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::reg
+ ))
+ );
+ crate::base::codegen_place(fx, place.unwrap())
+ }
+ _ => unreachable!(),
+ };
+ let (sub_leaf, ecx_place) = match operands[2] {
+ InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
+ );
+ (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place.unwrap()),
+ )
+ }
+ _ => unreachable!(),
+ };
+ let edx_place = match operands[3] {
+ InlineAsmOperand::Out { reg, late: true, place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
+ );
+ crate::base::codegen_place(fx, place.unwrap())
+ }
+ _ => unreachable!(),
+ };
+
+ let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
+
+ eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ return;
+ } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
+ // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+ crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
+ crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ }
+ }
+
+ let mut inputs = Vec::new();
+ let mut outputs = Vec::new();
+
+ let mut asm_gen = InlineAssemblyGenerator {
+ tcx: fx.tcx,
+ arch: fx.tcx.sess.asm_arch.unwrap(),
+ enclosing_def_id: fx.instance.def_id(),
+ template,
+ operands,
+ options,
+ registers: Vec::new(),
+ stack_slots_clobber: Vec::new(),
+ stack_slots_input: Vec::new(),
+ stack_slots_output: Vec::new(),
+ stack_slot_size: Size::from_bytes(0),
+ };
+ asm_gen.allocate_registers();
+ asm_gen.allocate_stack_slots();
+
+ let inline_asm_index = fx.cx.inline_asm_index.get();
+ fx.cx.inline_asm_index.set(inline_asm_index + 1);
+ let asm_name = format!(
+ "__inline_asm_{}_n{}",
+ fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
+ inline_asm_index
+ );
+
+ let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
+ fx.cx.global_asm.push_str(&generated_asm);
+
+ for (i, operand) in operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: _, ref value } => {
+ inputs.push((
+ asm_gen.stack_slots_input[i].unwrap(),
+ crate::base::codegen_operand(fx, value).load_scalar(fx),
+ ));
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place } => {
+ if let Some(place) = place {
+ outputs.push((
+ asm_gen.stack_slots_output[i].unwrap(),
+ crate::base::codegen_place(fx, place),
+ ));
+ }
+ }
+ InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+ inputs.push((
+ asm_gen.stack_slots_input[i].unwrap(),
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ ));
+ if let Some(out_place) = out_place {
+ outputs.push((
+ asm_gen.stack_slots_output[i].unwrap(),
+ crate::base::codegen_place(fx, out_place),
+ ));
+ }
+ }
+ InlineAsmOperand::Const { value: _ } => todo!(),
+ InlineAsmOperand::SymFn { value: _ } => todo!(),
+ InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
+ }
+ }
+
+ call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
+}
+
+struct InlineAssemblyGenerator<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ arch: InlineAsmArch,
+ enclosing_def_id: DefId,
+ template: &'a [InlineAsmTemplatePiece],
+ operands: &'a [InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+ registers: Vec<Option<InlineAsmReg>>,
+ stack_slots_clobber: Vec<Option<Size>>,
+ stack_slots_input: Vec<Option<Size>>,
+ stack_slots_output: Vec<Option<Size>>,
+ stack_slot_size: Size,
+}
+
+impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
+ fn allocate_registers(&mut self) {
+ let sess = self.tcx.sess;
+ let map = allocatable_registers(
+ self.arch,
+ sess.relocation_model(),
+ self.tcx.asm_target_features(self.enclosing_def_id),
+ &sess.target,
+ );
+ let mut allocated = FxHashMap::<_, (bool, bool)>::default();
+ let mut regs = vec![None; self.operands.len()];
+
+ // Add explicit registers to the allocated set.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
+ } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
+ | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate out/inout/inlateout registers first because they are more constrained.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: false,
+ ..
+ }
+ | InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::RegClass(class), ..
+ } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.contains_key(&r) {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate in/lateout.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().0 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: true,
+ ..
+ } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().1 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ _ => (),
+ }
+ }
+
+ self.registers = regs;
+ }
+
+ fn allocate_stack_slots(&mut self) {
+ let mut slot_size = Size::from_bytes(0);
+ let mut slots_clobber = vec![None; self.operands.len()];
+ let mut slots_input = vec![None; self.operands.len()];
+ let mut slots_output = vec![None; self.operands.len()];
+
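+ // Reserves space inside the single scratch slot for a value of the given register class,
+ // sized and aligned for the largest type the class can hold, and returns its offset.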
+ let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
+ let reg_size =
+ reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
+ let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+ let offset = slot_size.align_to(align);
+ *slot_size = offset + reg_size;
+ offset
+ };
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for saving clobbered registers
+ let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
+ .unwrap()
+ .clobbered_regs();
+ for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
+ let mut need_save = true;
+ // If the register overlaps with a register clobbered by a function call, then
+ // we don't need to save it.
+ for r in abi_clobber {
+ r.overlapping_regs(|r| {
+ if r == reg {
+ need_save = false;
+ }
+ });
+
+ if !need_save {
+ break;
+ }
+ }
+
+ if need_save {
+ slots_clobber[i] = Some(new_slot(reg.reg_class()));
+ }
+ }
+
+ // Allocate stack slots for inout
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
+ let slot = new_slot(reg.reg_class());
+ slots_input[i] = Some(slot);
+ slots_output[i] = Some(slot);
+ }
+ _ => (),
+ }
+ }
+
+ let slot_size_before_input = slot_size;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for input
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg, .. }
+ | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
+ slots_input[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ // Reset slot size to before input so that input and output operands can overlap
+ // and save some memory.
+ let slot_size_after_input = slot_size;
+ slot_size = slot_size_before_input;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for output
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::Out { reg, place: Some(_), .. } => {
+ slots_output[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ slot_size = slot_size.max(slot_size_after_input);
+
+ self.stack_slots_clobber = slots_clobber;
+ self.stack_slots_input = slots_input;
+ self.stack_slots_output = slots_output;
+ self.stack_slot_size = slot_size;
+ }
+
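+ // The generated wrapper takes a single pointer to the scratch slot allocated by
+ // call_inline_asm: clobbered registers are spilled into the slot, inputs are loaded from it,
+ // the user template runs, and outputs are written back before the clobbers are restored.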
+ fn generate_asm_wrapper(&self, asm_name: &str) -> String {
+ let mut generated_asm = String::new();
+ writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+ writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
+ writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+ writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+ let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+
+ if is_x86 {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+ Self::prologue(&mut generated_asm, self.arch);
+
+ // Save clobbered registers
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+ }
+
+ // Write input registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_input.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".att_syntax\n");
+ }
+
+ // The actual inline asm
+ for piece in self.template {
+ match piece {
+ InlineAsmTemplatePiece::String(s) => {
+ generated_asm.push_str(s);
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push('%');
+ }
+ self.registers[*operand_idx]
+ .unwrap()
+ .emit(&mut generated_asm, self.arch, *modifier)
+ .unwrap();
+ }
+ }
+ }
+ generated_asm.push('\n');
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ // Read output registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_output.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ // Restore clobbered registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ Self::epilogue(&mut generated_asm, self.arch);
+ } else {
+ Self::epilogue_noreturn(&mut generated_asm, self.arch);
+ }
+
+ if is_x86 {
+ generated_asm.push_str(".att_syntax\n");
+ }
+ writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+ generated_asm.push_str(".text\n");
+ generated_asm.push_str("\n\n");
+
+ generated_asm
+ }
+
+ fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" push ebp\n");
+ generated_asm.push_str(" mov ebp,[esp+8]\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" push rbp\n");
+ generated_asm.push_str(" mov rbp,rdi\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" addi sp, sp, -8\n");
+ generated_asm.push_str(" sw ra, 4(sp)\n");
+ generated_asm.push_str(" sw s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" addi sp, sp, -16\n");
+ generated_asm.push_str(" sd ra, 8(sp)\n");
+ generated_asm.push_str(" sd s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ _ => unimplemented!("prologue for {:?}", arch),
+ }
+ }
+
+ fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" pop ebp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" pop rbp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw s0, 0(sp)\n");
+ generated_asm.push_str(" lw ra, 4(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 8\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld s0, 0(sp)\n");
+ generated_asm.push_str(" ld ra, 8(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 16\n");
+ generated_asm.push_str(" ret\n");
+ }
+ _ => unimplemented!("epilogue for {:?}", arch),
+ }
+ }
+
+ fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" ud2\n");
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ebreak\n");
+ }
+ _ => unimplemented!("epilogue_noreturn for {:?}", arch),
+ }
+ }
+
+ fn save_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ write!(generated_asm, " mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::X86_64 => {
+ write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" sw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" sd ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("save_register for {:?}", arch),
+ }
+ }
+
+ fn restore_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("restore_register for {:?}", arch),
+ }
+ }
+}
+
+fn call_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ asm_name: &str,
+ slot_size: Size,
+ inputs: Vec<(Size, Value)>,
+ outputs: Vec<(Size, CPlace<'tcx>)>,
+) {
+ let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ size: u32::try_from(slot_size.bytes()).unwrap(),
+ });
+ if fx.clif_comments.enabled() {
+ fx.add_comment(stack_slot, "inline asm scratch slot");
+ }
+
+ let inline_asm_func = fx
+ .module
+ .declare_function(
+ asm_name,
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::SystemV,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![],
+ },
+ )
+ .unwrap();
+ let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(inline_asm_func, asm_name);
+ }
+
+ for (offset, value) in inputs {
+ fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ }
+
+ let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+ fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+ for (offset, place) in outputs {
+ let ty = fx.clif_type(place.layout().ty).unwrap();
+ let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
new file mode 100644
index 000000000..d02dfd93c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -0,0 +1,74 @@
+//! Emulation of a subset of the cpuid x86 instruction.
+
+use crate::prelude::*;
+
+/// Emulates a subset of the cpuid x86 instruction.
+///
+/// This emulates an Intel CPU with SSE and SSE2 support, but nothing else.
+pub(crate) fn codegen_cpuid_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ leaf: Value,
+ _sub_leaf: Value,
+) -> (Value, Value, Value, Value) {
+ let leaf_0 = fx.bcx.create_block();
+ let leaf_1 = fx.bcx.create_block();
+ let leaf_7 = fx.bcx.create_block();
+ let leaf_8000_0000 = fx.bcx.create_block();
+ let leaf_8000_0001 = fx.bcx.create_block();
+ let unsupported_leaf = fx.bcx.create_block();
+
+ let dest = fx.bcx.create_block();
+ let eax = fx.bcx.append_block_param(dest, types::I32);
+ let ebx = fx.bcx.append_block_param(dest, types::I32);
+ let ecx = fx.bcx.append_block_param(dest, types::I32);
+ let edx = fx.bcx.append_block_param(dest, types::I32);
+
+ let mut switch = cranelift_frontend::Switch::new();
+ switch.set_entry(0, leaf_0);
+ switch.set_entry(1, leaf_1);
+ switch.set_entry(7, leaf_7);
+ switch.set_entry(0x8000_0000, leaf_8000_0000);
+ switch.set_entry(0x8000_0001, leaf_8000_0001);
+ switch.emit(&mut fx.bcx, leaf, unsupported_leaf);
+
+ fx.bcx.switch_to_block(leaf_0);
+ let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
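+ // The vendor string "GenuineIntel" is returned split across EBX, EDX and ECX, which is why
+ // the vend* constants below are numbered out of order.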
+ let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
+ let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
+ let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
+ fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+
+ fx.bcx.switch_to_block(leaf_1);
+ let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
+ let additional_information = fx.bcx.ins().iconst(types::I32, 0);
+ let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
+ let edx_features = fx.bcx.ins().iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
+ fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
+
+ fx.bcx.switch_to_block(leaf_7);
+ // This leaf technically has subleaves, but we just return zero for all subleaves.
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ fx.bcx.ins().jump(dest, &[zero, zero, zero, zero]);
+
+ fx.bcx.switch_to_block(leaf_8000_0000);
+ let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+
+ fx.bcx.switch_to_block(leaf_8000_0001);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
+ let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
+ fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+
+ fx.bcx.switch_to_block(unsupported_leaf);
+ crate::trap::trap_unreachable(
+ fx,
+ "__cpuid_count arch intrinsic doesn't yet support specified leaf",
+ );
+
+ fx.bcx.switch_to_block(dest);
+ fx.bcx.ins().nop();
+
+ (eax, ebx, ecx, edx)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
new file mode 100644
index 000000000..869670c8c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -0,0 +1,192 @@
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: &str,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ match intrinsic {
+ // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
+ "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_ty = fx.clif_type(lane_ty).unwrap();
+ assert!(lane_count <= 32);
+
+ let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
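+ // Fold each lane's sign bit into the result, iterating from the highest lane down so that
+ // lane 0 ends up in bit 0 of the mask.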
+ for lane in (0..lane_count).rev() {
+ let a_lane =
+ a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+ // cast float to int
+ let a_lane = match lane_ty {
+ types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+ types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+ _ => a_lane,
+ };
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+ ret.write_cvalue(fx, res);
+ }
+ "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+ let (x, y, kind) = match args {
+ [x, y, kind] => (x, y, kind),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+ let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+ .expect("llvm.x86.sse2.cmp.* kind not const");
+
+ let flt_cc = match kind
+ .try_to_bits(Size::from_bytes(1))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+ {
+ 0 => FloatCC::Equal,
+ 1 => FloatCC::LessThan,
+ 2 => FloatCC::LessThanOrEqual,
+ 7 => FloatCC::Ordered,
+ 3 => FloatCC::Unordered,
+ 4 => FloatCC::NotEqual,
+ 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
+ 6 => FloatCC::UnorderedOrGreaterThan,
+ kind => unreachable!("kind {:?}", kind),
+ };
+
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match lane_ty.kind() {
+ ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
+ });
+ }
+ "llvm.x86.sse2.psrli.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.pslli.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.storeu.dq" => {
+ intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+ let mem_addr = mem_addr.load_scalar(fx);
+
+ // FIXME correctly handle the unaligned store
+ let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+ dest.write_cvalue(fx, a);
+ }
+ "llvm.x86.addcarry.64" => {
+ intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+ let c_in = c_in.load_scalar(fx);
+
+ llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+ }
+ "llvm.x86.subborrow.64" => {
+ intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+ let b_in = b_in.load_scalar(fx);
+
+ llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+ }
+ _ => {
+ fx.tcx
+ .sess
+ .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ crate::trap::trap_unimplemented(fx, intrinsic);
+ }
+ }
+
+ let dest = target.expect("all llvm intrinsics used by stdlib should return");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
+
+fn llvm_add_sub<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ ret: CPlace<'tcx>,
+ cb_in: Value,
+ a: CValue<'tcx>,
+ b: CValue<'tcx>,
+) {
+ assert_eq!(
+ a.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
+ );
+ assert_eq!(
+ b.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
+ );
+
+ // c + carry -> c + first intermediate carry or borrow respectively
+ let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
+ let c = int0.value_field(fx, mir::Field::new(0));
+ let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+
+ // c + carry -> c + second intermediate carry or borrow respectively
+ let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
+ let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let (c, cb1) = int1.load_scalar_pair(fx);
+
+ // carry0 | carry1 -> carry or borrow respectively
+ let cb_out = fx.bcx.ins().bor(cb0, cb1);
+
+ let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
new file mode 100644
index 000000000..b2a83e1d4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -0,0 +1,1292 @@
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
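+// Destructures the intrinsic's MIR operands into named `CValue`s via `codegen_operand`, bugging
+// out if the number of arguments does not match the pattern.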
+macro_rules! intrinsic_args {
+ ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+ #[allow(unused_parens)]
+ let ($($arg),*) = if let [$($arg),*] = $args {
+ ($(codegen_operand($fx, $arg)),*)
+ } else {
+ $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
+ };
+ }
+}
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
+fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
+ bug!("wrong number of args for intrinsic {}", intrinsic);
+}
+
+fn report_atomic_type_validation_error<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'tcx>,
+) {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ intrinsic, ty
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128-bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ acc: Option<Value>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ res_val = f(fx, lane_layout.ty, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+// FIXME move all uses to `simd_reduce`
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
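+/// Turns a boolean into either all-zero or all-one bits of the requested type. The mask
+/// is computed in the equivalent integer type and, for float types, bitcast back.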
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ val: Value,
+) -> Value {
+ let ty = fx.clif_type(ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
+ res
+}
+
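+/// Main entry point for intrinsic codegen. Non-returning intrinsics are handled inline;
+/// everything else is dispatched to the SIMD, float libcall or general handlers below.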
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let intrinsic = fx.tcx.item_name(instance.def_id());
+ let substs = instance.substs;
+
+ let target = if let Some(target) = target {
+ target
+ } else {
+ // Insert non-returning intrinsics here
+ match intrinsic {
+ sym::abort => {
+ fx.bcx.ins().trap(TrapCode::User(0));
+ }
+ sym::transmute => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
+ }
+ _ => unimplemented!("unsupported intrinsic {}", intrinsic),
+ }
+ return;
+ };
+
+ if intrinsic.as_str().starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(
+ fx,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ source_info.span,
+ );
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ codegen_regular_intrinsic_call(
+ fx,
+ instance,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ Some(target),
+ source_info,
+ );
+ }
+}
+
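+/// Lowers float intrinsics by calling the corresponding libm (or compiler-builtins)
+/// function. Returns `false` if the intrinsic is not a known float intrinsic.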
+fn codegen_float_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+) -> bool {
+ let (name, arg_count, ty) = match intrinsic {
+ sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+ sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+ sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+ sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+ sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+ sym::logf64 => ("log", 1, fx.tcx.types.f64),
+ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+ sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+ sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+ sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+ sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+ sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+ sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+ _ => return false,
+ };
+
+ if args.len() != arg_count {
+ bug!("wrong number of args for intrinsic {:?}", intrinsic);
+ }
+
+ let (a, b, c);
+ let args = match args {
+ [x] => {
+ a = [codegen_operand(fx, x)];
+ &a as &[_]
+ }
+ [x, y] => {
+ b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+ &b
+ }
+ [x, y, z] => {
+ c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+ &c
+ }
+ _ => unreachable!(),
+ };
+
+ let res = fx.easy_call(name, &args, ty);
+ ret.write_cvalue(fx, res);
+
+ true
+}
+
+fn codegen_regular_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ intrinsic: Symbol,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ destination: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+ match intrinsic {
+ sym::assume => {
+ intrinsic_args!(fx, args => (_a); intrinsic);
+ }
+ sym::likely | sym::unlikely => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ ret.write_cvalue(fx, a);
+ }
+ sym::breakpoint => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().debugtrap();
+ }
+ sym::copy | sym::copy_nonoverlapping => {
+ intrinsic_args!(fx, args => (src, dst, count); intrinsic);
+ let src = src.load_scalar(fx);
+ let dst = dst.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ if intrinsic == sym::copy_nonoverlapping {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
+ // NOTE: the volatile variants have src and dst swapped
+ intrinsic_args!(fx, args => (dst, src, count); intrinsic);
+ let dst = dst.load_scalar(fx);
+ let src = src.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::size_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+ sym::min_align_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::vtable_size => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let size = crate::vtable::size_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+
+ sym::vtable_align => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let align = crate::vtable::min_align_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::unchecked_div
+ | sym::exact_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
+
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
+ let signed = type_sign(lhs.layout().ty);
+
+ let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
+
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+ let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
+
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
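+ // On overflow, unsigned saturation clamps directly to the type bounds. For signed
+ // operations the clamp direction depends on the sign of `rhs`: adding a non-negative
+ // rhs can only overflow towards `max`, subtracting one only towards `min` (and the
+ // other way around for a negative rhs).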
+ let val = match (intrinsic, signed) {
+ (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+ (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+ (sym::saturating_add, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero =
+ fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ (sym::saturating_sub, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero =
+ fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
+ let res = CValue::by_val(val, lhs.layout());
+
+ ret.write_cvalue(fx, res);
+ }
+ sym::rotate_left => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+ sym::rotate_right => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+
+ // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+ // doesn't have UB, both are codegen'ed the same way.
+ sym::offset | sym::arith_offset => {
+ intrinsic_args!(fx, args => (base, offset); intrinsic);
+ let offset = offset.load_scalar(fx);
+
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ }
+
+ sym::transmute => {
+ intrinsic_args!(fx, args => (from); intrinsic);
+
+ ret.write_cvalue_transmute(fx, from);
+ }
+ sym::write_bytes | sym::volatile_set_memory => {
+ intrinsic_args!(fx, args => (dst, val, count); intrinsic);
+ let val = val.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
+ }
+ sym::ctlz | sym::ctlz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = fx.bcx.ins().clz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::cttz | sym::cttz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = fx.bcx.ins().ctz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::ctpop => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().popcnt(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bitreverse => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().bitrev(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bswap => {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
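+ // Shift each byte into its mirrored position, mask off everything else,
+ // then OR the partial results back together.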
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ ty => unreachable!("bswap {}", ty),
+ }
+ }
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+ source_info,
+ )
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!(
+ "attempted to zero-initialize type `{}`, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ );
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ )
+ });
+ return;
+ }
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ // Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ }
+ sym::volatile_store | sym::unaligned_volatile_store => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ // Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ }
+
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ intrinsic_args!(fx, args => (ptr, base); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+ let base = base.load_scalar(fx);
+ let ty = substs.type_at(0);
+
+ let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+ let diff_bytes = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = if intrinsic == sym::ptr_offset_from_unsigned {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ // Because diff_bytes ULE isize::MAX, this would be fine as signed,
+ // but unsigned is slightly easier to codegen, so might as well.
+ CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
+ } else {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+ CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
+ };
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_guaranteed_eq => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_guaranteed_ne => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::caller_location => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let caller_location = fx.get_caller_location(source_info);
+ ret.write_cvalue(fx, caller_location);
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_fence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_load") => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+ let clif_ty = fx.clif_type(ty).unwrap();
+
+ let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
+
+ let val = CValue::by_val(val, fx.layout_of(ty));
+ ret.write_cvalue(fx, val);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_store") => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+
+ let val = val.load_scalar(fx);
+
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+ intrinsic_args!(fx, args => (ptr, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
+ // both atomic_cxchg_* and atomic_cxchgweak_*
+ intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
+ let ret_val =
+ CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+ ret.write_cvalue(fx, ret_val)
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_and") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_or") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xor") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_nand") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_max") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umax") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_min") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umin") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+
+ sym::minnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::minnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+
+ kw::Try => {
+ intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
+ let f = f.load_scalar(fx);
+ let data = data.load_scalar(fx);
+ let _catch_fn = catch_fn.load_scalar(fx);
+
+ // FIXME once unwinding is supported, change this to actually catch panics
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ }
+
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ let res = crate::num::codegen_float_binop(
+ fx,
+ match intrinsic {
+ sym::fadd_fast => BinOp::Add,
+ sym::fsub_fast => BinOp::Sub,
+ sym::fmul_fast => BinOp::Mul,
+ sym::fdiv_fast => BinOp::Div,
+ sym::frem_fast => BinOp::Rem,
+ _ => unreachable!(),
+ },
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ }
+ sym::float_to_int_unchecked => {
+ intrinsic_args!(fx, args => (f); intrinsic);
+ let f = f.load_scalar(fx);
+
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ }
+
+ sym::raw_eq => {
+ intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
+ let lhs_ref = lhs_ref.load_scalar(fx);
+ let rhs_ref = rhs_ref.load_scalar(fx);
+
+ let size = fx.layout_of(substs.type_at(0)).layout.size();
+ // FIXME add and use emit_small_memcmp
+ let is_eq_value = if size == Size::ZERO {
+ // No bytes means they're trivially equal
+ fx.bcx.ins().iconst(types::I8, 1)
+ } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+ // Can't use `trusted` for these loads; they could be unaligned.
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+ let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+ let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+ fx.bcx.ins().bint(types::I8, eq)
+ } else {
+ // Just call `memcmp` (like slices do in core) when the
+ // size is too large or it's not a power-of-two.
+ let signed_bytes = i64::try_from(size.bytes()).unwrap();
+ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ref, rhs_ref, bytes_val];
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+ fx.bcx.ins().bint(types::I8, eq)
+ };
+ ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+ }
+
+ sym::const_allocate => {
+ intrinsic_args!(fx, args => (_size, _align); intrinsic);
+
+ // returns a null pointer at runtime.
+ let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
+ }
+
+ sym::const_deallocate => {
+ intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
+ // nop at runtime.
+ }
+
+ sym::black_box => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ // FIXME implement black_box semantics
+ ret.write_cvalue(fx, a);
+ }
+
+ // FIXME implement variadics in cranelift
+ sym::va_copy | sym::va_arg | sym::va_end => {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ _ => {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
+ }
+ }
+
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
new file mode 100644
index 000000000..30e3d1125
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -0,0 +1,659 @@
+//! Codegen `extern "platform-intrinsic"` intrinsics.
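+//!
+//! For reference, the intrinsics handled here are declared on the library side roughly
+//! like this (illustrative sketch, not the exact stdlib declarations):
+//!
+//! ```rust,ignore
+//! extern "platform-intrinsic" {
+//!     fn simd_add<T>(x: T, y: T) -> T;
+//!     fn simd_extract<T, E>(x: T, idx: u32) -> E;
+//! }
+//! ```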
+
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::Symbol;
+
+use super::*;
+use crate::prelude::*;
+
+fn report_simd_type_validation_error(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'_>,
+) {
+ fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
+ match intrinsic {
+ sym::simd_cast => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
+ let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
+
+ let from_signed = type_sign(lane_ty);
+ let to_signed = type_sign(ret_lane_ty);
+
+ clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
+ });
+ }
+
+ sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
+ (ty::Uint(_), sym::simd_ne) => {
+ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
+ (ty::Int(_), sym::simd_ne) => {
+ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Float(_), sym::simd_eq) => {
+ fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_ne) => {
+ fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_lt) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_le) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_gt) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_ge) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ _ => unreachable!(),
+ };
+
+ let ty = fx.clif_type(res_lane_ty).unwrap();
+
+ let res_lane = fx.bcx.ins().bint(ty, res_lane);
+ fx.bcx.ins().ineg(res_lane)
+ });
+ }
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+ _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+ let (x, y, idx) = match args {
+ [x, y, idx] => (x, y, idx),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
+ // If there is no suffix, use the index array length.
+ let n: u16 = if intrinsic == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+ .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ .try_into()
+ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ idx_ty,
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ return;
+ }
+ }
+ } else {
+ intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
+ };
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
+
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
+ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
+ .expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const {
+ ConstValue::ByRef { alloc, offset } => {
+ let size = Size::from_bytes(
+ 4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
+ );
+ alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
+ (0..ret_lane_count)
+ .map(|i| {
+ let i = usize::try_from(i).unwrap();
+ let idx = rustc_middle::mir::interpret::read_target_uint(
+ fx.tcx.data_layout.endian,
+ &idx_bytes[4 * i..4 * i + 4],
+ )
+ .expect("read_target_uint");
+ u16::try_from(idx).expect("try_from u32")
+ })
+ .collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_lane(fx, in_idx.into())
+ } else {
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
+ };
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ }
+
+ sym::simd_insert => {
+ let (base, idx, val) = match args {
+ [base, idx, val] => (base, idx, val),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let base = codegen_operand(fx, base);
+ let val = codegen_operand(fx, val);
+
+ // FIXME validate
+ let idx_const = if let Some(idx_const) =
+ crate::constant::mir_operand_get_const_val(fx, idx)
+ {
+ idx_const
+ } else {
+ fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
+ };
+
+ let idx = idx_const
+ .try_to_bits(Size::from_bytes(4 /* u32 */))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
+ );
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
+ }
+
+ sym::simd_extract => {
+ let (v, idx) = match args {
+ [v, idx] => (v, idx),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let v = codegen_operand(fx, v);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ let idx_const = if let Some(idx_const) =
+ crate::constant::mir_operand_get_const_val(fx, idx)
+ {
+ idx_const
+ } else {
+ fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
+ let res = crate::trap::trap_unimplemented_ret_value(
+ fx,
+ ret.layout(),
+ "Index argument for `simd_extract` is not a constant",
+ );
+ ret.write_cvalue(fx, res);
+ return;
+ };
+
+ let idx = idx_const
+ .try_to_bits(Size::from_bytes(4 /* u32 */))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
+ );
+ }
+
+ let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
+ ret.write_cvalue(fx, ret_lane);
+ }
+
+ sym::simd_neg => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(
+ fx,
+ a,
+ ret,
+ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Int(_) => fx.bcx.ins().ineg(lane),
+ ty::Float(_) => fx.bcx.ins().fneg(lane),
+ _ => unreachable!(),
+ },
+ );
+ }
+
+ sym::simd_add
+ | sym::simd_sub
+ | sym::simd_mul
+ | sym::simd_div
+ | sym::simd_rem
+ | sym::simd_shl
+ | sym::simd_shr
+ | sym::simd_and
+ | sym::simd_or
+ | sym::simd_xor => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
+
+ (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+ (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+ (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+ (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
+ (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
+
+ (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
+ (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
+ (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
+ (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
+ (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
+ "fmodf",
+ vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[x_lane, y_lane],
+ )[0],
+ (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
+ "fmod",
+ vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[x_lane, y_lane],
+ )[0],
+
+ (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+
+ (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+ (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
+ (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+ (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+ (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_fma => {
+ intrinsic_args!(fx, args => (a, b, c); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ assert_eq!(a.layout(), ret.layout());
+
+ let layout = a.layout();
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+
+ for lane in 0..lane_count {
+ let a_lane = a.value_lane(fx, lane);
+ let b_lane = b.value_lane(fx, lane);
+ let c_lane = c.value_lane(fx, lane);
+
+ let res_lane = match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => {
+ fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty)
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty)
+ }
+ _ => unreachable!(),
+ };
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
+ sym::simd_fmin | sym::simd_fmax => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match lane_ty.kind() {
+ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
+ sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_round => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(
+ fx,
+ a,
+ ret,
+ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "roundf",
+ vec![AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "round",
+ vec![AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[lane],
+ )[0],
+ _ => unreachable!("{:?}", lane_ty),
+ },
+ );
+ }
+
+ sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
+ match lane_ty.kind() {
+ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fabs => fx.bcx.ins().fabs(lane),
+ sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
+ sym::simd_ceil => fx.bcx.ins().ceil(lane),
+ sym::simd_floor => fx.bcx.ins().floor(lane),
+ sym::simd_trunc => fx.bcx.ins().trunc(lane),
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
+ intrinsic_args!(fx, args => (v, acc); intrinsic);
+ let acc = acc.load_scalar(fx);
+
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
+ }
+
+ sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
+ intrinsic_args!(fx, args => (v, acc); intrinsic);
+ let acc = acc.load_scalar(fx);
+
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
+ }
+
+ sym::simd_reduce_all => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
+ }
+
+ sym::simd_reduce_any => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
+ }
+
+ sym::simd_reduce_and => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
+ }
+
+ sym::simd_reduce_or => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
+ }
+
+ sym::simd_reduce_xor => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
+ }
+
+ sym::simd_reduce_min => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let lt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(lt, a, b)
+ });
+ }
+
+ sym::simd_reduce_max => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let gt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(gt, a, b)
+ });
+ }
+
+ sym::simd_select => {
+ intrinsic_args!(fx, args => (m, a, b); intrinsic);
+
+ if !m.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
+ return;
+ }
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
+ for lane in 0..lane_count {
+ let m_lane = m.value_lane(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
+ let res_lane =
+ CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
+ // simd_saturating_*
+ // simd_bitmask
+ // simd_scatter
+ // simd_gather
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
new file mode 100644
index 000000000..bb0793b1d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -0,0 +1,316 @@
+#![feature(rustc_private)]
+// Note: please avoid adding other feature gates where possible
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_interface;
+extern crate rustc_metadata;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
+use std::cell::Cell;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_errors::ErrorGuaranteed;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::settings::{self, Configurable};
+
+pub use crate::config::*;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod compiler_builtins;
+mod config;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+ pub(crate) use rustc_span::{FileNameDisplayPreference, Span};
+
+ pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+ pub(crate) use rustc_middle::bug;
+ pub(crate) use rustc_middle::mir::{self, *};
+ pub(crate) use rustc_middle::ty::layout::{self, LayoutOf, TyAndLayout};
+ pub(crate) use rustc_middle::ty::{
+ self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
+ TypeFoldable, TypeVisitable, UintTy,
+ };
+ pub(crate) use rustc_target::abi::{Abi, Scalar, Size, VariantIdx};
+
+ pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+ pub(crate) use rustc_index::vec::Idx;
+
+ pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+ pub(crate) use cranelift_codegen::ir::function::Function;
+ pub(crate) use cranelift_codegen::ir::types;
+ pub(crate) use cranelift_codegen::ir::{
+ AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+ StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ };
+ pub(crate) use cranelift_codegen::isa::{self, CallConv};
+ pub(crate) use cranelift_codegen::Context;
+ pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+ pub(crate) use cranelift_module::{self, DataContext, FuncId, Linkage, Module};
+
+ pub(crate) use crate::abi::*;
+ pub(crate) use crate::base::{codegen_operand, codegen_place};
+ pub(crate) use crate::cast::*;
+ pub(crate) use crate::common::*;
+ pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+ pub(crate) use crate::pointer::Pointer;
+ pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
+struct PrintOnPanic<F: Fn() -> String>(F);
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
+
+/// The codegen context holds any information shared between the codegen of individual functions
+/// inside a single codegen unit, with the exception of the Cranelift [`Module`](cranelift_module::Module).
+struct CodegenCx<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ global_asm: String,
+ inline_asm_index: Cell<usize>,
+ cached_context: Context,
+ debug_context: Option<DebugContext<'tcx>>,
+ unwind_context: UnwindContext,
+ cgu_name: Symbol,
+}
+
+impl<'tcx> CodegenCx<'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ backend_config: BackendConfig,
+ isa: &dyn TargetIsa,
+ debug_info: bool,
+ cgu_name: Symbol,
+ ) -> Self {
+ assert_eq!(pointer_ty(tcx), isa.pointer_type());
+
+ let unwind_context =
+ UnwindContext::new(isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
+ let debug_context = if debug_info && !tcx.sess.target.options.is_like_windows {
+ Some(DebugContext::new(tcx, isa))
+ } else {
+ None
+ };
+ CodegenCx {
+ tcx,
+ global_asm: String::new(),
+ inline_asm_index: Cell::new(0),
+ cached_context: Context::new(),
+ debug_context,
+ unwind_context,
+ cgu_name,
+ }
+ }
+}
+
+pub struct CraneliftCodegenBackend {
+ pub config: Option<BackendConfig>,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+ fn init(&self, sess: &Session) {
+ use rustc_session::config::Lto;
+ match sess.lto() {
+ Lto::No | Lto::ThinLocal => {}
+ Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
+ }
+ }
+
+ fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<rustc_span::Symbol> {
+ vec![]
+ }
+
+ fn print_version(&self) {
+ println!("Cranelift version: {}", cranelift_codegen::VERSION);
+ }
+
+ fn codegen_crate(
+ &self,
+ tcx: TyCtxt<'_>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
+ tcx.sess.abort_if_errors();
+ let config = if let Some(config) = self.config.clone() {
+ config
+ } else {
+ if !tcx.sess.unstable_options() && !tcx.sess.opts.cg.llvm_args.is_empty() {
+ tcx.sess.fatal("`-Z unstable-options` must be passed to allow configuring cg_clif");
+ }
+ BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
+ .unwrap_or_else(|err| tcx.sess.fatal(&err))
+ };
+ match config.codegen_mode {
+ CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
+ CodegenMode::Jit | CodegenMode::JitLazy => {
+ #[cfg(feature = "jit")]
+ driver::jit::run_jit(tcx, config);
+
+ #[cfg(not(feature = "jit"))]
+ tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+ }
+ }
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ _sess: &Session,
+ _outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ Ok(*ongoing_codegen
+ .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+ .unwrap())
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ link_binary(sess, &crate::archive::ArArchiveBuilderBuilder, &codegen_results, outputs)
+ }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+ match sess.target.llvm_target.parse() {
+ Ok(triple) => triple,
+ Err(err) => sess.fatal(&format!("target not recognized: {}", err)),
+ }
+}
+
+fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
+ use target_lexicon::BinaryFormat;
+
+ let target_triple = crate::target_triple(sess);
+
+ let mut flags_builder = settings::builder();
+ flags_builder.enable("is_pic").unwrap();
+ flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
+ let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
+ flags_builder.set("enable_verifier", enable_verifier).unwrap();
+ flags_builder.set("regalloc_checker", enable_verifier).unwrap();
+
+ let tls_model = match target_triple.binary_format {
+ BinaryFormat::Elf => "elf_gd",
+ BinaryFormat::Macho => "macho",
+ BinaryFormat::Coff => "coff",
+ _ => "none",
+ };
+ flags_builder.set("tls_model", tls_model).unwrap();
+
+ flags_builder.set("enable_simd", "true").unwrap();
+
+ flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
+
+ use rustc_session::config::OptLevel;
+ match sess.opts.optimize {
+ OptLevel::No => {
+ flags_builder.set("opt_level", "none").unwrap();
+ }
+ OptLevel::Less | OptLevel::Default => {}
+ OptLevel::Size | OptLevel::SizeMin | OptLevel::Aggressive => {
+ flags_builder.set("opt_level", "speed_and_size").unwrap();
+ }
+ }
+
+ let flags = settings::Flags::new(flags_builder);
+
+ let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
+ Some("native") => {
+ let builder = cranelift_native::builder_with_options(true).unwrap();
+ builder
+ }
+ Some(value) => {
+ let mut builder =
+ cranelift_codegen::isa::lookup(target_triple.clone()).unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
+ if let Err(_) = builder.enable(value) {
+ sess.fatal("the specified target cpu isn't currently supported by Cranelift.");
+ }
+ builder
+ }
+ None => {
+ let mut builder =
+ cranelift_codegen::isa::lookup(target_triple.clone()).unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
+ if target_triple.architecture == target_lexicon::Architecture::X86_64 {
+ // Don't use "haswell" as the default, as it implies `has_lzcnt`.
+ // macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
+ builder.enable("nehalem").unwrap();
+ }
+ builder
+ }
+ };
+
+ match isa_builder.finish(flags) {
+ Ok(target_isa) => target_isa,
+ Err(err) => sess.fatal(&format!("failed to build TargetIsa: {}", err)),
+ }
+}
+
+/// This is the entrypoint for a hot-plugged rustc_codegen_cranelift.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(CraneliftCodegenBackend { config: None })
+}
diff --git a/compiler/rustc_codegen_cranelift/src/linkage.rs b/compiler/rustc_codegen_cranelift/src/linkage.rs
new file mode 100644
index 000000000..ca853aac1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/linkage.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) fn get_clif_linkage(
+ mono_item: MonoItem<'_>,
+ linkage: RLinkage,
+ visibility: Visibility,
+ is_compiler_builtins: bool,
+) -> Linkage {
+ match (linkage, visibility) {
+ (RLinkage::External, Visibility::Default) if is_compiler_builtins => Linkage::Hidden,
+ (RLinkage::External, Visibility::Default) => Linkage::Export,
+ (RLinkage::Internal, Visibility::Default) => Linkage::Local,
+ (RLinkage::External, Visibility::Hidden) => Linkage::Hidden,
+ (RLinkage::WeakAny, Visibility::Default) => Linkage::Preemptible,
+ _ => panic!("{:?} = {:?} {:?}", mono_item, linkage, visibility),
+ }
+}
+
+pub(crate) fn get_static_linkage(tcx: TyCtxt<'_>, def_id: DefId) -> Linkage {
+ let fn_attrs = tcx.codegen_fn_attrs(def_id);
+
+ if let Some(linkage) = fn_attrs.linkage {
+ match linkage {
+ RLinkage::External => Linkage::Export,
+ RLinkage::Internal => Linkage::Local,
+ RLinkage::ExternalWeak | RLinkage::WeakAny => Linkage::Preemptible,
+ _ => panic!("{:?}", linkage),
+ }
+ } else if tcx.is_reachable_non_generic(def_id) {
+ Linkage::Export
+ } else {
+ Linkage::Hidden
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
new file mode 100644
index 000000000..c67b6e98b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -0,0 +1,161 @@
+use rustc_hir::LangItem;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::AssocKind;
+use rustc_session::config::EntryFnType;
+use rustc_span::symbol::Ident;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the Rust runtime and call
+/// the user's main function.
+pub(crate) fn maybe_create_entry_wrapper(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ is_jit: bool,
+ is_primary_cgu: bool,
+) {
+ let (main_def_id, is_main_fn) = match tcx.entry_fn(()) {
+ Some((def_id, entry_ty)) => (
+ def_id,
+ match entry_ty {
+ EntryFnType::Main => true,
+ EntryFnType::Start => false,
+ },
+ ),
+ None => return,
+ };
+
+ if main_def_id.is_local() {
+ let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
+ if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+ return;
+ }
+ } else if !is_primary_cgu {
+ return;
+ }
+
+ create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn);
+
+ fn create_entry_fn(
+ tcx: TyCtxt<'_>,
+ m: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ rust_main_def_id: DefId,
+ ignore_lang_start_wrapper: bool,
+ is_main_fn: bool,
+ ) {
+ let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+ // Given that `main()` has no arguments,
+ // then its return type cannot have
+ // late-bound regions, since late-bound
+ // regions must appear in the argument
+ // listing.
+ let main_ret_ty = tcx.normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
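+ // Signature of the generated `main` entry point: `(argc, argv)` passed as two
+ // pointer-sized values, returning a pointer-sized exit code (isize).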
+ let cmain_sig = Signature {
+ params: vec![
+ AbiParam::new(m.target_config().pointer_type()),
+ AbiParam::new(m.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
+ call_conv: CallConv::triple_default(m.isa().triple()),
+ };
+
+ let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
+
+ let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+ let main_name = tcx.symbol_name(instance).name;
+ let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
+ let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+
+ let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+ let result = if is_main_fn && ignore_lang_start_wrapper {
+ // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
+ // FIXME set program arguments somehow
+ let call_inst = bcx.ins().call(main_func_ref, &[]);
+ let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
+
+ let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
+ let report = tcx
+ .associated_items(termination_trait)
+ .find_by_name_and_kind(
+ tcx,
+ Ident::from_str("report"),
+ AssocKind::Fn,
+ termination_trait,
+ )
+ .unwrap();
+ let report = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ report.def_id,
+ tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+
+ let report_name = tcx.symbol_name(report).name;
+ let report_sig = get_function_sig(tcx, m.isa().triple(), report);
+ let report_func_id =
+ m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
+ let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
+
+ // FIXME do proper abi handling instead of expecting the pass mode to be identical
+ // for returns and arguments.
+ let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
+ let res = bcx.func.dfg.inst_results(report_call_inst)[0];
+ match m.target_config().pointer_type() {
+ types::I32 => res,
+ types::I64 => bcx.ins().sextend(types::I64, res),
+ _ => unimplemented!("16bit systems are not yet supported"),
+ }
+ } else if is_main_fn {
+ let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+ let start_instance = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ start_def_id,
+ tcx.intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+ let start_func_id = import_function(tcx, m, start_instance);
+
+ let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
+
+ let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ } else {
+ // using user-defined start fn
+ let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ };
+
+ bcx.ins().return_(&[result]);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ m.define_function(cmain_func_id, &mut ctx).unwrap();
+ unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
new file mode 100644
index 000000000..4ce8adb18
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -0,0 +1,440 @@
+//! Various operations on integer and floating-point numbers
+
+use crate::prelude::*;
+
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+ use BinOp::*;
+ use IntCC::*;
+ Some(match bin_op {
+ Eq => Equal,
+ Lt => {
+ if signed {
+ SignedLessThan
+ } else {
+ UnsignedLessThan
+ }
+ }
+ Le => {
+ if signed {
+ SignedLessThanOrEqual
+ } else {
+ UnsignedLessThanOrEqual
+ }
+ }
+ Ne => NotEqual,
+ Ge => {
+ if signed {
+ SignedGreaterThanOrEqual
+ } else {
+ UnsignedGreaterThanOrEqual
+ }
+ }
+ Gt => {
+ if signed {
+ SignedGreaterThan
+ } else {
+ UnsignedGreaterThan
+ }
+ }
+ _ => return None,
+ })
+}
+
+fn codegen_compare_bin_op<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ signed: bool,
+ lhs: Value,
+ rhs: Value,
+) -> CValue<'tcx> {
+ let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
+ let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ match in_lhs.layout().ty.kind() {
+ ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
+ let signed = type_sign(in_lhs.layout().ty);
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
+ match in_lhs.layout().ty.kind() {
+ ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ }
+}
+
+pub(crate) fn codegen_bool_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
+ // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "int binop requires lhs and rhs of same type"
+ );
+ }
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let val = match bin_op {
+ BinOp::Add => b.iadd(lhs, rhs),
+ BinOp::Sub => b.isub(lhs, rhs),
+ BinOp::Mul => b.imul(lhs, rhs),
+ BinOp::Div => {
+ if signed {
+ b.sdiv(lhs, rhs)
+ } else {
+ b.udiv(lhs, rhs)
+ }
+ }
+ BinOp::Rem => {
+ if signed {
+ b.srem(lhs, rhs)
+ } else {
+ b.urem(lhs, rhs)
+ }
+ }
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
+ BinOp::Shl => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ fx.bcx.ins().ishl(lhs, actual_shift)
+ }
+ BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ if signed {
+ fx.bcx.ins().sshr(lhs, actual_shift)
+ } else {
+ fx.bcx.ins().ushr(lhs, actual_shift)
+ }
+ }
+ // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ };
+
+ CValue::by_val(val, in_lhs.layout())
+}
+
+pub(crate) fn codegen_checked_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "checked int binop requires lhs and rhs of same type"
+ );
+ }
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let (res, has_overflow) = match bin_op {
+ BinOp::Add => {
+ /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
+ (val, c_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
+ let val = fx.bcx.ins().iadd(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
+ } else {
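+ // Signed overflow occurred iff the addition wrapped past `lhs` in the direction
+ // opposite to the sign of `rhs`, i.e. `(rhs < 0) ^ (val < lhs)`.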
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, slt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Sub => {
+ /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
+ (val, b_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
+ let val = fx.bcx.ins().isub(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
+ } else {
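+ // Signed overflow occurred iff the subtraction wrapped past `lhs` in the wrong
+ // direction, i.e. `(rhs < 0) ^ (val > lhs)`.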
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, sgt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Mul => {
+ let ty = fx.bcx.func.dfg.value_type(lhs);
+ match ty {
+ types::I8 | types::I16 | types::I32 if !signed => {
+ let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::UnsignedGreaterThan,
+ val,
+ (1 << ty.bits()) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, has_overflow)
+ }
+ types::I8 | types::I16 | types::I32 if signed => {
+ let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_underflow =
+ fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::SignedGreaterThan,
+ val,
+ (1 << (ty.bits() - 1)) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+ }
+ types::I64 => {
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = if !signed {
+ let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+ } else {
+ // Based on LLVM's instruction sequence for compiling
+ // a.checked_mul(b).is_some() to riscv64gc:
+ // mulh a2, a0, a1
+ // mul a0, a0, a1
+ // srai a0, a0, 63
+ // xor a0, a0, a2
+ // snez a0, a0
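+ // Overflow occurred iff the high half of the full product differs from the
+ // sign extension of the low half.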
+ let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+ let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
+ let xor = fx.bcx.ins().bxor(val_hi, val_sign);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
+ };
+ (val, has_overflow)
+ }
+ types::I128 => {
+ unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+ }
+ _ => unreachable!("invalid non-integer type {}", ty),
+ }
+ }
+ BinOp::Shl => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let val = fx.bcx.ins().ishl(lhs, masked_shift);
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let val = if !signed {
+ fx.bcx.ins().ushr(lhs, masked_shift)
+ } else {
+ fx.bcx.ins().sshr(lhs, masked_shift)
+ };
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
+ };
+
+ let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
+
+ let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
+ CValue::by_val_pair(res, has_overflow, out_layout)
+}
+
+pub(crate) fn codegen_float_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::Add => b.fadd(lhs, rhs),
+ BinOp::Sub => b.fsub(lhs, rhs),
+ BinOp::Mul => b.fmul(lhs, rhs),
+ BinOp::Div => b.fdiv(lhs, rhs),
+ BinOp::Rem => {
+ let name = match in_lhs.layout().ty.kind() {
+ ty::Float(FloatTy::F32) => "fmodf",
+ ty::Float(FloatTy::F64) => "fmod",
+ _ => bug!(),
+ };
+ return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+ }
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let fltcc = match bin_op {
+ BinOp::Eq => FloatCC::Equal,
+ BinOp::Lt => FloatCC::LessThan,
+ BinOp::Le => FloatCC::LessThanOrEqual,
+ BinOp::Ne => FloatCC::NotEqual,
+ BinOp::Ge => FloatCC::GreaterThanOrEqual,
+ BinOp::Gt => FloatCC::GreaterThan,
+ _ => unreachable!(),
+ };
+ let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, in_lhs.layout())
+}
+
+pub(crate) fn codegen_ptr_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let is_thin_ptr = in_lhs
+ .layout()
+ .ty
+ .builtin_deref(true)
+ .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+ .unwrap_or(true);
+
+ if is_thin_ptr {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
+ }
+ BinOp::Offset => {
+ let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+ let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ CValue::by_val(res, base.layout())
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ }
+ } else {
+ let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
+ let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
+
+ let res = match bin_op {
+ BinOp::Eq => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+ let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
+ fx.bcx.ins().band(ptr_eq, extra_eq)
+ }
+ BinOp::Ne => {
+ let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
+ let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
+ fx.bcx.ins().bor(ptr_ne, extra_ne)
+ }
+ BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+
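+ // Wide pointers compare lexicographically: the metadata is only compared when
+ // the data pointers are equal.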
+ let ptr_cmp =
+ fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+ let extra_cmp = fx.bcx.ins().icmp(
+ bin_op_to_intcc(bin_op, false).unwrap(),
+ lhs_extra,
+ rhs_extra,
+ );
+
+ fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
+ }
+ _ => panic!("bin_op {:?} on ptr", bin_op),
+ };
+
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
+ }
+}
+
+// In Rust, floating point min and max don't propagate NaN; in Cranelift they do.
+// For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
+// and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
+// a float against itself: only NaN is not equal to itself.
+pub(crate) fn codegen_float_min(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
+
+pub(crate) fn codegen_float_max(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
new file mode 100644
index 000000000..d1f89adb3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
@@ -0,0 +1,20 @@
+//! Various optimizations specific to cg_clif
+
+use cranelift_codegen::isa::TargetIsa;
+
+use crate::prelude::*;
+
+pub(crate) mod peephole;
+
+pub(crate) fn optimize_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ isa: &dyn TargetIsa,
+ instance: Instance<'tcx>,
+ ctx: &mut Context,
+ clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+ // FIXME classify optimizations over opt levels once we have more
+
+ crate::pretty_clif::write_clif_file(tcx, "preopt", isa, instance, &ctx.func, &*clif_comments);
+ crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
new file mode 100644
index 000000000..d637b4d89
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
@@ -0,0 +1,67 @@
+//! Peephole optimizations that can be performed while creating clif ir.
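+//!
+//! These helpers inspect the instruction that produced a `Value` (via `ValueDef`) and, when its
+//! shape is recognized, let the caller reuse the original operand or a statically known result
+//! instead of emitting redundant instructions.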
+
+use cranelift_codegen::ir::{condcodes::IntCC, InstructionData, Opcode, Value, ValueDef};
+use cranelift_frontend::FunctionBuilder;
+
+/// If the given value was produced by a `bint` instruction, return its input, otherwise return the
+/// given value.
+pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
+ _ => arg,
+ }
+ } else {
+ arg
+ }
+}
+
+/// If the given value was produced by the lowering of `Rvalue::Not`, return the input and true,
+/// otherwise return the given value and false.
+pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ // This is the lowering of `Rvalue::Not`
+ InstructionData::IntCompareImm {
+ opcode: Opcode::IcmpImm,
+ cond: IntCC::Equal,
+ arg,
+ imm,
+ } if imm.bits() == 0 => (arg, true),
+ _ => (arg, false),
+ }
+ } else {
+ (arg, false)
+ }
+}
+
+/// Returns whether the branch is statically known to be taken (`Some(true)`/`Some(false)`), or `None` if it isn't statically known.
+pub(crate) fn maybe_known_branch_taken(
+ bcx: &FunctionBuilder<'_>,
+ arg: Value,
+ test_zero: bool,
+) -> Option<bool> {
+ let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ arg_inst
+ } else {
+ return None;
+ };
+
+ match bcx.func.dfg[arg_inst] {
+ InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
+ if test_zero {
+ Some(!imm)
+ } else {
+ Some(imm)
+ }
+ }
+ InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
+ if test_zero {
+ Some(imm.bits() == 0)
+ } else {
+ Some(imm.bits() != 0)
+ }
+ }
+ _ => None,
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pointer.rs b/compiler/rustc_codegen_cranelift/src/pointer.rs
new file mode 100644
index 000000000..31d827f83
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pointer.rs
@@ -0,0 +1,134 @@
+//! Defines [`Pointer`] which is used to improve the quality of the generated clif ir for pointer
+//! operations.
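+//!
+//! A minimal illustrative sketch (`base_addr` is an assumed `Value` holding an address and
+//! `fx` the `FunctionCx` of the function being built):
+//!
+//! ```ignore
+//! // Load an `i64` from `base_addr + 8`; the constant offset is folded into the load
+//! // instead of emitting a separate `iadd_imm`.
+//! let ptr = Pointer::new(base_addr).offset_i64(fx, 8);
+//! let val = ptr.load(fx, types::I64, MemFlags::new());
+//! ```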
+
+use crate::prelude::*;
+
+use rustc_target::abi::Align;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+/// A pointer pointing either to a certain address, a certain stack slot or nothing.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Pointer {
+ base: PointerBase,
+ offset: Offset32,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum PointerBase {
+ Addr(Value),
+ Stack(StackSlot),
+ Dangling(Align),
+}
+
+impl Pointer {
+ pub(crate) fn new(addr: Value) -> Self {
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn stack_slot(stack_slot: StackSlot) -> Self {
+ Pointer { base: PointerBase::Stack(stack_slot), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn const_addr(fx: &mut FunctionCx<'_, '_, '_>, addr: i64) -> Self {
+ let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn dangling(align: Align) -> Self {
+ Pointer { base: PointerBase::Dangling(align), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn debug_base_and_offset(self) -> (PointerBase, Offset32) {
+ (self.base, self.offset)
+ }
+
+ pub(crate) fn get_addr(self, fx: &mut FunctionCx<'_, '_, '_>) -> Value {
+ match self.base {
+ PointerBase::Addr(base_addr) => {
+ let offset: i64 = self.offset.into();
+ if offset == 0 { base_addr } else { fx.bcx.ins().iadd_imm(base_addr, offset) }
+ }
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset)
+ }
+ PointerBase::Dangling(align) => {
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
+ }
+ }
+
+ pub(crate) fn offset(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Offset32) -> Self {
+ self.offset_i64(fx, extra_offset.into())
+ }
+
+ pub(crate) fn offset_i64(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: i64) -> Self {
+ if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
+ Pointer { base: self.base, offset: new_offset }
+ } else {
+ let base_offset: i64 = self.offset.into();
+ if let Some(new_offset) = base_offset.checked_add(extra_offset) {
+ let base_addr = match self.base {
+ PointerBase::Addr(addr) => addr,
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
+ }
+ PointerBase::Dangling(align) => {
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
+ };
+ let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ } else {
+ panic!(
+ "self.offset ({}) + extra_offset ({}) not representable in i64",
+ base_offset, extra_offset
+ );
+ }
+ }
+ }
+
+ pub(crate) fn offset_value(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Value) -> Self {
+ match self.base {
+ PointerBase::Addr(addr) => Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+ offset: self.offset,
+ },
+ PointerBase::Stack(stack_slot) => {
+ let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
+ Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
+ offset: Offset32::new(0),
+ }
+ }
+ PointerBase::Dangling(align) => {
+ let addr =
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+ Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+ offset: self.offset,
+ }
+ }
+ }
+ }
+
+ pub(crate) fn load(self, fx: &mut FunctionCx<'_, '_, '_>, ty: Type, flags: MemFlags) -> Value {
+ match self.base {
+ PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
+ PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_load(ty, stack_slot, self.offset),
+ PointerBase::Dangling(_align) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn store(self, fx: &mut FunctionCx<'_, '_, '_>, value: Value, flags: MemFlags) {
+ match self.base {
+ PointerBase::Addr(base_addr) => {
+ fx.bcx.ins().store(flags, value, base_addr, self.offset);
+ }
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_store(value, stack_slot, self.offset);
+ }
+ PointerBase::Dangling(_align) => unreachable!(),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
new file mode 100644
index 000000000..1d1ec2168
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -0,0 +1,278 @@
+//! This module provides the [CommentWriter] which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg loc.idx param pass mode ssa flags ty
+//! ; ret _0 = v0 ByRef NOT_SSA (u8, u8)
+//! ; arg _1 = v1 ByRef NOT_SSA IsNotEmpty
+//! ; arg _2.0 = v2 ByVal(types::I64) NOT_SSA &&[u16]
+//!
+//! ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//! ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//! sig0 = (i64, i64, i64) system_v
+//! sig1 = (i64, i64, i64) system_v
+//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//! v3 = stack_addr.i64 ss0
+//! v4 = stack_addr.i64 ss1
+//! store v2, v4
+//! v5 = stack_addr.i64 ss2
+//! jump block1
+//!
+//! block1:
+//! nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//! v6 = load.i64 v4
+//! store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//! v7 = load.i64 v5
+//! call fn0(v0, v3, v7)
+//! jump block2
+//!
+//! block2:
+//! nop
+//! ;
+//! ; return
+//! return
+//! }
+//! ```
+
+use std::fmt;
+use std::io::Write;
+
+use cranelift_codegen::{
+ entity::SecondaryMap,
+ ir::entities::AnyEntity,
+ write::{FuncWriter, PlainWriter},
+};
+
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_session::config::OutputType;
+
+use crate::prelude::*;
+
+#[derive(Clone, Debug)]
+pub(crate) struct CommentWriter {
+ enabled: bool,
+ global_comments: Vec<String>,
+ entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+ pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ let enabled = should_write_ir(tcx);
+ let global_comments = if enabled {
+ vec![
+ format!("symbol {}", tcx.symbol_name(instance).name),
+ format!("instance {:?}", instance),
+ format!(
+ "abi {:?}",
+ RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())
+ ),
+ String::new(),
+ ]
+ } else {
+ vec![]
+ };
+
+ CommentWriter { enabled, global_comments, entity_comments: FxHashMap::default() }
+ }
+}
+
+impl CommentWriter {
+ pub(crate) fn enabled(&self) -> bool {
+ self.enabled
+ }
+
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ debug_assert!(self.enabled);
+ self.global_comments.push(comment.into());
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ debug_assert!(self.enabled);
+
+ use std::collections::hash_map::Entry;
+ match self.entity_comments.entry(entity.into()) {
+ Entry::Occupied(mut occ) => {
+ occ.get_mut().push('\n');
+ occ.get_mut().push_str(comment.as_ref());
+ }
+ Entry::Vacant(vac) => {
+ vac.insert(comment.into());
+ }
+ }
+ }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+ fn write_preamble(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ ) -> Result<bool, fmt::Error> {
+ for comment in &self.global_comments {
+ if !comment.is_empty() {
+ writeln!(w, "; {}", comment)?;
+ } else {
+ writeln!(w)?;
+ }
+ }
+ if !self.global_comments.is_empty() {
+ writeln!(w)?;
+ }
+
+ self.super_preamble(w, func)
+ }
+
+ fn write_entity_definition(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ _func: &Function,
+ entity: AnyEntity,
+ value: &dyn fmt::Display,
+ ) -> fmt::Result {
+ write!(w, " {} = {}", entity, value)?;
+
+ if let Some(comment) = self.entity_comments.get(&entity) {
+ writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+ } else {
+ writeln!(w)
+ }
+ }
+
+ fn write_block_header(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ block: Block,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_block_header(w, func, block, indent)
+ }
+
+ fn write_instruction(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ aliases: &SecondaryMap<Value, Vec<Value>>,
+ inst: Inst,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_instruction(w, func, aliases, inst, indent)?;
+ if let Some(comment) = self.entity_comments.get(&inst.into()) {
+ writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+ }
+ Ok(())
+ }
+}
+
+impl FunctionCx<'_, '_, '_> {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.clif_comments.add_global_comment(comment);
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ self.clif_comments.add_comment(entity, comment);
+ }
+}
+
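+// cg_clif has no LLVM IR, so `--emit llvm-ir` is reused to request writing the clif ir files.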
+pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
+ tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly)
+}
+
+pub(crate) fn write_ir_file(
+ tcx: TyCtxt<'_>,
+ name: impl FnOnce() -> String,
+ write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
+) {
+ if !should_write_ir(tcx) {
+ return;
+ }
+
+ let clif_output_dir = tcx.output_filenames(()).with_extension("clif");
+
+ match std::fs::create_dir(&clif_output_dir) {
+ Ok(()) => {}
+ Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+ res @ Err(_) => res.unwrap(),
+ }
+
+ let clif_file_name = clif_output_dir.join(name());
+
+ let res = std::fs::File::create(clif_file_name).and_then(|mut file| write(&mut file));
+ if let Err(err) = res {
+ tcx.sess.warn(&format!("error writing ir file: {}", err));
+ }
+}
+
+pub(crate) fn write_clif_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ postfix: &str,
+ isa: &dyn cranelift_codegen::isa::TargetIsa,
+ instance: Instance<'tcx>,
+ func: &cranelift_codegen::ir::Function,
+ mut clif_comments: &CommentWriter,
+) {
+ // FIXME work around filename too long errors
+ write_ir_file(
+ tcx,
+ || format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
+ |file| {
+ let mut clif = String::new();
+ cranelift_codegen::write::decorate_function(&mut clif_comments, &mut clif, func)
+ .unwrap();
+
+ for flag in isa.flags().iter() {
+ writeln!(file, "set {}", flag)?;
+ }
+ write!(file, "target {}", isa.triple().architecture.to_string())?;
+ for isa_flag in isa.isa_flags().iter() {
+ write!(file, " {}", isa_flag)?;
+ }
+ writeln!(file, "\n")?;
+ writeln!(file)?;
+ file.write_all(clif.as_bytes())?;
+ Ok(())
+ },
+ );
+}
+
+impl fmt::Debug for FunctionCx<'_, '_, '_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.local_map)?;
+
+ let mut clif = String::new();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &self.clif_comments,
+ &mut clif,
+ &self.bcx.func,
+ )
+ .unwrap();
+ writeln!(f, "\n{}", clif)
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs
new file mode 100644
index 000000000..f86236ef3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs
@@ -0,0 +1,31 @@
+//! Locating various executables that are part of a C toolchain.
+
+use std::path::PathBuf;
+
+use rustc_codegen_ssa::back::link::linker_and_flavor;
+use rustc_session::Session;
+
+/// Tries to infer the path of a binary for the target toolchain from the linker name.
+pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
+ let (mut linker, _linker_flavor) = linker_and_flavor(sess);
+ let linker_file_name = linker
+ .file_name()
+ .and_then(|name| name.to_str())
+ .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
+
+ if linker_file_name == "ld.lld" {
+ if tool != "ld" {
+ linker.set_file_name(tool)
+ }
+ } else {
+ let tool_file_name = linker_file_name
+ .replace("ld", tool)
+ .replace("gcc", tool)
+ .replace("clang", tool)
+ .replace("cc", tool);
+
+ linker.set_file_name(tool_file_name)
+ }
+
+ linker
+}
diff --git a/compiler/rustc_codegen_cranelift/src/trap.rs b/compiler/rustc_codegen_cranelift/src/trap.rs
new file mode 100644
index 000000000..923269c4d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/trap.rs
@@ -0,0 +1,57 @@
+//! Helpers used to print a message and abort in case of certain panics and some detected UB.
+
+use crate::prelude::*;
+
+fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
+ let puts = fx
+ .module
+ .declare_function(
+ "puts",
+ Linkage::Import,
+ &Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![AbiParam::new(types::I32)],
+ },
+ )
+ .unwrap();
+ let puts = fx.module.declare_func_in_func(puts, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(puts, "puts");
+ }
+
+ let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, fx.symbol_name, msg);
+ let msg_ptr = fx.anonymous_str(&real_msg);
+ fx.bcx.ins().call(puts, &[msg_ptr]);
+}
+
+/// Use this for example when a function call should never return. This will fill the current block,
+/// so you can **not** add instructions to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
+/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
+/// to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
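+ // A conditional trap on a constant true is used instead of a plain `trap` so that the
+ // current block is not filled and instructions can still be appended after it.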
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
+}
+
+/// Like `trap_unimplemented` but returns a fake value of the specified type.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented_ret_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+ msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+ trap_unimplemented(fx, msg);
+ CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
new file mode 100644
index 000000000..052ca0a08
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -0,0 +1,227 @@
+//! Codegen of the [`PointerCast::Unsize`] operation.
+//!
+//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
+
+use crate::prelude::*;
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
+pub(crate) fn unsized_info<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ old_info: Option<Value>,
+) -> Value {
+ let (source, target) =
+ fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+ match (&source.kind(), &target.kind()) {
+ (&ty::Array(_, len), &ty::Slice(_)) => fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ let old_info =
+ old_info.expect("unsized_info: missing old info for trait upcasting coercion");
+ if data_a.principal_def_id() == data_b.principal_def_id() {
+ return old_info;
+ }
+
+ // trait upcasting coercion
+ let vptr_entry_idx =
+ fx.tcx.vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
+
+ if let Some(entry_idx) = vptr_entry_idx {
+ let entry_idx = u32::try_from(entry_idx).unwrap();
+ let entry_offset = entry_idx * fx.pointer_type.bytes();
+ let vptr_ptr = Pointer::new(old_info).offset_i64(fx, entry_offset.into()).load(
+ fx,
+ fx.pointer_type,
+ crate::vtable::vtable_memflags(),
+ );
+ vptr_ptr
+ } else {
+ old_info
+ }
+ }
+ (_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
+ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+ }
+}
+
+/// Coerce `src` to `dst_ty`.
+fn unsize_ptr<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: Value,
+ src_layout: TyAndLayout<'tcx>,
+ dst_layout: TyAndLayout<'tcx>,
+ old_info: Option<Value>,
+) -> (Value, Value) {
+ match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
+ | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (src, unsized_info(fx, *a, *b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
+ (src, unsized_info(fx, a, b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ if src_layout == dst_layout {
+ return (src, old_info.unwrap());
+ }
+
+ let mut result = None;
+ for i in 0..src_layout.fields.count() {
+ let src_f = src_layout.field(fx, i);
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+ if src_f.is_zst() {
+ continue;
+ }
+ assert_eq!(src_layout.size, src_f.size);
+
+ let dst_f = dst_layout.field(fx, i);
+ assert_ne!(src_f.ty, dst_f.ty);
+ assert_eq!(result, None);
+ result = Some(unsize_ptr(fx, src, src_f, dst_f, old_info));
+ }
+ result.unwrap()
+ }
+ _ => bug!("unsize_ptr: called on bad types"),
+ }
+}
+
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty`, and store the result in `dst`.
+pub(crate) fn coerce_unsized_into<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: CValue<'tcx>,
+ dst: CPlace<'tcx>,
+) {
+ let src_ty = src.layout().ty;
+ let dst_ty = dst.layout().ty;
+ let mut coerce_ptr = || {
+ let (base, info) =
+ if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
+ let (old_base, old_info) = src.load_scalar_pair(fx);
+ unsize_ptr(fx, old_base, src.layout(), dst.layout(), Some(old_info))
+ } else {
+ let base = src.load_scalar(fx);
+ unsize_ptr(fx, base, src.layout(), dst.layout(), None)
+ };
+ dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
+ };
+ match (&src_ty.kind(), &dst_ty.kind()) {
+ (&ty::Ref(..), &ty::Ref(..))
+ | (&ty::Ref(..), &ty::RawPtr(..))
+ | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
+ let src_f = src.value_field(fx, mir::Field::new(i));
+ let dst_f = dst.place_field(fx, mir::Field::new(i));
+
+ if dst_f.layout().is_zst() {
+ continue;
+ }
+
+ if src_f.layout().ty == dst_f.layout().ty {
+ dst_f.write_cvalue(fx, src_f);
+ } else {
+ coerce_unsized_into(fx, src_f, dst_f);
+ }
+ }
+ }
+ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
+ }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
+
+pub(crate) fn size_and_align_of_dst<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ info: Value,
+) -> (Value, Value) {
+ assert!(layout.is_unsized() || layout.abi == Abi::Uninhabited);
+ match layout.ty.kind() {
+ ty::Dynamic(..) => {
+ // load size/align from vtable
+ (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
+ }
+ ty::Slice(_) | ty::Str => {
+ let unit = layout.field(fx, 0);
+ // The info in this case is the length of the str, so the size is that
+ // times the unit size.
+ (
+ fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
+ fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+ )
+ }
+ _ => {
+ // First get the size of all statically known fields.
+ // Don't use size_of because it also rounds up to alignment, which we
+ // want to avoid, as the unsized field's alignment could be smaller.
+ assert!(!layout.ty.is_simd());
+
+ let i = layout.fields.count() - 1;
+ let sized_size = layout.fields.offset(i).bytes();
+ let sized_align = layout.align.abi.bytes();
+ let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field).
+ let field_layout = layout.field(fx, i);
+ let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `sized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
+
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = layout.ty.kind() {
+ if def.repr().packed() {
+ unsized_align = sized_align;
+ }
+ }
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
+ let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+ let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ //
+ // Namely, the returned size should be, in C notation:
+ //
+ // `size + ((size & (align-1)) ? align : 0)`
+ //
+ // emulated via the semi-standard fast bit trick:
+ //
+ // `(size + (align-1)) & -align`
+ let addend = fx.bcx.ins().iadd_imm(align, -1);
+ let add = fx.bcx.ins().iadd(size, addend);
+ let neg = fx.bcx.ins().ineg(align);
+ let size = fx.bcx.ins().band(add, neg);
+
+ (size, align)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
new file mode 100644
index 000000000..45ae2bd8f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -0,0 +1,883 @@
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ base: Pointer,
+ extra: Option<Value>,
+ layout: TyAndLayout<'tcx>,
+ field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+ let field_offset = layout.fields.offset(field.index());
+ let field_layout = layout.field(&*fx, field.index());
+
+ let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+ (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
+ };
+
+ if let Some(extra) = extra {
+ if !field_layout.is_unsized() {
+ return simple(fx);
+ }
+ match field_layout.ty.kind() {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+ ty::Adt(def, _) if def.repr().packed() => {
+ assert_eq!(layout.align.abi.bytes(), 1);
+ simple(fx)
+ }
+ _ => {
+ // We have to align the offset for DSTs.
+ let unaligned_offset = field_offset.bytes();
+ let (_, unsized_align) =
+ crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
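+ // Round `unaligned_offset` up to `unsized_align` with the usual bit trick:
+ // `(offset + align - 1) & -align`.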
+ let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
+ let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+ let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+ let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+ (base.offset_value(fx, offset), field_layout)
+ }
+ }
+ } else {
+ simple(fx)
+ }
+}
+
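+/// Computes the offset of the second scalar of a `ScalarPair` layout: the size of the first
+/// scalar rounded up to the alignment of the second.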
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
+ let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
+ Offset32::new(b_offset.bytes().try_into().unwrap())
+}
+
+/// A read-only value
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
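+/// The inner representation of a [`CValue`]: stored in memory behind a pointer (with optional
+/// metadata for unsized values), as a single SSA value, or as a pair of SSA values.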
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+ ByRef(Pointer, Option<Value>),
+ ByVal(Value),
+ ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+ pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, None), layout)
+ }
+
+ pub(crate) fn by_ref_unsized(
+ ptr: Pointer,
+ meta: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+ }
+
+ pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByVal(value), layout)
+ }
+
+ pub(crate) fn by_val_pair(
+ value: Value,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByValPair(value, extra), layout)
+ }
+
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.1
+ }
+
+ // FIXME remove
+ pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => (ptr, meta),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+ let cplace = CPlace::new_stack_slot(fx, layout);
+ cplace.write_cvalue(fx, self);
+ (cplace.to_ptr(), None)
+ }
+ }
+ }
+
+ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+ }
+ }
+
+ /// Load a value whose `layout.abi` is `Abi::Scalar`.
+ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let clif_ty = match layout.abi {
+ Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+ Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+ .by(u16::try_from(count).unwrap())
+ .unwrap(),
+ _ => unreachable!("{:?}", layout.ty),
+ };
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, clif_ty, flags)
+ }
+ CValueInner::ByVal(value) => value,
+ CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+ CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+ }
+ }
+
+ /// Load a value pair whose `layout.abi` is `Abi::ScalarPair`.
+ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match layout.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("load_scalar_pair({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let val1 = ptr.load(fx, clif_ty1, flags);
+ let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (val1, val2)
+ }
+ CValueInner::ByRef(_, Some(_)) => {
+ bug!("load_scalar_pair for unsized value not allowed")
+ }
+ CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+ CValueInner::ByValPair(val1, val2) => (val1, val2),
+ }
+ }
+
+ pub(crate) fn value_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count } => {
+ let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+ let field = u8::try_from(field.index()).unwrap();
+ assert!(field < count);
+ let lane = fx.bcx.ins().extractlane(val, field);
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(lane, field_layout)
+ }
+ _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(val1, val2) => match layout.abi {
+ Abi::ScalarPair(_, _) => {
+ let val = match field.as_u32() {
+ 0 => val1,
+ 1 => val2,
+ _ => bug!("field should be 0 or 1"),
+ };
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(val, field_layout)
+ }
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ },
+ CValueInner::ByRef(ptr, None) => {
+ let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+ CValue::by_ref(field_ptr, field_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ /// Like [`CValue::value_field`], except that it handles ADTs containing a single array field in
+ /// a way that allows accessing the individual lanes.
+ pub(crate) fn value_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count: _ } => {
+ assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+ let lane_idx = u8::try_from(lane_idx).unwrap();
+ let lane = fx.bcx.ins().extractlane(val, lane_idx);
+ CValue::by_val(lane, lane_layout)
+ }
+ _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(_, _) => unreachable!(),
+ CValueInner::ByRef(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CValue::by_ref(field_ptr, lane_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_unsized_into(fx, self, dest);
+ }
+
+ /// If `ty` is signed, `const_val` must already be sign extended.
+ pub(crate) fn const_val(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ const_val: ty::ScalarInt,
+ ) -> CValue<'tcx> {
+ assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
+ use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+ let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+ if let ty::Bool = layout.ty.kind() {
+ assert!(
+ const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+ "Invalid bool 0x{:032X}",
+ const_val
+ );
+ }
+
+ let val = match layout.ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ let const_val = const_val.to_bits(layout.size).unwrap();
+ let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
+ let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+ fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+ }
+ ty::Float(FloatTy::F32) => {
+ fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+ }
+ _ => panic!(
+ "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+ layout.ty
+ ),
+ };
+
+ CValue::by_val(val, layout)
+ }
+
+ pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
+ assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert_eq!(self.layout().abi, layout.abi);
+ CValue(self.0, layout)
+ }
+}
+
+/// A place that a value can be written to or read from
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+ inner: CPlaceInner,
+ layout: TyAndLayout<'tcx>,
+}
+
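+/// The inner representation of a [`CPlace`]: a single SSA variable, a pair of SSA variables, a
+/// single lane of a vector stored in an SSA variable, or a memory address with optional metadata
+/// for unsized places.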
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+ Var(Local, Variable),
+ VarPair(Local, Variable, Variable),
+ VarLane(Local, Variable, u8),
+ Addr(Pointer, Option<Value>),
+}
+
+impl<'tcx> CPlace<'tcx> {
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ pub(crate) fn inner(&self) -> &CPlaceInner {
+ &self.inner
+ }
+
+ pub(crate) fn new_stack_slot(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ assert!(!layout.is_unsized());
+ if layout.size.bytes() == 0 {
+ return CPlace {
+ inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+ layout,
+ };
+ }
+
+ if layout.size.bytes() >= u64::from(u32::MAX - 16) {
+ fx.tcx
+ .sess
+ .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
+ }
+
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
+ });
+ CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
+ }
+
+ pub(crate) fn new_var(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
+ CPlace { inner: CPlaceInner::Var(local, var), layout }
+ }
+
+ pub(crate) fn new_var_pair(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var1 = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ let var2 = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+
+ let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
+ fx.bcx.declare_var(var1, ty1);
+ fx.bcx.declare_var(var2, ty2);
+ CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
+ }
+
+ pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+ CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
+ }
+
+ pub(crate) fn for_ptr_with_extra(
+ ptr: Pointer,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
+ }
+
+ pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
+ let layout = self.layout();
+ match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ let val = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let val1 = fx.bcx.use_var(var1);
+ //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+ let val2 = fx.bcx.use_var(var2);
+ //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+ CValue::by_val_pair(val1, val2, layout)
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let val = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ let val = fx.bcx.ins().extractlane(val, lane);
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::Addr(ptr, extra) => {
+ if let Some(extra) = extra {
+ CValue::by_ref_unsized(ptr, extra, layout)
+ } else {
+ CValue::by_ref(ptr, layout)
+ }
+ }
+ }
+ }
+
+ pub(crate) fn to_ptr(self) -> Pointer {
+ match self.to_ptr_maybe_unsized() {
+ (ptr, None) => ptr,
+ (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+ match self.inner {
+ CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+ CPlaceInner::Var(_, _)
+ | CPlaceInner::VarPair(_, _, _)
+ | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
+ assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
+
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
+ }
+
+ pub(crate) fn write_cvalue_transmute(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ ) {
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
+ }
+
+ fn write_cvalue_maybe_transmute(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ method: &'static str,
+ ) {
+ fn transmute_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ var: Variable,
+ data: Value,
+ dst_ty: Type,
+ ) {
+ let src_ty = fx.bcx.func.dfg.value_type(data);
+ assert_eq!(
+ src_ty.bytes(),
+ dst_ty.bytes(),
+ "write_cvalue_transmute: {:?} -> {:?}",
+ src_ty,
+ dst_ty,
+ );
+ let data = match (src_ty, dst_ty) {
+ (_, _) if src_ty == dst_ty => data,
+
+ // This is a `write_cvalue_transmute`.
+ (types::I32, types::F32)
+ | (types::F32, types::I32)
+ | (types::I64, types::F64)
+ | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
+ _ if src_ty.is_vector() && dst_ty.is_vector() => {
+ fx.bcx.ins().raw_bitcast(dst_ty, data)
+ }
+ _ if src_ty.is_vector() || dst_ty.is_vector() => {
+ // FIXME do something more efficient for transmutes between vectors and integers.
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (src_ty.bytes() + 15) / 16 * 16,
+ });
+ let ptr = Pointer::stack_slot(stack_slot);
+ ptr.store(fx, data, MemFlags::trusted());
+ ptr.load(fx, dst_ty, MemFlags::trusted())
+ }
+
+ // `CValue`s should never contain SSA-only types, so if you ended
+ // up here having seen an error like `B1 -> I8`, then before
+ // calling `write_cvalue` you need to add a `bint` instruction.
+ _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
+ };
+ //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, data);
+ }
+
+ assert_eq!(self.layout().size, from.layout().size);
+
+ if fx.clif_comments.enabled() {
+ use cranelift_codegen::cursor::{Cursor, CursorPosition};
+ let cur_block = match fx.bcx.cursor().position() {
+ CursorPosition::After(block) => block,
+ _ => unreachable!(),
+ };
+ fx.add_comment(
+ fx.bcx.func.layout.last_inst(cur_block).unwrap(),
+ format!(
+ "{}: {:?}: {:?} <- {:?}: {:?}",
+ method,
+ self.inner(),
+ self.layout().ty,
+ from.0,
+ from.layout().ty
+ ),
+ );
+ }
+
+ let dst_layout = self.layout();
+ let to_ptr = match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ if let ty::Array(element, len) = dst_layout.ty.kind() {
+ // Can only happen for vector types
+ let len =
+ u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
+ let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
+
+ let data = match from.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, vector_ty, flags)
+ }
+ CValueInner::ByVal(_)
+ | CValueInner::ByValPair(_, _)
+ | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
+ };
+
+ fx.bcx.def_var(var, data);
+ return;
+ }
+ let data = CValue(from.0, dst_layout).load_scalar(fx);
+ let dst_ty = fx.clif_type(self.layout().ty).unwrap();
+ transmute_value(fx, var, data, dst_ty);
+ return;
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
+ let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
+ transmute_value(fx, var1, data1, dst_ty1);
+ transmute_value(fx, var2, data2, dst_ty2);
+ return;
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let data = from.load_scalar(fx);
+
+ // First get the old vector
+ let vector = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+
+ // Next insert the written lane into the vector
+ let vector = fx.bcx.ins().insertlane(vector, data, lane);
+
+ // Finally write the new vector
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, vector);
+
+ return;
+ }
+ CPlaceInner::Addr(ptr, None) => {
+ if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+ return;
+ }
+ ptr
+ }
+ CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+ };
+
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ match from.layout().abi {
+ // FIXME make Abi::Vector work too
+ Abi::Scalar(_) => {
+ let val = from.load_scalar(fx);
+ to_ptr.store(fx, val, flags);
+ return;
+ }
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let (value, extra) = from.load_scalar_pair(fx);
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ to_ptr.store(fx, value, flags);
+ to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+ return;
+ }
+ _ => {}
+ }
+
+ match from.0 {
+ CValueInner::ByVal(val) => {
+ to_ptr.store(fx, val, flags);
+ }
+ CValueInner::ByValPair(_, _) => {
+ bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
+ }
+ CValueInner::ByRef(from_ptr, None) => {
+ let from_addr = from_ptr.get_addr(fx);
+ let to_addr = to_ptr.get_addr(fx);
+ let src_layout = from.1;
+ let size = dst_layout.size.bytes();
+ let src_align = src_layout.align.abi.bytes() as u8;
+ let dst_align = dst_layout.align.abi.bytes() as u8;
+ fx.bcx.emit_small_memory_copy(
+ fx.target_config,
+ to_addr,
+ from_addr,
+ size,
+ dst_align,
+ src_align,
+ true,
+ MemFlags::trusted(),
+ );
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ pub(crate) fn place_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => match layout.ty.kind() {
+ ty::Array(_, _) => {
+ // Can only happen for vector types
+ return CPlace {
+ inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
+ let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
+
+ match f0_ty.kind() {
+ ty::Array(_, _) => {
+ assert_eq!(field.as_u32(), 0);
+ return CPlace {
+ inner: CPlaceInner::Var(local, var),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ _ => {
+ return CPlace {
+ inner: CPlaceInner::VarLane(
+ local,
+ var,
+ field.as_u32().try_into().unwrap(),
+ ),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ }
+ }
+ _ => {}
+ },
+ CPlaceInner::VarPair(local, var1, var2) => {
+ let layout = layout.field(&*fx, field.index());
+
+ match field.as_u32() {
+ 0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
+ 1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
+ _ => unreachable!("field should be 0 or 1"),
+ }
+ }
+ _ => {}
+ }
+
+ let (base, extra) = self.to_ptr_maybe_unsized();
+
+ let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
+ if field_layout.is_unsized() {
+ if let ty::Foreign(_) = field_layout.ty.kind() {
+ assert!(extra.is_none());
+ CPlace::for_ptr(field_ptr, field_layout)
+ } else {
+ CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
+ }
+ } else {
+ CPlace::for_ptr(field_ptr, field_layout)
+ }
+ }
+
+ /// Like [`CPlace::place_field`], except that it handles ADTs containing a single array field in
+ /// a way that allows accessing the individual lanes.
+ pub(crate) fn place_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => {
+ assert!(matches!(layout.abi, Abi::Vector { .. }));
+ CPlace {
+ inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
+ layout: lane_layout,
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => unreachable!(),
+ CPlaceInner::VarLane(_, _, _) => unreachable!(),
+ CPlaceInner::Addr(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CPlace::for_ptr(field_ptr, lane_layout)
+ }
+ CPlaceInner::Addr(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn place_index(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ index: Value,
+ ) -> CPlace<'tcx> {
+ let (elem_layout, ptr) = match self.layout().ty.kind() {
+ ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
+ ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
+ _ => bug!("place_index({:?})", self.layout().ty),
+ };
+
+ let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
+
+ CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
+ }
+
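+ /// Dereference this pointer-typed place, producing a place for the pointee. For wide pointers
+ /// the metadata is carried over to the new place.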
+ pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
+ let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
+ if has_ptr_meta(fx.tcx, inner_layout.ty) {
+ let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
+ CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+ } else {
+ CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
+ }
+ }
+
+ pub(crate) fn place_ref(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ if has_ptr_meta(fx.tcx, self.layout().ty) {
+ let (ptr, extra) = self.to_ptr_maybe_unsized();
+ CValue::by_val_pair(
+ ptr.get_addr(fx),
+ extra.expect("unsized type without metadata"),
+ layout,
+ )
+ } else {
+ CValue::by_val(self.to_ptr().get_addr(fx), layout)
+ }
+ }
+
+ pub(crate) fn downcast_variant(
+ self,
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ variant: VariantIdx,
+ ) -> Self {
+ assert!(!self.layout().is_unsized());
+ let layout = self.layout().for_variant(fx, variant);
+ CPlace { inner: self.inner, layout }
+ }
+}
+
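+/// Debug-checks that a value of type `from_ty` may be written to a place of type `to_ty`. This
+/// only exists to catch codegen bugs; it is not required for soundness.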
+#[track_caller]
+pub(crate) fn assert_assignable<'tcx>(
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ from_ty: Ty<'tcx>,
+ to_ty: Ty<'tcx>,
+ limit: usize,
+) {
+ if limit == 0 {
+ // `assert_assignable` exists solely to catch bugs in cg_clif; it isn't necessary for
+ // soundness. Don't check deeply nested types to avoid exponential behavior in certain
+ // cases.
+ return;
+ }
+ match (from_ty.kind(), to_ty.kind()) {
+ (ty::Ref(_, a, _), ty::Ref(_, b, _))
+ | (
+ ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+ ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+ ) => {
+ assert_assignable(fx, *a, *b, limit - 1);
+ }
+ (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
+ | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
+ assert_assignable(fx, *a, *b, limit - 1);
+ }
+ (ty::FnPtr(_), ty::FnPtr(_)) => {
+ let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+ ParamEnv::reveal_all(),
+ from_ty.fn_sig(fx.tcx),
+ );
+ let to_sig = fx
+ .tcx
+ .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+ assert_eq!(
+ from_sig, to_sig,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
+ // fn(&T) -> for<'l> fn(&'l T) is allowed
+ }
+ (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
+ for (from, to) in from_traits.iter().zip(to_traits) {
+ let from =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
+ let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+ assert_eq!(
+ from, to,
+ "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+ from_traits, to_traits, fx,
+ );
+ }
+ // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+ }
+ (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
+ let mut types_a = types_a.iter();
+ let mut types_b = types_b.iter();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+ if adt_def_a.did() == adt_def_b.did() =>
+ {
+ let mut types_a = substs_a.types();
+ let mut types_b = substs_b.types();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
+ (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
+ if def_id_a == def_id_b =>
+ {
+ let mut types_a = substs_a.types();
+ let mut types_b = substs_b.types();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
+ // No way to check if it is correct or not with polymorphization enabled
+ }
+ _ => {
+ assert_eq!(
+ from_ty,
+ to_ty,
+ "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+ from_ty.kind(),
+ to_ty.kind(),
+ fx,
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
new file mode 100644
index 000000000..36b3725ef
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -0,0 +1,79 @@
+//! Codegen vtables and vtable accesses.
+//!
+//! See `rustc_codegen_ssa/src/meth.rs` for reference.
+
+use crate::constant::data_id_for_alloc_id;
+use crate::prelude::*;
+
+pub(crate) fn vtable_memflags() -> MemFlags {
+ let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+ flags.set_readonly(); // A vtable is always read-only.
+ flags
+}
+
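+// The first three entries of every vtable are the `drop_in_place` function pointer and the size
+// and alignment of the concrete type (see `ty::COMMON_VTABLE_ENTRIES`); the helpers below load
+// these fixed slots.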
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_DROPINPLACE * usize_size) as i32,
+ )
+}
+
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_SIZE * usize_size) as i32,
+ )
+}
+
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_ALIGN * usize_size) as i32,
+ )
+}
+
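+/// Loads the data pointer and the function pointer for method `idx` from a trait object value,
+/// i.e. a (data, vtable) pair.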
+pub(crate) fn get_ptr_and_method_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ idx: usize,
+) -> (Value, Value) {
+ let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
+ arg.load_scalar_pair(fx)
+ } else {
+ let (ptr, vtable) = arg.try_to_ptr().unwrap();
+ (ptr.get_addr(fx), vtable.unwrap())
+ };
+
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
+ let func_ref = fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (idx * usize_size as usize) as i32,
+ );
+ (ptr, func_ref)
+}
+
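+/// Returns a pointer to the vtable describing how `ty` implements `trait_ref`.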
+pub(crate) fn get_vtable<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Value {
+ let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
+ let data_id =
+ data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, Mutability::Not);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("vtable: {:?}", alloc_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+}