author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9
tree      173a775858bd501c378080a10dca74132f05bc50  /compiler/rustc_codegen_ssa/src/traits
parent    Initial commit.
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/traits')
 compiler/rustc_codegen_ssa/src/traits/abi.rs          |    8 +
 compiler/rustc_codegen_ssa/src/traits/asm.rs          |   66 +
 compiler/rustc_codegen_ssa/src/traits/backend.rs      |  161 +
 compiler/rustc_codegen_ssa/src/traits/builder.rs      |  481 +
 compiler/rustc_codegen_ssa/src/traits/consts.rs       |   41 +
 compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs |   57 +
 compiler/rustc_codegen_ssa/src/traits/debuginfo.rs    |   79 +
 compiler/rustc_codegen_ssa/src/traits/declare.rs      |   21 +
 compiler/rustc_codegen_ssa/src/traits/intrinsic.rs    |   39 +
 compiler/rustc_codegen_ssa/src/traits/misc.rs         |   26 +
 compiler/rustc_codegen_ssa/src/traits/mod.rs          |  102 +
 compiler/rustc_codegen_ssa/src/traits/statics.rs      |   24 +
 compiler/rustc_codegen_ssa/src/traits/type_.rs        |  151 +
 compiler/rustc_codegen_ssa/src/traits/write.rs        |   68 +
 14 files changed, 1324 insertions(+), 0 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs
new file mode 100644
index 000000000..a00d78daf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs
@@ -0,0 +1,8 @@
+use super::BackendTypes;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::FnAbi;
+
+pub trait AbiBuilderMethods<'tcx>: BackendTypes {
+ fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
+ fn get_param(&mut self, index: usize) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/asm.rs b/compiler/rustc_codegen_ssa/src/traits/asm.rs
new file mode 100644
index 000000000..c2ae74b18
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/asm.rs
@@ -0,0 +1,66 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::Instance;
+use rustc_span::Span;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+#[derive(Debug)]
+pub enum InlineAsmOperandRef<'tcx, B: BackendTypes + ?Sized> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ value: OperandRef<'tcx, B::Value>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ place: Option<PlaceRef<'tcx, B::Value>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_value: OperandRef<'tcx, B::Value>,
+ out_place: Option<PlaceRef<'tcx, B::Value>>,
+ },
+ Const {
+ string: String,
+ },
+ SymFn {
+ instance: Instance<'tcx>,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
+#[derive(Debug)]
+pub enum GlobalAsmOperandRef<'tcx> {
+ Const { string: String },
+ SymFn { instance: Instance<'tcx> },
+ SymStatic { def_id: DefId },
+}
+
+pub trait AsmBuilderMethods<'tcx>: BackendTypes {
+ /// Lower an inline assembly expression to the backend's IR.
+ fn codegen_inline_asm(
+ &mut self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Self>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ instance: Instance<'_>,
+ dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+ );
+}
+
+pub trait AsmMethods<'tcx> {
+ fn codegen_global_asm(
+ &self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[GlobalAsmOperandRef<'tcx>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ );
+}
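To make the operand model concrete, the sketch below shows the shape of the dispatch a backend performs over these operand kinds. It is a standalone toy, not rustc code: `AsmOperand`, `Value`, and `Place` are simplified stand-ins for `InlineAsmOperandRef`, `OperandRef`, and `PlaceRef`.

    #[derive(Debug)]
    struct Value(u32);
    #[derive(Debug)]
    struct Place(u32);

    // Pared-down mirror of InlineAsmOperandRef, for illustration only.
    enum AsmOperand {
        In { value: Value },
        Out { late: bool, place: Option<Place> },
        InOut { late: bool, in_value: Value, out_place: Option<Place> },
        Const { string: String },
    }

    fn lower_operands(operands: Vec<AsmOperand>) {
        for op in operands {
            match op {
                // Inputs are read before the asm executes.
                AsmOperand::In { value } => println!("read {value:?}"),
                // Outputs are written after; `place: None` means the result
                // is discarded (an `_` output in the asm! invocation).
                AsmOperand::Out { late, place } => {
                    println!("write late={late}, place={place:?}")
                }
                // Tied operands read one value and write back to a place.
                AsmOperand::InOut { late, in_value, out_place } => {
                    println!("tie late={late}, {in_value:?} -> {out_place:?}")
                }
                // Consts are interpolated into the template string itself.
                AsmOperand::Const { string } => println!("inline const {string}"),
            }
        }
    }

    fn main() {
        lower_operands(vec![
            AsmOperand::In { value: Value(1) },
            AsmOperand::Out { late: true, place: Some(Place(2)) },
            AsmOperand::Const { string: "42".into() },
        ]);
    }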
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
new file mode 100644
index 000000000..779bd3ea2
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -0,0 +1,161 @@
+use super::write::WriteBackendMethods;
+use super::CodegenObject;
+use crate::back::write::TargetMachineFactoryFn;
+use crate::{CodegenResults, ModuleCodegen};
+
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_session::{
+ config::{self, OutputFilenames, PrintRequest},
+ cstore::MetadataLoaderDyn,
+ Session,
+};
+use rustc_span::symbol::Symbol;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::spec::Target;
+
+pub use rustc_data_structures::sync::MetadataRef;
+
+use std::any::Any;
+
+pub trait BackendTypes {
+ type Value: CodegenObject;
+ type Function: CodegenObject;
+
+ type BasicBlock: Copy;
+ type Type: CodegenObject;
+ type Funclet;
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `Dbg`, `Debug`, `DebugInfo`, `DI` etc.).
+ type DIScope: Copy;
+ type DILocation: Copy;
+ type DIVariable: Copy;
+}
+
+pub trait Backend<'tcx>:
+ Sized
+ + BackendTypes
+ + HasTyCtxt<'tcx>
+ + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+ + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+{
+}
+
+impl<'tcx, T> Backend<'tcx> for T where
+ Self: BackendTypes
+ + HasTyCtxt<'tcx>
+ + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+ + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+{
+}
+
+pub trait CodegenBackend {
+ fn init(&self, _sess: &Session) {}
+ fn print(&self, _req: PrintRequest, _sess: &Session) {}
+ fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> {
+ vec![]
+ }
+ fn print_passes(&self) {}
+ fn print_version(&self) {}
+
+ /// If this plugin provides additional built-in targets, provide the one enabled
+ /// by the options here. Be careful: this is called *before* `init()` is called.
+ fn target_override(&self, _opts: &config::Options) -> Option<Target> {
+ None
+ }
+
+ /// The metadata loader used to load rlib and dylib metadata.
+ ///
+ /// Alternative codegen backends may want to use different rlib or dylib formats than the
+ /// default native static archives and dynamic libraries.
+ fn metadata_loader(&self) -> Box<MetadataLoaderDyn> {
+ Box::new(crate::back::metadata::DefaultMetadataLoader)
+ }
+
+ fn provide(&self, _providers: &mut Providers) {}
+ fn provide_extern(&self, _providers: &mut ExternProviders) {}
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any>;
+
+ /// This is called on the returned `Box<dyn Any>` from `codegen_crate`
+ ///
+ /// # Panics
+ ///
+ /// Panics when the passed `Box<dyn Any>` was not returned by `codegen_crate`.
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ sess: &Session,
+ outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;
+
+ /// This is called on the returned `Box<dyn Any>` from `join_codegen`
+ ///
+ /// # Panics
+ ///
+ /// Panics when the passed `Box<dyn Any>` was not returned by `join_codegen`.
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed>;
+}
+
+pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send + Sync {
+ fn codegen_allocator<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ ) -> Self::Module;
+ /// This generates the codegen unit and returns it along with
+ /// a `u64` giving an estimate of the unit's processing cost.
+ fn compile_codegen_unit(
+ &self,
+ tcx: TyCtxt<'_>,
+ cgu_name: Symbol,
+ ) -> (ModuleCodegen<Self::Module>, u64);
+ fn target_machine_factory(
+ &self,
+ sess: &Session,
+ opt_level: config::OptLevel,
+ target_features: &[String],
+ ) -> TargetMachineFactoryFn<Self>;
+ fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+ fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
+
+ fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::spawn(f)
+ }
+
+ fn spawn_named_thread<F, T>(
+ _time_trace: bool,
+ name: String,
+ f: F,
+ ) -> std::io::Result<std::thread::JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::Builder::new().name(name).spawn(f)
+ }
+}
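The three-stage lifecycle here (`codegen_crate`, then `join_codegen`, then `link`) threads backend-private state through a `Box<dyn Any>`. Below is a minimal standalone sketch of that hand-off, with a toy `OngoingCodegen` standing in for a real backend's state; the names are illustrative only.

    use std::any::Any;

    struct OngoingCodegen { modules: Vec<String> }

    // Stage 1: start codegen and hide the backend state behind `dyn Any`.
    fn codegen_crate() -> Box<dyn Any> {
        Box::new(OngoingCodegen { modules: vec!["cgu.0".to_string()] })
    }

    // Stage 2: downcast the opaque box back to our own type. Panicking on
    // a box we did not create matches the documented "Panics" contract.
    fn join_codegen(ongoing: Box<dyn Any>) -> Vec<String> {
        ongoing
            .downcast::<OngoingCodegen>()
            .expect("ongoing codegen was not returned by codegen_crate")
            .modules
    }

    fn main() {
        let modules = join_codegen(codegen_crate());
        // Stage 3 (`link`) would now combine the compiled modules.
        println!("linking {} module(s)", modules.len());
    }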
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
new file mode 100644
index 000000000..9f49749bb
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -0,0 +1,481 @@
+use super::abi::AbiBuilderMethods;
+use super::asm::AsmBuilderMethods;
+use super::consts::ConstMethods;
+use super::coverageinfo::CoverageInfoBuilderMethods;
+use super::debuginfo::DebugInfoBuilderMethods;
+use super::intrinsic::IntrinsicCallMethods;
+use super::misc::MiscMethods;
+use super::type_::{ArgAbiMethods, BaseTypeMethods};
+use super::{HasCodegen, StaticBuilderMethods};
+
+use crate::common::{
+ AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+};
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use crate::MemFlags;
+
+use rustc_apfloat::{ieee, Float, Round, Status};
+use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
+use rustc_target::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+pub enum OverflowOp {
+ Add,
+ Sub,
+ Mul,
+}
+
+pub trait BuilderMethods<'a, 'tcx>:
+ HasCodegen<'tcx>
+ + CoverageInfoBuilderMethods<'tcx>
+ + DebugInfoBuilderMethods
+ + ArgAbiMethods<'tcx>
+ + AbiBuilderMethods<'tcx>
+ + IntrinsicCallMethods<'tcx>
+ + AsmBuilderMethods<'tcx>
+ + StaticBuilderMethods
+ + HasParamEnv<'tcx>
+ + HasTargetSpec
+{
+ fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;
+
+ fn cx(&self) -> &Self::CodegenCx;
+ fn llbb(&self) -> Self::BasicBlock;
+
+ fn set_span(&mut self, span: Span);
+
+ // FIXME(eddyb) replace uses of this with `append_sibling_block`.
+ fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;
+
+ fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;
+
+ fn switch_to_block(&mut self, llbb: Self::BasicBlock);
+
+ fn ret_void(&mut self);
+ fn ret(&mut self, v: Self::Value);
+ fn br(&mut self, dest: Self::BasicBlock);
+ fn cond_br(
+ &mut self,
+ cond: Self::Value,
+ then_llbb: Self::BasicBlock,
+ else_llbb: Self::BasicBlock,
+ );
+ fn switch(
+ &mut self,
+ v: Self::Value,
+ else_llbb: Self::BasicBlock,
+ cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
+ );
+ fn invoke(
+ &mut self,
+ llty: Self::Type,
+ llfn: Self::Value,
+ args: &[Self::Value],
+ then: Self::BasicBlock,
+ catch: Self::BasicBlock,
+ funclet: Option<&Self::Funclet>,
+ ) -> Self::Value;
+ fn unreachable(&mut self);
+
+ fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn neg(&mut self, v: Self::Value) -> Self::Value;
+ fn fneg(&mut self, v: Self::Value) -> Self::Value;
+ fn not(&mut self, v: Self::Value) -> Self::Value;
+
+ fn checked_binop(
+ &mut self,
+ oop: OverflowOp,
+ ty: Ty<'_>,
+ lhs: Self::Value,
+ rhs: Self::Value,
+ ) -> (Self::Value, Self::Value);
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
+ fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
+ if let Abi::Scalar(scalar) = layout.abi {
+ self.to_immediate_scalar(val, scalar)
+ } else {
+ val
+ }
+ }
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
+
+ fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+ fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+ fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
+
+ fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+ fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+ fn atomic_load(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ ) -> Self::Value;
+ fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
+ -> OperandRef<'tcx, Self::Value>;
+
+ /// Called for `Rvalue::Repeat` when the element is neither a ZST nor optimizable using `memset`.
+ fn write_operand_repeatedly(
+ self,
+ elem: OperandRef<'tcx, Self::Value>,
+ count: u64,
+ dest: PlaceRef<'tcx, Self::Value>,
+ ) -> Self;
+
+ fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
+ fn nonnull_metadata(&mut self, load: Self::Value);
+
+ fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
+ fn store_with_flags(
+ &mut self,
+ val: Self::Value,
+ ptr: Self::Value,
+ align: Align,
+ flags: MemFlags,
+ ) -> Self::Value;
+ fn atomic_store(
+ &mut self,
+ val: Self::Value,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ );
+
+ fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+ fn inbounds_gep(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ indices: &[Self::Value],
+ ) -> Self::Value;
+ fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
+
+ fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
+ fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+ fn cast_float_to_int(
+ &mut self,
+ signed: bool,
+ x: Self::Value,
+ dest_ty: Self::Type,
+ ) -> Self::Value {
+ let in_ty = self.cx().val_ty(x);
+ let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
+ && self.cx().type_kind(in_ty) == TypeKind::Vector
+ {
+ (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
+ } else {
+ (in_ty, dest_ty)
+ };
+ assert!(matches!(self.cx().type_kind(float_ty), TypeKind::Float | TypeKind::Double));
+ assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);
+
+ if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
+ return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
+ }
+
+ let try_sat_result =
+ if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) };
+ if let Some(try_sat_result) = try_sat_result {
+ return try_sat_result;
+ }
+
+ let int_width = self.cx().int_width(int_ty);
+ let float_width = self.cx().float_width(float_ty);
+ // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
+ // destination integer type after rounding towards zero. This `undef` value can cause UB in
+ // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+ // Semantically, the mathematical value of the input is rounded towards zero to the next
+ // mathematical integer, and then the result is clamped into the range of the destination
+ // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+ // the destination integer type. NaN is mapped to 0.
+ //
+ // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
+ // a value representable in int_ty.
+ // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+ // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+ // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+ // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+ // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
+ // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+ // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+ // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+ // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
+ let int_max = |signed: bool, int_width: u64| -> u128 {
+ let shift_amount = 128 - int_width;
+ if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+ };
+ let int_min = |signed: bool, int_width: u64| -> i128 {
+ if signed { i128::MIN >> (128 - int_width) } else { 0 }
+ };
+
+ let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ // To implement saturation, we perform the following steps:
+ //
+ // 1. Cast x to an integer with fpto[su]i. This may result in undef.
+ // 2. Compare x to f_min and f_max, and use the comparison results to select:
+ // a) int_ty::MIN if x < f_min or x is NaN
+ // b) int_ty::MAX if x > f_max
+ // c) the result of fpto[su]i otherwise
+ // 3. If x is NaN, return 0, otherwise return the result of step 2.
+ //
+ // This keeps undef out of the final result, because values in range [f_min, f_max] by definition fit into the
+ // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
+ // undef does not introduce any non-determinism either.
+ // More importantly, the above procedure correctly implements saturating conversion.
+ // Proof (sketch):
+ // If x is NaN, 0 is returned by definition.
+ // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
+ // This yields three cases to consider:
+ // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+ // saturating conversion for inputs in that range.
+ // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
+ // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+ // than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
+ // is correct.
+ // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+ // int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+ // QED.
+
+ let float_bits_to_llval = |bx: &mut Self, bits| {
+ let bits_llval = match float_width {
+ 32 => bx.cx().const_u32(bits as u32),
+ 64 => bx.cx().const_u64(bits as u64),
+ n => bug!("unsupported float width {}", n),
+ };
+ bx.bitcast(bits_llval, float_ty)
+ };
+ let (f_min, f_max) = match float_width {
+ 32 => compute_clamp_bounds_single(signed, int_width),
+ 64 => compute_clamp_bounds_double(signed, int_width),
+ n => bug!("unsupported float width {}", n),
+ };
+ let f_min = float_bits_to_llval(self, f_min);
+ let f_max = float_bits_to_llval(self, f_max);
+ let int_max = self.cx().const_uint_big(int_ty, int_max(signed, int_width));
+ let int_min = self.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
+ let zero = self.cx().const_uint(int_ty, 0);
+
+ // If we're working with vectors, constants must be "splatted": the constant is duplicated
+ // into each lane of the vector. The algorithm stays the same, we are just using the
+ // same constant across all lanes.
+ let maybe_splat = |bx: &mut Self, val| {
+ if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
+ bx.vector_splat(bx.vector_length(dest_ty), val)
+ } else {
+ val
+ }
+ };
+ let f_min = maybe_splat(self, f_min);
+ let f_max = maybe_splat(self, f_max);
+ let int_max = maybe_splat(self, int_max);
+ let int_min = maybe_splat(self, int_min);
+ let zero = maybe_splat(self, zero);
+
+ // Step 1 ...
+ let fptosui_result = if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
+ let less_or_nan = self.fcmp(RealPredicate::RealULT, x, f_min);
+ let greater = self.fcmp(RealPredicate::RealOGT, x, f_max);
+
+ // Step 2: We use two comparisons and two selects, with %s1 being the
+ // result:
+ // %less_or_nan = fcmp ult %x, %f_min
+ // %greater = fcmp ogt %x, %f_max
+ // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+ // %s1 = select %greater, int_ty::MAX, %s0
+ // Note that %less_or_nan uses an *unordered* comparison. This
+ // comparison is true if the operands are not comparable (i.e., if x is
+ // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+ // x is NaN.
+ //
+ // Performance note: Unordered comparison can be lowered to a "flipped"
+ // comparison and a negation, and the negation can be merged into the
+ // select. Therefore, it is not necessarily any more expensive than an
+ // ordered ("normal") comparison. Whether these optimizations will be
+ // performed is ultimately up to the backend, but at least x86 does
+ // perform them.
+ let s0 = self.select(less_or_nan, int_min, fptosui_result);
+ let s1 = self.select(greater, int_max, s0);
+
+ // Step 3: NaN replacement.
+ // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
+ // Therefore we only need to execute this step for signed integer types.
+ if signed {
+ // LLVM has no isNaN predicate, so we use (x == x) instead
+ let cmp = self.fcmp(RealPredicate::RealOEQ, x, x);
+ self.select(cmp, s1, zero)
+ } else {
+ s1
+ }
+ }
+
+ fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+
+ fn memcpy(
+ &mut self,
+ dst: Self::Value,
+ dst_align: Align,
+ src: Self::Value,
+ src_align: Align,
+ size: Self::Value,
+ flags: MemFlags,
+ );
+ fn memmove(
+ &mut self,
+ dst: Self::Value,
+ dst_align: Align,
+ src: Self::Value,
+ src_align: Align,
+ size: Self::Value,
+ flags: MemFlags,
+ );
+ fn memset(
+ &mut self,
+ ptr: Self::Value,
+ fill_byte: Self::Value,
+ size: Self::Value,
+ align: Align,
+ flags: MemFlags,
+ );
+
+ fn select(
+ &mut self,
+ cond: Self::Value,
+ then_val: Self::Value,
+ else_val: Self::Value,
+ ) -> Self::Value;
+
+ fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
+ fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
+ fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
+ fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
+ fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
+
+ fn set_personality_fn(&mut self, personality: Self::Value);
+
+ // These are used by everyone except msvc
+ fn cleanup_landing_pad(&mut self, ty: Self::Type, pers_fn: Self::Value) -> Self::Value;
+ fn resume(&mut self, exn: Self::Value);
+
+ // These are used only by msvc
+ fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
+ fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
+ fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
+ fn catch_switch(
+ &mut self,
+ parent: Option<Self::Value>,
+ unwind: Option<Self::BasicBlock>,
+ handlers: &[Self::BasicBlock],
+ ) -> Self::Value;
+
+ fn atomic_cmpxchg(
+ &mut self,
+ dst: Self::Value,
+ cmp: Self::Value,
+ src: Self::Value,
+ order: AtomicOrdering,
+ failure_order: AtomicOrdering,
+ weak: bool,
+ ) -> Self::Value;
+ fn atomic_rmw(
+ &mut self,
+ op: AtomicRmwBinOp,
+ dst: Self::Value,
+ src: Self::Value,
+ order: AtomicOrdering,
+ ) -> Self::Value;
+ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
+ fn set_invariant_load(&mut self, load: Self::Value);
+
+ /// Called for `StorageLive`
+ fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
+
+ /// Called for `StorageDead`
+ fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
+
+ fn instrprof_increment(
+ &mut self,
+ fn_name: Self::Value,
+ hash: Self::Value,
+ num_counters: Self::Value,
+ index: Self::Value,
+ );
+
+ fn call(
+ &mut self,
+ llty: Self::Type,
+ llfn: Self::Value,
+ args: &[Self::Value],
+ funclet: Option<&Self::Funclet>,
+ ) -> Self::Value;
+ fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+ fn do_not_inline(&mut self, llret: Self::Value);
+}
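The saturating procedure that `cast_float_to_int` derives above is the code path behind Rust's own `as` casts from float to integer (saturating since Rust 1.45), so its observable behavior can be checked from plain Rust:

    fn main() {
        // NaN maps to 0 (step 3 of the comment above).
        assert_eq!(f32::NAN as i32, 0);
        // Infinities clamp to the destination type's extremes.
        assert_eq!(f32::INFINITY as i32, i32::MAX);
        assert_eq!(f32::NEG_INFINITY as i32, i32::MIN);
        // Finite values beyond f_max/f_min clamp as well.
        assert_eq!(1e9_f32 as u8, u8::MAX);
        assert_eq!((-1e9_f32) as u8, 0);
        // In-range values round toward zero (the plain fpto[su]i result).
        assert_eq!((-1.9_f32) as i32, -1);
        println!("saturating float-to-int casts behave as derived above");
    }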
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
new file mode 100644
index 000000000..8a91d4735
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -0,0 +1,41 @@
+use super::BackendTypes;
+use crate::mir::place::PlaceRef;
+use rustc_middle::mir::interpret::{ConstAllocation, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{self, Size};
+
+pub trait ConstMethods<'tcx>: BackendTypes {
+ // Constant constructors
+ fn const_null(&self, t: Self::Type) -> Self::Value;
+ fn const_undef(&self, t: Self::Type) -> Self::Value;
+ fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
+ fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
+ fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
+ fn const_bool(&self, val: bool) -> Self::Value;
+ fn const_i16(&self, i: i16) -> Self::Value;
+ fn const_i32(&self, i: i32) -> Self::Value;
+ fn const_u32(&self, i: u32) -> Self::Value;
+ fn const_u64(&self, i: u64) -> Self::Value;
+ fn const_usize(&self, i: u64) -> Self::Value;
+ fn const_u8(&self, i: u8) -> Self::Value;
+ fn const_real(&self, t: Self::Type, val: f64) -> Self::Value;
+
+ fn const_str(&self, s: &str) -> (Self::Value, Self::Value);
+ fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
+
+ fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>;
+ fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value;
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
+ fn zst_to_backend(&self, llty: Self::Type) -> Self::Value;
+ fn from_const_alloc(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ alloc: ConstAllocation<'tcx>,
+ offset: Size,
+ ) -> PlaceRef<'tcx, Self::Value>;
+
+ fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
new file mode 100644
index 000000000..e77201cf0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
@@ -0,0 +1,57 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::ty::Instance;
+
+pub trait CoverageInfoMethods<'tcx>: BackendTypes {
+ fn coverageinfo_finalize(&self);
+
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented, that gives LLVM coverage tools a
+ /// function definition it needs in order to resolve coverage map references
+ /// to unused functions. This is necessary so unused functions will appear
+ /// as uncovered (coverage execution count `0`) in LLVM coverage reports.
+ fn define_unused_fn(&self, def_id: DefId);
+
+ /// For LLVM codegen, returns a function-specific `Value` for a global
+ /// string, to hold the function name passed to LLVM intrinsic
+ /// `instrprof.increment()`. The `Value` is only created once per instance.
+ /// Multiple invocations with the same instance return the same `Value`.
+ fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
+}
+
+pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
+ /// Returns true if the function source hash was added to the coverage map (even if it had
+ /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
+ /// not enabled (a coverage map is not being generated).
+ fn set_function_source_hash(
+ &mut self,
+ instance: Instance<'tcx>,
+ function_source_hash: u64,
+ ) -> bool;
+
+ /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
+ fn add_coverage_counter(
+ &mut self,
+ instance: Instance<'tcx>,
+ index: CounterValueReference,
+ region: CodeRegion,
+ ) -> bool;
+
+ /// Returns true if the expression was added to the coverage map; false if
+ /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
+ fn add_coverage_counter_expression(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) -> bool;
+
+ /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
+ fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool;
+}
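Counter expressions exist so a count can be derived from other counters instead of instrumenting yet another region; the operands name a `lhs`, an `Op`, and a `rhs`. Here is a toy standalone model of that derivation (the evaluator and names are illustrative, not rustc's real coverage machinery):

    #[derive(Clone, Copy)]
    enum Op { Add, Subtract }

    // Toy evaluator: operands reference counter slots by index.
    fn eval_expression(counters: &[u64], lhs: usize, op: Op, rhs: usize) -> u64 {
        match op {
            Op::Add => counters[lhs] + counters[rhs],
            Op::Subtract => counters[lhs].saturating_sub(counters[rhs]),
        }
    }

    fn main() {
        // One physical counter at function entry and one on the `then` edge;
        // the `else` count is derived by subtraction, saving an increment.
        let counters = [10, 7]; // [entry, then]
        assert_eq!(eval_expression(&counters, 0, Op::Subtract, 1), 3);
    }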
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
new file mode 100644
index 000000000..f310789d1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -0,0 +1,79 @@
+use super::BackendTypes;
+use crate::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::Size;
+
+pub trait DebugInfoMethods<'tcx>: BackendTypes {
+ fn create_vtable_debuginfo(
+ &self,
+ ty: Ty<'tcx>,
+ trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
+ vtable: Self::Value,
+ );
+
+ /// Creates the function-specific debug context.
+ ///
+ /// Returns the `FunctionDebugContext` for the function, which holds the state
+ /// needed for debug info creation, if debug info is enabled.
+ fn create_function_debug_context(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ llfn: Self::Function,
+ mir: &mir::Body<'tcx>,
+ ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_scope_fn(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ maybe_definition_llfn: Option<Self::Function>,
+ ) -> Self::DIScope;
+
+ fn dbg_loc(
+ &self,
+ scope: Self::DIScope,
+ inlined_at: Option<Self::DILocation>,
+ span: Span,
+ ) -> Self::DILocation;
+
+ fn extend_scope_to_file(
+ &self,
+ scope_metadata: Self::DIScope,
+ file: &SourceFile,
+ ) -> Self::DIScope;
+ fn debuginfo_finalize(&self);
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn create_dbg_var(
+ &self,
+ variable_name: Symbol,
+ variable_type: Ty<'tcx>,
+ scope_metadata: Self::DIScope,
+ variable_kind: VariableKind,
+ span: Span,
+ ) -> Self::DIVariable;
+}
+
+pub trait DebugInfoBuilderMethods: BackendTypes {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(
+ &mut self,
+ dbg_var: Self::DIVariable,
+ dbg_loc: Self::DILocation,
+ variable_alloca: Self::Value,
+ direct_offset: Size,
+ // NB: each offset implies a deref (i.e. they're steps in a pointer chain).
+ indirect_offsets: &[Size],
+ );
+ fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation);
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
+ fn set_var_name(&mut self, value: Self::Value, name: &str);
+}
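As the `NB` above says, `direct_offset` is added once to the variable's alloca, and each entry in `indirect_offsets` implies one dereference followed by an add. A standalone sketch of that address computation over a toy flat memory, where indices play the role of addresses:

    // Toy flat memory: a cell holds either data or a "pointer" (an index).
    fn resolve(memory: &[usize], alloca: usize, direct: usize, indirect: &[usize]) -> usize {
        let mut addr = alloca + direct;
        for &offset in indirect {
            addr = memory[addr] + offset; // each step: deref, then add offset
        }
        addr
    }

    fn main() {
        // memory[2] holds a pointer to address 5; the variable lives at
        // *(alloca + 2) + 1, i.e. address 6.
        let memory = [0, 0, 5, 0, 0, 0, 0];
        assert_eq!(resolve(&memory, 0, 2, &[1]), 6);
    }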
diff --git a/compiler/rustc_codegen_ssa/src/traits/declare.rs b/compiler/rustc_codegen_ssa/src/traits/declare.rs
new file mode 100644
index 000000000..655afcd17
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/declare.rs
@@ -0,0 +1,21 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::Instance;
+
+pub trait PreDefineMethods<'tcx>: BackendTypes {
+ fn predefine_static(
+ &self,
+ def_id: DefId,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ );
+ fn predefine_fn(
+ &self,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ );
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
new file mode 100644
index 000000000..7755e6793
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -0,0 +1,39 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use rustc_target::abi::call::FnAbi;
+
+pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
+ /// Remember to add all intrinsics here, in `compiler/rustc_typeck/src/check/mod.rs`,
+ /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
+ /// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
+ fn codegen_intrinsic_call(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, Self::Value>],
+ llresult: Self::Value,
+ span: Span,
+ );
+
+ fn abort(&mut self);
+ fn assume(&mut self, val: Self::Value);
+ fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
+ /// Trait method used to test whether a given pointer is associated with a type identifier.
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value;
+ /// Trait method used to load a function while testing if it is associated with a type
+ /// identifier.
+ fn type_checked_load(
+ &mut self,
+ llvtable: Self::Value,
+ vtable_byte_offset: u64,
+ typeid: Self::Value,
+ ) -> Self::Value;
+ /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
+ /// Rust-defined C-variadic functions.
+ fn va_start(&mut self, val: Self::Value) -> Self::Value;
+ /// Trait method used to inject `va_end` on the "spoofed" `VaListImpl` before
+ /// Rust-defined C-variadic functions return.
+ fn va_end(&mut self, val: Self::Value) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/misc.rs b/compiler/rustc_codegen_ssa/src/traits/misc.rs
new file mode 100644
index 000000000..4266e42ec
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/misc.rs
@@ -0,0 +1,26 @@
+use super::BackendTypes;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_session::Session;
+use std::cell::RefCell;
+
+pub trait MiscMethods<'tcx>: BackendTypes {
+ fn vtables(
+ &self,
+ ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Self::Value>>;
+ fn check_overflow(&self) -> bool;
+ fn get_fn(&self, instance: Instance<'tcx>) -> Self::Function;
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> Self::Value;
+ fn eh_personality(&self) -> Self::Value;
+ fn sess(&self) -> &Session;
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>;
+ fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+ fn compiler_used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+ fn set_frame_pointer_type(&self, llfn: Self::Function);
+ fn apply_target_cpu_attr(&self, llfn: Self::Function);
+ fn create_used_variable(&self);
+ fn create_compiler_used_variable(&self);
+ /// Declares the extern "C" main function for the entry point. Returns None if the symbol already exists.
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function>;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
new file mode 100644
index 000000000..782fdadbf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -0,0 +1,102 @@
+//! Interface of a Rust codegen backend
+//!
+//! This crate defines all the traits that have to be implemented by a codegen backend in order to
+//! use the backend-agnostic codegen code in `rustc_codegen_ssa`.
+//!
+//! The interface is designed around two backend-specific data structures, the codegen context and
+//! the builder. The codegen context is supposed to be read-only after its creation and during the
+//! actual codegen, while the builder stores the information about the function during codegen and
+//! is used to produce the instructions of the backend IR.
+//!
+//! Finally, a third structure, the backend itself, has to implement methods related to how codegen
+//! information is passed to the backend, especially for asynchronous compilation.
+//!
+//! The traits contain associated types that are backend-specific, such as the backend's value or
+//! basic blocks.
+
+mod abi;
+mod asm;
+mod backend;
+mod builder;
+mod consts;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+mod misc;
+mod statics;
+mod type_;
+mod write;
+
+pub use self::abi::AbiBuilderMethods;
+pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
+pub use self::builder::{BuilderMethods, OverflowOp};
+pub use self::consts::ConstMethods;
+pub use self::coverageinfo::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
+pub use self::declare::PreDefineMethods;
+pub use self::intrinsic::IntrinsicCallMethods;
+pub use self::misc::MiscMethods;
+pub use self::statics::{StaticBuilderMethods, StaticMethods};
+pub use self::type_::{
+ ArgAbiMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMembershipMethods,
+ TypeMethods,
+};
+pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt};
+use rustc_target::spec::HasTargetSpec;
+
+use std::fmt;
+
+pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
+impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}
+
+pub trait CodegenMethods<'tcx>:
+ Backend<'tcx>
+ + TypeMethods<'tcx>
+ + MiscMethods<'tcx>
+ + ConstMethods<'tcx>
+ + StaticMethods
+ + CoverageInfoMethods<'tcx>
+ + DebugInfoMethods<'tcx>
+ + AsmMethods<'tcx>
+ + PreDefineMethods<'tcx>
+ + HasParamEnv<'tcx>
+ + HasTyCtxt<'tcx>
+ + HasTargetSpec
+{
+}
+
+impl<'tcx, T> CodegenMethods<'tcx> for T where
+ Self: Backend<'tcx>
+ + TypeMethods<'tcx>
+ + MiscMethods<'tcx>
+ + ConstMethods<'tcx>
+ + StaticMethods
+ + CoverageInfoMethods<'tcx>
+ + DebugInfoMethods<'tcx>
+ + AsmMethods<'tcx>
+ + PreDefineMethods<'tcx>
+ + HasParamEnv<'tcx>
+ + HasTyCtxt<'tcx>
+ + HasTargetSpec
+{
+}
+
+pub trait HasCodegen<'tcx>:
+ Backend<'tcx> + std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
+{
+ type CodegenCx: CodegenMethods<'tcx>
+ + BackendTypes<
+ Value = Self::Value,
+ Function = Self::Function,
+ BasicBlock = Self::BasicBlock,
+ Type = Self::Type,
+ Funclet = Self::Funclet,
+ DIScope = Self::DIScope,
+ DILocation = Self::DILocation,
+ DIVariable = Self::DIVariable,
+ >;
+}
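The `Deref` bound on `HasCodegen` is what lets builder code call context methods directly on the builder, without threading a `&CodegenCx` through every call. A minimal standalone sketch of the pattern, with toy `Cx` and `Bx` types standing in for a backend's context and builder:

    use std::ops::Deref;

    struct Cx;
    impl Cx {
        fn type_i8(&self) -> &'static str { "i8" }
    }

    // The builder borrows the context and derefs to it, so every Cx
    // method is callable directly on the builder.
    struct Bx<'a> { cx: &'a Cx }

    impl<'a> Deref for Bx<'a> {
        type Target = Cx;
        fn deref(&self) -> &Cx { self.cx }
    }

    fn main() {
        let cx = Cx;
        let bx = Bx { cx: &cx };
        // `type_i8` is defined on Cx, reached through Deref on the builder.
        println!("{}", bx.type_i8());
    }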
diff --git a/compiler/rustc_codegen_ssa/src/traits/statics.rs b/compiler/rustc_codegen_ssa/src/traits/statics.rs
new file mode 100644
index 000000000..413d31bb9
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/statics.rs
@@ -0,0 +1,24 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_target::abi::Align;
+
+pub trait StaticMethods: BackendTypes {
+ fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool);
+
+ /// Mark the given global value as "used", to prevent the compiler and linker from potentially
+ /// removing a static variable that may otherwise appear unused.
+ fn add_used_global(&self, global: Self::Value);
+
+ /// Same as add_used_global(), but only prevent the compiler from potentially removing an
+ /// otherwise unused symbol. The linker is still permitted to drop it.
+ ///
+ /// This corresponds to the documented semantics of the `#[used]` attribute, although
+ /// on some targets (non-ELF), we may use `add_used_global` for `#[used]` statics
+ /// instead.
+ fn add_compiler_used_global(&self, global: Self::Value);
+}
+
+pub trait StaticBuilderMethods: BackendTypes {
+ fn get_static(&mut self, def_id: DefId) -> Self::Value;
+}
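The split between the two methods is visible at the source level: `#[used]` only asks the compiler to keep the static, which is the `add_compiler_used_global` behavior, and the linker may still discard it. A small example of the attribute these hooks implement (on ELF targets this is expected to lower via `add_compiler_used_global`):

    // Kept by the compiler even though nothing references it; the linker
    // remains free to garbage-collect the containing section.
    #[used]
    static BUILD_TAG: [u8; 4] = *b"tag!";

    fn main() {}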
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
new file mode 100644
index 000000000..8158e8dd0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -0,0 +1,151 @@
+use super::misc::MiscMethods;
+use super::Backend;
+use super::HasCodegen;
+use crate::common::TypeKind;
+use crate::mir::place::PlaceRef;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::call::{ArgAbi, CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Integer};
+
+// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
+// `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves.
+pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
+ fn type_i1(&self) -> Self::Type;
+ fn type_i8(&self) -> Self::Type;
+ fn type_i16(&self) -> Self::Type;
+ fn type_i32(&self) -> Self::Type;
+ fn type_i64(&self) -> Self::Type;
+ fn type_i128(&self) -> Self::Type;
+ fn type_isize(&self) -> Self::Type;
+
+ fn type_f32(&self) -> Self::Type;
+ fn type_f64(&self) -> Self::Type;
+
+ fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+ fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+ fn type_kind(&self, ty: Self::Type) -> TypeKind;
+ fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
+ fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+ fn element_type(&self, ty: Self::Type) -> Self::Type;
+
+ /// Returns the number of elements in `self` if it is an LLVM vector type.
+ fn vector_length(&self, ty: Self::Type) -> usize;
+
+ fn float_width(&self, ty: Self::Type) -> usize;
+
+ /// Retrieves the bit width of the integer type `self`.
+ fn int_width(&self, ty: Self::Type) -> u64;
+
+ fn val_ty(&self, v: Self::Value) -> Self::Type;
+}
+
+pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
+ fn type_i8p(&self) -> Self::Type {
+ self.type_i8p_ext(AddressSpace::DATA)
+ }
+
+ fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
+ self.type_ptr_to_ext(self.type_i8(), address_space)
+ }
+
+ fn type_int(&self) -> Self::Type {
+ match &self.sess().target.c_int_width[..] {
+ "16" => self.type_i16(),
+ "32" => self.type_i32(),
+ "64" => self.type_i64(),
+ width => bug!("Unsupported c_int_width: {}", width),
+ }
+ }
+
+ fn type_from_integer(&self, i: Integer) -> Self::Type {
+ use Integer::*;
+ match i {
+ I8 => self.type_i8(),
+ I16 => self.type_i16(),
+ I32 => self.type_i32(),
+ I64 => self.type_i64(),
+ I128 => self.type_i128(),
+ }
+ }
+
+ fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+ ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_freeze(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
+ let param_env = ty::ParamEnv::reveal_all();
+ if ty.is_sized(self.tcx().at(DUMMY_SP), param_env) {
+ return false;
+ }
+
+ let tail = self.tcx().struct_tail_erasing_lifetimes(ty, param_env);
+ match tail.kind() {
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+ _ => bug!("unexpected unsized tail: {:?}", tail),
+ }
+ }
+}
+
+impl<'tcx, T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
+
+pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+ fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+ fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool;
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool;
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64;
+ fn scalar_pair_element_backend_type(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> Self::Type;
+}
+
+// For backends that support CFI using type membership (i.e., testing whether a given pointer is
+// associated with a type identifier).
+pub trait TypeMembershipMethods<'tcx>: Backend<'tcx> {
+ fn set_type_metadata(&self, function: Self::Function, typeid: String);
+ fn typeid_metadata(&self, typeid: String) -> Self::Value;
+}
+
+pub trait ArgAbiMethods<'tcx>: HasCodegen<'tcx> {
+ fn store_fn_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, Self::Value>,
+ );
+ fn store_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ val: Self::Value,
+ dst: PlaceRef<'tcx, Self::Value>,
+ );
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+}
+
+pub trait TypeMethods<'tcx>:
+ DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> + TypeMembershipMethods<'tcx>
+{
+}
+
+impl<'tcx, T> TypeMethods<'tcx> for T where
+ Self: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> + TypeMembershipMethods<'tcx>
+{
+}
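`type_has_metadata` is effectively asking whether a pointer to the type is a fat pointer. The classification it computes (`str`, slices, and trait objects carry metadata; `Foreign` and sized tails do not) can be observed from pointer sizes in plain Rust:

    use std::mem::size_of;

    fn main() {
        // Thin pointer: sized pointee, no metadata.
        assert_eq!(size_of::<&u64>(), size_of::<usize>());
        // Fat pointers: the unsized tail carries a length or a vtable.
        assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
        assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>());
    }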
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
new file mode 100644
index 000000000..e54ec34f1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -0,0 +1,68 @@
+use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use crate::{CompiledModule, ModuleCodegen};
+
+use rustc_errors::{FatalError, Handler};
+use rustc_middle::dep_graph::WorkProduct;
+
+pub trait WriteBackendMethods: 'static + Sized + Clone {
+ type Module: Send + Sync;
+ type TargetMachine;
+ type ModuleBuffer: ModuleBufferMethods;
+ type Context: ?Sized;
+ type ThinData: Send + Sync;
+ type ThinBuffer: ThinBufferMethods;
+
+ /// Merges all modules into `main_module` and returns it.
+ fn run_link(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ modules: Vec<ModuleCodegen<Self::Module>>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+ /// Performs fat LTO by merging all modules into a single one and returning it
+ /// for further optimization.
+ fn run_fat_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<FatLTOInput<Self>>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<LtoModuleCodegen<Self>, FatalError>;
+ /// Performs thin LTO by running the necessary global analysis and returns two
+ /// lists: the modules that need optimization, and the modules that can simply
+ /// be copied over from the incremental compilation cache.
+ fn run_thin_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<(String, Self::ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+ fn print_pass_timings(&self);
+ unsafe fn optimize(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<(), FatalError>;
+ fn optimize_fat(
+ cgcx: &CodegenContext<Self>,
+ llmod: &mut ModuleCodegen<Self::Module>,
+ ) -> Result<(), FatalError>;
+ unsafe fn optimize_thin(
+ cgcx: &CodegenContext<Self>,
+ thin: ThinModule<Self>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+ unsafe fn codegen(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<CompiledModule, FatalError>;
+ fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer);
+ fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer);
+}
+
+pub trait ThinBufferMethods: Send + Sync {
+ fn data(&self) -> &[u8];
+}
+
+pub trait ModuleBufferMethods: Send + Sync {
+ fn data(&self) -> &[u8];
+}
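Taken together these hooks form the thin-LTO pipeline: each module is serialized to a `ThinBuffer` by `prepare_thin`, and `run_thin_lto` must split its inputs into modules to re-optimize and modules to reuse from the incremental cache. A toy standalone sketch of that two-list contract (the cache check is illustrative, not rustc's real invalidation logic):

    struct ThinBuffer { name: String }

    // Toy partition: anything found in the cache is "copied over",
    // everything else goes to the needs-optimization list.
    fn partition(modules: Vec<ThinBuffer>, cached: &[&str]) -> (Vec<ThinBuffer>, Vec<String>) {
        let mut needs_opt = Vec::new();
        let mut reused = Vec::new();
        for module in modules {
            if cached.contains(&module.name.as_str()) {
                reused.push(module.name);
            } else {
                needs_opt.push(module);
            }
        }
        (needs_opt, reused)
    }

    fn main() {
        let modules = vec![
            ThinBuffer { name: "cgu.0".to_string() },
            ThinBuffer { name: "cgu.1".to_string() },
        ];
        let (needs_opt, reused) = partition(modules, &["cgu.1"]);
        println!("optimize {} module(s), reuse {}", needs_opt.len(), reused.len());
    }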