author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a (patch)
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_codegen_gcc/src
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
-rw-r--r--  compiler/rustc_codegen_gcc/src/abi.rs              |   95
-rw-r--r--  compiler/rustc_codegen_gcc/src/allocator.rs        |  109
-rw-r--r--  compiler/rustc_codegen_gcc/src/asm.rs              |    4
-rw-r--r--  compiler/rustc_codegen_gcc/src/attributes.rs       |  105
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/lto.rs         |  341
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/mod.rs         |    1
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/write.rs       |  102
-rw-r--r--  compiler/rustc_codegen_gcc/src/base.rs             |   76
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs          |   36
-rw-r--r--  compiler/rustc_codegen_gcc/src/common.rs           |   16
-rw-r--r--  compiler/rustc_codegen_gcc/src/context.rs          |   68
-rw-r--r--  compiler/rustc_codegen_gcc/src/declare.rs          |   57
-rw-r--r--  compiler/rustc_codegen_gcc/src/errors.rs           |   87
-rw-r--r--  compiler/rustc_codegen_gcc/src/gcc_util.rs         |  230
-rw-r--r--  compiler/rustc_codegen_gcc/src/int.rs              |  246
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/archs.rs  | 1569
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs   |   44
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs    |  151
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs              |  196
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_.rs            |   14
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_of.rs          |   28
21 files changed, 3083 insertions, 492 deletions
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index a49530ebb..f601cd95f 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -1,9 +1,13 @@
+#[cfg(feature = "master")]
+use gccjit::FnAttribute;
use gccjit::{ToLValue, ToRValue, Type};
use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::bug;
use rustc_middle::ty::Ty;
-use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
+#[cfg(feature = "master")]
+use rustc_session::config;
+use rustc_target::abi::call::{ArgAttributes, CastTarget, FnAbi, PassMode, Reg, RegKind};
use crate::builder::Builder;
use crate::context::CodegenCx;
@@ -94,14 +98,23 @@ impl GccType for Reg {
}
}
+pub struct FnAbiGcc<'gcc> {
+ pub return_type: Type<'gcc>,
+ pub arguments_type: Vec<Type<'gcc>>,
+ pub is_c_variadic: bool,
+ pub on_stack_param_indices: FxHashSet<usize>,
+ #[cfg(feature = "master")]
+ pub fn_attributes: Vec<FnAttribute<'gcc>>,
+}
+
pub trait FnAbiGccExt<'gcc, 'tcx> {
// TODO(antoyo): return a function pointer type instead?
- fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc>;
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
}
impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
- fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc> {
let mut on_stack_param_indices = FxHashSet::default();
// This capacity calculation is approximate.
@@ -109,7 +122,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }
);
- let return_ty =
+ let return_type =
match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
@@ -119,41 +132,89 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
cx.type_void()
}
};
+ #[cfg(feature = "master")]
+ let mut non_null_args = Vec::new();
+
+ #[cfg(feature = "master")]
+ let mut apply_attrs = |mut ty: Type<'gcc>, attrs: &ArgAttributes, arg_index: usize| {
+ if cx.sess().opts.optimize == config::OptLevel::No {
+ return ty;
+ }
+ if attrs.regular.contains(rustc_target::abi::call::ArgAttribute::NoAlias) {
+ ty = ty.make_restrict()
+ }
+ if attrs.regular.contains(rustc_target::abi::call::ArgAttribute::NonNull) {
+ non_null_args.push(arg_index as i32 + 1);
+ }
+ ty
+ };
+ #[cfg(not(feature = "master"))]
+ let apply_attrs = |ty: Type<'gcc>, _attrs: &ArgAttributes, _arg_index: usize| {
+ ty
+ };
for arg in self.args.iter() {
let arg_ty = match arg.mode {
PassMode::Ignore => continue,
- PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
- PassMode::Pair(..) => {
- argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0));
- argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1));
+ PassMode::Pair(a, b) => {
+ let arg_pos = argument_tys.len();
+ argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 0), &a, arg_pos));
+ argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 1), &b, arg_pos + 1));
continue;
}
- PassMode::Indirect { meta_attrs: Some(_), .. } => {
- unimplemented!();
- }
PassMode::Cast { ref cast, pad_i32 } => {
// add padding
if pad_i32 {
argument_tys.push(Reg::i32().gcc_type(cx));
}
- cast.gcc_type(cx)
+ let ty = cast.gcc_type(cx);
+ apply_attrs(ty, &cast.attrs, argument_tys.len())
}
- PassMode::Indirect { meta_attrs: None, on_stack: true, .. } => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: true } => {
+ // This is a "byval" argument, so we don't apply the `restrict` attribute to it.
on_stack_param_indices.insert(argument_tys.len());
arg.memory_ty(cx)
},
- PassMode::Indirect { meta_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+ PassMode::Direct(attrs) => apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs, argument_tys.len()),
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
+ apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len())
+ }
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
+ assert!(!on_stack);
+ let ty = apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len());
+ apply_attrs(ty, &meta_attrs, argument_tys.len())
+ }
};
argument_tys.push(arg_ty);
}
- (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
+ #[cfg(feature = "master")]
+ let fn_attrs = if non_null_args.is_empty() {
+ Vec::new()
+ } else {
+ vec![FnAttribute::NonNull(non_null_args)]
+ };
+
+ FnAbiGcc {
+ return_type,
+ arguments_type: argument_tys,
+ is_c_variadic: self.c_variadic,
+ on_stack_param_indices,
+ #[cfg(feature = "master")]
+ fn_attributes: fn_attrs,
+ }
}
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
- let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
- let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
+ // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
+ let FnAbiGcc {
+ return_type,
+ arguments_type,
+ is_c_variadic,
+ on_stack_param_indices,
+ ..
+ } = self.gcc_type(cx);
+ let pointer_type = cx.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic);
cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
pointer_type
}
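
The net effect of this abi.rs change is to replace the positional 4-tuple returned by gcc_type() with the named FnAbiGcc struct, so callers such as ptr_to_gcc_type can destructure by field name and skip what they do not need (including the cfg-gated fn_attributes field). A minimal sketch of the pattern, using hypothetical stand-in types rather than the real gccjit ones:

struct FnAbiGcc {
    return_type: &'static str,
    arguments_type: Vec<&'static str>,
    is_c_variadic: bool,
}

fn gcc_type() -> FnAbiGcc {
    FnAbiGcc {
        return_type: "void",
        arguments_type: vec!["int32_t", "int8_t *"],
        is_c_variadic: false,
    }
}

fn main() {
    // Destructure by name and ignore the rest with `..`, as ptr_to_gcc_type
    // does above; a tuple would force positional, order-sensitive bindings.
    let FnAbiGcc { return_type, arguments_type, .. } = gcc_type();
    println!("fn({}) -> {}", arguments_type.join(", "), return_type);
}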
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
index edd7ab722..c8c098e29 100644
--- a/compiler/rustc_codegen_gcc/src/allocator.rs
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -1,6 +1,6 @@
#[cfg(feature="master")]
use gccjit::FnAttribute;
-use gccjit::{FunctionType, GlobalKind, ToRValue};
+use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type};
use rustc_ast::expand::allocator::{
alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy,
ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE,
@@ -22,7 +22,6 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
};
let i8 = context.new_type::<i8>();
let i8p = i8.make_pointer();
- let void = context.new_type::<()>();
if kind == AllocatorKind::Default {
for method in ALLOCATOR_METHODS {
@@ -47,67 +46,62 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
panic!("invalid allocator output")
}
};
- let name = global_fn_name(method.name);
+ let from_name = global_fn_name(method.name);
+ let to_name = default_fn_name(method.name);
- let args: Vec<_> = types.iter().enumerate()
- .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
- .collect();
- let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
+ create_wrapper_function(tcx, context, &from_name, &to_name, &types, output);
+ }
+ }
- if tcx.sess.target.options.default_hidden_visibility {
- #[cfg(feature="master")]
- func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
- }
- if tcx.sess.must_emit_unwind_tables() {
- // TODO(antoyo): emit unwind tables.
- }
+ // FIXME(bjorn3): Add noreturn attribute
+ create_wrapper_function(
+ tcx,
+ context,
+ "__rust_alloc_error_handler",
+ &alloc_error_handler_name(alloc_error_handler_kind),
+ &[usize, usize],
+ None,
+ );
- let callee = default_fn_name(method.name);
- let args: Vec<_> = types.iter().enumerate()
- .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
- .collect();
- let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
- #[cfg(feature="master")]
- callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
-
- let block = func.new_block("entry");
-
- let args = args
- .iter()
- .enumerate()
- .map(|(i, _)| func.get_param(i as i32).to_rvalue())
- .collect::<Vec<_>>();
- let ret = context.new_call(None, callee, &args);
- //llvm::LLVMSetTailCall(ret, True);
- if output.is_some() {
- block.end_with_return(None, ret);
- }
- else {
- block.end_with_void_return(None);
- }
+ let name = OomStrategy::SYMBOL.to_string();
+ let global = context.new_global(None, GlobalKind::Exported, i8, name);
+ let value = tcx.sess.opts.unstable_opts.oom.should_panic();
+ let value = context.new_rvalue_from_int(i8, value as i32);
+ global.global_set_initializer_rvalue(value);
- // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
- // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
- }
- }
+ let name = NO_ALLOC_SHIM_IS_UNSTABLE.to_string();
+ let global = context.new_global(None, GlobalKind::Exported, i8, name);
+ let value = context.new_rvalue_from_int(i8, 0);
+ global.global_set_initializer_rvalue(value);
+}
+
+fn create_wrapper_function(
+ tcx: TyCtxt<'_>,
+ context: &Context<'_>,
+ from_name: &str,
+ to_name: &str,
+ types: &[Type<'_>],
+ output: Option<Type<'_>>,
+) {
+ let void = context.new_type::<()>();
- let types = [usize, usize];
- let name = "__rust_alloc_error_handler".to_string();
let args: Vec<_> = types.iter().enumerate()
.map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
.collect();
- let func = context.new_function(None, FunctionType::Exported, void, &args, name, false);
+ let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, from_name, false);
- if tcx.sess.target.default_hidden_visibility {
+ if tcx.sess.target.options.default_hidden_visibility {
#[cfg(feature="master")]
func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
}
+ if tcx.sess.must_emit_unwind_tables() {
+ // TODO(antoyo): emit unwind tables.
+ }
- let callee = alloc_error_handler_name(alloc_error_handler_kind);
let args: Vec<_> = types.iter().enumerate()
.map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
.collect();
- let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false);
+ let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, to_name, false);
#[cfg(feature="master")]
callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
@@ -118,18 +112,15 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
.enumerate()
.map(|(i, _)| func.get_param(i as i32).to_rvalue())
.collect::<Vec<_>>();
- let _ret = context.new_call(None, callee, &args);
+ let ret = context.new_call(None, callee, &args);
//llvm::LLVMSetTailCall(ret, True);
- block.end_with_void_return(None);
-
- let name = OomStrategy::SYMBOL.to_string();
- let global = context.new_global(None, GlobalKind::Exported, i8, name);
- let value = tcx.sess.opts.unstable_opts.oom.should_panic();
- let value = context.new_rvalue_from_int(i8, value as i32);
- global.global_set_initializer_rvalue(value);
+ if output.is_some() {
+ block.end_with_return(None, ret);
+ }
+ else {
+ block.end_with_void_return(None);
+ }
- let name = NO_ALLOC_SHIM_IS_UNSTABLE.to_string();
- let global = context.new_global(None, GlobalKind::Exported, i8, name);
- let value = context.new_rvalue_from_int(i8, 0);
- global.global_set_initializer_rvalue(value);
+ // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
+ // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
}
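
This refactor collapses the two hand-rolled wrapper emitters into create_wrapper_function: it builds an exported function that forwards all parameters to a hidden extern callee, returning the callee's value when there is an output type and void otherwise. Expressed as ordinary Rust rather than gccjit calls, the emitted shape is roughly the following (the symbol pair is an example of what global_fn_name/default_fn_name produce; the sketch is illustrative and not meant to be linked on its own):

extern "C" {
    // The `to_name` callee, e.g. `__rdl_alloc`, declared with hidden visibility.
    fn __rdl_alloc(size: usize, align: usize) -> *mut u8;
}

// The `from_name` wrapper, e.g. `__rust_alloc`: one entry block that calls
// the callee and ends with `end_with_return` (or `end_with_void_return`).
#[no_mangle]
pub unsafe extern "C" fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
    __rdl_alloc(size, align)
}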
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 905fdac92..f3a9ca77a 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -452,10 +452,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
InlineAsmOperandRef::Const { ref string } => {
- // Const operands get injected directly into the template
- if att_dialect {
- template_str.push('$');
- }
template_str.push_str(string);
}
}
diff --git a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs
index eb0cce19b..6159971cf 100644
--- a/compiler/rustc_codegen_gcc/src/attributes.rs
+++ b/compiler/rustc_codegen_gcc/src/attributes.rs
@@ -4,72 +4,13 @@ use gccjit::Function;
use rustc_attr::InstructionSetAttr;
#[cfg(feature="master")]
use rustc_attr::InlineAttr;
-use rustc_codegen_ssa::target_features::tied_target_features;
-use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
#[cfg(feature="master")]
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_session::Session;
use rustc_span::symbol::sym;
-use smallvec::{smallvec, SmallVec};
use crate::{context::CodegenCx, errors::TiedTargetFeatures};
-
-// Given a map from target_features to whether they are enabled or disabled,
-// ensure only valid combinations are allowed.
-pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> {
- for tied in tied_target_features(sess) {
- // Tied features must be set to the same value, or not set at all
- let mut tied_iter = tied.iter();
- let enabled = features.get(tied_iter.next().unwrap());
- if tied_iter.any(|feature| enabled != features.get(feature)) {
- return Some(tied);
- }
- }
- None
-}
-
-// TODO(antoyo): maybe move to a new module gcc_util.
-// To find a list of GCC's names, check https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
-fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
- let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
- match (arch, s) {
- ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"],
- ("x86", "pclmulqdq") => smallvec!["pclmul"],
- ("x86", "rdrand") => smallvec!["rdrnd"],
- ("x86", "bmi1") => smallvec!["bmi"],
- ("x86", "cmpxchg16b") => smallvec!["cx16"],
- ("x86", "avx512vaes") => smallvec!["vaes"],
- ("x86", "avx512gfni") => smallvec!["gfni"],
- ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
- // NOTE: seems like GCC requires 'avx512bw' for 'avx512vbmi2'.
- ("x86", "avx512vbmi2") => smallvec!["avx512vbmi2", "avx512bw"],
- // NOTE: seems like GCC requires 'avx512bw' for 'avx512bitalg'.
- ("x86", "avx512bitalg") => smallvec!["avx512bitalg", "avx512bw"],
- ("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
- ("aarch64", "dpb") => smallvec!["ccpp"],
- ("aarch64", "dpb2") => smallvec!["ccdp"],
- ("aarch64", "frintts") => smallvec!["fptoint"],
- ("aarch64", "fcma") => smallvec!["complxnum"],
- ("aarch64", "pmuv3") => smallvec!["perfmon"],
- ("aarch64", "paca") => smallvec!["pauth"],
- ("aarch64", "pacg") => smallvec!["pauth"],
- // Rust ties fp and neon together. In LLVM neon implicitly enables fp,
- // but we manually enable neon when a feature only implicitly enables fp
- ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
- ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
- ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
- ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
- ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
- ("aarch64", "sve") => smallvec!["sve", "neon"],
- ("aarch64", "sve2") => smallvec!["sve2", "neon"],
- ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
- ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
- ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
- ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
- (_, s) => smallvec![s],
- }
-}
+use crate::gcc_util::{check_tied_features, to_gcc_features};
/// Get GCC attribute for the provided inline heuristic.
#[cfg(feature="master")]
@@ -112,8 +53,24 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
codegen_fn_attrs.inline
};
if let Some(attr) = inline_attr(cx, inline) {
+ if let FnAttribute::AlwaysInline = attr {
+ func.add_attribute(FnAttribute::Inline);
+ }
func.add_attribute(attr);
}
+
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+ func.add_attribute(FnAttribute::Cold);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
+ func.add_attribute(FnAttribute::ReturnsTwice);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
+ func.add_attribute(FnAttribute::Pure);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
+ func.add_attribute(FnAttribute::Const);
+ }
}
let function_features =
@@ -140,11 +97,33 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
}))
.collect::<Vec<_>>();
- // TODO(antoyo): check if we really need global backend features. (Maybe they could be applied
- // globally?)
+ // TODO(antoyo): cg_llvm adds global features to each function so that LTO keep them.
+ // Check if GCC requires the same.
let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
function_features.extend(&mut global_features);
- let target_features = function_features.join(",");
+ let target_features = function_features
+ .iter()
+ .filter_map(|feature| {
+ // FIXME(antoyo): for some reason, disabling SSE results in the following error when
+ // compiling Rust for Linux:
+ // SSE register return with SSE disabled
+ // TODO(antoyo): support soft-float and retpoline-external-thunk.
+ if feature.contains("soft-float") || feature.contains("retpoline-external-thunk") || *feature == "-sse" {
+ return None;
+ }
+
+ if feature.starts_with('-') {
+ Some(format!("no{}", feature))
+ }
+ else if feature.starts_with('+') {
+ Some(feature[1..].to_string())
+ }
+ else {
+ Some(feature.to_string())
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(",");
if !target_features.is_empty() {
#[cfg(feature="master")]
func.add_attribute(FnAttribute::Target(&target_features));
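
The new filter_map normalizes rustc feature strings into the spelling GCC expects in the target function attribute: a leading '-' gains a "no" prefix (so "-avx2" becomes "no-avx2"), a leading '+' is dropped, and features GCC cannot currently handle are filtered out entirely. A standalone sketch of that mapping, assuming the same input syntax as the code above:

fn to_gcc_feature(feature: &str) -> Option<String> {
    // Features GCC cannot handle yet are dropped (see the FIXME above).
    if feature.contains("soft-float")
        || feature.contains("retpoline-external-thunk")
        || feature == "-sse"
    {
        return None;
    }
    if feature.starts_with('-') {
        // "-avx2" -> "no-avx2": GCC's negated spelling in target(...).
        Some(format!("no{}", feature))
    } else if let Some(stripped) = feature.strip_prefix('+') {
        Some(stripped.to_string())
    } else {
        Some(feature.to_string())
    }
}

fn main() {
    assert_eq!(to_gcc_feature("-avx2").as_deref(), Some("no-avx2"));
    assert_eq!(to_gcc_feature("+sse4.2").as_deref(), Some("sse4.2"));
    assert_eq!(to_gcc_feature("-sse"), None);
}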
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
new file mode 100644
index 000000000..529454b11
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -0,0 +1,341 @@
+/// GCC requires using the same toolchain for the whole compilation when doing LTO.
+/// So, we need the same version/commit of the linker (gcc) and the LTO front-end binaries
+/// (lto1, lto-wrapper, liblto_plugin.so).
+
+// FIXME(antoyo): the executables compiled with LTO are bigger than those compiled without LTO.
+// Since it is the opposite for cg_llvm, check if this is normal.
+//
+// Maybe we embed the bitcode in the final binary?
+// It doesn't look like we try to generate fat objects for the final binary.
+// Check if the way we combine the object files makes it keep the LTO sections on the final link.
+// Maybe that's because the combined object files contain the IR (true) and the final link
+// does not remove it?
+//
+// TODO(antoyo): for performance, check which optimizations the C++ frontend enables.
+//
+// Fix these warnings:
+// /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
+// /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
+// /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization
+
+use std::ffi::CString;
+use std::fs::{self, File};
+use std::path::{Path, PathBuf};
+
+use gccjit::OutputKind;
+use object::read::archive::ArchiveFile;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_data_structures::memmap::Mmap;
+use rustc_errors::{FatalError, Handler};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_session::config::{CrateType, Lto};
+use tempfile::{TempDir, tempdir};
+
+use crate::back::write::save_temp_bitcode;
+use crate::errors::{
+ DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+};
+use crate::{GccCodegenBackend, GccContext, to_gcc_opt_level};
+
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+//pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+ match crate_type {
+ CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
+ CrateType::Rlib | CrateType::ProcMacro => false,
+ }
+}
+
+struct LtoData {
+ // TODO(antoyo): use symbols_below_threshold.
+ //symbols_below_threshold: Vec<CString>,
+ upstream_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ tmp_path: TempDir,
+}
+
+fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, diag_handler: &Handler) -> Result<LtoData, FatalError> {
+ let export_threshold = match cgcx.lto {
+ // We're just doing LTO for our one crate
+ Lto::ThinLocal => SymbolExportLevel::Rust,
+
+ // We're doing LTO for the entire crate graph
+ Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+ Lto::No => panic!("didn't request LTO but we're doing LTO"),
+ };
+
+ let tmp_path =
+ match tempdir() {
+ Ok(tmp_path) => tmp_path,
+ Err(error) => {
+ eprintln!("Cannot create temporary directory: {}", error);
+ return Err(FatalError);
+ },
+ };
+
+ let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+ if info.level.is_below_threshold(export_threshold) || info.used {
+ Some(CString::new(name.as_str()).unwrap())
+ } else {
+ None
+ }
+ };
+ let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ let mut symbols_below_threshold = {
+ let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
+ exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
+ };
+ info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+ // If we're performing LTO for the entire crate graph, then for each of our
+ // upstream dependencies, find the corresponding rlib and load the bitcode
+ // from the archive.
+ //
+ // We save off all the bytecode and GCC module file path for later processing
+ // with either fat or thin LTO
+ let mut upstream_modules = Vec::new();
+ if cgcx.lto != Lto::ThinLocal {
+ // Make sure we actually can run LTO
+ for crate_type in cgcx.crate_types.iter() {
+ if !crate_type_allows_lto(*crate_type) {
+ diag_handler.emit_err(LtoDisallowed);
+ return Err(FatalError);
+ } else if *crate_type == CrateType::Dylib {
+ if !cgcx.opts.unstable_opts.dylib_lto {
+ diag_handler.emit_err(LtoDylib);
+ return Err(FatalError);
+ }
+ }
+ }
+
+ if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+ diag_handler.emit_err(DynamicLinkingWithLTO);
+ return Err(FatalError);
+ }
+
+ for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+ let exported_symbols =
+ cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ {
+ let _timer =
+ cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
+ symbols_below_threshold
+ .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+ }
+
+ let archive_data = unsafe {
+ Mmap::map(File::open(&path).expect("couldn't open rlib"))
+ .expect("couldn't map rlib")
+ };
+ let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
+ let obj_files = archive
+ .members()
+ .filter_map(|child| {
+ child.ok().and_then(|c| {
+ std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
+ })
+ })
+ .filter(|&(name, _)| looks_like_rust_object_file(name));
+ for (name, child) in obj_files {
+ info!("adding bitcode from {}", name);
+ let path = tmp_path.path().join(name);
+ match save_as_file(child.data(&*archive_data).expect("corrupt rlib"), &path) {
+ Ok(()) => {
+ let buffer = ModuleBuffer::new(path);
+ let module = SerializedModule::Local(buffer);
+ upstream_modules.push((module, CString::new(name).unwrap()));
+ }
+ Err(e) => {
+ diag_handler.emit_err(e);
+ return Err(FatalError);
+ }
+ }
+ }
+ }
+ }
+
+ Ok(LtoData {
+ //symbols_below_threshold,
+ upstream_modules,
+ tmp_path,
+ })
+}
+
+fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
+ fs::write(path, obj)
+ .map_err(|error| LtoBitcodeFromRlib {
+ gcc_err: format!("write object file to temp dir: {}", error)
+ })
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+ cgcx: &CodegenContext<GccCodegenBackend>,
+ modules: Vec<FatLtoInput<GccCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let lto_data = prepare_lto(cgcx, &diag_handler)?;
+ /*let symbols_below_threshold =
+ lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
+ fat_lto(cgcx, &diag_handler, modules, cached_modules, lto_data.upstream_modules, lto_data.tmp_path,
+ //&symbols_below_threshold,
+ )
+}
+
+fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, modules: Vec<FatLtoInput<GccCodegenBackend>>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir,
+ //symbols_below_threshold: &[*const libc::c_char],
+) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
+ let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module");
+ info!("going for a fat lto");
+
+ // Sort out all our lists of incoming modules into two lists.
+ //
+ // * `serialized_modules` (also an argument to this function) contains all
+ // modules that are serialized in-memory.
+ // * `in_memory` contains modules which are already parsed and in-memory,
+ // such as from multi-CGU builds.
+ //
+ // All of `cached_modules` (cached from previous incremental builds) can
+ // immediately go onto the `serialized_modules` modules list and then we can
+ // split the `modules` array into these two lists.
+ let mut in_memory = Vec::new();
+ serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+ info!("pushing cached module {:?}", wp.cgu_name);
+ (buffer, CString::new(wp.cgu_name).unwrap())
+ }));
+ for module in modules {
+ match module {
+ FatLtoInput::InMemory(m) => in_memory.push(m),
+ FatLtoInput::Serialized { name, buffer } => {
+ info!("pushing serialized module {:?}", name);
+ let buffer = SerializedModule::Local(buffer);
+ serialized_modules.push((buffer, CString::new(name).unwrap()));
+ }
+ }
+ }
+
+ // Find the "costliest" module and merge everything into that codegen unit.
+ // All the other modules will be serialized and reparsed into the new
+ // context, so this hopefully avoids serializing and parsing the largest
+ // codegen unit.
+ //
+ // Additionally use a regular module as the base here to ensure that various
+ // file copy operations in the backend work correctly. The only other kind
+ // of module here should be an allocator one, and if your crate is smaller
+ // than the allocator module then the size doesn't really matter anyway.
+ let costliest_module = in_memory
+ .iter()
+ .enumerate()
+ .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+ .map(|(i, _module)| {
+ //let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+ // TODO(antoyo): compute the cost of a module if GCC allows this.
+ (0, i)
+ })
+ .max();
+
+ // If we found a costliest module, we're good to go. Otherwise all our
+ // inputs were serialized which could happen in the case, for example, that
+ // all our inputs were incrementally reread from the cache and we're just
+ // re-executing the LTO passes. If that's the case deserialize the first
+ // module and create a linker with it.
+ let mut module: ModuleCodegen<GccContext> = match costliest_module {
+ Some((_cost, i)) => in_memory.remove(i),
+ None => {
+ unimplemented!("Incremental");
+ /*assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+ let (buffer, name) = serialized_modules.remove(0);
+ info!("no in-memory regular modules to choose from, parsing {:?}", name);
+ ModuleCodegen {
+ module_llvm: GccContext::parse(cgcx, &name, buffer.data(), diag_handler)?,
+ name: name.into_string().unwrap(),
+ kind: ModuleKind::Regular,
+ }*/
+ }
+ };
+ let mut serialized_bitcode = Vec::new();
+ {
+ info!("using {:?} as a base module", module.name);
+
+ // We cannot load and merge GCC contexts in memory like cg_llvm is doing.
+ // Instead, we combine the object files into a single object file.
+ for module in in_memory {
+ let path = tmp_path.path().to_path_buf().join(&module.name);
+ let path = path.to_str().expect("path");
+ let context = &module.module_llvm.context;
+ let config = cgcx.config(module.kind);
+ // NOTE: we need to set the optimization level here in order for LTO to do its job.
+ context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+ context.add_command_line_option("-flto=auto");
+ context.add_command_line_option("-flto-partition=one");
+ context.compile_to_file(OutputKind::ObjectFile, path);
+ let buffer = ModuleBuffer::new(PathBuf::from(path));
+ let llmod_id = CString::new(&module.name[..]).unwrap();
+ serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+ }
+ // Sort the modules to ensure we produce deterministic results.
+ serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+ // We add the object files and save in should_combine_object_files that we should combine
+ // them into a single object file when compiling later.
+ for (bc_decoded, name) in serialized_modules {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg_recorder("GCC_fat_lto_link_module", |recorder| {
+ recorder.record_arg(format!("{:?}", name))
+ });
+ info!("linking {:?}", name);
+ match bc_decoded {
+ SerializedModule::Local(ref module_buffer) => {
+ module.module_llvm.should_combine_object_files = true;
+ module.module_llvm.context.add_driver_option(module_buffer.0.to_str().expect("path"));
+ },
+ SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
+ SerializedModule::FromUncompressedFile(_) => unimplemented!("from uncompressed file"),
+ }
+ serialized_bitcode.push(bc_decoded);
+ }
+ save_temp_bitcode(cgcx, &module, "lto.input");
+
+ // Internalize everything below threshold to help strip out more modules and such.
+ /*unsafe {
+ let ptr = symbols_below_threshold.as_ptr();
+ llvm::LLVMRustRunRestrictionPass(
+ llmod,
+ ptr as *const *const libc::c_char,
+ symbols_below_threshold.len() as libc::size_t,
+ );*/
+ save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+ //}
+ }
+
+ // NOTE: save the temporary directory used by LTO so that it gets deleted after linking instead
+ // of now.
+ module.module_llvm.temp_dir = Some(tmp_path);
+
+ Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+}
+
+pub struct ModuleBuffer(PathBuf);
+
+impl ModuleBuffer {
+ pub fn new(path: PathBuf) -> ModuleBuffer {
+ ModuleBuffer(path)
+ }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!("data not needed for GCC codegen");
+ }
+}
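
The heart of fat_lto is module selection: inputs are split into in-memory and serialized lists, one regular in-memory module becomes the merge base, and every other module is compiled to an object file whose path is recorded via add_driver_option for the later combining link. Because GCC exposes no per-module cost yet, the max() over a constant cost effectively picks the last regular module. A simplified sketch of that selection with stand-in types:

struct Module {
    name: String,
    regular: bool, // ModuleKind::Regular vs. the allocator module
}

fn pick_base(mut in_memory: Vec<Module>) -> Option<Module> {
    // Mirrors the diff: all costs are 0 until GCC can report one, so
    // `max()` on (cost, index) tuples resolves to the highest index.
    let (_cost, index) = in_memory
        .iter()
        .enumerate()
        .filter(|&(_, module)| module.regular)
        .map(|(i, _)| (0, i))
        .max()?;
    Some(in_memory.remove(index))
}

fn main() {
    let modules = vec![
        Module { name: "cgu.0".into(), regular: true },
        Module { name: "allocator".into(), regular: false },
    ];
    let base = pick_base(modules).expect("at least one regular module");
    println!("merge base: {}", base.name);
}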
diff --git a/compiler/rustc_codegen_gcc/src/back/mod.rs b/compiler/rustc_codegen_gcc/src/back/mod.rs
index d692799d7..10187eab0 100644
--- a/compiler/rustc_codegen_gcc/src/back/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/back/mod.rs
@@ -1 +1,2 @@
+pub mod lto;
pub mod write;
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
index 5f54ac4eb..04772d770 100644
--- a/compiler/rustc_codegen_gcc/src/back/write.rs
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -2,27 +2,71 @@ use std::{env, fs};
use gccjit::OutputKind;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
-use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
use rustc_errors::Handler;
+use rustc_fs_util::link_or_copy;
use rustc_session::config::OutputType;
use rustc_span::fatal_error::FatalError;
use rustc_target::spec::SplitDebuginfo;
use crate::{GccCodegenBackend, GccContext};
+use crate::errors::CopyBitcode;
-pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
- let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
{
let context = &module.module_llvm.context;
let module_name = module.name.clone();
+
+ let should_combine_object_files = module.module_llvm.should_combine_object_files;
+
let module_name = Some(&module_name[..]);
- let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ // NOTE: Only generate object files with GIMPLE when this environment variable is set for
+ // now because this requires a particular setup (same gcc/lto1/lto-wrapper commit as libgccjit).
+ let fat_lto = env::var("EMBED_LTO_BITCODE").as_deref() == Ok("1");
+
+ let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
- if config.bitcode_needed() {
+ if config.bitcode_needed() && fat_lto {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name);
+
// TODO(antoyo)
+ /*if let Some(bitcode_filename) = bc_out.file_name() {
+ cgcx.prof.artifact_size(
+ "llvm_bitcode",
+ bitcode_filename.to_string_lossy(),
+ data.len() as u64,
+ );
+ }*/
+
+ if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("GCC_module_codegen_emit_bitcode", &*module.name);
+ context.add_command_line_option("-flto=auto");
+ context.add_command_line_option("-flto-partition=one");
+ context.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
+ }
+
+ if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("GCC_module_codegen_embed_bitcode", &*module.name);
+ // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes?
+ //embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+
+ context.add_command_line_option("-flto=auto");
+ context.add_command_line_option("-flto-partition=one");
+ context.add_command_line_option("-ffat-lto-objects");
+ // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
+ context.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
+ }
}
if config.emit_ir {
@@ -32,7 +76,7 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
if config.emit_asm {
let _timer = cgcx
.prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+ .generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
}
@@ -41,7 +85,7 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
EmitObj::ObjectCode(_) => {
let _timer = cgcx
.prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+ .generic_activity_with_arg("GCC_module_codegen_emit_obj", &*module.name);
if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
println!("Module {}", module.name);
}
@@ -60,11 +104,36 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
context.set_debug_info(true);
context.dump_to_file(path, true);
}
- context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+ if should_combine_object_files && fat_lto {
+ context.add_command_line_option("-flto=auto");
+ context.add_command_line_option("-flto-partition=one");
+
+ context.add_driver_option("-Wl,-r");
+ // NOTE: we need -nostdlib, otherwise, we get the following error:
+ // /usr/bin/ld: cannot find -lgcc_s: No such file or directory
+ context.add_driver_option("-nostdlib");
+ // NOTE: without -fuse-linker-plugin, we get the following error:
+ // lto1: internal compiler error: decompressed stream: Destination buffer is too small
+ context.add_driver_option("-fuse-linker-plugin");
+
+ // NOTE: this doesn't actually generate an executable. With the above flags, it combines the .o files together in another .o.
+ context.compile_to_file(OutputKind::Executable, obj_out.to_str().expect("path to str"));
+ }
+ else {
+ context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+ }
}
EmitObj::Bitcode => {
- // TODO(antoyo)
+ debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+ if let Err(err) = link_or_copy(&bc_out, &obj_out) {
+ diag_handler.emit_err(CopyBitcode { err });
+ }
+
+ if !config.emit_bc {
+ debug!("removing_bitcode {:?}", bc_out);
+ ensure_removed(diag_handler, &bc_out);
+ }
}
EmitObj::None => {}
@@ -82,3 +151,18 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
unimplemented!();
}
+
+pub(crate) fn save_temp_bitcode(cgcx: &CodegenContext<GccCodegenBackend>, _module: &ModuleCodegen<GccContext>, _name: &str) {
+ if !cgcx.save_temps {
+ return;
+ }
+ unimplemented!();
+ /*unsafe {
+ let ext = format!("{}.bc", name);
+ let cgu = Some(&module.name[..]);
+ let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+ let cstr = path_to_c_string(&path);
+ let llmod = module.module_llvm.llmod();
+ llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+ }*/
+}
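
Both the bitcode-emission and the object-combination paths above are gated on the EMBED_LTO_BITCODE environment variable, since embedding GIMPLE only works when gcc, lto1, lto-wrapper, and libgccjit come from the same build. A small sketch of the gate, with the combining step's rough driver equivalent shown as a comment (flags taken from the diff; the file names are placeholders):

use std::env;

fn fat_lto_enabled() -> bool {
    // Same check as the diff: only generate objects with GIMPLE on request.
    env::var("EMBED_LTO_BITCODE").as_deref() == Ok("1")
}

fn main() {
    if fat_lto_enabled() {
        // The "executable" output is really a relocatable link (-Wl,-r)
        // that merges the objects into one .o, roughly:
        //     gcc -flto=auto -flto-partition=one -fuse-linker-plugin \
        //         -nostdlib -Wl,-r a.o b.o -o combined.o
        println!("would combine object files with -Wl,-r");
    }
}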
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
index 9e614ca4a..5073066c1 100644
--- a/compiler/rustc_codegen_gcc/src/base.rs
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -1,3 +1,4 @@
+use std::collections::HashSet;
use std::env;
use std::time::Instant;
@@ -18,6 +19,7 @@ use rustc_codegen_ssa::traits::DebugInfoMethods;
use rustc_session::config::DebugInfo;
use rustc_span::Symbol;
+use crate::{LockedTargetInfo, gcc_util};
use crate::GccContext;
use crate::builder::Builder;
use crate::context::CodegenCx;
@@ -50,6 +52,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
match linkage {
Linkage::External => FunctionType::Exported,
+ // TODO(antoyo): set the attribute externally_visible.
Linkage::AvailableExternally => FunctionType::Extern,
Linkage::LinkOnceAny => unimplemented!(),
Linkage::LinkOnceODR => unimplemented!(),
@@ -63,7 +66,7 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
}
}
-pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
+pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: LockedTargetInfo) -> (ModuleCodegen<GccContext>, u64) {
let prof_timer = tcx.prof.generic_activity("codegen_module");
let start_time = Instant::now();
@@ -71,7 +74,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
let (module, _) = tcx.dep_graph.with_task(
dep_node,
tcx,
- (cgu_name, supports_128bit_integers),
+ (cgu_name, target_info),
module_codegen,
Some(dep_graph::hash_result),
);
@@ -82,38 +85,29 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
- fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
+ fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, target_info): (Symbol, LockedTargetInfo)) -> ModuleCodegen<GccContext> {
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
- //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
let context = Context::default();
context.add_command_line_option("-fexceptions");
context.add_driver_option("-fexceptions");
- // TODO(antoyo): only set on x86 platforms.
- context.add_command_line_option("-masm=intel");
- // TODO(antoyo): only add the following cli argument if the feature is supported.
- context.add_command_line_option("-msse2");
- context.add_command_line_option("-mavx2");
- // FIXME(antoyo): the following causes an illegal instruction on vmovdqu64 in std_example on my CPU.
- // Only add if the CPU supports it.
- context.add_command_line_option("-msha");
- context.add_command_line_option("-mpclmul");
- context.add_command_line_option("-mfma");
- context.add_command_line_option("-mfma4");
- context.add_command_line_option("-m64");
- context.add_command_line_option("-mbmi");
- context.add_command_line_option("-mgfni");
- //context.add_command_line_option("-mavxvnni"); // The CI doesn't support this option.
- context.add_command_line_option("-mf16c");
- context.add_command_line_option("-maes");
- context.add_command_line_option("-mxsavec");
- context.add_command_line_option("-mbmi2");
- context.add_command_line_option("-mrtm");
- context.add_command_line_option("-mvaes");
- context.add_command_line_option("-mvpclmulqdq");
- context.add_command_line_option("-mavx");
+ let disabled_features: HashSet<_> = tcx.sess.opts.cg.target_feature.split(',')
+ .filter(|feature| feature.starts_with('-'))
+ .map(|string| &string[1..])
+ .collect();
+
+ if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
+ context.add_command_line_option("-masm=intel");
+ }
+
+ if !disabled_features.contains("avx") && tcx.sess.target.arch == "x86_64" {
+ // NOTE: we always enable AVX because the equivalent of llvm.x86.sse2.cmp.pd in GCC for
+ // SSE2 is multiple builtins, so we use the AVX __builtin_ia32_cmppd instead.
+ // FIXME(antoyo): use the proper builtins for llvm.x86.sse2.cmp.pd and similar.
+ context.add_command_line_option("-mavx");
+ }
for arg in &tcx.sess.opts.cg.llvm_args {
context.add_command_line_option(arg);
@@ -127,6 +121,16 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
// NOTE: Rust relies on LLVM doing wrapping on overflow.
context.add_command_line_option("-fwrapv");
+ if tcx.sess.relocation_model() == rustc_target::spec::RelocModel::Static {
+ context.add_command_line_option("-mcmodel=kernel");
+ context.add_command_line_option("-fno-pie");
+ }
+
+ let target_cpu = gcc_util::target_cpu(tcx.sess);
+ if target_cpu != "generic" {
+ context.add_command_line_option(&format!("-march={}", target_cpu));
+ }
+
if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
context.add_command_line_option("-ffunction-sections");
context.add_command_line_option("-fdata-sections");
@@ -135,8 +139,14 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
if env::var("CG_GCCJIT_DUMP_RTL").as_deref() == Ok("1") {
context.add_command_line_option("-fdump-rtl-vregs");
}
+ if env::var("CG_GCCJIT_DUMP_RTL_ALL").as_deref() == Ok("1") {
+ context.add_command_line_option("-fdump-rtl-all");
+ }
if env::var("CG_GCCJIT_DUMP_TREE_ALL").as_deref() == Ok("1") {
- context.add_command_line_option("-fdump-tree-all");
+ context.add_command_line_option("-fdump-tree-all-eh");
+ }
+ if env::var("CG_GCCJIT_DUMP_IPA_ALL").as_deref() == Ok("1") {
+ context.add_command_line_option("-fdump-ipa-all-eh");
}
if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
context.set_dump_code_on_compile(true);
@@ -152,11 +162,15 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
context.set_keep_intermediates(true);
}
+ if env::var("CG_GCCJIT_VERBOSE").as_deref() == Ok("1") {
+ context.add_driver_option("-v");
+ }
+
// NOTE: The codegen generates unreachable blocks.
context.set_allow_unreachable_blocks(true);
{
- let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
+ let cx = CodegenCx::new(&context, cgu, tcx, target_info.supports_128bit_int());
let mono_items = cgu.items_in_deterministic_order(tcx);
for &(mono_item, data) in &mono_items {
@@ -181,7 +195,9 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i
ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: GccContext {
- context
+ context,
+ should_combine_object_files: false,
+ temp_dir: None,
},
kind: ModuleKind::Regular,
}
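
The hard-coded list of -m options is replaced by honoring -C target-feature: a feature is only force-enabled (currently just -mavx on x86_64) when the user has not explicitly disabled it. A standalone sketch of the parsing, assuming rustc's comma-separated +feat/-feat syntax:

use std::collections::HashSet;

fn disabled_features(target_feature: &str) -> HashSet<&str> {
    target_feature
        .split(',')
        .filter(|feature| feature.starts_with('-'))
        .map(|feature| &feature[1..])
        .collect()
}

fn main() {
    let disabled = disabled_features("+sse4.2,-avx,-avx2");
    assert!(disabled.contains("avx"));
    // With "-avx" present, the codegen unit would not pass -mavx to GCC.
}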
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 308cb04ca..b8a8c144d 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -247,16 +247,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
- let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
let stored_ty = self.cx.val_ty(val);
let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
- if dest_ptr_ty == stored_ptr_ty {
- ptr
- }
- else {
- self.bitcast(ptr, stored_ptr_ty)
- }
+ self.bitcast(ptr, stored_ptr_ty)
}
pub fn current_func(&self) -> Function<'gcc> {
@@ -500,7 +493,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
#[cfg(not(feature="master"))]
- fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: &CodegenFnAttrs, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
let call_site = self.call(typ, fn_attrs, None, func, args, None);
let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
self.llbb().end_with_conditional(None, condition, then, catch);
@@ -663,7 +656,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a + b
+ self.gcc_add(a, b)
}
fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -671,7 +664,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a - b
+ self.gcc_sub(a, b)
}
fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -680,11 +673,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a * b
+ self.gcc_mul(a, b)
}
fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a * b
+ self.gcc_mul(a, b)
}
fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
@@ -758,9 +751,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
loaded_value.to_rvalue()
}
- fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
- // TODO(antoyo): use ty.
- let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
+ fn volatile_load(&mut self, ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+ let ptr = self.context.new_cast(None, ptr, ty.make_volatile().make_pointer());
ptr.dereference(None).to_rvalue()
}
@@ -916,7 +908,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
.add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
}
- fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ fn gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ // NOTE: due to opaque pointers now being used, we need to cast here.
+ let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
let ptr_type = ptr.get_type();
let mut pointee_type = ptr.get_type();
// NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
@@ -927,6 +921,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
// require dereferencing the pointer.
for index in indices {
pointee_type = pointee_type.get_pointee().expect("pointee type");
+ #[cfg(feature="master")]
+ let pointee_size = {
+ let size = self.cx.context.new_sizeof(pointee_type);
+ self.context.new_cast(None, size, index.get_type())
+ };
+ #[cfg(not(feature="master"))]
let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type);
}
@@ -1420,7 +1420,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.cx
}
- fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+ fn apply_attrs_to_cleanup_callsite(&mut self, _llret: RValue<'gcc>) {
// FIXME(bjorn3): implement
}
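
With opaque pointers, the ptr passed to gep no longer carries a usable pointee type, so the builder first casts it to typ's pointer type and then lowers every index to index * sizeof(pointee) added to the byte address (new_sizeof on master, get_size otherwise). The equivalent computation in plain Rust, as an unsafe sketch for a single index level:

use std::mem::size_of;

// One GEP step over element type T: advance by index * sizeof(T) bytes,
// exactly what the loop above computes with new_sizeof and gcc_int_cast.
unsafe fn gep_one_level<T>(ptr: *const T, index: isize) -> *const T {
    let byte_addr = ptr as isize + index * size_of::<T>() as isize;
    byte_addr as *const T
}

fn main() {
    let data = [10u32, 20, 30];
    let second = unsafe { *gep_one_level(data.as_ptr(), 1) };
    assert_eq!(second, 20);
}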
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index 5f54cb16d..93fe27e54 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -424,35 +424,35 @@ impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
}
fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.i8_type
+ self.is_compatible_with(cx.i8_type)
}
fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.u8_type
+ self.is_compatible_with(cx.u8_type)
}
fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.i16_type
+ self.is_compatible_with(cx.i16_type)
}
fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.u16_type
+ self.is_compatible_with(cx.u16_type)
}
fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.i32_type
+ self.is_compatible_with(cx.i32_type)
}
fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.u32_type
+ self.is_compatible_with(cx.u32_type)
}
fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.i64_type
+ self.is_compatible_with(cx.i64_type)
}
fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.u64_type
+ self.is_compatible_with(cx.u64_type)
}
fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
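
Switching these helpers from unqualified() equality to is_compatible_with matters because the context now attaches alignment qualifiers to its integer types (see the context.rs hunk below), and plain type equality distinguishes types that differ only in such qualifiers. A hypothetical mini model of the difference (not the real gccjit API):

#[derive(PartialEq)]
struct GccType {
    base: &'static str,
    align: Option<u64>,
}

impl GccType {
    fn is_compatible_with(&self, other: &GccType) -> bool {
        // Compatibility looks through qualifiers, unlike `==`.
        self.base == other.base
    }
}

fn main() {
    let plain = GccType { base: "int8_t", align: None };
    let aligned = GccType { base: "int8_t", align: Some(1) };
    assert!(plain != aligned); // an equality-style check fails here
    assert!(plain.is_compatible_with(&aligned)); // the new check succeeds
}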
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index dcebd92a6..a043660ea 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -20,6 +20,7 @@ use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDat
use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
use crate::callee::get_fn;
+use crate::common::SignType;
#[derive(Clone)]
pub struct FuncSig<'gcc> {
@@ -129,29 +130,57 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
let check_overflow = tcx.sess.overflow_checks();
- let i8_type = context.new_c_type(CType::Int8t);
- let i16_type = context.new_c_type(CType::Int16t);
- let i32_type = context.new_c_type(CType::Int32t);
- let i64_type = context.new_c_type(CType::Int64t);
- let u8_type = context.new_c_type(CType::UInt8t);
- let u16_type = context.new_c_type(CType::UInt16t);
- let u32_type = context.new_c_type(CType::UInt32t);
- let u64_type = context.new_c_type(CType::UInt64t);
+ let create_type = |ctype, rust_type| {
+ let layout = tcx.layout_of(ParamEnv::reveal_all().and(rust_type)).unwrap();
+ let align = layout.align.abi.bytes();
+ #[cfg(feature="master")]
+ {
+ context.new_c_type(ctype).get_aligned(align)
+ }
+ #[cfg(not(feature="master"))]
+ {
+ // Since libgccjit 12 doesn't contain the fix to compare aligned integer types,
+ // only align u128 and i128.
+ if layout.ty.int_size_and_signed(tcx).0.bytes() == 16 {
+ context.new_c_type(ctype).get_aligned(align)
+ }
+ else {
+ context.new_c_type(ctype)
+ }
+ }
+ };
+
+ let i8_type = create_type(CType::Int8t, tcx.types.i8);
+ let i16_type = create_type(CType::Int16t, tcx.types.i16);
+ let i32_type = create_type(CType::Int32t, tcx.types.i32);
+ let i64_type = create_type(CType::Int64t, tcx.types.i64);
+ let u8_type = create_type(CType::UInt8t, tcx.types.u8);
+ let u16_type = create_type(CType::UInt16t, tcx.types.u16);
+ let u32_type = create_type(CType::UInt32t, tcx.types.u32);
+ let u64_type = create_type(CType::UInt64t, tcx.types.u64);
let (i128_type, u128_type) =
if supports_128bit_integers {
- let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?;
- let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?;
+ let i128_type = create_type(CType::Int128t, tcx.types.i128);
+ let u128_type = create_type(CType::UInt128t, tcx.types.u128);
(i128_type, u128_type)
}
else {
- let i128_type = context.new_array_type(None, i64_type, 2);
- let u128_type = context.new_array_type(None, u64_type, 2);
+ /*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
+ let i128_align = layout.align.abi.bytes();
+ let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
+ let u128_align = layout.align.abi.bytes();*/
+
+ // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
+ // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
+ let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
+ let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
(i128_type, u128_type)
};
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+ // TODO(antoyo): set alignment on those types as well.
let float_type = context.new_type::<f32>();
let double_type = context.new_type::<f64>();
@@ -167,14 +196,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let ulonglong_type = context.new_c_type(CType::ULongLong);
let sizet_type = context.new_c_type(CType::SizeT);
- let isize_type = context.new_c_type(CType::LongLong);
- let usize_type = context.new_c_type(CType::ULongLong);
+ let usize_type = sizet_type;
+ let isize_type = usize_type;
let bool_type = context.new_type::<bool>();
- // TODO(antoyo): only have those assertions on x86_64.
- assert_eq!(isize_type.get_size(), i64_type.get_size());
- assert_eq!(usize_type.get_size(), u64_type.get_size());
-
let mut functions = FxHashMap::default();
let builtins = [
"__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
@@ -192,7 +217,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
}
- Self {
+ let mut cx = Self {
check_overflow,
codegen_unit,
context,
@@ -254,7 +279,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pointee_infos: Default::default(),
structs_as_pointer: Default::default(),
cleanup_blocks: Default::default(),
- }
+ };
+ // TODO(antoyo): instead of doing this, add SsizeT to libgccjit.
+ cx.isize_type = usize_type.to_signed(&cx);
+ cx
}
pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
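
With `isize_type` now derived from `size_t` via `SignType::to_signed` instead of
being hard-coded to `long long`, the x86_64-only size assertions become unnecessary.
A standalone sketch of the same derivation, assuming only the `gccjit` crate (the
real `to_signed` lives in common.rs and matches against the cached types on the
context):

    use gccjit::{CType, Context};

    fn main() {
        let ctx = Context::default();
        // size_t is unsigned; pick the signed C type of the same width by
        // comparing sizes, which is what to_signed does in spirit.
        let size_t = ctx.new_c_type(CType::SizeT);
        let candidates = [CType::Int8t, CType::Int16t, CType::Int32t, CType::Int64t];
        let ssize_t = candidates
            .into_iter()
            .map(|ctype| ctx.new_c_type(ctype))
            .find(|typ| typ.get_size() == size_t.get_size())
            .expect("no signed type matches size_t's width");
        assert_eq!(ssize_t.get_size(), size_t.get_size());
    }
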
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
index 493626c3c..247454fa5 100644
--- a/compiler/rustc_codegen_gcc/src/declare.rs
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -1,10 +1,12 @@
use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+#[cfg(feature="master")]
+use gccjit::{FnAttribute, ToRValue};
use rustc_codegen_ssa::traits::BaseTypeMethods;
use rustc_middle::ty::Ty;
use rustc_span::Symbol;
use rustc_target::abi::call::FnAbi;
-use crate::abi::FnAbiGccExt;
+use crate::abi::{FnAbiGcc, FnAbiGccExt};
use crate::context::CodegenCx;
use crate::intrinsic::llvm;
@@ -78,9 +80,20 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Function<'gcc> {
- let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self);
- let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
+ let FnAbiGcc {
+ return_type,
+ arguments_type,
+ is_c_variadic,
+ on_stack_param_indices,
+ #[cfg(feature="master")]
+ fn_attributes,
+ } = fn_abi.gcc_type(self);
+ let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &arguments_type, is_c_variadic);
self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
+ #[cfg(feature="master")]
+ for fn_attr in fn_attributes {
+ func.add_attribute(fn_attr);
+ }
func
}
@@ -114,6 +127,44 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll
.collect();
let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
cx.functions.borrow_mut().insert(name.to_string(), func);
+
+ #[cfg(feature="master")]
+ if name == "rust_eh_personality" {
+ // NOTE: GCC will sometimes change the personality function set on a function from
+ // rust_eh_personality to __gcc_personality_v0 as an optimization.
+ // As such, we need to create a weak alias from __gcc_personality_v0 to
+ // rust_eh_personality in order to avoid a linker error.
+ // This needs to be weak in order to still allow using the standard
+        // __gcc_personality_v0 when linking to it.
+ // Since aliases don't work (maybe because of a bug in LTO partitioning?), we
+ // create a wrapper function that calls rust_eh_personality.
+
+ let params: Vec<_> = param_types.into_iter().enumerate()
+ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
+ .collect();
+ let gcc_func = cx.context.new_function(None, FunctionType::Exported, return_type, &params, "__gcc_personality_v0", variadic);
+
+ // We need a normal extern function for the crates that access rust_eh_personality
+ // without defining it, otherwise we'll get a compiler error.
+ //
+ // For the crate defining it, that needs to be a weak alias instead.
+ gcc_func.add_attribute(FnAttribute::Weak);
+
+ let block = gcc_func.new_block("start");
+ let mut args = vec![];
+ for param in &params {
+ args.push(param.to_rvalue());
+ }
+ let call = cx.context.new_call(None, func, &args);
+ if return_type == cx.type_void() {
+ block.add_eval(None, call);
+ block.end_with_void_return(None);
+ }
+ else {
+ block.end_with_return(None, call);
+ }
+ }
+
func
};
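
The code above emits a weak forwarding function rather than a true alias. A minimal
sketch of that pattern in isolation, assuming the `gccjit` crate built with its
`master` feature (required for `FnAttribute`); `real_impl` and `weak_alias` are
placeholder names, not part of this patch:

    use gccjit::{Context, FnAttribute, FunctionType, ToRValue};

    fn main() {
        let ctx = Context::default();
        let int = ctx.new_type::<i32>();

        // The "real" implementation the wrapper forwards to.
        let param = ctx.new_parameter(None, int, "x");
        let real = ctx.new_function(None, FunctionType::Exported, int, &[param], "real_impl", false);
        let block = real.new_block("start");
        block.end_with_return(None, param.to_rvalue());

        // A weak forwarding wrapper: any strong definition of the same symbol
        // elsewhere wins at link time, exactly the property needed for
        // __gcc_personality_v0 above.
        let wparam = ctx.new_parameter(None, int, "x");
        let wrapper = ctx.new_function(None, FunctionType::Exported, int, &[wparam], "weak_alias", false);
        wrapper.add_attribute(FnAttribute::Weak);
        let wblock = wrapper.new_block("start");
        let call = ctx.new_call(None, real, &[wparam.to_rvalue()]);
        wblock.end_with_return(None, call);
    }
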
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
index 693367192..4bf3b71f5 100644
--- a/compiler/rustc_codegen_gcc/src/errors.rs
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -1,8 +1,36 @@
-use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};
-use rustc_macros::Diagnostic;
+use rustc_errors::{
+ DiagnosticArgValue, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, IntoDiagnosticArg,
+};
+use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::Span;
use std::borrow::Cow;
+use crate::fluent_generated as fluent;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unknown_ctarget_feature_prefix)]
+#[note]
+pub(crate) struct UnknownCTargetFeaturePrefix<'a> {
+ pub feature: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unknown_ctarget_feature)]
+#[note]
+pub(crate) struct UnknownCTargetFeature<'a> {
+ pub feature: &'a str,
+ #[subdiagnostic]
+ pub rust_feature: PossibleFeature<'a>,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum PossibleFeature<'a> {
+ #[help(codegen_gcc_possible_feature)]
+ Some { rust_feature: &'a str },
+ #[help(codegen_gcc_consider_filing_feature_request)]
+ None,
+}
+
struct ExitCode(Option<i32>);
impl IntoDiagnosticArg for ExitCode {
@@ -40,3 +68,58 @@ pub(crate) struct TiedTargetFeatures {
pub span: Span,
pub features: String,
}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_copy_bitcode)]
+pub(crate) struct CopyBitcode {
+ pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_dynamic_linking_with_lto)]
+#[note]
+pub(crate) struct DynamicLinkingWithLTO;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_load_bitcode)]
+pub(crate) struct LoadBitcode {
+ name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_disallowed)]
+pub(crate) struct LtoDisallowed;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_dylib)]
+pub(crate) struct LtoDylib;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_bitcode_from_rlib)]
+pub(crate) struct LtoBitcodeFromRlib {
+ pub gcc_err: String,
+}
+
+pub(crate) struct TargetFeatureDisableOrEnable<'a> {
+ pub features: &'a [&'a str],
+ pub span: Option<Span>,
+ pub missing_features: Option<MissingFeatures>,
+}
+
+#[derive(Subdiagnostic)]
+#[help(codegen_gcc_missing_features)]
+pub(crate) struct MissingFeatures;
+
+impl IntoDiagnostic<'_, ErrorGuaranteed> for TargetFeatureDisableOrEnable<'_> {
+ fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = sess.struct_err(fluent::codegen_gcc_target_feature_disable_or_enable);
+ if let Some(span) = self.span {
+ diag.set_span(span);
+ };
+ if let Some(missing_features) = self.missing_features {
+ diag.subdiagnostic(missing_features);
+ }
+ diag.set_arg("features", self.features.join(", "));
+ diag
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs
new file mode 100644
index 000000000..1248fdcd2
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs
@@ -0,0 +1,230 @@
+#[cfg(feature="master")]
+use gccjit::Context;
+use smallvec::{smallvec, SmallVec};
+
+use rustc_codegen_ssa::target_features::{
+ supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
+};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::bug;
+use rustc_session::Session;
+
+use crate::errors::{PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature, UnknownCTargetFeaturePrefix};
+
+/// The list of GCC features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+/// `--target` and similar).
+pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<String> {
+ // Features that come earlier are overridden by conflicting features later in the string.
+ // Typically we'll want more explicit settings to override the implicit ones, so:
+ //
+ // * Features from -Ctarget-cpu=*; are overridden by [^1]
+ // * Features implied by --target; are overridden by
+ // * Features from -Ctarget-feature; are overridden by
+ // * function specific features.
+ //
+ // [^1]: target-cpu=native is handled here, other target-cpu values are handled implicitly
+ // through GCC march implementation.
+ //
+    // FIXME(nagisa): it isn't clear what the best interaction between features implied by
+    // `-Ctarget-cpu` and `--target` is. On one hand, you'd expect CLI arguments to always
+    // override anything that's implicit, so e.g. when there's no `--target` flag, features implied
+    // by the host target are overridden by `-Ctarget-cpu=*`. On the other hand, what about when both
+ // `--target` and `-Ctarget-cpu=*` are specified? Both then imply some target features and both
+ // flags are specified by the user on the CLI. It isn't as clear-cut which order of precedence
+ // should be taken in cases like these.
+ let mut features = vec![];
+
+ // Features implied by an implicit or explicit `--target`.
+ features.extend(
+ sess.target
+ .features
+ .split(',')
+ .filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
+ .map(String::from),
+ );
+
+    // -Ctarget-feature
+ let supported_features = supported_target_features(sess);
+ let mut featsmap = FxHashMap::default();
+ let feats = sess.opts.cg.target_feature
+ .split(',')
+ .filter_map(|s| {
+ let enable_disable = match s.chars().next() {
+ None => return None,
+ Some(c @ ('+' | '-')) => c,
+ Some(_) => {
+ if diagnostics {
+ sess.emit_warning(UnknownCTargetFeaturePrefix { feature: s });
+ }
+ return None;
+ }
+ };
+
+ let feature = backend_feature_name(s)?;
+ // Warn against use of GCC specific feature names on the CLI.
+ if diagnostics && !supported_features.iter().any(|&(v, _)| v == feature) {
+ let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| {
+ let gcc_features = to_gcc_features(sess, rust_feature);
+ if gcc_features.contains(&feature) && !gcc_features.contains(&rust_feature) {
+ Some(rust_feature)
+ } else {
+ None
+ }
+ });
+ let unknown_feature =
+ if let Some(rust_feature) = rust_feature {
+ UnknownCTargetFeature {
+ feature,
+ rust_feature: PossibleFeature::Some { rust_feature },
+ }
+ }
+ else {
+ UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
+ };
+ sess.emit_warning(unknown_feature);
+ }
+
+ if diagnostics {
+ // FIXME(nagisa): figure out how to not allocate a full hashset here.
+ featsmap.insert(feature, enable_disable == '+');
+ }
+
+ // rustc-specific features do not get passed down to GCC…
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ // ... otherwise though we run through `to_gcc_features` when
+ // passing requests down to GCC. This means that all in-language
+ // features also work on the command line instead of having two
+ // different names when the GCC name and the Rust name differ.
+ Some(to_gcc_features(sess, feature)
+ .iter()
+ .flat_map(|feat| to_gcc_features(sess, feat).into_iter())
+ .map(|feature| {
+ if enable_disable == '-' {
+ format!("-{}", feature)
+ }
+ else {
+ feature.to_string()
+ }
+ })
+ .collect::<Vec<_>>(),
+ )
+ })
+ .flatten();
+ features.extend(feats);
+
+ if diagnostics {
+ if let Some(f) = check_tied_features(sess, &featsmap) {
+ sess.emit_err(TargetFeatureDisableOrEnable {
+ features: f,
+ span: None,
+ missing_features: None,
+ });
+ }
+ }
+
+ features
+}
+
+/// Returns a feature name for the given `+feature` or `-feature` string.
+///
+/// Only allows features that are backend specific (i.e. not [`RUSTC_SPECIFIC_FEATURES`]).
+fn backend_feature_name(s: &str) -> Option<&str> {
+ // features must start with a `+` or `-`.
+ let feature = s.strip_prefix(&['+', '-'][..]).unwrap_or_else(|| {
+ bug!("target feature `{}` must begin with a `+` or `-`", s);
+ });
+ // Rustc-specific feature requests like `+crt-static` or `-crt-static`
+ // are not passed down to GCC.
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ Some(feature)
+}
+
+// To find a list of GCC's names, check https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
+ let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
+ match (arch, s) {
+ ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"],
+ ("x86", "pclmulqdq") => smallvec!["pclmul"],
+ ("x86", "rdrand") => smallvec!["rdrnd"],
+ ("x86", "bmi1") => smallvec!["bmi"],
+ ("x86", "cmpxchg16b") => smallvec!["cx16"],
+ ("x86", "avx512vaes") => smallvec!["vaes"],
+ ("x86", "avx512gfni") => smallvec!["gfni"],
+ ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
+ // NOTE: seems like GCC requires 'avx512bw' for 'avx512vbmi2'.
+ ("x86", "avx512vbmi2") => smallvec!["avx512vbmi2", "avx512bw"],
+ // NOTE: seems like GCC requires 'avx512bw' for 'avx512bitalg'.
+ ("x86", "avx512bitalg") => smallvec!["avx512bitalg", "avx512bw"],
+ ("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
+ ("aarch64", "dpb") => smallvec!["ccpp"],
+ ("aarch64", "dpb2") => smallvec!["ccdp"],
+ ("aarch64", "frintts") => smallvec!["fptoint"],
+ ("aarch64", "fcma") => smallvec!["complxnum"],
+ ("aarch64", "pmuv3") => smallvec!["perfmon"],
+ ("aarch64", "paca") => smallvec!["pauth"],
+ ("aarch64", "pacg") => smallvec!["pauth"],
+ // Rust ties fp and neon together. In GCC neon implicitly enables fp,
+        // but we manually enable neon when a feature only implicitly enables fp.
+ ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
+ ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
+ ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
+ ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
+ ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
+ ("aarch64", "sve") => smallvec!["sve", "neon"],
+ ("aarch64", "sve2") => smallvec!["sve2", "neon"],
+ ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
+ ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
+ ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
+ ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
+ (_, s) => smallvec![s],
+ }
+}
+
+// Given a map from target_features to whether they are enabled or disabled,
+// ensure only valid combinations are allowed.
+pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> {
+ for tied in tied_target_features(sess) {
+ // Tied features must be set to the same value, or not set at all
+ let mut tied_iter = tied.iter();
+ let enabled = features.get(tied_iter.next().unwrap());
+ if tied_iter.any(|feature| enabled != features.get(feature)) {
+ return Some(tied);
+ }
+ }
+ None
+}
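
As a concrete illustration of the rule above: on aarch64, `paca` and `pacg` are a
tied pair, so enabling one without the other must be rejected. A dependency-free
sketch using `std::collections::HashMap` in place of `FxHashMap`:

    use std::collections::HashMap;

    // Tied features must all be set to the same value, or not set at all.
    fn check_tied<'a>(tied: &'a [&'a str], features: &HashMap<&str, bool>) -> Option<&'a [&'a str]> {
        let mut iter = tied.iter();
        let enabled = features.get(iter.next().unwrap());
        if iter.any(|feature| enabled != features.get(feature)) { Some(tied) } else { None }
    }

    fn main() {
        let mut features = HashMap::new();
        features.insert("paca", true);
        features.insert("pacg", false);
        // Mismatched values: the tied pair is reported.
        assert!(check_tied(&["paca", "pacg"], &features).is_some());
        features.insert("pacg", true);
        // Both enabled: accepted.
        assert!(check_tied(&["paca", "pacg"], &features).is_none());
    }
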
+
+fn arch_to_gcc(name: &str) -> &str {
+ match name {
+ "M68020" => "68020",
+ _ => name,
+ }
+}
+
+fn handle_native(name: &str) -> &str {
+ if name != "native" {
+ return arch_to_gcc(name);
+ }
+
+ #[cfg(feature="master")]
+ {
+ // Get the native arch.
+ let context = Context::default();
+ context.get_target_info().arch().unwrap()
+ .to_str()
+ .unwrap()
+ }
+ #[cfg(not(feature="master"))]
+ unimplemented!();
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ match sess.opts.cg.target_cpu {
+ Some(ref name) => handle_native(name),
+ None => handle_native(sess.target.cpu.as_ref()),
+ }
+}
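
Putting the pieces of `global_gcc_features` together: every comma-separated
`-Ctarget-feature` entry must begin with `+` or `-`, and each Rust feature name may
expand to several GCC names via `to_gcc_features`. A self-contained sketch of that
pipeline on plain strings, where `expand` stands in for two of the x86 cases above:

    // Stand-in for to_gcc_features: one Rust feature can map to several GCC features.
    fn expand(feature: &str) -> Vec<&str> {
        match feature {
            "sse4.2" => vec!["sse4.2", "crc32"],
            "bmi1" => vec!["bmi"],
            other => vec![other],
        }
    }

    fn parse(target_feature: &str) -> Vec<String> {
        target_feature
            .split(',')
            .filter_map(|s| {
                let sign = s.chars().next()?;
                if sign != '+' && sign != '-' {
                    return None; // the real code warns with UnknownCTargetFeaturePrefix
                }
                let name = &s[1..];
                Some(
                    expand(name)
                        .into_iter()
                        .map(|f| if sign == '-' { format!("-{}", f) } else { f.to_string() })
                        .collect::<Vec<_>>(),
                )
            })
            .flatten()
            .collect()
    }

    fn main() {
        // `+sse4.2` fans out to two GCC features; `-bmi1` is renamed and kept disabled.
        assert_eq!(parse("+sse4.2,-bmi1"), vec!["sse4.2", "crc32", "-bmi"]);
    }
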
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
index 0cf120479..ea8550d20 100644
--- a/compiler/rustc_codegen_gcc/src/int.rs
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -7,7 +7,9 @@ use std::convert::TryFrom;
use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
-use rustc_middle::ty::Ty;
+use rustc_middle::ty::{ParamEnv, Ty};
+use rustc_target::abi::{Endian, call::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode}};
+use rustc_target::spec;
use crate::builder::ToGccComp;
use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
@@ -36,13 +38,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.cx.context.new_unary_op(None, operation, typ, a)
}
else {
- // TODO(antoyo): use __negdi2 and __negti2 instead?
let element_type = typ.dyncast_array().expect("element type");
- let values = [
+ self.from_low_high_rvalues(typ,
self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
- ];
- self.cx.context.new_array_constructor(None, typ, &values)
+ )
}
}
@@ -52,9 +52,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
}
else {
- let param_a = self.context.new_parameter(None, a_type, "a");
- let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
- self.context.new_call(None, func, &[a])
+ self.gcc_add(self.gcc_not(a), self.gcc_int(a_type, 1))
}
}
@@ -103,7 +101,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
self.llbb().end_with_conditional(None, condition, then_block, else_block);
- // TODO(antoyo): take endianness into account.
let shift_value = self.gcc_sub(b, sixty_four);
let high = self.high(a);
let sign =
@@ -113,11 +110,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
else {
zero
};
- let values = [
- high >> shift_value,
- sign,
- ];
- let array_value = self.context.new_array_constructor(None, a_type, &values);
+ let array_value = self.from_low_high_rvalues(a_type, high >> shift_value, sign);
then_block.add_assignment(None, result, array_value);
then_block.end_with_jump(None, after_block);
@@ -133,11 +126,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
- let values = [
+ let array_value = self.from_low_high_rvalues(a_type,
(high << shift_value) | shifted_low,
high >> b,
- ];
- let array_value = self.context.new_array_constructor(None, a_type, &values);
+ );
actual_else_block.add_assignment(None, result, array_value);
actual_else_block.end_with_jump(None, after_block);
@@ -317,18 +309,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
_ => unreachable!(),
},
};
- let a_type = lhs.get_type();
- let b_type = rhs.get_type();
- let param_a = self.context.new_parameter(None, a_type, "a");
- let param_b = self.context.new_parameter(None, b_type, "b");
- let result_field = self.context.new_field(None, a_type, "result");
- let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
- let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
- let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
- let result = self.context.new_call(None, func, &[lhs, rhs]);
- let overflow = result.access_field(None, overflow_field);
- let int_result = result.access_field(None, result_field);
- return (int_result, overflow);
+ return self.operation_with_overflow(func_name, lhs, rhs);
},
_ => {
match oop {
@@ -353,23 +334,111 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
(res.dereference(None).to_rvalue(), overflow)
}
- pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+ pub fn operation_with_overflow(&self, func_name: &str, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let result_field = self.context.new_field(None, a_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+
+ let ret_ty = Ty::new_tup(self.tcx, &[self.tcx.types.i128, self.tcx.types.bool]);
+ let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ret_ty)).unwrap();
+
+ let arg_abi = ArgAbi {
+ layout,
+ mode: PassMode::Direct(ArgAttributes::new()),
+ };
+ let mut fn_abi = FnAbi {
+ args: vec![arg_abi.clone(), arg_abi.clone()].into_boxed_slice(),
+ ret: arg_abi,
+ c_variadic: false,
+ fixed_count: 2,
+ conv: Conv::C,
+ can_unwind: false,
+ };
+ fn_abi.adjust_for_foreign_abi(self.cx, spec::abi::Abi::C {
+ unwind: false,
+ }).unwrap();
+
+ let indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
+
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let result =
+ if indirect {
+ let return_value = self.current_func().new_local(None, return_type.as_type(), "return_value");
+ let return_param_type = return_type.as_type().make_pointer();
+ let return_param = self.context.new_parameter(None, return_param_type, "return_value");
+ let func = self.context.new_function(None, FunctionType::Extern, self.type_void(), &[return_param, param_a, param_b], func_name, false);
+ self.llbb().add_eval(None, self.context.new_call(None, func, &[return_value.get_address(None), lhs, rhs]));
+ return_value.to_rvalue()
+ }
+ else {
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[lhs, rhs])
+ };
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ return (int_result, overflow);
+ }
+
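
The `indirect` branch above implements the usual struct-return convention: when
`adjust_for_foreign_abi` decides that the `{ i128, bool }` result is passed
indirectly, the callee receives a pointer to caller-allocated storage as a leading
hidden parameter instead of returning the struct by value. A plain-Rust sketch of
the two shapes (the names are illustrative, not part of the runtime):

    struct ResultOverflow { result: i128, overflow: bool }

    // Direct return: the struct comes back by value.
    fn add_direct(a: i128, b: i128) -> ResultOverflow {
        let (result, overflow) = a.overflowing_add(b);
        ResultOverflow { result, overflow }
    }

    // Indirect return: the caller allocates and the callee writes through a
    // pointer, mirroring the hidden `return_value` parameter built above.
    fn add_indirect(ret: &mut ResultOverflow, a: i128, b: i128) {
        let (result, overflow) = a.overflowing_add(b);
        *ret = ResultOverflow { result, overflow };
    }

    fn main() {
        let direct = add_direct(i128::MAX, 1);
        let mut indirect = ResultOverflow { result: 0, overflow: false };
        add_indirect(&mut indirect, i128::MAX, 1);
        assert!(direct.overflow && indirect.overflow);
    }
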
+ pub fn gcc_icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
let a_type = lhs.get_type();
let b_type = rhs.get_type();
if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
- let signed = a_type.is_compatible_with(self.i128_type);
- let sign =
- if signed {
- ""
- }
- else {
- "u"
- };
- let func_name = format!("__{}cmpti2", sign);
- let param_a = self.context.new_parameter(None, a_type, "a");
- let param_b = self.context.new_parameter(None, b_type, "b");
- let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
- let cmp = self.context.new_call(None, func, &[lhs, rhs]);
+ // This algorithm is based on compiler-rt's __cmpti2:
+ // https://github.com/llvm-mirror/compiler-rt/blob/f0745e8476f069296a7c71accedd061dce4cdf79/lib/builtins/cmpti2.c#L21
+ let result = self.current_func().new_local(None, self.int_type, "icmp_result");
+ let block1 = self.current_func().new_block("block1");
+ let block2 = self.current_func().new_block("block2");
+ let block3 = self.current_func().new_block("block3");
+ let block4 = self.current_func().new_block("block4");
+ let block5 = self.current_func().new_block("block5");
+ let block6 = self.current_func().new_block("block6");
+ let block7 = self.current_func().new_block("block7");
+ let block8 = self.current_func().new_block("block8");
+ let after = self.current_func().new_block("after");
+
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+ // NOTE: cast low to its unsigned type in order to perform a comparison correctly (e.g.
+ // the sign is only on high).
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+
+ let lhs_low = self.context.new_cast(None, self.low(lhs), unsigned_type);
+ let rhs_low = self.context.new_cast(None, self.low(rhs), unsigned_type);
+
+ let condition = self.context.new_comparison(None, ComparisonOp::LessThan, self.high(lhs), self.high(rhs));
+ self.llbb().end_with_conditional(None, condition, block1, block2);
+
+ block1.add_assignment(None, result, self.context.new_rvalue_zero(self.int_type));
+ block1.end_with_jump(None, after);
+
+ let condition = self.context.new_comparison(None, ComparisonOp::GreaterThan, self.high(lhs), self.high(rhs));
+ block2.end_with_conditional(None, condition, block3, block4);
+
+ block3.add_assignment(None, result, self.context.new_rvalue_from_int(self.int_type, 2));
+ block3.end_with_jump(None, after);
+
+ let condition = self.context.new_comparison(None, ComparisonOp::LessThan, lhs_low, rhs_low);
+ block4.end_with_conditional(None, condition, block5, block6);
+
+ block5.add_assignment(None, result, self.context.new_rvalue_zero(self.int_type));
+ block5.end_with_jump(None, after);
+
+ let condition = self.context.new_comparison(None, ComparisonOp::GreaterThan, lhs_low, rhs_low);
+ block6.end_with_conditional(None, condition, block7, block8);
+
+ block7.add_assignment(None, result, self.context.new_rvalue_from_int(self.int_type, 2));
+ block7.end_with_jump(None, after);
+
+ block8.add_assignment(None, result, self.context.new_rvalue_one(self.int_type));
+ block8.end_with_jump(None, after);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+            // state needs to be updated.
+ self.switch_to_block(after);
+
+ let cmp = result.to_rvalue();
let (op, limit) =
match op {
IntPredicate::IntEQ => {
@@ -378,6 +447,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
IntPredicate::IntNE => {
return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
},
+ // TODO(antoyo): cast to u128 for unsigned comparison. See below.
IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
IntPredicate::IntULT => (ComparisonOp::Equals, 0),
@@ -407,6 +477,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
rhs = self.context.new_cast(None, rhs, a_type);
}
}
+ match op {
+ IntPredicate::IntUGT | IntPredicate::IntUGE | IntPredicate::IntULT | IntPredicate::IntULE => {
+ if !a_type.is_vector() {
+ let unsigned_type = a_type.to_unsigned(&self.cx);
+ lhs = self.context.new_cast(None, lhs, unsigned_type);
+ rhs = self.context.new_cast(None, rhs, unsigned_type);
+ }
+ },
+ // TODO(antoyo): we probably need to handle signed comparison for unsigned
+ // integers.
+ _ => (),
+ }
self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
}
}
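
The compiler-rt scheme used above collapses a 128-bit comparison into a three-valued
integer, 0 for less-than, 1 for equal, 2 for greater-than, which the predicate table
then checks against `limit` (e.g. `IntUGE` becomes `cmp >= 1`). A sketch of the same
contract on native Rust integers, including the detail that the low words compare as
unsigned because the sign lives only in the high word:

    // Mirrors __cmpti2's contract: 0 if a < b, 1 if a == b, 2 if a > b.
    fn cmp3(a: i128, b: i128) -> i32 {
        let (a_high, b_high) = ((a >> 64) as i64, (b >> 64) as i64);
        // Low words are compared as unsigned: the sign is only on high.
        let (a_low, b_low) = (a as u64, b as u64);
        if a_high < b_high { return 0; }
        if a_high > b_high { return 2; }
        if a_low < b_low { return 0; }
        if a_low > b_low { return 2; }
        1
    }

    fn main() {
        assert_eq!(cmp3(-1, 1), 0);
        assert_eq!(cmp3(7, 7), 1);
        assert_eq!(cmp3(i128::MAX, -1), 2);
    }
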
@@ -418,11 +500,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
a ^ b
}
else {
- let values = [
+ self.from_low_high_rvalues(a_type,
self.low(a) ^ self.low(b),
self.high(a) ^ self.high(b),
- ];
- self.context.new_array_constructor(None, a_type, &values)
+ )
}
}
@@ -468,12 +549,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
self.llbb().end_with_conditional(None, condition, then_block, else_block);
- // TODO(antoyo): take endianness into account.
- let values = [
+ let array_value = self.from_low_high_rvalues(a_type,
zero,
self.low(a) << (b - sixty_four),
- ];
- let array_value = self.context.new_array_constructor(None, a_type, &values);
+ );
then_block.add_assignment(None, result, array_value);
then_block.end_with_jump(None, after_block);
@@ -484,16 +563,16 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
b0_block.end_with_jump(None, after_block);
// NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ // TODO(antoyo): adjust this ^ comment.
let unsigned_type = native_int_type.to_unsigned(&self.cx);
let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
- let values = [
+
+ let array_value = self.from_low_high_rvalues(a_type,
self.low(a) << b,
(self.high(a) << b) | high_low,
- ];
-
- let array_value = self.context.new_array_constructor(None, a_type, &values);
+ );
actual_else_block.add_assignment(None, result, array_value);
actual_else_block.end_with_jump(None, after_block);
@@ -509,16 +588,16 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let arg_type = arg.get_type();
if !self.is_native_int_type(arg_type) {
let native_int_type = arg_type.dyncast_array().expect("get element type");
- let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
+ let lsb = self.low(arg);
let swapped_lsb = self.gcc_bswap(lsb, width / 2);
let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
- let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
+ let msb = self.high(arg);
let swapped_msb = self.gcc_bswap(msb, width / 2);
let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
// NOTE: we also need to swap the two elements here, in addition to swapping inside
// the elements themselves like done above.
- return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
+ return self.from_low_high_rvalues(arg_type, swapped_msb, swapped_lsb);
}
// TODO(antoyo): check if it's faster to use string literals and a
@@ -546,7 +625,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
- if self.is_native_int_type_or_bool(typ) {
+ if typ.is_u128(self) {
+ // FIXME(antoyo): libgccjit cannot create 128-bit values yet.
+ let num = self.context.new_rvalue_from_long(self.u64_type, int as i64);
+ self.gcc_int_cast(num, typ)
+ }
+ else if self.is_native_int_type_or_bool(typ) {
self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
}
else {
@@ -572,6 +656,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
}
else if typ.is_i128(self) {
+ // FIXME(antoyo): libgccjit cannot create 128-bit values yet.
let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
self.gcc_int_cast(num, typ)
}
@@ -616,11 +701,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
else {
assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
let native_int_type = a_type.dyncast_array().expect("get element type");
- let values = [
+ self.from_low_high_rvalues(a_type,
self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
- ];
- self.context.new_array_constructor(None, a_type, &values)
+ )
}
}
@@ -644,11 +728,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let zero = self.context.new_rvalue_zero(value_type);
let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
- let values = [
+ self.from_low_high_rvalues(dest_typ,
self.context.new_cast(None, value, dest_element_type),
self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
- ];
- self.context.new_array_constructor(None, dest_typ, &values)
+ )
}
else {
// Since u128 and i128 are the only types that can be unsupported, we know the type of
@@ -726,20 +809,47 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
- self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
+ let index =
+ match self.sess().target.options.endian {
+ Endian::Little => 1,
+ Endian::Big => 0,
+ };
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
.to_rvalue()
}
fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
- self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
+ let index =
+ match self.sess().target.options.endian {
+ Endian::Little => 0,
+ Endian::Big => 1,
+ };
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
.to_rvalue()
}
+ fn from_low_high_rvalues(&self, typ: Type<'gcc>, low: RValue<'gcc>, high: RValue<'gcc>) -> RValue<'gcc> {
+ let (first, last) =
+ match self.sess().target.options.endian {
+ Endian::Little => (low, high),
+ Endian::Big => (high, low),
+ };
+
+ let values = [first, last];
+ self.context.new_array_constructor(None, typ, &values)
+ }
+
fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
+ let (first, last) =
+ match self.sess().target.options.endian {
+ Endian::Little => (low, high),
+ Endian::Big => (high, low),
+ };
+
let native_int_type = typ.dyncast_array().expect("get element type");
let values = [
- self.context.new_rvalue_from_long(native_int_type, low),
- self.context.new_rvalue_from_long(native_int_type, high),
+ self.context.new_rvalue_from_long(native_int_type, first),
+ self.context.new_rvalue_from_long(native_int_type, last),
];
self.context.new_array_constructor(None, typ, &values)
}
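
The low/high helpers above now share one convention: a non-native 128-bit value is
an array of two native words, and the target's endianness decides which index holds
the low word. A self-contained sketch of that layout rule:

    #[derive(Clone, Copy)]
    enum Endian { Little, Big }

    fn low_index(endian: Endian) -> usize {
        match endian { Endian::Little => 0, Endian::Big => 1 }
    }

    fn high_index(endian: Endian) -> usize {
        match endian { Endian::Little => 1, Endian::Big => 0 }
    }

    // Mirrors from_low_high_rvalues: place each word at its endian-dependent index.
    fn from_low_high(endian: Endian, low: u64, high: u64) -> [u64; 2] {
        let mut words = [0; 2];
        words[low_index(endian)] = low;
        words[high_index(endian)] = high;
        words
    }

    fn main() {
        let value: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;
        let (low, high) = (value as u64, (value >> 64) as u64);
        // On a little-endian target the low word sits at index 0.
        assert_eq!(from_low_high(Endian::Little, low, high), [low, high]);
        assert_eq!(from_low_high(Endian::Big, low, high), [high, low]);
    }
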
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
index 438eab789..15d67385c 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -2254,6 +2254,1494 @@ match name {
"llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch",
"llvm.hexagon.vmemcpy" => "__builtin_hexagon_vmemcpy",
"llvm.hexagon.vmemset" => "__builtin_hexagon_vmemset",
+ // loongarch
+ "llvm.loongarch.asrtgt.d" => "__builtin_loongarch_asrtgt_d",
+ "llvm.loongarch.asrtle.d" => "__builtin_loongarch_asrtle_d",
+ "llvm.loongarch.break" => "__builtin_loongarch_break",
+ "llvm.loongarch.cacop.d" => "__builtin_loongarch_cacop_d",
+ "llvm.loongarch.cacop.w" => "__builtin_loongarch_cacop_w",
+ "llvm.loongarch.cpucfg" => "__builtin_loongarch_cpucfg",
+ "llvm.loongarch.crc.w.b.w" => "__builtin_loongarch_crc_w_b_w",
+ "llvm.loongarch.crc.w.d.w" => "__builtin_loongarch_crc_w_d_w",
+ "llvm.loongarch.crc.w.h.w" => "__builtin_loongarch_crc_w_h_w",
+ "llvm.loongarch.crc.w.w.w" => "__builtin_loongarch_crc_w_w_w",
+ "llvm.loongarch.crcc.w.b.w" => "__builtin_loongarch_crcc_w_b_w",
+ "llvm.loongarch.crcc.w.d.w" => "__builtin_loongarch_crcc_w_d_w",
+ "llvm.loongarch.crcc.w.h.w" => "__builtin_loongarch_crcc_w_h_w",
+ "llvm.loongarch.crcc.w.w.w" => "__builtin_loongarch_crcc_w_w_w",
+ "llvm.loongarch.csrrd.d" => "__builtin_loongarch_csrrd_d",
+ "llvm.loongarch.csrrd.w" => "__builtin_loongarch_csrrd_w",
+ "llvm.loongarch.csrwr.d" => "__builtin_loongarch_csrwr_d",
+ "llvm.loongarch.csrwr.w" => "__builtin_loongarch_csrwr_w",
+ "llvm.loongarch.csrxchg.d" => "__builtin_loongarch_csrxchg_d",
+ "llvm.loongarch.csrxchg.w" => "__builtin_loongarch_csrxchg_w",
+ "llvm.loongarch.dbar" => "__builtin_loongarch_dbar",
+ "llvm.loongarch.ibar" => "__builtin_loongarch_ibar",
+ "llvm.loongarch.iocsrrd.b" => "__builtin_loongarch_iocsrrd_b",
+ "llvm.loongarch.iocsrrd.d" => "__builtin_loongarch_iocsrrd_d",
+ "llvm.loongarch.iocsrrd.h" => "__builtin_loongarch_iocsrrd_h",
+ "llvm.loongarch.iocsrrd.w" => "__builtin_loongarch_iocsrrd_w",
+ "llvm.loongarch.iocsrwr.b" => "__builtin_loongarch_iocsrwr_b",
+ "llvm.loongarch.iocsrwr.d" => "__builtin_loongarch_iocsrwr_d",
+ "llvm.loongarch.iocsrwr.h" => "__builtin_loongarch_iocsrwr_h",
+ "llvm.loongarch.iocsrwr.w" => "__builtin_loongarch_iocsrwr_w",
+ "llvm.loongarch.lasx.vext2xv.d.b" => "__builtin_lasx_vext2xv_d_b",
+ "llvm.loongarch.lasx.vext2xv.d.h" => "__builtin_lasx_vext2xv_d_h",
+ "llvm.loongarch.lasx.vext2xv.d.w" => "__builtin_lasx_vext2xv_d_w",
+ "llvm.loongarch.lasx.vext2xv.du.bu" => "__builtin_lasx_vext2xv_du_bu",
+ "llvm.loongarch.lasx.vext2xv.du.hu" => "__builtin_lasx_vext2xv_du_hu",
+ "llvm.loongarch.lasx.vext2xv.du.wu" => "__builtin_lasx_vext2xv_du_wu",
+ "llvm.loongarch.lasx.vext2xv.h.b" => "__builtin_lasx_vext2xv_h_b",
+ "llvm.loongarch.lasx.vext2xv.hu.bu" => "__builtin_lasx_vext2xv_hu_bu",
+ "llvm.loongarch.lasx.vext2xv.w.b" => "__builtin_lasx_vext2xv_w_b",
+ "llvm.loongarch.lasx.vext2xv.w.h" => "__builtin_lasx_vext2xv_w_h",
+ "llvm.loongarch.lasx.vext2xv.wu.bu" => "__builtin_lasx_vext2xv_wu_bu",
+ "llvm.loongarch.lasx.vext2xv.wu.hu" => "__builtin_lasx_vext2xv_wu_hu",
+ "llvm.loongarch.lasx.xbnz.b" => "__builtin_lasx_xbnz_b",
+ "llvm.loongarch.lasx.xbnz.d" => "__builtin_lasx_xbnz_d",
+ "llvm.loongarch.lasx.xbnz.h" => "__builtin_lasx_xbnz_h",
+ "llvm.loongarch.lasx.xbnz.v" => "__builtin_lasx_xbnz_v",
+ "llvm.loongarch.lasx.xbnz.w" => "__builtin_lasx_xbnz_w",
+ "llvm.loongarch.lasx.xbz.b" => "__builtin_lasx_xbz_b",
+ "llvm.loongarch.lasx.xbz.d" => "__builtin_lasx_xbz_d",
+ "llvm.loongarch.lasx.xbz.h" => "__builtin_lasx_xbz_h",
+ "llvm.loongarch.lasx.xbz.v" => "__builtin_lasx_xbz_v",
+ "llvm.loongarch.lasx.xbz.w" => "__builtin_lasx_xbz_w",
+ "llvm.loongarch.lasx.xvabsd.b" => "__builtin_lasx_xvabsd_b",
+ "llvm.loongarch.lasx.xvabsd.bu" => "__builtin_lasx_xvabsd_bu",
+ "llvm.loongarch.lasx.xvabsd.d" => "__builtin_lasx_xvabsd_d",
+ "llvm.loongarch.lasx.xvabsd.du" => "__builtin_lasx_xvabsd_du",
+ "llvm.loongarch.lasx.xvabsd.h" => "__builtin_lasx_xvabsd_h",
+ "llvm.loongarch.lasx.xvabsd.hu" => "__builtin_lasx_xvabsd_hu",
+ "llvm.loongarch.lasx.xvabsd.w" => "__builtin_lasx_xvabsd_w",
+ "llvm.loongarch.lasx.xvabsd.wu" => "__builtin_lasx_xvabsd_wu",
+ "llvm.loongarch.lasx.xvadd.b" => "__builtin_lasx_xvadd_b",
+ "llvm.loongarch.lasx.xvadd.d" => "__builtin_lasx_xvadd_d",
+ "llvm.loongarch.lasx.xvadd.h" => "__builtin_lasx_xvadd_h",
+ "llvm.loongarch.lasx.xvadd.q" => "__builtin_lasx_xvadd_q",
+ "llvm.loongarch.lasx.xvadd.w" => "__builtin_lasx_xvadd_w",
+ "llvm.loongarch.lasx.xvadda.b" => "__builtin_lasx_xvadda_b",
+ "llvm.loongarch.lasx.xvadda.d" => "__builtin_lasx_xvadda_d",
+ "llvm.loongarch.lasx.xvadda.h" => "__builtin_lasx_xvadda_h",
+ "llvm.loongarch.lasx.xvadda.w" => "__builtin_lasx_xvadda_w",
+ "llvm.loongarch.lasx.xvaddi.bu" => "__builtin_lasx_xvaddi_bu",
+ "llvm.loongarch.lasx.xvaddi.du" => "__builtin_lasx_xvaddi_du",
+ "llvm.loongarch.lasx.xvaddi.hu" => "__builtin_lasx_xvaddi_hu",
+ "llvm.loongarch.lasx.xvaddi.wu" => "__builtin_lasx_xvaddi_wu",
+ "llvm.loongarch.lasx.xvaddwev.d.w" => "__builtin_lasx_xvaddwev_d_w",
+ "llvm.loongarch.lasx.xvaddwev.d.wu" => "__builtin_lasx_xvaddwev_d_wu",
+ "llvm.loongarch.lasx.xvaddwev.d.wu.w" => "__builtin_lasx_xvaddwev_d_wu_w",
+ "llvm.loongarch.lasx.xvaddwev.h.b" => "__builtin_lasx_xvaddwev_h_b",
+ "llvm.loongarch.lasx.xvaddwev.h.bu" => "__builtin_lasx_xvaddwev_h_bu",
+ "llvm.loongarch.lasx.xvaddwev.h.bu.b" => "__builtin_lasx_xvaddwev_h_bu_b",
+ "llvm.loongarch.lasx.xvaddwev.q.d" => "__builtin_lasx_xvaddwev_q_d",
+ "llvm.loongarch.lasx.xvaddwev.q.du" => "__builtin_lasx_xvaddwev_q_du",
+ "llvm.loongarch.lasx.xvaddwev.q.du.d" => "__builtin_lasx_xvaddwev_q_du_d",
+ "llvm.loongarch.lasx.xvaddwev.w.h" => "__builtin_lasx_xvaddwev_w_h",
+ "llvm.loongarch.lasx.xvaddwev.w.hu" => "__builtin_lasx_xvaddwev_w_hu",
+ "llvm.loongarch.lasx.xvaddwev.w.hu.h" => "__builtin_lasx_xvaddwev_w_hu_h",
+ "llvm.loongarch.lasx.xvaddwod.d.w" => "__builtin_lasx_xvaddwod_d_w",
+ "llvm.loongarch.lasx.xvaddwod.d.wu" => "__builtin_lasx_xvaddwod_d_wu",
+ "llvm.loongarch.lasx.xvaddwod.d.wu.w" => "__builtin_lasx_xvaddwod_d_wu_w",
+ "llvm.loongarch.lasx.xvaddwod.h.b" => "__builtin_lasx_xvaddwod_h_b",
+ "llvm.loongarch.lasx.xvaddwod.h.bu" => "__builtin_lasx_xvaddwod_h_bu",
+ "llvm.loongarch.lasx.xvaddwod.h.bu.b" => "__builtin_lasx_xvaddwod_h_bu_b",
+ "llvm.loongarch.lasx.xvaddwod.q.d" => "__builtin_lasx_xvaddwod_q_d",
+ "llvm.loongarch.lasx.xvaddwod.q.du" => "__builtin_lasx_xvaddwod_q_du",
+ "llvm.loongarch.lasx.xvaddwod.q.du.d" => "__builtin_lasx_xvaddwod_q_du_d",
+ "llvm.loongarch.lasx.xvaddwod.w.h" => "__builtin_lasx_xvaddwod_w_h",
+ "llvm.loongarch.lasx.xvaddwod.w.hu" => "__builtin_lasx_xvaddwod_w_hu",
+ "llvm.loongarch.lasx.xvaddwod.w.hu.h" => "__builtin_lasx_xvaddwod_w_hu_h",
+ "llvm.loongarch.lasx.xvand.v" => "__builtin_lasx_xvand_v",
+ "llvm.loongarch.lasx.xvandi.b" => "__builtin_lasx_xvandi_b",
+ "llvm.loongarch.lasx.xvandn.v" => "__builtin_lasx_xvandn_v",
+ "llvm.loongarch.lasx.xvavg.b" => "__builtin_lasx_xvavg_b",
+ "llvm.loongarch.lasx.xvavg.bu" => "__builtin_lasx_xvavg_bu",
+ "llvm.loongarch.lasx.xvavg.d" => "__builtin_lasx_xvavg_d",
+ "llvm.loongarch.lasx.xvavg.du" => "__builtin_lasx_xvavg_du",
+ "llvm.loongarch.lasx.xvavg.h" => "__builtin_lasx_xvavg_h",
+ "llvm.loongarch.lasx.xvavg.hu" => "__builtin_lasx_xvavg_hu",
+ "llvm.loongarch.lasx.xvavg.w" => "__builtin_lasx_xvavg_w",
+ "llvm.loongarch.lasx.xvavg.wu" => "__builtin_lasx_xvavg_wu",
+ "llvm.loongarch.lasx.xvavgr.b" => "__builtin_lasx_xvavgr_b",
+ "llvm.loongarch.lasx.xvavgr.bu" => "__builtin_lasx_xvavgr_bu",
+ "llvm.loongarch.lasx.xvavgr.d" => "__builtin_lasx_xvavgr_d",
+ "llvm.loongarch.lasx.xvavgr.du" => "__builtin_lasx_xvavgr_du",
+ "llvm.loongarch.lasx.xvavgr.h" => "__builtin_lasx_xvavgr_h",
+ "llvm.loongarch.lasx.xvavgr.hu" => "__builtin_lasx_xvavgr_hu",
+ "llvm.loongarch.lasx.xvavgr.w" => "__builtin_lasx_xvavgr_w",
+ "llvm.loongarch.lasx.xvavgr.wu" => "__builtin_lasx_xvavgr_wu",
+ "llvm.loongarch.lasx.xvbitclr.b" => "__builtin_lasx_xvbitclr_b",
+ "llvm.loongarch.lasx.xvbitclr.d" => "__builtin_lasx_xvbitclr_d",
+ "llvm.loongarch.lasx.xvbitclr.h" => "__builtin_lasx_xvbitclr_h",
+ "llvm.loongarch.lasx.xvbitclr.w" => "__builtin_lasx_xvbitclr_w",
+ "llvm.loongarch.lasx.xvbitclri.b" => "__builtin_lasx_xvbitclri_b",
+ "llvm.loongarch.lasx.xvbitclri.d" => "__builtin_lasx_xvbitclri_d",
+ "llvm.loongarch.lasx.xvbitclri.h" => "__builtin_lasx_xvbitclri_h",
+ "llvm.loongarch.lasx.xvbitclri.w" => "__builtin_lasx_xvbitclri_w",
+ "llvm.loongarch.lasx.xvbitrev.b" => "__builtin_lasx_xvbitrev_b",
+ "llvm.loongarch.lasx.xvbitrev.d" => "__builtin_lasx_xvbitrev_d",
+ "llvm.loongarch.lasx.xvbitrev.h" => "__builtin_lasx_xvbitrev_h",
+ "llvm.loongarch.lasx.xvbitrev.w" => "__builtin_lasx_xvbitrev_w",
+ "llvm.loongarch.lasx.xvbitrevi.b" => "__builtin_lasx_xvbitrevi_b",
+ "llvm.loongarch.lasx.xvbitrevi.d" => "__builtin_lasx_xvbitrevi_d",
+ "llvm.loongarch.lasx.xvbitrevi.h" => "__builtin_lasx_xvbitrevi_h",
+ "llvm.loongarch.lasx.xvbitrevi.w" => "__builtin_lasx_xvbitrevi_w",
+ "llvm.loongarch.lasx.xvbitsel.v" => "__builtin_lasx_xvbitsel_v",
+ "llvm.loongarch.lasx.xvbitseli.b" => "__builtin_lasx_xvbitseli_b",
+ "llvm.loongarch.lasx.xvbitset.b" => "__builtin_lasx_xvbitset_b",
+ "llvm.loongarch.lasx.xvbitset.d" => "__builtin_lasx_xvbitset_d",
+ "llvm.loongarch.lasx.xvbitset.h" => "__builtin_lasx_xvbitset_h",
+ "llvm.loongarch.lasx.xvbitset.w" => "__builtin_lasx_xvbitset_w",
+ "llvm.loongarch.lasx.xvbitseti.b" => "__builtin_lasx_xvbitseti_b",
+ "llvm.loongarch.lasx.xvbitseti.d" => "__builtin_lasx_xvbitseti_d",
+ "llvm.loongarch.lasx.xvbitseti.h" => "__builtin_lasx_xvbitseti_h",
+ "llvm.loongarch.lasx.xvbitseti.w" => "__builtin_lasx_xvbitseti_w",
+ "llvm.loongarch.lasx.xvbsll.v" => "__builtin_lasx_xvbsll_v",
+ "llvm.loongarch.lasx.xvbsrl.v" => "__builtin_lasx_xvbsrl_v",
+ "llvm.loongarch.lasx.xvclo.b" => "__builtin_lasx_xvclo_b",
+ "llvm.loongarch.lasx.xvclo.d" => "__builtin_lasx_xvclo_d",
+ "llvm.loongarch.lasx.xvclo.h" => "__builtin_lasx_xvclo_h",
+ "llvm.loongarch.lasx.xvclo.w" => "__builtin_lasx_xvclo_w",
+ "llvm.loongarch.lasx.xvclz.b" => "__builtin_lasx_xvclz_b",
+ "llvm.loongarch.lasx.xvclz.d" => "__builtin_lasx_xvclz_d",
+ "llvm.loongarch.lasx.xvclz.h" => "__builtin_lasx_xvclz_h",
+ "llvm.loongarch.lasx.xvclz.w" => "__builtin_lasx_xvclz_w",
+ "llvm.loongarch.lasx.xvdiv.b" => "__builtin_lasx_xvdiv_b",
+ "llvm.loongarch.lasx.xvdiv.bu" => "__builtin_lasx_xvdiv_bu",
+ "llvm.loongarch.lasx.xvdiv.d" => "__builtin_lasx_xvdiv_d",
+ "llvm.loongarch.lasx.xvdiv.du" => "__builtin_lasx_xvdiv_du",
+ "llvm.loongarch.lasx.xvdiv.h" => "__builtin_lasx_xvdiv_h",
+ "llvm.loongarch.lasx.xvdiv.hu" => "__builtin_lasx_xvdiv_hu",
+ "llvm.loongarch.lasx.xvdiv.w" => "__builtin_lasx_xvdiv_w",
+ "llvm.loongarch.lasx.xvdiv.wu" => "__builtin_lasx_xvdiv_wu",
+ "llvm.loongarch.lasx.xvexth.d.w" => "__builtin_lasx_xvexth_d_w",
+ "llvm.loongarch.lasx.xvexth.du.wu" => "__builtin_lasx_xvexth_du_wu",
+ "llvm.loongarch.lasx.xvexth.h.b" => "__builtin_lasx_xvexth_h_b",
+ "llvm.loongarch.lasx.xvexth.hu.bu" => "__builtin_lasx_xvexth_hu_bu",
+ "llvm.loongarch.lasx.xvexth.q.d" => "__builtin_lasx_xvexth_q_d",
+ "llvm.loongarch.lasx.xvexth.qu.du" => "__builtin_lasx_xvexth_qu_du",
+ "llvm.loongarch.lasx.xvexth.w.h" => "__builtin_lasx_xvexth_w_h",
+ "llvm.loongarch.lasx.xvexth.wu.hu" => "__builtin_lasx_xvexth_wu_hu",
+ "llvm.loongarch.lasx.xvextl.q.d" => "__builtin_lasx_xvextl_q_d",
+ "llvm.loongarch.lasx.xvextl.qu.du" => "__builtin_lasx_xvextl_qu_du",
+ "llvm.loongarch.lasx.xvextrins.b" => "__builtin_lasx_xvextrins_b",
+ "llvm.loongarch.lasx.xvextrins.d" => "__builtin_lasx_xvextrins_d",
+ "llvm.loongarch.lasx.xvextrins.h" => "__builtin_lasx_xvextrins_h",
+ "llvm.loongarch.lasx.xvextrins.w" => "__builtin_lasx_xvextrins_w",
+ "llvm.loongarch.lasx.xvfadd.d" => "__builtin_lasx_xvfadd_d",
+ "llvm.loongarch.lasx.xvfadd.s" => "__builtin_lasx_xvfadd_s",
+ "llvm.loongarch.lasx.xvfclass.d" => "__builtin_lasx_xvfclass_d",
+ "llvm.loongarch.lasx.xvfclass.s" => "__builtin_lasx_xvfclass_s",
+ "llvm.loongarch.lasx.xvfcmp.caf.d" => "__builtin_lasx_xvfcmp_caf_d",
+ "llvm.loongarch.lasx.xvfcmp.caf.s" => "__builtin_lasx_xvfcmp_caf_s",
+ "llvm.loongarch.lasx.xvfcmp.ceq.d" => "__builtin_lasx_xvfcmp_ceq_d",
+ "llvm.loongarch.lasx.xvfcmp.ceq.s" => "__builtin_lasx_xvfcmp_ceq_s",
+ "llvm.loongarch.lasx.xvfcmp.cle.d" => "__builtin_lasx_xvfcmp_cle_d",
+ "llvm.loongarch.lasx.xvfcmp.cle.s" => "__builtin_lasx_xvfcmp_cle_s",
+ "llvm.loongarch.lasx.xvfcmp.clt.d" => "__builtin_lasx_xvfcmp_clt_d",
+ "llvm.loongarch.lasx.xvfcmp.clt.s" => "__builtin_lasx_xvfcmp_clt_s",
+ "llvm.loongarch.lasx.xvfcmp.cne.d" => "__builtin_lasx_xvfcmp_cne_d",
+ "llvm.loongarch.lasx.xvfcmp.cne.s" => "__builtin_lasx_xvfcmp_cne_s",
+ "llvm.loongarch.lasx.xvfcmp.cor.d" => "__builtin_lasx_xvfcmp_cor_d",
+ "llvm.loongarch.lasx.xvfcmp.cor.s" => "__builtin_lasx_xvfcmp_cor_s",
+ "llvm.loongarch.lasx.xvfcmp.cueq.d" => "__builtin_lasx_xvfcmp_cueq_d",
+ "llvm.loongarch.lasx.xvfcmp.cueq.s" => "__builtin_lasx_xvfcmp_cueq_s",
+ "llvm.loongarch.lasx.xvfcmp.cule.d" => "__builtin_lasx_xvfcmp_cule_d",
+ "llvm.loongarch.lasx.xvfcmp.cule.s" => "__builtin_lasx_xvfcmp_cule_s",
+ "llvm.loongarch.lasx.xvfcmp.cult.d" => "__builtin_lasx_xvfcmp_cult_d",
+ "llvm.loongarch.lasx.xvfcmp.cult.s" => "__builtin_lasx_xvfcmp_cult_s",
+ "llvm.loongarch.lasx.xvfcmp.cun.d" => "__builtin_lasx_xvfcmp_cun_d",
+ "llvm.loongarch.lasx.xvfcmp.cun.s" => "__builtin_lasx_xvfcmp_cun_s",
+ "llvm.loongarch.lasx.xvfcmp.cune.d" => "__builtin_lasx_xvfcmp_cune_d",
+ "llvm.loongarch.lasx.xvfcmp.cune.s" => "__builtin_lasx_xvfcmp_cune_s",
+ "llvm.loongarch.lasx.xvfcmp.saf.d" => "__builtin_lasx_xvfcmp_saf_d",
+ "llvm.loongarch.lasx.xvfcmp.saf.s" => "__builtin_lasx_xvfcmp_saf_s",
+ "llvm.loongarch.lasx.xvfcmp.seq.d" => "__builtin_lasx_xvfcmp_seq_d",
+ "llvm.loongarch.lasx.xvfcmp.seq.s" => "__builtin_lasx_xvfcmp_seq_s",
+ "llvm.loongarch.lasx.xvfcmp.sle.d" => "__builtin_lasx_xvfcmp_sle_d",
+ "llvm.loongarch.lasx.xvfcmp.sle.s" => "__builtin_lasx_xvfcmp_sle_s",
+ "llvm.loongarch.lasx.xvfcmp.slt.d" => "__builtin_lasx_xvfcmp_slt_d",
+ "llvm.loongarch.lasx.xvfcmp.slt.s" => "__builtin_lasx_xvfcmp_slt_s",
+ "llvm.loongarch.lasx.xvfcmp.sne.d" => "__builtin_lasx_xvfcmp_sne_d",
+ "llvm.loongarch.lasx.xvfcmp.sne.s" => "__builtin_lasx_xvfcmp_sne_s",
+ "llvm.loongarch.lasx.xvfcmp.sor.d" => "__builtin_lasx_xvfcmp_sor_d",
+ "llvm.loongarch.lasx.xvfcmp.sor.s" => "__builtin_lasx_xvfcmp_sor_s",
+ "llvm.loongarch.lasx.xvfcmp.sueq.d" => "__builtin_lasx_xvfcmp_sueq_d",
+ "llvm.loongarch.lasx.xvfcmp.sueq.s" => "__builtin_lasx_xvfcmp_sueq_s",
+ "llvm.loongarch.lasx.xvfcmp.sule.d" => "__builtin_lasx_xvfcmp_sule_d",
+ "llvm.loongarch.lasx.xvfcmp.sule.s" => "__builtin_lasx_xvfcmp_sule_s",
+ "llvm.loongarch.lasx.xvfcmp.sult.d" => "__builtin_lasx_xvfcmp_sult_d",
+ "llvm.loongarch.lasx.xvfcmp.sult.s" => "__builtin_lasx_xvfcmp_sult_s",
+ "llvm.loongarch.lasx.xvfcmp.sun.d" => "__builtin_lasx_xvfcmp_sun_d",
+ "llvm.loongarch.lasx.xvfcmp.sun.s" => "__builtin_lasx_xvfcmp_sun_s",
+ "llvm.loongarch.lasx.xvfcmp.sune.d" => "__builtin_lasx_xvfcmp_sune_d",
+ "llvm.loongarch.lasx.xvfcmp.sune.s" => "__builtin_lasx_xvfcmp_sune_s",
+ "llvm.loongarch.lasx.xvfcvt.h.s" => "__builtin_lasx_xvfcvt_h_s",
+ "llvm.loongarch.lasx.xvfcvt.s.d" => "__builtin_lasx_xvfcvt_s_d",
+ "llvm.loongarch.lasx.xvfcvth.d.s" => "__builtin_lasx_xvfcvth_d_s",
+ "llvm.loongarch.lasx.xvfcvth.s.h" => "__builtin_lasx_xvfcvth_s_h",
+ "llvm.loongarch.lasx.xvfcvtl.d.s" => "__builtin_lasx_xvfcvtl_d_s",
+ "llvm.loongarch.lasx.xvfcvtl.s.h" => "__builtin_lasx_xvfcvtl_s_h",
+ "llvm.loongarch.lasx.xvfdiv.d" => "__builtin_lasx_xvfdiv_d",
+ "llvm.loongarch.lasx.xvfdiv.s" => "__builtin_lasx_xvfdiv_s",
+ "llvm.loongarch.lasx.xvffint.d.l" => "__builtin_lasx_xvffint_d_l",
+ "llvm.loongarch.lasx.xvffint.d.lu" => "__builtin_lasx_xvffint_d_lu",
+ "llvm.loongarch.lasx.xvffint.s.l" => "__builtin_lasx_xvffint_s_l",
+ "llvm.loongarch.lasx.xvffint.s.w" => "__builtin_lasx_xvffint_s_w",
+ "llvm.loongarch.lasx.xvffint.s.wu" => "__builtin_lasx_xvffint_s_wu",
+ "llvm.loongarch.lasx.xvffinth.d.w" => "__builtin_lasx_xvffinth_d_w",
+ "llvm.loongarch.lasx.xvffintl.d.w" => "__builtin_lasx_xvffintl_d_w",
+ "llvm.loongarch.lasx.xvflogb.d" => "__builtin_lasx_xvflogb_d",
+ "llvm.loongarch.lasx.xvflogb.s" => "__builtin_lasx_xvflogb_s",
+ "llvm.loongarch.lasx.xvfmadd.d" => "__builtin_lasx_xvfmadd_d",
+ "llvm.loongarch.lasx.xvfmadd.s" => "__builtin_lasx_xvfmadd_s",
+ "llvm.loongarch.lasx.xvfmax.d" => "__builtin_lasx_xvfmax_d",
+ "llvm.loongarch.lasx.xvfmax.s" => "__builtin_lasx_xvfmax_s",
+ "llvm.loongarch.lasx.xvfmaxa.d" => "__builtin_lasx_xvfmaxa_d",
+ "llvm.loongarch.lasx.xvfmaxa.s" => "__builtin_lasx_xvfmaxa_s",
+ "llvm.loongarch.lasx.xvfmin.d" => "__builtin_lasx_xvfmin_d",
+ "llvm.loongarch.lasx.xvfmin.s" => "__builtin_lasx_xvfmin_s",
+ "llvm.loongarch.lasx.xvfmina.d" => "__builtin_lasx_xvfmina_d",
+ "llvm.loongarch.lasx.xvfmina.s" => "__builtin_lasx_xvfmina_s",
+ "llvm.loongarch.lasx.xvfmsub.d" => "__builtin_lasx_xvfmsub_d",
+ "llvm.loongarch.lasx.xvfmsub.s" => "__builtin_lasx_xvfmsub_s",
+ "llvm.loongarch.lasx.xvfmul.d" => "__builtin_lasx_xvfmul_d",
+ "llvm.loongarch.lasx.xvfmul.s" => "__builtin_lasx_xvfmul_s",
+ "llvm.loongarch.lasx.xvfnmadd.d" => "__builtin_lasx_xvfnmadd_d",
+ "llvm.loongarch.lasx.xvfnmadd.s" => "__builtin_lasx_xvfnmadd_s",
+ "llvm.loongarch.lasx.xvfnmsub.d" => "__builtin_lasx_xvfnmsub_d",
+ "llvm.loongarch.lasx.xvfnmsub.s" => "__builtin_lasx_xvfnmsub_s",
+ "llvm.loongarch.lasx.xvfrecip.d" => "__builtin_lasx_xvfrecip_d",
+ "llvm.loongarch.lasx.xvfrecip.s" => "__builtin_lasx_xvfrecip_s",
+ "llvm.loongarch.lasx.xvfrint.d" => "__builtin_lasx_xvfrint_d",
+ "llvm.loongarch.lasx.xvfrint.s" => "__builtin_lasx_xvfrint_s",
+ "llvm.loongarch.lasx.xvfrintrm.d" => "__builtin_lasx_xvfrintrm_d",
+ "llvm.loongarch.lasx.xvfrintrm.s" => "__builtin_lasx_xvfrintrm_s",
+ "llvm.loongarch.lasx.xvfrintrne.d" => "__builtin_lasx_xvfrintrne_d",
+ "llvm.loongarch.lasx.xvfrintrne.s" => "__builtin_lasx_xvfrintrne_s",
+ "llvm.loongarch.lasx.xvfrintrp.d" => "__builtin_lasx_xvfrintrp_d",
+ "llvm.loongarch.lasx.xvfrintrp.s" => "__builtin_lasx_xvfrintrp_s",
+ "llvm.loongarch.lasx.xvfrintrz.d" => "__builtin_lasx_xvfrintrz_d",
+ "llvm.loongarch.lasx.xvfrintrz.s" => "__builtin_lasx_xvfrintrz_s",
+ "llvm.loongarch.lasx.xvfrsqrt.d" => "__builtin_lasx_xvfrsqrt_d",
+ "llvm.loongarch.lasx.xvfrsqrt.s" => "__builtin_lasx_xvfrsqrt_s",
+ "llvm.loongarch.lasx.xvfrstp.b" => "__builtin_lasx_xvfrstp_b",
+ "llvm.loongarch.lasx.xvfrstp.h" => "__builtin_lasx_xvfrstp_h",
+ "llvm.loongarch.lasx.xvfrstpi.b" => "__builtin_lasx_xvfrstpi_b",
+ "llvm.loongarch.lasx.xvfrstpi.h" => "__builtin_lasx_xvfrstpi_h",
+ "llvm.loongarch.lasx.xvfsqrt.d" => "__builtin_lasx_xvfsqrt_d",
+ "llvm.loongarch.lasx.xvfsqrt.s" => "__builtin_lasx_xvfsqrt_s",
+ "llvm.loongarch.lasx.xvfsub.d" => "__builtin_lasx_xvfsub_d",
+ "llvm.loongarch.lasx.xvfsub.s" => "__builtin_lasx_xvfsub_s",
+ "llvm.loongarch.lasx.xvftint.l.d" => "__builtin_lasx_xvftint_l_d",
+ "llvm.loongarch.lasx.xvftint.lu.d" => "__builtin_lasx_xvftint_lu_d",
+ "llvm.loongarch.lasx.xvftint.w.d" => "__builtin_lasx_xvftint_w_d",
+ "llvm.loongarch.lasx.xvftint.w.s" => "__builtin_lasx_xvftint_w_s",
+ "llvm.loongarch.lasx.xvftint.wu.s" => "__builtin_lasx_xvftint_wu_s",
+ "llvm.loongarch.lasx.xvftinth.l.s" => "__builtin_lasx_xvftinth_l_s",
+ "llvm.loongarch.lasx.xvftintl.l.s" => "__builtin_lasx_xvftintl_l_s",
+ "llvm.loongarch.lasx.xvftintrm.l.d" => "__builtin_lasx_xvftintrm_l_d",
+ "llvm.loongarch.lasx.xvftintrm.w.d" => "__builtin_lasx_xvftintrm_w_d",
+ "llvm.loongarch.lasx.xvftintrm.w.s" => "__builtin_lasx_xvftintrm_w_s",
+ "llvm.loongarch.lasx.xvftintrmh.l.s" => "__builtin_lasx_xvftintrmh_l_s",
+ "llvm.loongarch.lasx.xvftintrml.l.s" => "__builtin_lasx_xvftintrml_l_s",
+ "llvm.loongarch.lasx.xvftintrne.l.d" => "__builtin_lasx_xvftintrne_l_d",
+ "llvm.loongarch.lasx.xvftintrne.w.d" => "__builtin_lasx_xvftintrne_w_d",
+ "llvm.loongarch.lasx.xvftintrne.w.s" => "__builtin_lasx_xvftintrne_w_s",
+ "llvm.loongarch.lasx.xvftintrneh.l.s" => "__builtin_lasx_xvftintrneh_l_s",
+ "llvm.loongarch.lasx.xvftintrnel.l.s" => "__builtin_lasx_xvftintrnel_l_s",
+ "llvm.loongarch.lasx.xvftintrp.l.d" => "__builtin_lasx_xvftintrp_l_d",
+ "llvm.loongarch.lasx.xvftintrp.w.d" => "__builtin_lasx_xvftintrp_w_d",
+ "llvm.loongarch.lasx.xvftintrp.w.s" => "__builtin_lasx_xvftintrp_w_s",
+ "llvm.loongarch.lasx.xvftintrph.l.s" => "__builtin_lasx_xvftintrph_l_s",
+ "llvm.loongarch.lasx.xvftintrpl.l.s" => "__builtin_lasx_xvftintrpl_l_s",
+ "llvm.loongarch.lasx.xvftintrz.l.d" => "__builtin_lasx_xvftintrz_l_d",
+ "llvm.loongarch.lasx.xvftintrz.lu.d" => "__builtin_lasx_xvftintrz_lu_d",
+ "llvm.loongarch.lasx.xvftintrz.w.d" => "__builtin_lasx_xvftintrz_w_d",
+ "llvm.loongarch.lasx.xvftintrz.w.s" => "__builtin_lasx_xvftintrz_w_s",
+ "llvm.loongarch.lasx.xvftintrz.wu.s" => "__builtin_lasx_xvftintrz_wu_s",
+ "llvm.loongarch.lasx.xvftintrzh.l.s" => "__builtin_lasx_xvftintrzh_l_s",
+ "llvm.loongarch.lasx.xvftintrzl.l.s" => "__builtin_lasx_xvftintrzl_l_s",
+ "llvm.loongarch.lasx.xvhaddw.d.w" => "__builtin_lasx_xvhaddw_d_w",
+ "llvm.loongarch.lasx.xvhaddw.du.wu" => "__builtin_lasx_xvhaddw_du_wu",
+ "llvm.loongarch.lasx.xvhaddw.h.b" => "__builtin_lasx_xvhaddw_h_b",
+ "llvm.loongarch.lasx.xvhaddw.hu.bu" => "__builtin_lasx_xvhaddw_hu_bu",
+ "llvm.loongarch.lasx.xvhaddw.q.d" => "__builtin_lasx_xvhaddw_q_d",
+ "llvm.loongarch.lasx.xvhaddw.qu.du" => "__builtin_lasx_xvhaddw_qu_du",
+ "llvm.loongarch.lasx.xvhaddw.w.h" => "__builtin_lasx_xvhaddw_w_h",
+ "llvm.loongarch.lasx.xvhaddw.wu.hu" => "__builtin_lasx_xvhaddw_wu_hu",
+ "llvm.loongarch.lasx.xvhsubw.d.w" => "__builtin_lasx_xvhsubw_d_w",
+ "llvm.loongarch.lasx.xvhsubw.du.wu" => "__builtin_lasx_xvhsubw_du_wu",
+ "llvm.loongarch.lasx.xvhsubw.h.b" => "__builtin_lasx_xvhsubw_h_b",
+ "llvm.loongarch.lasx.xvhsubw.hu.bu" => "__builtin_lasx_xvhsubw_hu_bu",
+ "llvm.loongarch.lasx.xvhsubw.q.d" => "__builtin_lasx_xvhsubw_q_d",
+ "llvm.loongarch.lasx.xvhsubw.qu.du" => "__builtin_lasx_xvhsubw_qu_du",
+ "llvm.loongarch.lasx.xvhsubw.w.h" => "__builtin_lasx_xvhsubw_w_h",
+ "llvm.loongarch.lasx.xvhsubw.wu.hu" => "__builtin_lasx_xvhsubw_wu_hu",
+ "llvm.loongarch.lasx.xvilvh.b" => "__builtin_lasx_xvilvh_b",
+ "llvm.loongarch.lasx.xvilvh.d" => "__builtin_lasx_xvilvh_d",
+ "llvm.loongarch.lasx.xvilvh.h" => "__builtin_lasx_xvilvh_h",
+ "llvm.loongarch.lasx.xvilvh.w" => "__builtin_lasx_xvilvh_w",
+ "llvm.loongarch.lasx.xvilvl.b" => "__builtin_lasx_xvilvl_b",
+ "llvm.loongarch.lasx.xvilvl.d" => "__builtin_lasx_xvilvl_d",
+ "llvm.loongarch.lasx.xvilvl.h" => "__builtin_lasx_xvilvl_h",
+ "llvm.loongarch.lasx.xvilvl.w" => "__builtin_lasx_xvilvl_w",
+ "llvm.loongarch.lasx.xvinsgr2vr.d" => "__builtin_lasx_xvinsgr2vr_d",
+ "llvm.loongarch.lasx.xvinsgr2vr.w" => "__builtin_lasx_xvinsgr2vr_w",
+ "llvm.loongarch.lasx.xvinsve0.d" => "__builtin_lasx_xvinsve0_d",
+ "llvm.loongarch.lasx.xvinsve0.w" => "__builtin_lasx_xvinsve0_w",
+ "llvm.loongarch.lasx.xvld" => "__builtin_lasx_xvld",
+ "llvm.loongarch.lasx.xvldi" => "__builtin_lasx_xvldi",
+ "llvm.loongarch.lasx.xvldrepl.b" => "__builtin_lasx_xvldrepl_b",
+ "llvm.loongarch.lasx.xvldrepl.d" => "__builtin_lasx_xvldrepl_d",
+ "llvm.loongarch.lasx.xvldrepl.h" => "__builtin_lasx_xvldrepl_h",
+ "llvm.loongarch.lasx.xvldrepl.w" => "__builtin_lasx_xvldrepl_w",
+ "llvm.loongarch.lasx.xvldx" => "__builtin_lasx_xvldx",
+ "llvm.loongarch.lasx.xvmadd.b" => "__builtin_lasx_xvmadd_b",
+ "llvm.loongarch.lasx.xvmadd.d" => "__builtin_lasx_xvmadd_d",
+ "llvm.loongarch.lasx.xvmadd.h" => "__builtin_lasx_xvmadd_h",
+ "llvm.loongarch.lasx.xvmadd.w" => "__builtin_lasx_xvmadd_w",
+ "llvm.loongarch.lasx.xvmaddwev.d.w" => "__builtin_lasx_xvmaddwev_d_w",
+ "llvm.loongarch.lasx.xvmaddwev.d.wu" => "__builtin_lasx_xvmaddwev_d_wu",
+ "llvm.loongarch.lasx.xvmaddwev.d.wu.w" => "__builtin_lasx_xvmaddwev_d_wu_w",
+ "llvm.loongarch.lasx.xvmaddwev.h.b" => "__builtin_lasx_xvmaddwev_h_b",
+ "llvm.loongarch.lasx.xvmaddwev.h.bu" => "__builtin_lasx_xvmaddwev_h_bu",
+ "llvm.loongarch.lasx.xvmaddwev.h.bu.b" => "__builtin_lasx_xvmaddwev_h_bu_b",
+ "llvm.loongarch.lasx.xvmaddwev.q.d" => "__builtin_lasx_xvmaddwev_q_d",
+ "llvm.loongarch.lasx.xvmaddwev.q.du" => "__builtin_lasx_xvmaddwev_q_du",
+ "llvm.loongarch.lasx.xvmaddwev.q.du.d" => "__builtin_lasx_xvmaddwev_q_du_d",
+ "llvm.loongarch.lasx.xvmaddwev.w.h" => "__builtin_lasx_xvmaddwev_w_h",
+ "llvm.loongarch.lasx.xvmaddwev.w.hu" => "__builtin_lasx_xvmaddwev_w_hu",
+ "llvm.loongarch.lasx.xvmaddwev.w.hu.h" => "__builtin_lasx_xvmaddwev_w_hu_h",
+ "llvm.loongarch.lasx.xvmaddwod.d.w" => "__builtin_lasx_xvmaddwod_d_w",
+ "llvm.loongarch.lasx.xvmaddwod.d.wu" => "__builtin_lasx_xvmaddwod_d_wu",
+ "llvm.loongarch.lasx.xvmaddwod.d.wu.w" => "__builtin_lasx_xvmaddwod_d_wu_w",
+ "llvm.loongarch.lasx.xvmaddwod.h.b" => "__builtin_lasx_xvmaddwod_h_b",
+ "llvm.loongarch.lasx.xvmaddwod.h.bu" => "__builtin_lasx_xvmaddwod_h_bu",
+ "llvm.loongarch.lasx.xvmaddwod.h.bu.b" => "__builtin_lasx_xvmaddwod_h_bu_b",
+ "llvm.loongarch.lasx.xvmaddwod.q.d" => "__builtin_lasx_xvmaddwod_q_d",
+ "llvm.loongarch.lasx.xvmaddwod.q.du" => "__builtin_lasx_xvmaddwod_q_du",
+ "llvm.loongarch.lasx.xvmaddwod.q.du.d" => "__builtin_lasx_xvmaddwod_q_du_d",
+ "llvm.loongarch.lasx.xvmaddwod.w.h" => "__builtin_lasx_xvmaddwod_w_h",
+ "llvm.loongarch.lasx.xvmaddwod.w.hu" => "__builtin_lasx_xvmaddwod_w_hu",
+ "llvm.loongarch.lasx.xvmaddwod.w.hu.h" => "__builtin_lasx_xvmaddwod_w_hu_h",
+ "llvm.loongarch.lasx.xvmax.b" => "__builtin_lasx_xvmax_b",
+ "llvm.loongarch.lasx.xvmax.bu" => "__builtin_lasx_xvmax_bu",
+ "llvm.loongarch.lasx.xvmax.d" => "__builtin_lasx_xvmax_d",
+ "llvm.loongarch.lasx.xvmax.du" => "__builtin_lasx_xvmax_du",
+ "llvm.loongarch.lasx.xvmax.h" => "__builtin_lasx_xvmax_h",
+ "llvm.loongarch.lasx.xvmax.hu" => "__builtin_lasx_xvmax_hu",
+ "llvm.loongarch.lasx.xvmax.w" => "__builtin_lasx_xvmax_w",
+ "llvm.loongarch.lasx.xvmax.wu" => "__builtin_lasx_xvmax_wu",
+ "llvm.loongarch.lasx.xvmaxi.b" => "__builtin_lasx_xvmaxi_b",
+ "llvm.loongarch.lasx.xvmaxi.bu" => "__builtin_lasx_xvmaxi_bu",
+ "llvm.loongarch.lasx.xvmaxi.d" => "__builtin_lasx_xvmaxi_d",
+ "llvm.loongarch.lasx.xvmaxi.du" => "__builtin_lasx_xvmaxi_du",
+ "llvm.loongarch.lasx.xvmaxi.h" => "__builtin_lasx_xvmaxi_h",
+ "llvm.loongarch.lasx.xvmaxi.hu" => "__builtin_lasx_xvmaxi_hu",
+ "llvm.loongarch.lasx.xvmaxi.w" => "__builtin_lasx_xvmaxi_w",
+ "llvm.loongarch.lasx.xvmaxi.wu" => "__builtin_lasx_xvmaxi_wu",
+ "llvm.loongarch.lasx.xvmin.b" => "__builtin_lasx_xvmin_b",
+ "llvm.loongarch.lasx.xvmin.bu" => "__builtin_lasx_xvmin_bu",
+ "llvm.loongarch.lasx.xvmin.d" => "__builtin_lasx_xvmin_d",
+ "llvm.loongarch.lasx.xvmin.du" => "__builtin_lasx_xvmin_du",
+ "llvm.loongarch.lasx.xvmin.h" => "__builtin_lasx_xvmin_h",
+ "llvm.loongarch.lasx.xvmin.hu" => "__builtin_lasx_xvmin_hu",
+ "llvm.loongarch.lasx.xvmin.w" => "__builtin_lasx_xvmin_w",
+ "llvm.loongarch.lasx.xvmin.wu" => "__builtin_lasx_xvmin_wu",
+ "llvm.loongarch.lasx.xvmini.b" => "__builtin_lasx_xvmini_b",
+ "llvm.loongarch.lasx.xvmini.bu" => "__builtin_lasx_xvmini_bu",
+ "llvm.loongarch.lasx.xvmini.d" => "__builtin_lasx_xvmini_d",
+ "llvm.loongarch.lasx.xvmini.du" => "__builtin_lasx_xvmini_du",
+ "llvm.loongarch.lasx.xvmini.h" => "__builtin_lasx_xvmini_h",
+ "llvm.loongarch.lasx.xvmini.hu" => "__builtin_lasx_xvmini_hu",
+ "llvm.loongarch.lasx.xvmini.w" => "__builtin_lasx_xvmini_w",
+ "llvm.loongarch.lasx.xvmini.wu" => "__builtin_lasx_xvmini_wu",
+ "llvm.loongarch.lasx.xvmod.b" => "__builtin_lasx_xvmod_b",
+ "llvm.loongarch.lasx.xvmod.bu" => "__builtin_lasx_xvmod_bu",
+ "llvm.loongarch.lasx.xvmod.d" => "__builtin_lasx_xvmod_d",
+ "llvm.loongarch.lasx.xvmod.du" => "__builtin_lasx_xvmod_du",
+ "llvm.loongarch.lasx.xvmod.h" => "__builtin_lasx_xvmod_h",
+ "llvm.loongarch.lasx.xvmod.hu" => "__builtin_lasx_xvmod_hu",
+ "llvm.loongarch.lasx.xvmod.w" => "__builtin_lasx_xvmod_w",
+ "llvm.loongarch.lasx.xvmod.wu" => "__builtin_lasx_xvmod_wu",
+ "llvm.loongarch.lasx.xvmskgez.b" => "__builtin_lasx_xvmskgez_b",
+ "llvm.loongarch.lasx.xvmskltz.b" => "__builtin_lasx_xvmskltz_b",
+ "llvm.loongarch.lasx.xvmskltz.d" => "__builtin_lasx_xvmskltz_d",
+ "llvm.loongarch.lasx.xvmskltz.h" => "__builtin_lasx_xvmskltz_h",
+ "llvm.loongarch.lasx.xvmskltz.w" => "__builtin_lasx_xvmskltz_w",
+ "llvm.loongarch.lasx.xvmsknz.b" => "__builtin_lasx_xvmsknz_b",
+ "llvm.loongarch.lasx.xvmsub.b" => "__builtin_lasx_xvmsub_b",
+ "llvm.loongarch.lasx.xvmsub.d" => "__builtin_lasx_xvmsub_d",
+ "llvm.loongarch.lasx.xvmsub.h" => "__builtin_lasx_xvmsub_h",
+ "llvm.loongarch.lasx.xvmsub.w" => "__builtin_lasx_xvmsub_w",
+ "llvm.loongarch.lasx.xvmuh.b" => "__builtin_lasx_xvmuh_b",
+ "llvm.loongarch.lasx.xvmuh.bu" => "__builtin_lasx_xvmuh_bu",
+ "llvm.loongarch.lasx.xvmuh.d" => "__builtin_lasx_xvmuh_d",
+ "llvm.loongarch.lasx.xvmuh.du" => "__builtin_lasx_xvmuh_du",
+ "llvm.loongarch.lasx.xvmuh.h" => "__builtin_lasx_xvmuh_h",
+ "llvm.loongarch.lasx.xvmuh.hu" => "__builtin_lasx_xvmuh_hu",
+ "llvm.loongarch.lasx.xvmuh.w" => "__builtin_lasx_xvmuh_w",
+ "llvm.loongarch.lasx.xvmuh.wu" => "__builtin_lasx_xvmuh_wu",
+ "llvm.loongarch.lasx.xvmul.b" => "__builtin_lasx_xvmul_b",
+ "llvm.loongarch.lasx.xvmul.d" => "__builtin_lasx_xvmul_d",
+ "llvm.loongarch.lasx.xvmul.h" => "__builtin_lasx_xvmul_h",
+ "llvm.loongarch.lasx.xvmul.w" => "__builtin_lasx_xvmul_w",
+ "llvm.loongarch.lasx.xvmulwev.d.w" => "__builtin_lasx_xvmulwev_d_w",
+ "llvm.loongarch.lasx.xvmulwev.d.wu" => "__builtin_lasx_xvmulwev_d_wu",
+ "llvm.loongarch.lasx.xvmulwev.d.wu.w" => "__builtin_lasx_xvmulwev_d_wu_w",
+ "llvm.loongarch.lasx.xvmulwev.h.b" => "__builtin_lasx_xvmulwev_h_b",
+ "llvm.loongarch.lasx.xvmulwev.h.bu" => "__builtin_lasx_xvmulwev_h_bu",
+ "llvm.loongarch.lasx.xvmulwev.h.bu.b" => "__builtin_lasx_xvmulwev_h_bu_b",
+ "llvm.loongarch.lasx.xvmulwev.q.d" => "__builtin_lasx_xvmulwev_q_d",
+ "llvm.loongarch.lasx.xvmulwev.q.du" => "__builtin_lasx_xvmulwev_q_du",
+ "llvm.loongarch.lasx.xvmulwev.q.du.d" => "__builtin_lasx_xvmulwev_q_du_d",
+ "llvm.loongarch.lasx.xvmulwev.w.h" => "__builtin_lasx_xvmulwev_w_h",
+ "llvm.loongarch.lasx.xvmulwev.w.hu" => "__builtin_lasx_xvmulwev_w_hu",
+ "llvm.loongarch.lasx.xvmulwev.w.hu.h" => "__builtin_lasx_xvmulwev_w_hu_h",
+ "llvm.loongarch.lasx.xvmulwod.d.w" => "__builtin_lasx_xvmulwod_d_w",
+ "llvm.loongarch.lasx.xvmulwod.d.wu" => "__builtin_lasx_xvmulwod_d_wu",
+ "llvm.loongarch.lasx.xvmulwod.d.wu.w" => "__builtin_lasx_xvmulwod_d_wu_w",
+ "llvm.loongarch.lasx.xvmulwod.h.b" => "__builtin_lasx_xvmulwod_h_b",
+ "llvm.loongarch.lasx.xvmulwod.h.bu" => "__builtin_lasx_xvmulwod_h_bu",
+ "llvm.loongarch.lasx.xvmulwod.h.bu.b" => "__builtin_lasx_xvmulwod_h_bu_b",
+ "llvm.loongarch.lasx.xvmulwod.q.d" => "__builtin_lasx_xvmulwod_q_d",
+ "llvm.loongarch.lasx.xvmulwod.q.du" => "__builtin_lasx_xvmulwod_q_du",
+ "llvm.loongarch.lasx.xvmulwod.q.du.d" => "__builtin_lasx_xvmulwod_q_du_d",
+ "llvm.loongarch.lasx.xvmulwod.w.h" => "__builtin_lasx_xvmulwod_w_h",
+ "llvm.loongarch.lasx.xvmulwod.w.hu" => "__builtin_lasx_xvmulwod_w_hu",
+ "llvm.loongarch.lasx.xvmulwod.w.hu.h" => "__builtin_lasx_xvmulwod_w_hu_h",
+ "llvm.loongarch.lasx.xvneg.b" => "__builtin_lasx_xvneg_b",
+ "llvm.loongarch.lasx.xvneg.d" => "__builtin_lasx_xvneg_d",
+ "llvm.loongarch.lasx.xvneg.h" => "__builtin_lasx_xvneg_h",
+ "llvm.loongarch.lasx.xvneg.w" => "__builtin_lasx_xvneg_w",
+ "llvm.loongarch.lasx.xvnor.v" => "__builtin_lasx_xvnor_v",
+ "llvm.loongarch.lasx.xvnori.b" => "__builtin_lasx_xvnori_b",
+ "llvm.loongarch.lasx.xvor.v" => "__builtin_lasx_xvor_v",
+ "llvm.loongarch.lasx.xvori.b" => "__builtin_lasx_xvori_b",
+ "llvm.loongarch.lasx.xvorn.v" => "__builtin_lasx_xvorn_v",
+ "llvm.loongarch.lasx.xvpackev.b" => "__builtin_lasx_xvpackev_b",
+ "llvm.loongarch.lasx.xvpackev.d" => "__builtin_lasx_xvpackev_d",
+ "llvm.loongarch.lasx.xvpackev.h" => "__builtin_lasx_xvpackev_h",
+ "llvm.loongarch.lasx.xvpackev.w" => "__builtin_lasx_xvpackev_w",
+ "llvm.loongarch.lasx.xvpackod.b" => "__builtin_lasx_xvpackod_b",
+ "llvm.loongarch.lasx.xvpackod.d" => "__builtin_lasx_xvpackod_d",
+ "llvm.loongarch.lasx.xvpackod.h" => "__builtin_lasx_xvpackod_h",
+ "llvm.loongarch.lasx.xvpackod.w" => "__builtin_lasx_xvpackod_w",
+ "llvm.loongarch.lasx.xvpcnt.b" => "__builtin_lasx_xvpcnt_b",
+ "llvm.loongarch.lasx.xvpcnt.d" => "__builtin_lasx_xvpcnt_d",
+ "llvm.loongarch.lasx.xvpcnt.h" => "__builtin_lasx_xvpcnt_h",
+ "llvm.loongarch.lasx.xvpcnt.w" => "__builtin_lasx_xvpcnt_w",
+ "llvm.loongarch.lasx.xvperm.w" => "__builtin_lasx_xvperm_w",
+ "llvm.loongarch.lasx.xvpermi.d" => "__builtin_lasx_xvpermi_d",
+ "llvm.loongarch.lasx.xvpermi.q" => "__builtin_lasx_xvpermi_q",
+ "llvm.loongarch.lasx.xvpermi.w" => "__builtin_lasx_xvpermi_w",
+ "llvm.loongarch.lasx.xvpickev.b" => "__builtin_lasx_xvpickev_b",
+ "llvm.loongarch.lasx.xvpickev.d" => "__builtin_lasx_xvpickev_d",
+ "llvm.loongarch.lasx.xvpickev.h" => "__builtin_lasx_xvpickev_h",
+ "llvm.loongarch.lasx.xvpickev.w" => "__builtin_lasx_xvpickev_w",
+ "llvm.loongarch.lasx.xvpickod.b" => "__builtin_lasx_xvpickod_b",
+ "llvm.loongarch.lasx.xvpickod.d" => "__builtin_lasx_xvpickod_d",
+ "llvm.loongarch.lasx.xvpickod.h" => "__builtin_lasx_xvpickod_h",
+ "llvm.loongarch.lasx.xvpickod.w" => "__builtin_lasx_xvpickod_w",
+ "llvm.loongarch.lasx.xvpickve.d" => "__builtin_lasx_xvpickve_d",
+ "llvm.loongarch.lasx.xvpickve.d.f" => "__builtin_lasx_xvpickve_d_f",
+ "llvm.loongarch.lasx.xvpickve.w" => "__builtin_lasx_xvpickve_w",
+ "llvm.loongarch.lasx.xvpickve.w.f" => "__builtin_lasx_xvpickve_w_f",
+ "llvm.loongarch.lasx.xvpickve2gr.d" => "__builtin_lasx_xvpickve2gr_d",
+ "llvm.loongarch.lasx.xvpickve2gr.du" => "__builtin_lasx_xvpickve2gr_du",
+ "llvm.loongarch.lasx.xvpickve2gr.w" => "__builtin_lasx_xvpickve2gr_w",
+ "llvm.loongarch.lasx.xvpickve2gr.wu" => "__builtin_lasx_xvpickve2gr_wu",
+ "llvm.loongarch.lasx.xvrepl128vei.b" => "__builtin_lasx_xvrepl128vei_b",
+ "llvm.loongarch.lasx.xvrepl128vei.d" => "__builtin_lasx_xvrepl128vei_d",
+ "llvm.loongarch.lasx.xvrepl128vei.h" => "__builtin_lasx_xvrepl128vei_h",
+ "llvm.loongarch.lasx.xvrepl128vei.w" => "__builtin_lasx_xvrepl128vei_w",
+ "llvm.loongarch.lasx.xvreplgr2vr.b" => "__builtin_lasx_xvreplgr2vr_b",
+ "llvm.loongarch.lasx.xvreplgr2vr.d" => "__builtin_lasx_xvreplgr2vr_d",
+ "llvm.loongarch.lasx.xvreplgr2vr.h" => "__builtin_lasx_xvreplgr2vr_h",
+ "llvm.loongarch.lasx.xvreplgr2vr.w" => "__builtin_lasx_xvreplgr2vr_w",
+ "llvm.loongarch.lasx.xvrepli.b" => "__builtin_lasx_xvrepli_b",
+ "llvm.loongarch.lasx.xvrepli.d" => "__builtin_lasx_xvrepli_d",
+ "llvm.loongarch.lasx.xvrepli.h" => "__builtin_lasx_xvrepli_h",
+ "llvm.loongarch.lasx.xvrepli.w" => "__builtin_lasx_xvrepli_w",
+ "llvm.loongarch.lasx.xvreplve.b" => "__builtin_lasx_xvreplve_b",
+ "llvm.loongarch.lasx.xvreplve.d" => "__builtin_lasx_xvreplve_d",
+ "llvm.loongarch.lasx.xvreplve.h" => "__builtin_lasx_xvreplve_h",
+ "llvm.loongarch.lasx.xvreplve.w" => "__builtin_lasx_xvreplve_w",
+ "llvm.loongarch.lasx.xvreplve0.b" => "__builtin_lasx_xvreplve0_b",
+ "llvm.loongarch.lasx.xvreplve0.d" => "__builtin_lasx_xvreplve0_d",
+ "llvm.loongarch.lasx.xvreplve0.h" => "__builtin_lasx_xvreplve0_h",
+ "llvm.loongarch.lasx.xvreplve0.q" => "__builtin_lasx_xvreplve0_q",
+ "llvm.loongarch.lasx.xvreplve0.w" => "__builtin_lasx_xvreplve0_w",
+ "llvm.loongarch.lasx.xvrotr.b" => "__builtin_lasx_xvrotr_b",
+ "llvm.loongarch.lasx.xvrotr.d" => "__builtin_lasx_xvrotr_d",
+ "llvm.loongarch.lasx.xvrotr.h" => "__builtin_lasx_xvrotr_h",
+ "llvm.loongarch.lasx.xvrotr.w" => "__builtin_lasx_xvrotr_w",
+ "llvm.loongarch.lasx.xvrotri.b" => "__builtin_lasx_xvrotri_b",
+ "llvm.loongarch.lasx.xvrotri.d" => "__builtin_lasx_xvrotri_d",
+ "llvm.loongarch.lasx.xvrotri.h" => "__builtin_lasx_xvrotri_h",
+ "llvm.loongarch.lasx.xvrotri.w" => "__builtin_lasx_xvrotri_w",
+ "llvm.loongarch.lasx.xvsadd.b" => "__builtin_lasx_xvsadd_b",
+ "llvm.loongarch.lasx.xvsadd.bu" => "__builtin_lasx_xvsadd_bu",
+ "llvm.loongarch.lasx.xvsadd.d" => "__builtin_lasx_xvsadd_d",
+ "llvm.loongarch.lasx.xvsadd.du" => "__builtin_lasx_xvsadd_du",
+ "llvm.loongarch.lasx.xvsadd.h" => "__builtin_lasx_xvsadd_h",
+ "llvm.loongarch.lasx.xvsadd.hu" => "__builtin_lasx_xvsadd_hu",
+ "llvm.loongarch.lasx.xvsadd.w" => "__builtin_lasx_xvsadd_w",
+ "llvm.loongarch.lasx.xvsadd.wu" => "__builtin_lasx_xvsadd_wu",
+ "llvm.loongarch.lasx.xvsat.b" => "__builtin_lasx_xvsat_b",
+ "llvm.loongarch.lasx.xvsat.bu" => "__builtin_lasx_xvsat_bu",
+ "llvm.loongarch.lasx.xvsat.d" => "__builtin_lasx_xvsat_d",
+ "llvm.loongarch.lasx.xvsat.du" => "__builtin_lasx_xvsat_du",
+ "llvm.loongarch.lasx.xvsat.h" => "__builtin_lasx_xvsat_h",
+ "llvm.loongarch.lasx.xvsat.hu" => "__builtin_lasx_xvsat_hu",
+ "llvm.loongarch.lasx.xvsat.w" => "__builtin_lasx_xvsat_w",
+ "llvm.loongarch.lasx.xvsat.wu" => "__builtin_lasx_xvsat_wu",
+ "llvm.loongarch.lasx.xvseq.b" => "__builtin_lasx_xvseq_b",
+ "llvm.loongarch.lasx.xvseq.d" => "__builtin_lasx_xvseq_d",
+ "llvm.loongarch.lasx.xvseq.h" => "__builtin_lasx_xvseq_h",
+ "llvm.loongarch.lasx.xvseq.w" => "__builtin_lasx_xvseq_w",
+ "llvm.loongarch.lasx.xvseqi.b" => "__builtin_lasx_xvseqi_b",
+ "llvm.loongarch.lasx.xvseqi.d" => "__builtin_lasx_xvseqi_d",
+ "llvm.loongarch.lasx.xvseqi.h" => "__builtin_lasx_xvseqi_h",
+ "llvm.loongarch.lasx.xvseqi.w" => "__builtin_lasx_xvseqi_w",
+ "llvm.loongarch.lasx.xvshuf.b" => "__builtin_lasx_xvshuf_b",
+ "llvm.loongarch.lasx.xvshuf.d" => "__builtin_lasx_xvshuf_d",
+ "llvm.loongarch.lasx.xvshuf.h" => "__builtin_lasx_xvshuf_h",
+ "llvm.loongarch.lasx.xvshuf.w" => "__builtin_lasx_xvshuf_w",
+ "llvm.loongarch.lasx.xvshuf4i.b" => "__builtin_lasx_xvshuf4i_b",
+ "llvm.loongarch.lasx.xvshuf4i.d" => "__builtin_lasx_xvshuf4i_d",
+ "llvm.loongarch.lasx.xvshuf4i.h" => "__builtin_lasx_xvshuf4i_h",
+ "llvm.loongarch.lasx.xvshuf4i.w" => "__builtin_lasx_xvshuf4i_w",
+ "llvm.loongarch.lasx.xvsigncov.b" => "__builtin_lasx_xvsigncov_b",
+ "llvm.loongarch.lasx.xvsigncov.d" => "__builtin_lasx_xvsigncov_d",
+ "llvm.loongarch.lasx.xvsigncov.h" => "__builtin_lasx_xvsigncov_h",
+ "llvm.loongarch.lasx.xvsigncov.w" => "__builtin_lasx_xvsigncov_w",
+ "llvm.loongarch.lasx.xvsle.b" => "__builtin_lasx_xvsle_b",
+ "llvm.loongarch.lasx.xvsle.bu" => "__builtin_lasx_xvsle_bu",
+ "llvm.loongarch.lasx.xvsle.d" => "__builtin_lasx_xvsle_d",
+ "llvm.loongarch.lasx.xvsle.du" => "__builtin_lasx_xvsle_du",
+ "llvm.loongarch.lasx.xvsle.h" => "__builtin_lasx_xvsle_h",
+ "llvm.loongarch.lasx.xvsle.hu" => "__builtin_lasx_xvsle_hu",
+ "llvm.loongarch.lasx.xvsle.w" => "__builtin_lasx_xvsle_w",
+ "llvm.loongarch.lasx.xvsle.wu" => "__builtin_lasx_xvsle_wu",
+ "llvm.loongarch.lasx.xvslei.b" => "__builtin_lasx_xvslei_b",
+ "llvm.loongarch.lasx.xvslei.bu" => "__builtin_lasx_xvslei_bu",
+ "llvm.loongarch.lasx.xvslei.d" => "__builtin_lasx_xvslei_d",
+ "llvm.loongarch.lasx.xvslei.du" => "__builtin_lasx_xvslei_du",
+ "llvm.loongarch.lasx.xvslei.h" => "__builtin_lasx_xvslei_h",
+ "llvm.loongarch.lasx.xvslei.hu" => "__builtin_lasx_xvslei_hu",
+ "llvm.loongarch.lasx.xvslei.w" => "__builtin_lasx_xvslei_w",
+ "llvm.loongarch.lasx.xvslei.wu" => "__builtin_lasx_xvslei_wu",
+ "llvm.loongarch.lasx.xvsll.b" => "__builtin_lasx_xvsll_b",
+ "llvm.loongarch.lasx.xvsll.d" => "__builtin_lasx_xvsll_d",
+ "llvm.loongarch.lasx.xvsll.h" => "__builtin_lasx_xvsll_h",
+ "llvm.loongarch.lasx.xvsll.w" => "__builtin_lasx_xvsll_w",
+ "llvm.loongarch.lasx.xvslli.b" => "__builtin_lasx_xvslli_b",
+ "llvm.loongarch.lasx.xvslli.d" => "__builtin_lasx_xvslli_d",
+ "llvm.loongarch.lasx.xvslli.h" => "__builtin_lasx_xvslli_h",
+ "llvm.loongarch.lasx.xvslli.w" => "__builtin_lasx_xvslli_w",
+ "llvm.loongarch.lasx.xvsllwil.d.w" => "__builtin_lasx_xvsllwil_d_w",
+ "llvm.loongarch.lasx.xvsllwil.du.wu" => "__builtin_lasx_xvsllwil_du_wu",
+ "llvm.loongarch.lasx.xvsllwil.h.b" => "__builtin_lasx_xvsllwil_h_b",
+ "llvm.loongarch.lasx.xvsllwil.hu.bu" => "__builtin_lasx_xvsllwil_hu_bu",
+ "llvm.loongarch.lasx.xvsllwil.w.h" => "__builtin_lasx_xvsllwil_w_h",
+ "llvm.loongarch.lasx.xvsllwil.wu.hu" => "__builtin_lasx_xvsllwil_wu_hu",
+ "llvm.loongarch.lasx.xvslt.b" => "__builtin_lasx_xvslt_b",
+ "llvm.loongarch.lasx.xvslt.bu" => "__builtin_lasx_xvslt_bu",
+ "llvm.loongarch.lasx.xvslt.d" => "__builtin_lasx_xvslt_d",
+ "llvm.loongarch.lasx.xvslt.du" => "__builtin_lasx_xvslt_du",
+ "llvm.loongarch.lasx.xvslt.h" => "__builtin_lasx_xvslt_h",
+ "llvm.loongarch.lasx.xvslt.hu" => "__builtin_lasx_xvslt_hu",
+ "llvm.loongarch.lasx.xvslt.w" => "__builtin_lasx_xvslt_w",
+ "llvm.loongarch.lasx.xvslt.wu" => "__builtin_lasx_xvslt_wu",
+ "llvm.loongarch.lasx.xvslti.b" => "__builtin_lasx_xvslti_b",
+ "llvm.loongarch.lasx.xvslti.bu" => "__builtin_lasx_xvslti_bu",
+ "llvm.loongarch.lasx.xvslti.d" => "__builtin_lasx_xvslti_d",
+ "llvm.loongarch.lasx.xvslti.du" => "__builtin_lasx_xvslti_du",
+ "llvm.loongarch.lasx.xvslti.h" => "__builtin_lasx_xvslti_h",
+ "llvm.loongarch.lasx.xvslti.hu" => "__builtin_lasx_xvslti_hu",
+ "llvm.loongarch.lasx.xvslti.w" => "__builtin_lasx_xvslti_w",
+ "llvm.loongarch.lasx.xvslti.wu" => "__builtin_lasx_xvslti_wu",
+ "llvm.loongarch.lasx.xvsra.b" => "__builtin_lasx_xvsra_b",
+ "llvm.loongarch.lasx.xvsra.d" => "__builtin_lasx_xvsra_d",
+ "llvm.loongarch.lasx.xvsra.h" => "__builtin_lasx_xvsra_h",
+ "llvm.loongarch.lasx.xvsra.w" => "__builtin_lasx_xvsra_w",
+ "llvm.loongarch.lasx.xvsrai.b" => "__builtin_lasx_xvsrai_b",
+ "llvm.loongarch.lasx.xvsrai.d" => "__builtin_lasx_xvsrai_d",
+ "llvm.loongarch.lasx.xvsrai.h" => "__builtin_lasx_xvsrai_h",
+ "llvm.loongarch.lasx.xvsrai.w" => "__builtin_lasx_xvsrai_w",
+ "llvm.loongarch.lasx.xvsran.b.h" => "__builtin_lasx_xvsran_b_h",
+ "llvm.loongarch.lasx.xvsran.h.w" => "__builtin_lasx_xvsran_h_w",
+ "llvm.loongarch.lasx.xvsran.w.d" => "__builtin_lasx_xvsran_w_d",
+ "llvm.loongarch.lasx.xvsrani.b.h" => "__builtin_lasx_xvsrani_b_h",
+ "llvm.loongarch.lasx.xvsrani.d.q" => "__builtin_lasx_xvsrani_d_q",
+ "llvm.loongarch.lasx.xvsrani.h.w" => "__builtin_lasx_xvsrani_h_w",
+ "llvm.loongarch.lasx.xvsrani.w.d" => "__builtin_lasx_xvsrani_w_d",
+ "llvm.loongarch.lasx.xvsrar.b" => "__builtin_lasx_xvsrar_b",
+ "llvm.loongarch.lasx.xvsrar.d" => "__builtin_lasx_xvsrar_d",
+ "llvm.loongarch.lasx.xvsrar.h" => "__builtin_lasx_xvsrar_h",
+ "llvm.loongarch.lasx.xvsrar.w" => "__builtin_lasx_xvsrar_w",
+ "llvm.loongarch.lasx.xvsrari.b" => "__builtin_lasx_xvsrari_b",
+ "llvm.loongarch.lasx.xvsrari.d" => "__builtin_lasx_xvsrari_d",
+ "llvm.loongarch.lasx.xvsrari.h" => "__builtin_lasx_xvsrari_h",
+ "llvm.loongarch.lasx.xvsrari.w" => "__builtin_lasx_xvsrari_w",
+ "llvm.loongarch.lasx.xvsrarn.b.h" => "__builtin_lasx_xvsrarn_b_h",
+ "llvm.loongarch.lasx.xvsrarn.h.w" => "__builtin_lasx_xvsrarn_h_w",
+ "llvm.loongarch.lasx.xvsrarn.w.d" => "__builtin_lasx_xvsrarn_w_d",
+ "llvm.loongarch.lasx.xvsrarni.b.h" => "__builtin_lasx_xvsrarni_b_h",
+ "llvm.loongarch.lasx.xvsrarni.d.q" => "__builtin_lasx_xvsrarni_d_q",
+ "llvm.loongarch.lasx.xvsrarni.h.w" => "__builtin_lasx_xvsrarni_h_w",
+ "llvm.loongarch.lasx.xvsrarni.w.d" => "__builtin_lasx_xvsrarni_w_d",
+ "llvm.loongarch.lasx.xvsrl.b" => "__builtin_lasx_xvsrl_b",
+ "llvm.loongarch.lasx.xvsrl.d" => "__builtin_lasx_xvsrl_d",
+ "llvm.loongarch.lasx.xvsrl.h" => "__builtin_lasx_xvsrl_h",
+ "llvm.loongarch.lasx.xvsrl.w" => "__builtin_lasx_xvsrl_w",
+ "llvm.loongarch.lasx.xvsrli.b" => "__builtin_lasx_xvsrli_b",
+ "llvm.loongarch.lasx.xvsrli.d" => "__builtin_lasx_xvsrli_d",
+ "llvm.loongarch.lasx.xvsrli.h" => "__builtin_lasx_xvsrli_h",
+ "llvm.loongarch.lasx.xvsrli.w" => "__builtin_lasx_xvsrli_w",
+ "llvm.loongarch.lasx.xvsrln.b.h" => "__builtin_lasx_xvsrln_b_h",
+ "llvm.loongarch.lasx.xvsrln.h.w" => "__builtin_lasx_xvsrln_h_w",
+ "llvm.loongarch.lasx.xvsrln.w.d" => "__builtin_lasx_xvsrln_w_d",
+ "llvm.loongarch.lasx.xvsrlni.b.h" => "__builtin_lasx_xvsrlni_b_h",
+ "llvm.loongarch.lasx.xvsrlni.d.q" => "__builtin_lasx_xvsrlni_d_q",
+ "llvm.loongarch.lasx.xvsrlni.h.w" => "__builtin_lasx_xvsrlni_h_w",
+ "llvm.loongarch.lasx.xvsrlni.w.d" => "__builtin_lasx_xvsrlni_w_d",
+ "llvm.loongarch.lasx.xvsrlr.b" => "__builtin_lasx_xvsrlr_b",
+ "llvm.loongarch.lasx.xvsrlr.d" => "__builtin_lasx_xvsrlr_d",
+ "llvm.loongarch.lasx.xvsrlr.h" => "__builtin_lasx_xvsrlr_h",
+ "llvm.loongarch.lasx.xvsrlr.w" => "__builtin_lasx_xvsrlr_w",
+ "llvm.loongarch.lasx.xvsrlri.b" => "__builtin_lasx_xvsrlri_b",
+ "llvm.loongarch.lasx.xvsrlri.d" => "__builtin_lasx_xvsrlri_d",
+ "llvm.loongarch.lasx.xvsrlri.h" => "__builtin_lasx_xvsrlri_h",
+ "llvm.loongarch.lasx.xvsrlri.w" => "__builtin_lasx_xvsrlri_w",
+ "llvm.loongarch.lasx.xvsrlrn.b.h" => "__builtin_lasx_xvsrlrn_b_h",
+ "llvm.loongarch.lasx.xvsrlrn.h.w" => "__builtin_lasx_xvsrlrn_h_w",
+ "llvm.loongarch.lasx.xvsrlrn.w.d" => "__builtin_lasx_xvsrlrn_w_d",
+ "llvm.loongarch.lasx.xvsrlrni.b.h" => "__builtin_lasx_xvsrlrni_b_h",
+ "llvm.loongarch.lasx.xvsrlrni.d.q" => "__builtin_lasx_xvsrlrni_d_q",
+ "llvm.loongarch.lasx.xvsrlrni.h.w" => "__builtin_lasx_xvsrlrni_h_w",
+ "llvm.loongarch.lasx.xvsrlrni.w.d" => "__builtin_lasx_xvsrlrni_w_d",
+ "llvm.loongarch.lasx.xvssran.b.h" => "__builtin_lasx_xvssran_b_h",
+ "llvm.loongarch.lasx.xvssran.bu.h" => "__builtin_lasx_xvssran_bu_h",
+ "llvm.loongarch.lasx.xvssran.h.w" => "__builtin_lasx_xvssran_h_w",
+ "llvm.loongarch.lasx.xvssran.hu.w" => "__builtin_lasx_xvssran_hu_w",
+ "llvm.loongarch.lasx.xvssran.w.d" => "__builtin_lasx_xvssran_w_d",
+ "llvm.loongarch.lasx.xvssran.wu.d" => "__builtin_lasx_xvssran_wu_d",
+ "llvm.loongarch.lasx.xvssrani.b.h" => "__builtin_lasx_xvssrani_b_h",
+ "llvm.loongarch.lasx.xvssrani.bu.h" => "__builtin_lasx_xvssrani_bu_h",
+ "llvm.loongarch.lasx.xvssrani.d.q" => "__builtin_lasx_xvssrani_d_q",
+ "llvm.loongarch.lasx.xvssrani.du.q" => "__builtin_lasx_xvssrani_du_q",
+ "llvm.loongarch.lasx.xvssrani.h.w" => "__builtin_lasx_xvssrani_h_w",
+ "llvm.loongarch.lasx.xvssrani.hu.w" => "__builtin_lasx_xvssrani_hu_w",
+ "llvm.loongarch.lasx.xvssrani.w.d" => "__builtin_lasx_xvssrani_w_d",
+ "llvm.loongarch.lasx.xvssrani.wu.d" => "__builtin_lasx_xvssrani_wu_d",
+ "llvm.loongarch.lasx.xvssrarn.b.h" => "__builtin_lasx_xvssrarn_b_h",
+ "llvm.loongarch.lasx.xvssrarn.bu.h" => "__builtin_lasx_xvssrarn_bu_h",
+ "llvm.loongarch.lasx.xvssrarn.h.w" => "__builtin_lasx_xvssrarn_h_w",
+ "llvm.loongarch.lasx.xvssrarn.hu.w" => "__builtin_lasx_xvssrarn_hu_w",
+ "llvm.loongarch.lasx.xvssrarn.w.d" => "__builtin_lasx_xvssrarn_w_d",
+ "llvm.loongarch.lasx.xvssrarn.wu.d" => "__builtin_lasx_xvssrarn_wu_d",
+ "llvm.loongarch.lasx.xvssrarni.b.h" => "__builtin_lasx_xvssrarni_b_h",
+ "llvm.loongarch.lasx.xvssrarni.bu.h" => "__builtin_lasx_xvssrarni_bu_h",
+ "llvm.loongarch.lasx.xvssrarni.d.q" => "__builtin_lasx_xvssrarni_d_q",
+ "llvm.loongarch.lasx.xvssrarni.du.q" => "__builtin_lasx_xvssrarni_du_q",
+ "llvm.loongarch.lasx.xvssrarni.h.w" => "__builtin_lasx_xvssrarni_h_w",
+ "llvm.loongarch.lasx.xvssrarni.hu.w" => "__builtin_lasx_xvssrarni_hu_w",
+ "llvm.loongarch.lasx.xvssrarni.w.d" => "__builtin_lasx_xvssrarni_w_d",
+ "llvm.loongarch.lasx.xvssrarni.wu.d" => "__builtin_lasx_xvssrarni_wu_d",
+ "llvm.loongarch.lasx.xvssrln.b.h" => "__builtin_lasx_xvssrln_b_h",
+ "llvm.loongarch.lasx.xvssrln.bu.h" => "__builtin_lasx_xvssrln_bu_h",
+ "llvm.loongarch.lasx.xvssrln.h.w" => "__builtin_lasx_xvssrln_h_w",
+ "llvm.loongarch.lasx.xvssrln.hu.w" => "__builtin_lasx_xvssrln_hu_w",
+ "llvm.loongarch.lasx.xvssrln.w.d" => "__builtin_lasx_xvssrln_w_d",
+ "llvm.loongarch.lasx.xvssrln.wu.d" => "__builtin_lasx_xvssrln_wu_d",
+ "llvm.loongarch.lasx.xvssrlni.b.h" => "__builtin_lasx_xvssrlni_b_h",
+ "llvm.loongarch.lasx.xvssrlni.bu.h" => "__builtin_lasx_xvssrlni_bu_h",
+ "llvm.loongarch.lasx.xvssrlni.d.q" => "__builtin_lasx_xvssrlni_d_q",
+ "llvm.loongarch.lasx.xvssrlni.du.q" => "__builtin_lasx_xvssrlni_du_q",
+ "llvm.loongarch.lasx.xvssrlni.h.w" => "__builtin_lasx_xvssrlni_h_w",
+ "llvm.loongarch.lasx.xvssrlni.hu.w" => "__builtin_lasx_xvssrlni_hu_w",
+ "llvm.loongarch.lasx.xvssrlni.w.d" => "__builtin_lasx_xvssrlni_w_d",
+ "llvm.loongarch.lasx.xvssrlni.wu.d" => "__builtin_lasx_xvssrlni_wu_d",
+ "llvm.loongarch.lasx.xvssrlrn.b.h" => "__builtin_lasx_xvssrlrn_b_h",
+ "llvm.loongarch.lasx.xvssrlrn.bu.h" => "__builtin_lasx_xvssrlrn_bu_h",
+ "llvm.loongarch.lasx.xvssrlrn.h.w" => "__builtin_lasx_xvssrlrn_h_w",
+ "llvm.loongarch.lasx.xvssrlrn.hu.w" => "__builtin_lasx_xvssrlrn_hu_w",
+ "llvm.loongarch.lasx.xvssrlrn.w.d" => "__builtin_lasx_xvssrlrn_w_d",
+ "llvm.loongarch.lasx.xvssrlrn.wu.d" => "__builtin_lasx_xvssrlrn_wu_d",
+ "llvm.loongarch.lasx.xvssrlrni.b.h" => "__builtin_lasx_xvssrlrni_b_h",
+ "llvm.loongarch.lasx.xvssrlrni.bu.h" => "__builtin_lasx_xvssrlrni_bu_h",
+ "llvm.loongarch.lasx.xvssrlrni.d.q" => "__builtin_lasx_xvssrlrni_d_q",
+ "llvm.loongarch.lasx.xvssrlrni.du.q" => "__builtin_lasx_xvssrlrni_du_q",
+ "llvm.loongarch.lasx.xvssrlrni.h.w" => "__builtin_lasx_xvssrlrni_h_w",
+ "llvm.loongarch.lasx.xvssrlrni.hu.w" => "__builtin_lasx_xvssrlrni_hu_w",
+ "llvm.loongarch.lasx.xvssrlrni.w.d" => "__builtin_lasx_xvssrlrni_w_d",
+ "llvm.loongarch.lasx.xvssrlrni.wu.d" => "__builtin_lasx_xvssrlrni_wu_d",
+ "llvm.loongarch.lasx.xvssub.b" => "__builtin_lasx_xvssub_b",
+ "llvm.loongarch.lasx.xvssub.bu" => "__builtin_lasx_xvssub_bu",
+ "llvm.loongarch.lasx.xvssub.d" => "__builtin_lasx_xvssub_d",
+ "llvm.loongarch.lasx.xvssub.du" => "__builtin_lasx_xvssub_du",
+ "llvm.loongarch.lasx.xvssub.h" => "__builtin_lasx_xvssub_h",
+ "llvm.loongarch.lasx.xvssub.hu" => "__builtin_lasx_xvssub_hu",
+ "llvm.loongarch.lasx.xvssub.w" => "__builtin_lasx_xvssub_w",
+ "llvm.loongarch.lasx.xvssub.wu" => "__builtin_lasx_xvssub_wu",
+ "llvm.loongarch.lasx.xvst" => "__builtin_lasx_xvst",
+ "llvm.loongarch.lasx.xvstelm.b" => "__builtin_lasx_xvstelm_b",
+ "llvm.loongarch.lasx.xvstelm.d" => "__builtin_lasx_xvstelm_d",
+ "llvm.loongarch.lasx.xvstelm.h" => "__builtin_lasx_xvstelm_h",
+ "llvm.loongarch.lasx.xvstelm.w" => "__builtin_lasx_xvstelm_w",
+ "llvm.loongarch.lasx.xvstx" => "__builtin_lasx_xvstx",
+ "llvm.loongarch.lasx.xvsub.b" => "__builtin_lasx_xvsub_b",
+ "llvm.loongarch.lasx.xvsub.d" => "__builtin_lasx_xvsub_d",
+ "llvm.loongarch.lasx.xvsub.h" => "__builtin_lasx_xvsub_h",
+ "llvm.loongarch.lasx.xvsub.q" => "__builtin_lasx_xvsub_q",
+ "llvm.loongarch.lasx.xvsub.w" => "__builtin_lasx_xvsub_w",
+ "llvm.loongarch.lasx.xvsubi.bu" => "__builtin_lasx_xvsubi_bu",
+ "llvm.loongarch.lasx.xvsubi.du" => "__builtin_lasx_xvsubi_du",
+ "llvm.loongarch.lasx.xvsubi.hu" => "__builtin_lasx_xvsubi_hu",
+ "llvm.loongarch.lasx.xvsubi.wu" => "__builtin_lasx_xvsubi_wu",
+ "llvm.loongarch.lasx.xvsubwev.d.w" => "__builtin_lasx_xvsubwev_d_w",
+ "llvm.loongarch.lasx.xvsubwev.d.wu" => "__builtin_lasx_xvsubwev_d_wu",
+ "llvm.loongarch.lasx.xvsubwev.h.b" => "__builtin_lasx_xvsubwev_h_b",
+ "llvm.loongarch.lasx.xvsubwev.h.bu" => "__builtin_lasx_xvsubwev_h_bu",
+ "llvm.loongarch.lasx.xvsubwev.q.d" => "__builtin_lasx_xvsubwev_q_d",
+ "llvm.loongarch.lasx.xvsubwev.q.du" => "__builtin_lasx_xvsubwev_q_du",
+ "llvm.loongarch.lasx.xvsubwev.w.h" => "__builtin_lasx_xvsubwev_w_h",
+ "llvm.loongarch.lasx.xvsubwev.w.hu" => "__builtin_lasx_xvsubwev_w_hu",
+ "llvm.loongarch.lasx.xvsubwod.d.w" => "__builtin_lasx_xvsubwod_d_w",
+ "llvm.loongarch.lasx.xvsubwod.d.wu" => "__builtin_lasx_xvsubwod_d_wu",
+ "llvm.loongarch.lasx.xvsubwod.h.b" => "__builtin_lasx_xvsubwod_h_b",
+ "llvm.loongarch.lasx.xvsubwod.h.bu" => "__builtin_lasx_xvsubwod_h_bu",
+ "llvm.loongarch.lasx.xvsubwod.q.d" => "__builtin_lasx_xvsubwod_q_d",
+ "llvm.loongarch.lasx.xvsubwod.q.du" => "__builtin_lasx_xvsubwod_q_du",
+ "llvm.loongarch.lasx.xvsubwod.w.h" => "__builtin_lasx_xvsubwod_w_h",
+ "llvm.loongarch.lasx.xvsubwod.w.hu" => "__builtin_lasx_xvsubwod_w_hu",
+ "llvm.loongarch.lasx.xvxor.v" => "__builtin_lasx_xvxor_v",
+ "llvm.loongarch.lasx.xvxori.b" => "__builtin_lasx_xvxori_b",
+ "llvm.loongarch.lddir.d" => "__builtin_loongarch_lddir_d",
+ "llvm.loongarch.ldpte.d" => "__builtin_loongarch_ldpte_d",
+ "llvm.loongarch.lsx.bnz.b" => "__builtin_lsx_bnz_b",
+ "llvm.loongarch.lsx.bnz.d" => "__builtin_lsx_bnz_d",
+ "llvm.loongarch.lsx.bnz.h" => "__builtin_lsx_bnz_h",
+ "llvm.loongarch.lsx.bnz.v" => "__builtin_lsx_bnz_v",
+ "llvm.loongarch.lsx.bnz.w" => "__builtin_lsx_bnz_w",
+ "llvm.loongarch.lsx.bz.b" => "__builtin_lsx_bz_b",
+ "llvm.loongarch.lsx.bz.d" => "__builtin_lsx_bz_d",
+ "llvm.loongarch.lsx.bz.h" => "__builtin_lsx_bz_h",
+ "llvm.loongarch.lsx.bz.v" => "__builtin_lsx_bz_v",
+ "llvm.loongarch.lsx.bz.w" => "__builtin_lsx_bz_w",
+ "llvm.loongarch.lsx.vabsd.b" => "__builtin_lsx_vabsd_b",
+ "llvm.loongarch.lsx.vabsd.bu" => "__builtin_lsx_vabsd_bu",
+ "llvm.loongarch.lsx.vabsd.d" => "__builtin_lsx_vabsd_d",
+ "llvm.loongarch.lsx.vabsd.du" => "__builtin_lsx_vabsd_du",
+ "llvm.loongarch.lsx.vabsd.h" => "__builtin_lsx_vabsd_h",
+ "llvm.loongarch.lsx.vabsd.hu" => "__builtin_lsx_vabsd_hu",
+ "llvm.loongarch.lsx.vabsd.w" => "__builtin_lsx_vabsd_w",
+ "llvm.loongarch.lsx.vabsd.wu" => "__builtin_lsx_vabsd_wu",
+ "llvm.loongarch.lsx.vadd.b" => "__builtin_lsx_vadd_b",
+ "llvm.loongarch.lsx.vadd.d" => "__builtin_lsx_vadd_d",
+ "llvm.loongarch.lsx.vadd.h" => "__builtin_lsx_vadd_h",
+ "llvm.loongarch.lsx.vadd.q" => "__builtin_lsx_vadd_q",
+ "llvm.loongarch.lsx.vadd.w" => "__builtin_lsx_vadd_w",
+ "llvm.loongarch.lsx.vadda.b" => "__builtin_lsx_vadda_b",
+ "llvm.loongarch.lsx.vadda.d" => "__builtin_lsx_vadda_d",
+ "llvm.loongarch.lsx.vadda.h" => "__builtin_lsx_vadda_h",
+ "llvm.loongarch.lsx.vadda.w" => "__builtin_lsx_vadda_w",
+ "llvm.loongarch.lsx.vaddi.bu" => "__builtin_lsx_vaddi_bu",
+ "llvm.loongarch.lsx.vaddi.du" => "__builtin_lsx_vaddi_du",
+ "llvm.loongarch.lsx.vaddi.hu" => "__builtin_lsx_vaddi_hu",
+ "llvm.loongarch.lsx.vaddi.wu" => "__builtin_lsx_vaddi_wu",
+ "llvm.loongarch.lsx.vaddwev.d.w" => "__builtin_lsx_vaddwev_d_w",
+ "llvm.loongarch.lsx.vaddwev.d.wu" => "__builtin_lsx_vaddwev_d_wu",
+ "llvm.loongarch.lsx.vaddwev.d.wu.w" => "__builtin_lsx_vaddwev_d_wu_w",
+ "llvm.loongarch.lsx.vaddwev.h.b" => "__builtin_lsx_vaddwev_h_b",
+ "llvm.loongarch.lsx.vaddwev.h.bu" => "__builtin_lsx_vaddwev_h_bu",
+ "llvm.loongarch.lsx.vaddwev.h.bu.b" => "__builtin_lsx_vaddwev_h_bu_b",
+ "llvm.loongarch.lsx.vaddwev.q.d" => "__builtin_lsx_vaddwev_q_d",
+ "llvm.loongarch.lsx.vaddwev.q.du" => "__builtin_lsx_vaddwev_q_du",
+ "llvm.loongarch.lsx.vaddwev.q.du.d" => "__builtin_lsx_vaddwev_q_du_d",
+ "llvm.loongarch.lsx.vaddwev.w.h" => "__builtin_lsx_vaddwev_w_h",
+ "llvm.loongarch.lsx.vaddwev.w.hu" => "__builtin_lsx_vaddwev_w_hu",
+ "llvm.loongarch.lsx.vaddwev.w.hu.h" => "__builtin_lsx_vaddwev_w_hu_h",
+ "llvm.loongarch.lsx.vaddwod.d.w" => "__builtin_lsx_vaddwod_d_w",
+ "llvm.loongarch.lsx.vaddwod.d.wu" => "__builtin_lsx_vaddwod_d_wu",
+ "llvm.loongarch.lsx.vaddwod.d.wu.w" => "__builtin_lsx_vaddwod_d_wu_w",
+ "llvm.loongarch.lsx.vaddwod.h.b" => "__builtin_lsx_vaddwod_h_b",
+ "llvm.loongarch.lsx.vaddwod.h.bu" => "__builtin_lsx_vaddwod_h_bu",
+ "llvm.loongarch.lsx.vaddwod.h.bu.b" => "__builtin_lsx_vaddwod_h_bu_b",
+ "llvm.loongarch.lsx.vaddwod.q.d" => "__builtin_lsx_vaddwod_q_d",
+ "llvm.loongarch.lsx.vaddwod.q.du" => "__builtin_lsx_vaddwod_q_du",
+ "llvm.loongarch.lsx.vaddwod.q.du.d" => "__builtin_lsx_vaddwod_q_du_d",
+ "llvm.loongarch.lsx.vaddwod.w.h" => "__builtin_lsx_vaddwod_w_h",
+ "llvm.loongarch.lsx.vaddwod.w.hu" => "__builtin_lsx_vaddwod_w_hu",
+ "llvm.loongarch.lsx.vaddwod.w.hu.h" => "__builtin_lsx_vaddwod_w_hu_h",
+ "llvm.loongarch.lsx.vand.v" => "__builtin_lsx_vand_v",
+ "llvm.loongarch.lsx.vandi.b" => "__builtin_lsx_vandi_b",
+ "llvm.loongarch.lsx.vandn.v" => "__builtin_lsx_vandn_v",
+ "llvm.loongarch.lsx.vavg.b" => "__builtin_lsx_vavg_b",
+ "llvm.loongarch.lsx.vavg.bu" => "__builtin_lsx_vavg_bu",
+ "llvm.loongarch.lsx.vavg.d" => "__builtin_lsx_vavg_d",
+ "llvm.loongarch.lsx.vavg.du" => "__builtin_lsx_vavg_du",
+ "llvm.loongarch.lsx.vavg.h" => "__builtin_lsx_vavg_h",
+ "llvm.loongarch.lsx.vavg.hu" => "__builtin_lsx_vavg_hu",
+ "llvm.loongarch.lsx.vavg.w" => "__builtin_lsx_vavg_w",
+ "llvm.loongarch.lsx.vavg.wu" => "__builtin_lsx_vavg_wu",
+ "llvm.loongarch.lsx.vavgr.b" => "__builtin_lsx_vavgr_b",
+ "llvm.loongarch.lsx.vavgr.bu" => "__builtin_lsx_vavgr_bu",
+ "llvm.loongarch.lsx.vavgr.d" => "__builtin_lsx_vavgr_d",
+ "llvm.loongarch.lsx.vavgr.du" => "__builtin_lsx_vavgr_du",
+ "llvm.loongarch.lsx.vavgr.h" => "__builtin_lsx_vavgr_h",
+ "llvm.loongarch.lsx.vavgr.hu" => "__builtin_lsx_vavgr_hu",
+ "llvm.loongarch.lsx.vavgr.w" => "__builtin_lsx_vavgr_w",
+ "llvm.loongarch.lsx.vavgr.wu" => "__builtin_lsx_vavgr_wu",
+ "llvm.loongarch.lsx.vbitclr.b" => "__builtin_lsx_vbitclr_b",
+ "llvm.loongarch.lsx.vbitclr.d" => "__builtin_lsx_vbitclr_d",
+ "llvm.loongarch.lsx.vbitclr.h" => "__builtin_lsx_vbitclr_h",
+ "llvm.loongarch.lsx.vbitclr.w" => "__builtin_lsx_vbitclr_w",
+ "llvm.loongarch.lsx.vbitclri.b" => "__builtin_lsx_vbitclri_b",
+ "llvm.loongarch.lsx.vbitclri.d" => "__builtin_lsx_vbitclri_d",
+ "llvm.loongarch.lsx.vbitclri.h" => "__builtin_lsx_vbitclri_h",
+ "llvm.loongarch.lsx.vbitclri.w" => "__builtin_lsx_vbitclri_w",
+ "llvm.loongarch.lsx.vbitrev.b" => "__builtin_lsx_vbitrev_b",
+ "llvm.loongarch.lsx.vbitrev.d" => "__builtin_lsx_vbitrev_d",
+ "llvm.loongarch.lsx.vbitrev.h" => "__builtin_lsx_vbitrev_h",
+ "llvm.loongarch.lsx.vbitrev.w" => "__builtin_lsx_vbitrev_w",
+ "llvm.loongarch.lsx.vbitrevi.b" => "__builtin_lsx_vbitrevi_b",
+ "llvm.loongarch.lsx.vbitrevi.d" => "__builtin_lsx_vbitrevi_d",
+ "llvm.loongarch.lsx.vbitrevi.h" => "__builtin_lsx_vbitrevi_h",
+ "llvm.loongarch.lsx.vbitrevi.w" => "__builtin_lsx_vbitrevi_w",
+ "llvm.loongarch.lsx.vbitsel.v" => "__builtin_lsx_vbitsel_v",
+ "llvm.loongarch.lsx.vbitseli.b" => "__builtin_lsx_vbitseli_b",
+ "llvm.loongarch.lsx.vbitset.b" => "__builtin_lsx_vbitset_b",
+ "llvm.loongarch.lsx.vbitset.d" => "__builtin_lsx_vbitset_d",
+ "llvm.loongarch.lsx.vbitset.h" => "__builtin_lsx_vbitset_h",
+ "llvm.loongarch.lsx.vbitset.w" => "__builtin_lsx_vbitset_w",
+ "llvm.loongarch.lsx.vbitseti.b" => "__builtin_lsx_vbitseti_b",
+ "llvm.loongarch.lsx.vbitseti.d" => "__builtin_lsx_vbitseti_d",
+ "llvm.loongarch.lsx.vbitseti.h" => "__builtin_lsx_vbitseti_h",
+ "llvm.loongarch.lsx.vbitseti.w" => "__builtin_lsx_vbitseti_w",
+ "llvm.loongarch.lsx.vbsll.v" => "__builtin_lsx_vbsll_v",
+ "llvm.loongarch.lsx.vbsrl.v" => "__builtin_lsx_vbsrl_v",
+ "llvm.loongarch.lsx.vclo.b" => "__builtin_lsx_vclo_b",
+ "llvm.loongarch.lsx.vclo.d" => "__builtin_lsx_vclo_d",
+ "llvm.loongarch.lsx.vclo.h" => "__builtin_lsx_vclo_h",
+ "llvm.loongarch.lsx.vclo.w" => "__builtin_lsx_vclo_w",
+ "llvm.loongarch.lsx.vclz.b" => "__builtin_lsx_vclz_b",
+ "llvm.loongarch.lsx.vclz.d" => "__builtin_lsx_vclz_d",
+ "llvm.loongarch.lsx.vclz.h" => "__builtin_lsx_vclz_h",
+ "llvm.loongarch.lsx.vclz.w" => "__builtin_lsx_vclz_w",
+ "llvm.loongarch.lsx.vdiv.b" => "__builtin_lsx_vdiv_b",
+ "llvm.loongarch.lsx.vdiv.bu" => "__builtin_lsx_vdiv_bu",
+ "llvm.loongarch.lsx.vdiv.d" => "__builtin_lsx_vdiv_d",
+ "llvm.loongarch.lsx.vdiv.du" => "__builtin_lsx_vdiv_du",
+ "llvm.loongarch.lsx.vdiv.h" => "__builtin_lsx_vdiv_h",
+ "llvm.loongarch.lsx.vdiv.hu" => "__builtin_lsx_vdiv_hu",
+ "llvm.loongarch.lsx.vdiv.w" => "__builtin_lsx_vdiv_w",
+ "llvm.loongarch.lsx.vdiv.wu" => "__builtin_lsx_vdiv_wu",
+ "llvm.loongarch.lsx.vexth.d.w" => "__builtin_lsx_vexth_d_w",
+ "llvm.loongarch.lsx.vexth.du.wu" => "__builtin_lsx_vexth_du_wu",
+ "llvm.loongarch.lsx.vexth.h.b" => "__builtin_lsx_vexth_h_b",
+ "llvm.loongarch.lsx.vexth.hu.bu" => "__builtin_lsx_vexth_hu_bu",
+ "llvm.loongarch.lsx.vexth.q.d" => "__builtin_lsx_vexth_q_d",
+ "llvm.loongarch.lsx.vexth.qu.du" => "__builtin_lsx_vexth_qu_du",
+ "llvm.loongarch.lsx.vexth.w.h" => "__builtin_lsx_vexth_w_h",
+ "llvm.loongarch.lsx.vexth.wu.hu" => "__builtin_lsx_vexth_wu_hu",
+ "llvm.loongarch.lsx.vextl.q.d" => "__builtin_lsx_vextl_q_d",
+ "llvm.loongarch.lsx.vextl.qu.du" => "__builtin_lsx_vextl_qu_du",
+ "llvm.loongarch.lsx.vextrins.b" => "__builtin_lsx_vextrins_b",
+ "llvm.loongarch.lsx.vextrins.d" => "__builtin_lsx_vextrins_d",
+ "llvm.loongarch.lsx.vextrins.h" => "__builtin_lsx_vextrins_h",
+ "llvm.loongarch.lsx.vextrins.w" => "__builtin_lsx_vextrins_w",
+ "llvm.loongarch.lsx.vfadd.d" => "__builtin_lsx_vfadd_d",
+ "llvm.loongarch.lsx.vfadd.s" => "__builtin_lsx_vfadd_s",
+ "llvm.loongarch.lsx.vfclass.d" => "__builtin_lsx_vfclass_d",
+ "llvm.loongarch.lsx.vfclass.s" => "__builtin_lsx_vfclass_s",
+ "llvm.loongarch.lsx.vfcmp.caf.d" => "__builtin_lsx_vfcmp_caf_d",
+ "llvm.loongarch.lsx.vfcmp.caf.s" => "__builtin_lsx_vfcmp_caf_s",
+ "llvm.loongarch.lsx.vfcmp.ceq.d" => "__builtin_lsx_vfcmp_ceq_d",
+ "llvm.loongarch.lsx.vfcmp.ceq.s" => "__builtin_lsx_vfcmp_ceq_s",
+ "llvm.loongarch.lsx.vfcmp.cle.d" => "__builtin_lsx_vfcmp_cle_d",
+ "llvm.loongarch.lsx.vfcmp.cle.s" => "__builtin_lsx_vfcmp_cle_s",
+ "llvm.loongarch.lsx.vfcmp.clt.d" => "__builtin_lsx_vfcmp_clt_d",
+ "llvm.loongarch.lsx.vfcmp.clt.s" => "__builtin_lsx_vfcmp_clt_s",
+ "llvm.loongarch.lsx.vfcmp.cne.d" => "__builtin_lsx_vfcmp_cne_d",
+ "llvm.loongarch.lsx.vfcmp.cne.s" => "__builtin_lsx_vfcmp_cne_s",
+ "llvm.loongarch.lsx.vfcmp.cor.d" => "__builtin_lsx_vfcmp_cor_d",
+ "llvm.loongarch.lsx.vfcmp.cor.s" => "__builtin_lsx_vfcmp_cor_s",
+ "llvm.loongarch.lsx.vfcmp.cueq.d" => "__builtin_lsx_vfcmp_cueq_d",
+ "llvm.loongarch.lsx.vfcmp.cueq.s" => "__builtin_lsx_vfcmp_cueq_s",
+ "llvm.loongarch.lsx.vfcmp.cule.d" => "__builtin_lsx_vfcmp_cule_d",
+ "llvm.loongarch.lsx.vfcmp.cule.s" => "__builtin_lsx_vfcmp_cule_s",
+ "llvm.loongarch.lsx.vfcmp.cult.d" => "__builtin_lsx_vfcmp_cult_d",
+ "llvm.loongarch.lsx.vfcmp.cult.s" => "__builtin_lsx_vfcmp_cult_s",
+ "llvm.loongarch.lsx.vfcmp.cun.d" => "__builtin_lsx_vfcmp_cun_d",
+ "llvm.loongarch.lsx.vfcmp.cun.s" => "__builtin_lsx_vfcmp_cun_s",
+ "llvm.loongarch.lsx.vfcmp.cune.d" => "__builtin_lsx_vfcmp_cune_d",
+ "llvm.loongarch.lsx.vfcmp.cune.s" => "__builtin_lsx_vfcmp_cune_s",
+ "llvm.loongarch.lsx.vfcmp.saf.d" => "__builtin_lsx_vfcmp_saf_d",
+ "llvm.loongarch.lsx.vfcmp.saf.s" => "__builtin_lsx_vfcmp_saf_s",
+ "llvm.loongarch.lsx.vfcmp.seq.d" => "__builtin_lsx_vfcmp_seq_d",
+ "llvm.loongarch.lsx.vfcmp.seq.s" => "__builtin_lsx_vfcmp_seq_s",
+ "llvm.loongarch.lsx.vfcmp.sle.d" => "__builtin_lsx_vfcmp_sle_d",
+ "llvm.loongarch.lsx.vfcmp.sle.s" => "__builtin_lsx_vfcmp_sle_s",
+ "llvm.loongarch.lsx.vfcmp.slt.d" => "__builtin_lsx_vfcmp_slt_d",
+ "llvm.loongarch.lsx.vfcmp.slt.s" => "__builtin_lsx_vfcmp_slt_s",
+ "llvm.loongarch.lsx.vfcmp.sne.d" => "__builtin_lsx_vfcmp_sne_d",
+ "llvm.loongarch.lsx.vfcmp.sne.s" => "__builtin_lsx_vfcmp_sne_s",
+ "llvm.loongarch.lsx.vfcmp.sor.d" => "__builtin_lsx_vfcmp_sor_d",
+ "llvm.loongarch.lsx.vfcmp.sor.s" => "__builtin_lsx_vfcmp_sor_s",
+ "llvm.loongarch.lsx.vfcmp.sueq.d" => "__builtin_lsx_vfcmp_sueq_d",
+ "llvm.loongarch.lsx.vfcmp.sueq.s" => "__builtin_lsx_vfcmp_sueq_s",
+ "llvm.loongarch.lsx.vfcmp.sule.d" => "__builtin_lsx_vfcmp_sule_d",
+ "llvm.loongarch.lsx.vfcmp.sule.s" => "__builtin_lsx_vfcmp_sule_s",
+ "llvm.loongarch.lsx.vfcmp.sult.d" => "__builtin_lsx_vfcmp_sult_d",
+ "llvm.loongarch.lsx.vfcmp.sult.s" => "__builtin_lsx_vfcmp_sult_s",
+ "llvm.loongarch.lsx.vfcmp.sun.d" => "__builtin_lsx_vfcmp_sun_d",
+ "llvm.loongarch.lsx.vfcmp.sun.s" => "__builtin_lsx_vfcmp_sun_s",
+ "llvm.loongarch.lsx.vfcmp.sune.d" => "__builtin_lsx_vfcmp_sune_d",
+ "llvm.loongarch.lsx.vfcmp.sune.s" => "__builtin_lsx_vfcmp_sune_s",
+ "llvm.loongarch.lsx.vfcvt.h.s" => "__builtin_lsx_vfcvt_h_s",
+ "llvm.loongarch.lsx.vfcvt.s.d" => "__builtin_lsx_vfcvt_s_d",
+ "llvm.loongarch.lsx.vfcvth.d.s" => "__builtin_lsx_vfcvth_d_s",
+ "llvm.loongarch.lsx.vfcvth.s.h" => "__builtin_lsx_vfcvth_s_h",
+ "llvm.loongarch.lsx.vfcvtl.d.s" => "__builtin_lsx_vfcvtl_d_s",
+ "llvm.loongarch.lsx.vfcvtl.s.h" => "__builtin_lsx_vfcvtl_s_h",
+ "llvm.loongarch.lsx.vfdiv.d" => "__builtin_lsx_vfdiv_d",
+ "llvm.loongarch.lsx.vfdiv.s" => "__builtin_lsx_vfdiv_s",
+ "llvm.loongarch.lsx.vffint.d.l" => "__builtin_lsx_vffint_d_l",
+ "llvm.loongarch.lsx.vffint.d.lu" => "__builtin_lsx_vffint_d_lu",
+ "llvm.loongarch.lsx.vffint.s.l" => "__builtin_lsx_vffint_s_l",
+ "llvm.loongarch.lsx.vffint.s.w" => "__builtin_lsx_vffint_s_w",
+ "llvm.loongarch.lsx.vffint.s.wu" => "__builtin_lsx_vffint_s_wu",
+ "llvm.loongarch.lsx.vffinth.d.w" => "__builtin_lsx_vffinth_d_w",
+ "llvm.loongarch.lsx.vffintl.d.w" => "__builtin_lsx_vffintl_d_w",
+ "llvm.loongarch.lsx.vflogb.d" => "__builtin_lsx_vflogb_d",
+ "llvm.loongarch.lsx.vflogb.s" => "__builtin_lsx_vflogb_s",
+ "llvm.loongarch.lsx.vfmadd.d" => "__builtin_lsx_vfmadd_d",
+ "llvm.loongarch.lsx.vfmadd.s" => "__builtin_lsx_vfmadd_s",
+ "llvm.loongarch.lsx.vfmax.d" => "__builtin_lsx_vfmax_d",
+ "llvm.loongarch.lsx.vfmax.s" => "__builtin_lsx_vfmax_s",
+ "llvm.loongarch.lsx.vfmaxa.d" => "__builtin_lsx_vfmaxa_d",
+ "llvm.loongarch.lsx.vfmaxa.s" => "__builtin_lsx_vfmaxa_s",
+ "llvm.loongarch.lsx.vfmin.d" => "__builtin_lsx_vfmin_d",
+ "llvm.loongarch.lsx.vfmin.s" => "__builtin_lsx_vfmin_s",
+ "llvm.loongarch.lsx.vfmina.d" => "__builtin_lsx_vfmina_d",
+ "llvm.loongarch.lsx.vfmina.s" => "__builtin_lsx_vfmina_s",
+ "llvm.loongarch.lsx.vfmsub.d" => "__builtin_lsx_vfmsub_d",
+ "llvm.loongarch.lsx.vfmsub.s" => "__builtin_lsx_vfmsub_s",
+ "llvm.loongarch.lsx.vfmul.d" => "__builtin_lsx_vfmul_d",
+ "llvm.loongarch.lsx.vfmul.s" => "__builtin_lsx_vfmul_s",
+ "llvm.loongarch.lsx.vfnmadd.d" => "__builtin_lsx_vfnmadd_d",
+ "llvm.loongarch.lsx.vfnmadd.s" => "__builtin_lsx_vfnmadd_s",
+ "llvm.loongarch.lsx.vfnmsub.d" => "__builtin_lsx_vfnmsub_d",
+ "llvm.loongarch.lsx.vfnmsub.s" => "__builtin_lsx_vfnmsub_s",
+ "llvm.loongarch.lsx.vfrecip.d" => "__builtin_lsx_vfrecip_d",
+ "llvm.loongarch.lsx.vfrecip.s" => "__builtin_lsx_vfrecip_s",
+ "llvm.loongarch.lsx.vfrint.d" => "__builtin_lsx_vfrint_d",
+ "llvm.loongarch.lsx.vfrint.s" => "__builtin_lsx_vfrint_s",
+ "llvm.loongarch.lsx.vfrintrm.d" => "__builtin_lsx_vfrintrm_d",
+ "llvm.loongarch.lsx.vfrintrm.s" => "__builtin_lsx_vfrintrm_s",
+ "llvm.loongarch.lsx.vfrintrne.d" => "__builtin_lsx_vfrintrne_d",
+ "llvm.loongarch.lsx.vfrintrne.s" => "__builtin_lsx_vfrintrne_s",
+ "llvm.loongarch.lsx.vfrintrp.d" => "__builtin_lsx_vfrintrp_d",
+ "llvm.loongarch.lsx.vfrintrp.s" => "__builtin_lsx_vfrintrp_s",
+ "llvm.loongarch.lsx.vfrintrz.d" => "__builtin_lsx_vfrintrz_d",
+ "llvm.loongarch.lsx.vfrintrz.s" => "__builtin_lsx_vfrintrz_s",
+ "llvm.loongarch.lsx.vfrsqrt.d" => "__builtin_lsx_vfrsqrt_d",
+ "llvm.loongarch.lsx.vfrsqrt.s" => "__builtin_lsx_vfrsqrt_s",
+ "llvm.loongarch.lsx.vfrstp.b" => "__builtin_lsx_vfrstp_b",
+ "llvm.loongarch.lsx.vfrstp.h" => "__builtin_lsx_vfrstp_h",
+ "llvm.loongarch.lsx.vfrstpi.b" => "__builtin_lsx_vfrstpi_b",
+ "llvm.loongarch.lsx.vfrstpi.h" => "__builtin_lsx_vfrstpi_h",
+ "llvm.loongarch.lsx.vfsqrt.d" => "__builtin_lsx_vfsqrt_d",
+ "llvm.loongarch.lsx.vfsqrt.s" => "__builtin_lsx_vfsqrt_s",
+ "llvm.loongarch.lsx.vfsub.d" => "__builtin_lsx_vfsub_d",
+ "llvm.loongarch.lsx.vfsub.s" => "__builtin_lsx_vfsub_s",
+ "llvm.loongarch.lsx.vftint.l.d" => "__builtin_lsx_vftint_l_d",
+ "llvm.loongarch.lsx.vftint.lu.d" => "__builtin_lsx_vftint_lu_d",
+ "llvm.loongarch.lsx.vftint.w.d" => "__builtin_lsx_vftint_w_d",
+ "llvm.loongarch.lsx.vftint.w.s" => "__builtin_lsx_vftint_w_s",
+ "llvm.loongarch.lsx.vftint.wu.s" => "__builtin_lsx_vftint_wu_s",
+ "llvm.loongarch.lsx.vftinth.l.s" => "__builtin_lsx_vftinth_l_s",
+ "llvm.loongarch.lsx.vftintl.l.s" => "__builtin_lsx_vftintl_l_s",
+ "llvm.loongarch.lsx.vftintrm.l.d" => "__builtin_lsx_vftintrm_l_d",
+ "llvm.loongarch.lsx.vftintrm.w.d" => "__builtin_lsx_vftintrm_w_d",
+ "llvm.loongarch.lsx.vftintrm.w.s" => "__builtin_lsx_vftintrm_w_s",
+ "llvm.loongarch.lsx.vftintrmh.l.s" => "__builtin_lsx_vftintrmh_l_s",
+ "llvm.loongarch.lsx.vftintrml.l.s" => "__builtin_lsx_vftintrml_l_s",
+ "llvm.loongarch.lsx.vftintrne.l.d" => "__builtin_lsx_vftintrne_l_d",
+ "llvm.loongarch.lsx.vftintrne.w.d" => "__builtin_lsx_vftintrne_w_d",
+ "llvm.loongarch.lsx.vftintrne.w.s" => "__builtin_lsx_vftintrne_w_s",
+ "llvm.loongarch.lsx.vftintrneh.l.s" => "__builtin_lsx_vftintrneh_l_s",
+ "llvm.loongarch.lsx.vftintrnel.l.s" => "__builtin_lsx_vftintrnel_l_s",
+ "llvm.loongarch.lsx.vftintrp.l.d" => "__builtin_lsx_vftintrp_l_d",
+ "llvm.loongarch.lsx.vftintrp.w.d" => "__builtin_lsx_vftintrp_w_d",
+ "llvm.loongarch.lsx.vftintrp.w.s" => "__builtin_lsx_vftintrp_w_s",
+ "llvm.loongarch.lsx.vftintrph.l.s" => "__builtin_lsx_vftintrph_l_s",
+ "llvm.loongarch.lsx.vftintrpl.l.s" => "__builtin_lsx_vftintrpl_l_s",
+ "llvm.loongarch.lsx.vftintrz.l.d" => "__builtin_lsx_vftintrz_l_d",
+ "llvm.loongarch.lsx.vftintrz.lu.d" => "__builtin_lsx_vftintrz_lu_d",
+ "llvm.loongarch.lsx.vftintrz.w.d" => "__builtin_lsx_vftintrz_w_d",
+ "llvm.loongarch.lsx.vftintrz.w.s" => "__builtin_lsx_vftintrz_w_s",
+ "llvm.loongarch.lsx.vftintrz.wu.s" => "__builtin_lsx_vftintrz_wu_s",
+ "llvm.loongarch.lsx.vftintrzh.l.s" => "__builtin_lsx_vftintrzh_l_s",
+ "llvm.loongarch.lsx.vftintrzl.l.s" => "__builtin_lsx_vftintrzl_l_s",
+ "llvm.loongarch.lsx.vhaddw.d.w" => "__builtin_lsx_vhaddw_d_w",
+ "llvm.loongarch.lsx.vhaddw.du.wu" => "__builtin_lsx_vhaddw_du_wu",
+ "llvm.loongarch.lsx.vhaddw.h.b" => "__builtin_lsx_vhaddw_h_b",
+ "llvm.loongarch.lsx.vhaddw.hu.bu" => "__builtin_lsx_vhaddw_hu_bu",
+ "llvm.loongarch.lsx.vhaddw.q.d" => "__builtin_lsx_vhaddw_q_d",
+ "llvm.loongarch.lsx.vhaddw.qu.du" => "__builtin_lsx_vhaddw_qu_du",
+ "llvm.loongarch.lsx.vhaddw.w.h" => "__builtin_lsx_vhaddw_w_h",
+ "llvm.loongarch.lsx.vhaddw.wu.hu" => "__builtin_lsx_vhaddw_wu_hu",
+ "llvm.loongarch.lsx.vhsubw.d.w" => "__builtin_lsx_vhsubw_d_w",
+ "llvm.loongarch.lsx.vhsubw.du.wu" => "__builtin_lsx_vhsubw_du_wu",
+ "llvm.loongarch.lsx.vhsubw.h.b" => "__builtin_lsx_vhsubw_h_b",
+ "llvm.loongarch.lsx.vhsubw.hu.bu" => "__builtin_lsx_vhsubw_hu_bu",
+ "llvm.loongarch.lsx.vhsubw.q.d" => "__builtin_lsx_vhsubw_q_d",
+ "llvm.loongarch.lsx.vhsubw.qu.du" => "__builtin_lsx_vhsubw_qu_du",
+ "llvm.loongarch.lsx.vhsubw.w.h" => "__builtin_lsx_vhsubw_w_h",
+ "llvm.loongarch.lsx.vhsubw.wu.hu" => "__builtin_lsx_vhsubw_wu_hu",
+ "llvm.loongarch.lsx.vilvh.b" => "__builtin_lsx_vilvh_b",
+ "llvm.loongarch.lsx.vilvh.d" => "__builtin_lsx_vilvh_d",
+ "llvm.loongarch.lsx.vilvh.h" => "__builtin_lsx_vilvh_h",
+ "llvm.loongarch.lsx.vilvh.w" => "__builtin_lsx_vilvh_w",
+ "llvm.loongarch.lsx.vilvl.b" => "__builtin_lsx_vilvl_b",
+ "llvm.loongarch.lsx.vilvl.d" => "__builtin_lsx_vilvl_d",
+ "llvm.loongarch.lsx.vilvl.h" => "__builtin_lsx_vilvl_h",
+ "llvm.loongarch.lsx.vilvl.w" => "__builtin_lsx_vilvl_w",
+ "llvm.loongarch.lsx.vinsgr2vr.b" => "__builtin_lsx_vinsgr2vr_b",
+ "llvm.loongarch.lsx.vinsgr2vr.d" => "__builtin_lsx_vinsgr2vr_d",
+ "llvm.loongarch.lsx.vinsgr2vr.h" => "__builtin_lsx_vinsgr2vr_h",
+ "llvm.loongarch.lsx.vinsgr2vr.w" => "__builtin_lsx_vinsgr2vr_w",
+ "llvm.loongarch.lsx.vld" => "__builtin_lsx_vld",
+ "llvm.loongarch.lsx.vldi" => "__builtin_lsx_vldi",
+ "llvm.loongarch.lsx.vldrepl.b" => "__builtin_lsx_vldrepl_b",
+ "llvm.loongarch.lsx.vldrepl.d" => "__builtin_lsx_vldrepl_d",
+ "llvm.loongarch.lsx.vldrepl.h" => "__builtin_lsx_vldrepl_h",
+ "llvm.loongarch.lsx.vldrepl.w" => "__builtin_lsx_vldrepl_w",
+ "llvm.loongarch.lsx.vldx" => "__builtin_lsx_vldx",
+ "llvm.loongarch.lsx.vmadd.b" => "__builtin_lsx_vmadd_b",
+ "llvm.loongarch.lsx.vmadd.d" => "__builtin_lsx_vmadd_d",
+ "llvm.loongarch.lsx.vmadd.h" => "__builtin_lsx_vmadd_h",
+ "llvm.loongarch.lsx.vmadd.w" => "__builtin_lsx_vmadd_w",
+ "llvm.loongarch.lsx.vmaddwev.d.w" => "__builtin_lsx_vmaddwev_d_w",
+ "llvm.loongarch.lsx.vmaddwev.d.wu" => "__builtin_lsx_vmaddwev_d_wu",
+ "llvm.loongarch.lsx.vmaddwev.d.wu.w" => "__builtin_lsx_vmaddwev_d_wu_w",
+ "llvm.loongarch.lsx.vmaddwev.h.b" => "__builtin_lsx_vmaddwev_h_b",
+ "llvm.loongarch.lsx.vmaddwev.h.bu" => "__builtin_lsx_vmaddwev_h_bu",
+ "llvm.loongarch.lsx.vmaddwev.h.bu.b" => "__builtin_lsx_vmaddwev_h_bu_b",
+ "llvm.loongarch.lsx.vmaddwev.q.d" => "__builtin_lsx_vmaddwev_q_d",
+ "llvm.loongarch.lsx.vmaddwev.q.du" => "__builtin_lsx_vmaddwev_q_du",
+ "llvm.loongarch.lsx.vmaddwev.q.du.d" => "__builtin_lsx_vmaddwev_q_du_d",
+ "llvm.loongarch.lsx.vmaddwev.w.h" => "__builtin_lsx_vmaddwev_w_h",
+ "llvm.loongarch.lsx.vmaddwev.w.hu" => "__builtin_lsx_vmaddwev_w_hu",
+ "llvm.loongarch.lsx.vmaddwev.w.hu.h" => "__builtin_lsx_vmaddwev_w_hu_h",
+ "llvm.loongarch.lsx.vmaddwod.d.w" => "__builtin_lsx_vmaddwod_d_w",
+ "llvm.loongarch.lsx.vmaddwod.d.wu" => "__builtin_lsx_vmaddwod_d_wu",
+ "llvm.loongarch.lsx.vmaddwod.d.wu.w" => "__builtin_lsx_vmaddwod_d_wu_w",
+ "llvm.loongarch.lsx.vmaddwod.h.b" => "__builtin_lsx_vmaddwod_h_b",
+ "llvm.loongarch.lsx.vmaddwod.h.bu" => "__builtin_lsx_vmaddwod_h_bu",
+ "llvm.loongarch.lsx.vmaddwod.h.bu.b" => "__builtin_lsx_vmaddwod_h_bu_b",
+ "llvm.loongarch.lsx.vmaddwod.q.d" => "__builtin_lsx_vmaddwod_q_d",
+ "llvm.loongarch.lsx.vmaddwod.q.du" => "__builtin_lsx_vmaddwod_q_du",
+ "llvm.loongarch.lsx.vmaddwod.q.du.d" => "__builtin_lsx_vmaddwod_q_du_d",
+ "llvm.loongarch.lsx.vmaddwod.w.h" => "__builtin_lsx_vmaddwod_w_h",
+ "llvm.loongarch.lsx.vmaddwod.w.hu" => "__builtin_lsx_vmaddwod_w_hu",
+ "llvm.loongarch.lsx.vmaddwod.w.hu.h" => "__builtin_lsx_vmaddwod_w_hu_h",
+ "llvm.loongarch.lsx.vmax.b" => "__builtin_lsx_vmax_b",
+ "llvm.loongarch.lsx.vmax.bu" => "__builtin_lsx_vmax_bu",
+ "llvm.loongarch.lsx.vmax.d" => "__builtin_lsx_vmax_d",
+ "llvm.loongarch.lsx.vmax.du" => "__builtin_lsx_vmax_du",
+ "llvm.loongarch.lsx.vmax.h" => "__builtin_lsx_vmax_h",
+ "llvm.loongarch.lsx.vmax.hu" => "__builtin_lsx_vmax_hu",
+ "llvm.loongarch.lsx.vmax.w" => "__builtin_lsx_vmax_w",
+ "llvm.loongarch.lsx.vmax.wu" => "__builtin_lsx_vmax_wu",
+ "llvm.loongarch.lsx.vmaxi.b" => "__builtin_lsx_vmaxi_b",
+ "llvm.loongarch.lsx.vmaxi.bu" => "__builtin_lsx_vmaxi_bu",
+ "llvm.loongarch.lsx.vmaxi.d" => "__builtin_lsx_vmaxi_d",
+ "llvm.loongarch.lsx.vmaxi.du" => "__builtin_lsx_vmaxi_du",
+ "llvm.loongarch.lsx.vmaxi.h" => "__builtin_lsx_vmaxi_h",
+ "llvm.loongarch.lsx.vmaxi.hu" => "__builtin_lsx_vmaxi_hu",
+ "llvm.loongarch.lsx.vmaxi.w" => "__builtin_lsx_vmaxi_w",
+ "llvm.loongarch.lsx.vmaxi.wu" => "__builtin_lsx_vmaxi_wu",
+ "llvm.loongarch.lsx.vmin.b" => "__builtin_lsx_vmin_b",
+ "llvm.loongarch.lsx.vmin.bu" => "__builtin_lsx_vmin_bu",
+ "llvm.loongarch.lsx.vmin.d" => "__builtin_lsx_vmin_d",
+ "llvm.loongarch.lsx.vmin.du" => "__builtin_lsx_vmin_du",
+ "llvm.loongarch.lsx.vmin.h" => "__builtin_lsx_vmin_h",
+ "llvm.loongarch.lsx.vmin.hu" => "__builtin_lsx_vmin_hu",
+ "llvm.loongarch.lsx.vmin.w" => "__builtin_lsx_vmin_w",
+ "llvm.loongarch.lsx.vmin.wu" => "__builtin_lsx_vmin_wu",
+ "llvm.loongarch.lsx.vmini.b" => "__builtin_lsx_vmini_b",
+ "llvm.loongarch.lsx.vmini.bu" => "__builtin_lsx_vmini_bu",
+ "llvm.loongarch.lsx.vmini.d" => "__builtin_lsx_vmini_d",
+ "llvm.loongarch.lsx.vmini.du" => "__builtin_lsx_vmini_du",
+ "llvm.loongarch.lsx.vmini.h" => "__builtin_lsx_vmini_h",
+ "llvm.loongarch.lsx.vmini.hu" => "__builtin_lsx_vmini_hu",
+ "llvm.loongarch.lsx.vmini.w" => "__builtin_lsx_vmini_w",
+ "llvm.loongarch.lsx.vmini.wu" => "__builtin_lsx_vmini_wu",
+ "llvm.loongarch.lsx.vmod.b" => "__builtin_lsx_vmod_b",
+ "llvm.loongarch.lsx.vmod.bu" => "__builtin_lsx_vmod_bu",
+ "llvm.loongarch.lsx.vmod.d" => "__builtin_lsx_vmod_d",
+ "llvm.loongarch.lsx.vmod.du" => "__builtin_lsx_vmod_du",
+ "llvm.loongarch.lsx.vmod.h" => "__builtin_lsx_vmod_h",
+ "llvm.loongarch.lsx.vmod.hu" => "__builtin_lsx_vmod_hu",
+ "llvm.loongarch.lsx.vmod.w" => "__builtin_lsx_vmod_w",
+ "llvm.loongarch.lsx.vmod.wu" => "__builtin_lsx_vmod_wu",
+ "llvm.loongarch.lsx.vmskgez.b" => "__builtin_lsx_vmskgez_b",
+ "llvm.loongarch.lsx.vmskltz.b" => "__builtin_lsx_vmskltz_b",
+ "llvm.loongarch.lsx.vmskltz.d" => "__builtin_lsx_vmskltz_d",
+ "llvm.loongarch.lsx.vmskltz.h" => "__builtin_lsx_vmskltz_h",
+ "llvm.loongarch.lsx.vmskltz.w" => "__builtin_lsx_vmskltz_w",
+ "llvm.loongarch.lsx.vmsknz.b" => "__builtin_lsx_vmsknz_b",
+ "llvm.loongarch.lsx.vmsub.b" => "__builtin_lsx_vmsub_b",
+ "llvm.loongarch.lsx.vmsub.d" => "__builtin_lsx_vmsub_d",
+ "llvm.loongarch.lsx.vmsub.h" => "__builtin_lsx_vmsub_h",
+ "llvm.loongarch.lsx.vmsub.w" => "__builtin_lsx_vmsub_w",
+ "llvm.loongarch.lsx.vmuh.b" => "__builtin_lsx_vmuh_b",
+ "llvm.loongarch.lsx.vmuh.bu" => "__builtin_lsx_vmuh_bu",
+ "llvm.loongarch.lsx.vmuh.d" => "__builtin_lsx_vmuh_d",
+ "llvm.loongarch.lsx.vmuh.du" => "__builtin_lsx_vmuh_du",
+ "llvm.loongarch.lsx.vmuh.h" => "__builtin_lsx_vmuh_h",
+ "llvm.loongarch.lsx.vmuh.hu" => "__builtin_lsx_vmuh_hu",
+ "llvm.loongarch.lsx.vmuh.w" => "__builtin_lsx_vmuh_w",
+ "llvm.loongarch.lsx.vmuh.wu" => "__builtin_lsx_vmuh_wu",
+ "llvm.loongarch.lsx.vmul.b" => "__builtin_lsx_vmul_b",
+ "llvm.loongarch.lsx.vmul.d" => "__builtin_lsx_vmul_d",
+ "llvm.loongarch.lsx.vmul.h" => "__builtin_lsx_vmul_h",
+ "llvm.loongarch.lsx.vmul.w" => "__builtin_lsx_vmul_w",
+ "llvm.loongarch.lsx.vmulwev.d.w" => "__builtin_lsx_vmulwev_d_w",
+ "llvm.loongarch.lsx.vmulwev.d.wu" => "__builtin_lsx_vmulwev_d_wu",
+ "llvm.loongarch.lsx.vmulwev.d.wu.w" => "__builtin_lsx_vmulwev_d_wu_w",
+ "llvm.loongarch.lsx.vmulwev.h.b" => "__builtin_lsx_vmulwev_h_b",
+ "llvm.loongarch.lsx.vmulwev.h.bu" => "__builtin_lsx_vmulwev_h_bu",
+ "llvm.loongarch.lsx.vmulwev.h.bu.b" => "__builtin_lsx_vmulwev_h_bu_b",
+ "llvm.loongarch.lsx.vmulwev.q.d" => "__builtin_lsx_vmulwev_q_d",
+ "llvm.loongarch.lsx.vmulwev.q.du" => "__builtin_lsx_vmulwev_q_du",
+ "llvm.loongarch.lsx.vmulwev.q.du.d" => "__builtin_lsx_vmulwev_q_du_d",
+ "llvm.loongarch.lsx.vmulwev.w.h" => "__builtin_lsx_vmulwev_w_h",
+ "llvm.loongarch.lsx.vmulwev.w.hu" => "__builtin_lsx_vmulwev_w_hu",
+ "llvm.loongarch.lsx.vmulwev.w.hu.h" => "__builtin_lsx_vmulwev_w_hu_h",
+ "llvm.loongarch.lsx.vmulwod.d.w" => "__builtin_lsx_vmulwod_d_w",
+ "llvm.loongarch.lsx.vmulwod.d.wu" => "__builtin_lsx_vmulwod_d_wu",
+ "llvm.loongarch.lsx.vmulwod.d.wu.w" => "__builtin_lsx_vmulwod_d_wu_w",
+ "llvm.loongarch.lsx.vmulwod.h.b" => "__builtin_lsx_vmulwod_h_b",
+ "llvm.loongarch.lsx.vmulwod.h.bu" => "__builtin_lsx_vmulwod_h_bu",
+ "llvm.loongarch.lsx.vmulwod.h.bu.b" => "__builtin_lsx_vmulwod_h_bu_b",
+ "llvm.loongarch.lsx.vmulwod.q.d" => "__builtin_lsx_vmulwod_q_d",
+ "llvm.loongarch.lsx.vmulwod.q.du" => "__builtin_lsx_vmulwod_q_du",
+ "llvm.loongarch.lsx.vmulwod.q.du.d" => "__builtin_lsx_vmulwod_q_du_d",
+ "llvm.loongarch.lsx.vmulwod.w.h" => "__builtin_lsx_vmulwod_w_h",
+ "llvm.loongarch.lsx.vmulwod.w.hu" => "__builtin_lsx_vmulwod_w_hu",
+ "llvm.loongarch.lsx.vmulwod.w.hu.h" => "__builtin_lsx_vmulwod_w_hu_h",
+ "llvm.loongarch.lsx.vneg.b" => "__builtin_lsx_vneg_b",
+ "llvm.loongarch.lsx.vneg.d" => "__builtin_lsx_vneg_d",
+ "llvm.loongarch.lsx.vneg.h" => "__builtin_lsx_vneg_h",
+ "llvm.loongarch.lsx.vneg.w" => "__builtin_lsx_vneg_w",
+ "llvm.loongarch.lsx.vnor.v" => "__builtin_lsx_vnor_v",
+ "llvm.loongarch.lsx.vnori.b" => "__builtin_lsx_vnori_b",
+ "llvm.loongarch.lsx.vor.v" => "__builtin_lsx_vor_v",
+ "llvm.loongarch.lsx.vori.b" => "__builtin_lsx_vori_b",
+ "llvm.loongarch.lsx.vorn.v" => "__builtin_lsx_vorn_v",
+ "llvm.loongarch.lsx.vpackev.b" => "__builtin_lsx_vpackev_b",
+ "llvm.loongarch.lsx.vpackev.d" => "__builtin_lsx_vpackev_d",
+ "llvm.loongarch.lsx.vpackev.h" => "__builtin_lsx_vpackev_h",
+ "llvm.loongarch.lsx.vpackev.w" => "__builtin_lsx_vpackev_w",
+ "llvm.loongarch.lsx.vpackod.b" => "__builtin_lsx_vpackod_b",
+ "llvm.loongarch.lsx.vpackod.d" => "__builtin_lsx_vpackod_d",
+ "llvm.loongarch.lsx.vpackod.h" => "__builtin_lsx_vpackod_h",
+ "llvm.loongarch.lsx.vpackod.w" => "__builtin_lsx_vpackod_w",
+ "llvm.loongarch.lsx.vpcnt.b" => "__builtin_lsx_vpcnt_b",
+ "llvm.loongarch.lsx.vpcnt.d" => "__builtin_lsx_vpcnt_d",
+ "llvm.loongarch.lsx.vpcnt.h" => "__builtin_lsx_vpcnt_h",
+ "llvm.loongarch.lsx.vpcnt.w" => "__builtin_lsx_vpcnt_w",
+ "llvm.loongarch.lsx.vpermi.w" => "__builtin_lsx_vpermi_w",
+ "llvm.loongarch.lsx.vpickev.b" => "__builtin_lsx_vpickev_b",
+ "llvm.loongarch.lsx.vpickev.d" => "__builtin_lsx_vpickev_d",
+ "llvm.loongarch.lsx.vpickev.h" => "__builtin_lsx_vpickev_h",
+ "llvm.loongarch.lsx.vpickev.w" => "__builtin_lsx_vpickev_w",
+ "llvm.loongarch.lsx.vpickod.b" => "__builtin_lsx_vpickod_b",
+ "llvm.loongarch.lsx.vpickod.d" => "__builtin_lsx_vpickod_d",
+ "llvm.loongarch.lsx.vpickod.h" => "__builtin_lsx_vpickod_h",
+ "llvm.loongarch.lsx.vpickod.w" => "__builtin_lsx_vpickod_w",
+ "llvm.loongarch.lsx.vpickve2gr.b" => "__builtin_lsx_vpickve2gr_b",
+ "llvm.loongarch.lsx.vpickve2gr.bu" => "__builtin_lsx_vpickve2gr_bu",
+ "llvm.loongarch.lsx.vpickve2gr.d" => "__builtin_lsx_vpickve2gr_d",
+ "llvm.loongarch.lsx.vpickve2gr.du" => "__builtin_lsx_vpickve2gr_du",
+ "llvm.loongarch.lsx.vpickve2gr.h" => "__builtin_lsx_vpickve2gr_h",
+ "llvm.loongarch.lsx.vpickve2gr.hu" => "__builtin_lsx_vpickve2gr_hu",
+ "llvm.loongarch.lsx.vpickve2gr.w" => "__builtin_lsx_vpickve2gr_w",
+ "llvm.loongarch.lsx.vpickve2gr.wu" => "__builtin_lsx_vpickve2gr_wu",
+ "llvm.loongarch.lsx.vreplgr2vr.b" => "__builtin_lsx_vreplgr2vr_b",
+ "llvm.loongarch.lsx.vreplgr2vr.d" => "__builtin_lsx_vreplgr2vr_d",
+ "llvm.loongarch.lsx.vreplgr2vr.h" => "__builtin_lsx_vreplgr2vr_h",
+ "llvm.loongarch.lsx.vreplgr2vr.w" => "__builtin_lsx_vreplgr2vr_w",
+ "llvm.loongarch.lsx.vrepli.b" => "__builtin_lsx_vrepli_b",
+ "llvm.loongarch.lsx.vrepli.d" => "__builtin_lsx_vrepli_d",
+ "llvm.loongarch.lsx.vrepli.h" => "__builtin_lsx_vrepli_h",
+ "llvm.loongarch.lsx.vrepli.w" => "__builtin_lsx_vrepli_w",
+ "llvm.loongarch.lsx.vreplve.b" => "__builtin_lsx_vreplve_b",
+ "llvm.loongarch.lsx.vreplve.d" => "__builtin_lsx_vreplve_d",
+ "llvm.loongarch.lsx.vreplve.h" => "__builtin_lsx_vreplve_h",
+ "llvm.loongarch.lsx.vreplve.w" => "__builtin_lsx_vreplve_w",
+ "llvm.loongarch.lsx.vreplvei.b" => "__builtin_lsx_vreplvei_b",
+ "llvm.loongarch.lsx.vreplvei.d" => "__builtin_lsx_vreplvei_d",
+ "llvm.loongarch.lsx.vreplvei.h" => "__builtin_lsx_vreplvei_h",
+ "llvm.loongarch.lsx.vreplvei.w" => "__builtin_lsx_vreplvei_w",
+ "llvm.loongarch.lsx.vrotr.b" => "__builtin_lsx_vrotr_b",
+ "llvm.loongarch.lsx.vrotr.d" => "__builtin_lsx_vrotr_d",
+ "llvm.loongarch.lsx.vrotr.h" => "__builtin_lsx_vrotr_h",
+ "llvm.loongarch.lsx.vrotr.w" => "__builtin_lsx_vrotr_w",
+ "llvm.loongarch.lsx.vrotri.b" => "__builtin_lsx_vrotri_b",
+ "llvm.loongarch.lsx.vrotri.d" => "__builtin_lsx_vrotri_d",
+ "llvm.loongarch.lsx.vrotri.h" => "__builtin_lsx_vrotri_h",
+ "llvm.loongarch.lsx.vrotri.w" => "__builtin_lsx_vrotri_w",
+ "llvm.loongarch.lsx.vsadd.b" => "__builtin_lsx_vsadd_b",
+ "llvm.loongarch.lsx.vsadd.bu" => "__builtin_lsx_vsadd_bu",
+ "llvm.loongarch.lsx.vsadd.d" => "__builtin_lsx_vsadd_d",
+ "llvm.loongarch.lsx.vsadd.du" => "__builtin_lsx_vsadd_du",
+ "llvm.loongarch.lsx.vsadd.h" => "__builtin_lsx_vsadd_h",
+ "llvm.loongarch.lsx.vsadd.hu" => "__builtin_lsx_vsadd_hu",
+ "llvm.loongarch.lsx.vsadd.w" => "__builtin_lsx_vsadd_w",
+ "llvm.loongarch.lsx.vsadd.wu" => "__builtin_lsx_vsadd_wu",
+ "llvm.loongarch.lsx.vsat.b" => "__builtin_lsx_vsat_b",
+ "llvm.loongarch.lsx.vsat.bu" => "__builtin_lsx_vsat_bu",
+ "llvm.loongarch.lsx.vsat.d" => "__builtin_lsx_vsat_d",
+ "llvm.loongarch.lsx.vsat.du" => "__builtin_lsx_vsat_du",
+ "llvm.loongarch.lsx.vsat.h" => "__builtin_lsx_vsat_h",
+ "llvm.loongarch.lsx.vsat.hu" => "__builtin_lsx_vsat_hu",
+ "llvm.loongarch.lsx.vsat.w" => "__builtin_lsx_vsat_w",
+ "llvm.loongarch.lsx.vsat.wu" => "__builtin_lsx_vsat_wu",
+ "llvm.loongarch.lsx.vseq.b" => "__builtin_lsx_vseq_b",
+ "llvm.loongarch.lsx.vseq.d" => "__builtin_lsx_vseq_d",
+ "llvm.loongarch.lsx.vseq.h" => "__builtin_lsx_vseq_h",
+ "llvm.loongarch.lsx.vseq.w" => "__builtin_lsx_vseq_w",
+ "llvm.loongarch.lsx.vseqi.b" => "__builtin_lsx_vseqi_b",
+ "llvm.loongarch.lsx.vseqi.d" => "__builtin_lsx_vseqi_d",
+ "llvm.loongarch.lsx.vseqi.h" => "__builtin_lsx_vseqi_h",
+ "llvm.loongarch.lsx.vseqi.w" => "__builtin_lsx_vseqi_w",
+ "llvm.loongarch.lsx.vshuf.b" => "__builtin_lsx_vshuf_b",
+ "llvm.loongarch.lsx.vshuf.d" => "__builtin_lsx_vshuf_d",
+ "llvm.loongarch.lsx.vshuf.h" => "__builtin_lsx_vshuf_h",
+ "llvm.loongarch.lsx.vshuf.w" => "__builtin_lsx_vshuf_w",
+ "llvm.loongarch.lsx.vshuf4i.b" => "__builtin_lsx_vshuf4i_b",
+ "llvm.loongarch.lsx.vshuf4i.d" => "__builtin_lsx_vshuf4i_d",
+ "llvm.loongarch.lsx.vshuf4i.h" => "__builtin_lsx_vshuf4i_h",
+ "llvm.loongarch.lsx.vshuf4i.w" => "__builtin_lsx_vshuf4i_w",
+ "llvm.loongarch.lsx.vsigncov.b" => "__builtin_lsx_vsigncov_b",
+ "llvm.loongarch.lsx.vsigncov.d" => "__builtin_lsx_vsigncov_d",
+ "llvm.loongarch.lsx.vsigncov.h" => "__builtin_lsx_vsigncov_h",
+ "llvm.loongarch.lsx.vsigncov.w" => "__builtin_lsx_vsigncov_w",
+ "llvm.loongarch.lsx.vsle.b" => "__builtin_lsx_vsle_b",
+ "llvm.loongarch.lsx.vsle.bu" => "__builtin_lsx_vsle_bu",
+ "llvm.loongarch.lsx.vsle.d" => "__builtin_lsx_vsle_d",
+ "llvm.loongarch.lsx.vsle.du" => "__builtin_lsx_vsle_du",
+ "llvm.loongarch.lsx.vsle.h" => "__builtin_lsx_vsle_h",
+ "llvm.loongarch.lsx.vsle.hu" => "__builtin_lsx_vsle_hu",
+ "llvm.loongarch.lsx.vsle.w" => "__builtin_lsx_vsle_w",
+ "llvm.loongarch.lsx.vsle.wu" => "__builtin_lsx_vsle_wu",
+ "llvm.loongarch.lsx.vslei.b" => "__builtin_lsx_vslei_b",
+ "llvm.loongarch.lsx.vslei.bu" => "__builtin_lsx_vslei_bu",
+ "llvm.loongarch.lsx.vslei.d" => "__builtin_lsx_vslei_d",
+ "llvm.loongarch.lsx.vslei.du" => "__builtin_lsx_vslei_du",
+ "llvm.loongarch.lsx.vslei.h" => "__builtin_lsx_vslei_h",
+ "llvm.loongarch.lsx.vslei.hu" => "__builtin_lsx_vslei_hu",
+ "llvm.loongarch.lsx.vslei.w" => "__builtin_lsx_vslei_w",
+ "llvm.loongarch.lsx.vslei.wu" => "__builtin_lsx_vslei_wu",
+ "llvm.loongarch.lsx.vsll.b" => "__builtin_lsx_vsll_b",
+ "llvm.loongarch.lsx.vsll.d" => "__builtin_lsx_vsll_d",
+ "llvm.loongarch.lsx.vsll.h" => "__builtin_lsx_vsll_h",
+ "llvm.loongarch.lsx.vsll.w" => "__builtin_lsx_vsll_w",
+ "llvm.loongarch.lsx.vslli.b" => "__builtin_lsx_vslli_b",
+ "llvm.loongarch.lsx.vslli.d" => "__builtin_lsx_vslli_d",
+ "llvm.loongarch.lsx.vslli.h" => "__builtin_lsx_vslli_h",
+ "llvm.loongarch.lsx.vslli.w" => "__builtin_lsx_vslli_w",
+ "llvm.loongarch.lsx.vsllwil.d.w" => "__builtin_lsx_vsllwil_d_w",
+ "llvm.loongarch.lsx.vsllwil.du.wu" => "__builtin_lsx_vsllwil_du_wu",
+ "llvm.loongarch.lsx.vsllwil.h.b" => "__builtin_lsx_vsllwil_h_b",
+ "llvm.loongarch.lsx.vsllwil.hu.bu" => "__builtin_lsx_vsllwil_hu_bu",
+ "llvm.loongarch.lsx.vsllwil.w.h" => "__builtin_lsx_vsllwil_w_h",
+ "llvm.loongarch.lsx.vsllwil.wu.hu" => "__builtin_lsx_vsllwil_wu_hu",
+ "llvm.loongarch.lsx.vslt.b" => "__builtin_lsx_vslt_b",
+ "llvm.loongarch.lsx.vslt.bu" => "__builtin_lsx_vslt_bu",
+ "llvm.loongarch.lsx.vslt.d" => "__builtin_lsx_vslt_d",
+ "llvm.loongarch.lsx.vslt.du" => "__builtin_lsx_vslt_du",
+ "llvm.loongarch.lsx.vslt.h" => "__builtin_lsx_vslt_h",
+ "llvm.loongarch.lsx.vslt.hu" => "__builtin_lsx_vslt_hu",
+ "llvm.loongarch.lsx.vslt.w" => "__builtin_lsx_vslt_w",
+ "llvm.loongarch.lsx.vslt.wu" => "__builtin_lsx_vslt_wu",
+ "llvm.loongarch.lsx.vslti.b" => "__builtin_lsx_vslti_b",
+ "llvm.loongarch.lsx.vslti.bu" => "__builtin_lsx_vslti_bu",
+ "llvm.loongarch.lsx.vslti.d" => "__builtin_lsx_vslti_d",
+ "llvm.loongarch.lsx.vslti.du" => "__builtin_lsx_vslti_du",
+ "llvm.loongarch.lsx.vslti.h" => "__builtin_lsx_vslti_h",
+ "llvm.loongarch.lsx.vslti.hu" => "__builtin_lsx_vslti_hu",
+ "llvm.loongarch.lsx.vslti.w" => "__builtin_lsx_vslti_w",
+ "llvm.loongarch.lsx.vslti.wu" => "__builtin_lsx_vslti_wu",
+ "llvm.loongarch.lsx.vsra.b" => "__builtin_lsx_vsra_b",
+ "llvm.loongarch.lsx.vsra.d" => "__builtin_lsx_vsra_d",
+ "llvm.loongarch.lsx.vsra.h" => "__builtin_lsx_vsra_h",
+ "llvm.loongarch.lsx.vsra.w" => "__builtin_lsx_vsra_w",
+ "llvm.loongarch.lsx.vsrai.b" => "__builtin_lsx_vsrai_b",
+ "llvm.loongarch.lsx.vsrai.d" => "__builtin_lsx_vsrai_d",
+ "llvm.loongarch.lsx.vsrai.h" => "__builtin_lsx_vsrai_h",
+ "llvm.loongarch.lsx.vsrai.w" => "__builtin_lsx_vsrai_w",
+ "llvm.loongarch.lsx.vsran.b.h" => "__builtin_lsx_vsran_b_h",
+ "llvm.loongarch.lsx.vsran.h.w" => "__builtin_lsx_vsran_h_w",
+ "llvm.loongarch.lsx.vsran.w.d" => "__builtin_lsx_vsran_w_d",
+ "llvm.loongarch.lsx.vsrani.b.h" => "__builtin_lsx_vsrani_b_h",
+ "llvm.loongarch.lsx.vsrani.d.q" => "__builtin_lsx_vsrani_d_q",
+ "llvm.loongarch.lsx.vsrani.h.w" => "__builtin_lsx_vsrani_h_w",
+ "llvm.loongarch.lsx.vsrani.w.d" => "__builtin_lsx_vsrani_w_d",
+ "llvm.loongarch.lsx.vsrar.b" => "__builtin_lsx_vsrar_b",
+ "llvm.loongarch.lsx.vsrar.d" => "__builtin_lsx_vsrar_d",
+ "llvm.loongarch.lsx.vsrar.h" => "__builtin_lsx_vsrar_h",
+ "llvm.loongarch.lsx.vsrar.w" => "__builtin_lsx_vsrar_w",
+ "llvm.loongarch.lsx.vsrari.b" => "__builtin_lsx_vsrari_b",
+ "llvm.loongarch.lsx.vsrari.d" => "__builtin_lsx_vsrari_d",
+ "llvm.loongarch.lsx.vsrari.h" => "__builtin_lsx_vsrari_h",
+ "llvm.loongarch.lsx.vsrari.w" => "__builtin_lsx_vsrari_w",
+ "llvm.loongarch.lsx.vsrarn.b.h" => "__builtin_lsx_vsrarn_b_h",
+ "llvm.loongarch.lsx.vsrarn.h.w" => "__builtin_lsx_vsrarn_h_w",
+ "llvm.loongarch.lsx.vsrarn.w.d" => "__builtin_lsx_vsrarn_w_d",
+ "llvm.loongarch.lsx.vsrarni.b.h" => "__builtin_lsx_vsrarni_b_h",
+ "llvm.loongarch.lsx.vsrarni.d.q" => "__builtin_lsx_vsrarni_d_q",
+ "llvm.loongarch.lsx.vsrarni.h.w" => "__builtin_lsx_vsrarni_h_w",
+ "llvm.loongarch.lsx.vsrarni.w.d" => "__builtin_lsx_vsrarni_w_d",
+ "llvm.loongarch.lsx.vsrl.b" => "__builtin_lsx_vsrl_b",
+ "llvm.loongarch.lsx.vsrl.d" => "__builtin_lsx_vsrl_d",
+ "llvm.loongarch.lsx.vsrl.h" => "__builtin_lsx_vsrl_h",
+ "llvm.loongarch.lsx.vsrl.w" => "__builtin_lsx_vsrl_w",
+ "llvm.loongarch.lsx.vsrli.b" => "__builtin_lsx_vsrli_b",
+ "llvm.loongarch.lsx.vsrli.d" => "__builtin_lsx_vsrli_d",
+ "llvm.loongarch.lsx.vsrli.h" => "__builtin_lsx_vsrli_h",
+ "llvm.loongarch.lsx.vsrli.w" => "__builtin_lsx_vsrli_w",
+ "llvm.loongarch.lsx.vsrln.b.h" => "__builtin_lsx_vsrln_b_h",
+ "llvm.loongarch.lsx.vsrln.h.w" => "__builtin_lsx_vsrln_h_w",
+ "llvm.loongarch.lsx.vsrln.w.d" => "__builtin_lsx_vsrln_w_d",
+ "llvm.loongarch.lsx.vsrlni.b.h" => "__builtin_lsx_vsrlni_b_h",
+ "llvm.loongarch.lsx.vsrlni.d.q" => "__builtin_lsx_vsrlni_d_q",
+ "llvm.loongarch.lsx.vsrlni.h.w" => "__builtin_lsx_vsrlni_h_w",
+ "llvm.loongarch.lsx.vsrlni.w.d" => "__builtin_lsx_vsrlni_w_d",
+ "llvm.loongarch.lsx.vsrlr.b" => "__builtin_lsx_vsrlr_b",
+ "llvm.loongarch.lsx.vsrlr.d" => "__builtin_lsx_vsrlr_d",
+ "llvm.loongarch.lsx.vsrlr.h" => "__builtin_lsx_vsrlr_h",
+ "llvm.loongarch.lsx.vsrlr.w" => "__builtin_lsx_vsrlr_w",
+ "llvm.loongarch.lsx.vsrlri.b" => "__builtin_lsx_vsrlri_b",
+ "llvm.loongarch.lsx.vsrlri.d" => "__builtin_lsx_vsrlri_d",
+ "llvm.loongarch.lsx.vsrlri.h" => "__builtin_lsx_vsrlri_h",
+ "llvm.loongarch.lsx.vsrlri.w" => "__builtin_lsx_vsrlri_w",
+ "llvm.loongarch.lsx.vsrlrn.b.h" => "__builtin_lsx_vsrlrn_b_h",
+ "llvm.loongarch.lsx.vsrlrn.h.w" => "__builtin_lsx_vsrlrn_h_w",
+ "llvm.loongarch.lsx.vsrlrn.w.d" => "__builtin_lsx_vsrlrn_w_d",
+ "llvm.loongarch.lsx.vsrlrni.b.h" => "__builtin_lsx_vsrlrni_b_h",
+ "llvm.loongarch.lsx.vsrlrni.d.q" => "__builtin_lsx_vsrlrni_d_q",
+ "llvm.loongarch.lsx.vsrlrni.h.w" => "__builtin_lsx_vsrlrni_h_w",
+ "llvm.loongarch.lsx.vsrlrni.w.d" => "__builtin_lsx_vsrlrni_w_d",
+ "llvm.loongarch.lsx.vssran.b.h" => "__builtin_lsx_vssran_b_h",
+ "llvm.loongarch.lsx.vssran.bu.h" => "__builtin_lsx_vssran_bu_h",
+ "llvm.loongarch.lsx.vssran.h.w" => "__builtin_lsx_vssran_h_w",
+ "llvm.loongarch.lsx.vssran.hu.w" => "__builtin_lsx_vssran_hu_w",
+ "llvm.loongarch.lsx.vssran.w.d" => "__builtin_lsx_vssran_w_d",
+ "llvm.loongarch.lsx.vssran.wu.d" => "__builtin_lsx_vssran_wu_d",
+ "llvm.loongarch.lsx.vssrani.b.h" => "__builtin_lsx_vssrani_b_h",
+ "llvm.loongarch.lsx.vssrani.bu.h" => "__builtin_lsx_vssrani_bu_h",
+ "llvm.loongarch.lsx.vssrani.d.q" => "__builtin_lsx_vssrani_d_q",
+ "llvm.loongarch.lsx.vssrani.du.q" => "__builtin_lsx_vssrani_du_q",
+ "llvm.loongarch.lsx.vssrani.h.w" => "__builtin_lsx_vssrani_h_w",
+ "llvm.loongarch.lsx.vssrani.hu.w" => "__builtin_lsx_vssrani_hu_w",
+ "llvm.loongarch.lsx.vssrani.w.d" => "__builtin_lsx_vssrani_w_d",
+ "llvm.loongarch.lsx.vssrani.wu.d" => "__builtin_lsx_vssrani_wu_d",
+ "llvm.loongarch.lsx.vssrarn.b.h" => "__builtin_lsx_vssrarn_b_h",
+ "llvm.loongarch.lsx.vssrarn.bu.h" => "__builtin_lsx_vssrarn_bu_h",
+ "llvm.loongarch.lsx.vssrarn.h.w" => "__builtin_lsx_vssrarn_h_w",
+ "llvm.loongarch.lsx.vssrarn.hu.w" => "__builtin_lsx_vssrarn_hu_w",
+ "llvm.loongarch.lsx.vssrarn.w.d" => "__builtin_lsx_vssrarn_w_d",
+ "llvm.loongarch.lsx.vssrarn.wu.d" => "__builtin_lsx_vssrarn_wu_d",
+ "llvm.loongarch.lsx.vssrarni.b.h" => "__builtin_lsx_vssrarni_b_h",
+ "llvm.loongarch.lsx.vssrarni.bu.h" => "__builtin_lsx_vssrarni_bu_h",
+ "llvm.loongarch.lsx.vssrarni.d.q" => "__builtin_lsx_vssrarni_d_q",
+ "llvm.loongarch.lsx.vssrarni.du.q" => "__builtin_lsx_vssrarni_du_q",
+ "llvm.loongarch.lsx.vssrarni.h.w" => "__builtin_lsx_vssrarni_h_w",
+ "llvm.loongarch.lsx.vssrarni.hu.w" => "__builtin_lsx_vssrarni_hu_w",
+ "llvm.loongarch.lsx.vssrarni.w.d" => "__builtin_lsx_vssrarni_w_d",
+ "llvm.loongarch.lsx.vssrarni.wu.d" => "__builtin_lsx_vssrarni_wu_d",
+ "llvm.loongarch.lsx.vssrln.b.h" => "__builtin_lsx_vssrln_b_h",
+ "llvm.loongarch.lsx.vssrln.bu.h" => "__builtin_lsx_vssrln_bu_h",
+ "llvm.loongarch.lsx.vssrln.h.w" => "__builtin_lsx_vssrln_h_w",
+ "llvm.loongarch.lsx.vssrln.hu.w" => "__builtin_lsx_vssrln_hu_w",
+ "llvm.loongarch.lsx.vssrln.w.d" => "__builtin_lsx_vssrln_w_d",
+ "llvm.loongarch.lsx.vssrln.wu.d" => "__builtin_lsx_vssrln_wu_d",
+ "llvm.loongarch.lsx.vssrlni.b.h" => "__builtin_lsx_vssrlni_b_h",
+ "llvm.loongarch.lsx.vssrlni.bu.h" => "__builtin_lsx_vssrlni_bu_h",
+ "llvm.loongarch.lsx.vssrlni.d.q" => "__builtin_lsx_vssrlni_d_q",
+ "llvm.loongarch.lsx.vssrlni.du.q" => "__builtin_lsx_vssrlni_du_q",
+ "llvm.loongarch.lsx.vssrlni.h.w" => "__builtin_lsx_vssrlni_h_w",
+ "llvm.loongarch.lsx.vssrlni.hu.w" => "__builtin_lsx_vssrlni_hu_w",
+ "llvm.loongarch.lsx.vssrlni.w.d" => "__builtin_lsx_vssrlni_w_d",
+ "llvm.loongarch.lsx.vssrlni.wu.d" => "__builtin_lsx_vssrlni_wu_d",
+ "llvm.loongarch.lsx.vssrlrn.b.h" => "__builtin_lsx_vssrlrn_b_h",
+ "llvm.loongarch.lsx.vssrlrn.bu.h" => "__builtin_lsx_vssrlrn_bu_h",
+ "llvm.loongarch.lsx.vssrlrn.h.w" => "__builtin_lsx_vssrlrn_h_w",
+ "llvm.loongarch.lsx.vssrlrn.hu.w" => "__builtin_lsx_vssrlrn_hu_w",
+ "llvm.loongarch.lsx.vssrlrn.w.d" => "__builtin_lsx_vssrlrn_w_d",
+ "llvm.loongarch.lsx.vssrlrn.wu.d" => "__builtin_lsx_vssrlrn_wu_d",
+ "llvm.loongarch.lsx.vssrlrni.b.h" => "__builtin_lsx_vssrlrni_b_h",
+ "llvm.loongarch.lsx.vssrlrni.bu.h" => "__builtin_lsx_vssrlrni_bu_h",
+ "llvm.loongarch.lsx.vssrlrni.d.q" => "__builtin_lsx_vssrlrni_d_q",
+ "llvm.loongarch.lsx.vssrlrni.du.q" => "__builtin_lsx_vssrlrni_du_q",
+ "llvm.loongarch.lsx.vssrlrni.h.w" => "__builtin_lsx_vssrlrni_h_w",
+ "llvm.loongarch.lsx.vssrlrni.hu.w" => "__builtin_lsx_vssrlrni_hu_w",
+ "llvm.loongarch.lsx.vssrlrni.w.d" => "__builtin_lsx_vssrlrni_w_d",
+ "llvm.loongarch.lsx.vssrlrni.wu.d" => "__builtin_lsx_vssrlrni_wu_d",
+ "llvm.loongarch.lsx.vssub.b" => "__builtin_lsx_vssub_b",
+ "llvm.loongarch.lsx.vssub.bu" => "__builtin_lsx_vssub_bu",
+ "llvm.loongarch.lsx.vssub.d" => "__builtin_lsx_vssub_d",
+ "llvm.loongarch.lsx.vssub.du" => "__builtin_lsx_vssub_du",
+ "llvm.loongarch.lsx.vssub.h" => "__builtin_lsx_vssub_h",
+ "llvm.loongarch.lsx.vssub.hu" => "__builtin_lsx_vssub_hu",
+ "llvm.loongarch.lsx.vssub.w" => "__builtin_lsx_vssub_w",
+ "llvm.loongarch.lsx.vssub.wu" => "__builtin_lsx_vssub_wu",
+ "llvm.loongarch.lsx.vst" => "__builtin_lsx_vst",
+ "llvm.loongarch.lsx.vstelm.b" => "__builtin_lsx_vstelm_b",
+ "llvm.loongarch.lsx.vstelm.d" => "__builtin_lsx_vstelm_d",
+ "llvm.loongarch.lsx.vstelm.h" => "__builtin_lsx_vstelm_h",
+ "llvm.loongarch.lsx.vstelm.w" => "__builtin_lsx_vstelm_w",
+ "llvm.loongarch.lsx.vstx" => "__builtin_lsx_vstx",
+ "llvm.loongarch.lsx.vsub.b" => "__builtin_lsx_vsub_b",
+ "llvm.loongarch.lsx.vsub.d" => "__builtin_lsx_vsub_d",
+ "llvm.loongarch.lsx.vsub.h" => "__builtin_lsx_vsub_h",
+ "llvm.loongarch.lsx.vsub.q" => "__builtin_lsx_vsub_q",
+ "llvm.loongarch.lsx.vsub.w" => "__builtin_lsx_vsub_w",
+ "llvm.loongarch.lsx.vsubi.bu" => "__builtin_lsx_vsubi_bu",
+ "llvm.loongarch.lsx.vsubi.du" => "__builtin_lsx_vsubi_du",
+ "llvm.loongarch.lsx.vsubi.hu" => "__builtin_lsx_vsubi_hu",
+ "llvm.loongarch.lsx.vsubi.wu" => "__builtin_lsx_vsubi_wu",
+ "llvm.loongarch.lsx.vsubwev.d.w" => "__builtin_lsx_vsubwev_d_w",
+ "llvm.loongarch.lsx.vsubwev.d.wu" => "__builtin_lsx_vsubwev_d_wu",
+ "llvm.loongarch.lsx.vsubwev.h.b" => "__builtin_lsx_vsubwev_h_b",
+ "llvm.loongarch.lsx.vsubwev.h.bu" => "__builtin_lsx_vsubwev_h_bu",
+ "llvm.loongarch.lsx.vsubwev.q.d" => "__builtin_lsx_vsubwev_q_d",
+ "llvm.loongarch.lsx.vsubwev.q.du" => "__builtin_lsx_vsubwev_q_du",
+ "llvm.loongarch.lsx.vsubwev.w.h" => "__builtin_lsx_vsubwev_w_h",
+ "llvm.loongarch.lsx.vsubwev.w.hu" => "__builtin_lsx_vsubwev_w_hu",
+ "llvm.loongarch.lsx.vsubwod.d.w" => "__builtin_lsx_vsubwod_d_w",
+ "llvm.loongarch.lsx.vsubwod.d.wu" => "__builtin_lsx_vsubwod_d_wu",
+ "llvm.loongarch.lsx.vsubwod.h.b" => "__builtin_lsx_vsubwod_h_b",
+ "llvm.loongarch.lsx.vsubwod.h.bu" => "__builtin_lsx_vsubwod_h_bu",
+ "llvm.loongarch.lsx.vsubwod.q.d" => "__builtin_lsx_vsubwod_q_d",
+ "llvm.loongarch.lsx.vsubwod.q.du" => "__builtin_lsx_vsubwod_q_du",
+ "llvm.loongarch.lsx.vsubwod.w.h" => "__builtin_lsx_vsubwod_w_h",
+ "llvm.loongarch.lsx.vsubwod.w.hu" => "__builtin_lsx_vsubwod_w_hu",
+ "llvm.loongarch.lsx.vxor.v" => "__builtin_lsx_vxor_v",
+ "llvm.loongarch.lsx.vxori.b" => "__builtin_lsx_vxori_b",
+ "llvm.loongarch.movfcsr2gr" => "__builtin_loongarch_movfcsr2gr",
+ "llvm.loongarch.movgr2fcsr" => "__builtin_loongarch_movgr2fcsr",
+ "llvm.loongarch.syscall" => "__builtin_loongarch_syscall",
// mips
"llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph",
"llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb",
@@ -2954,6 +4442,8 @@ match name {
"llvm.nvvm.barrier0.and" => "__nvvm_bar0_and",
"llvm.nvvm.barrier0.or" => "__nvvm_bar0_or",
"llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc",
+ "llvm.nvvm.bf2h.rn" => "__nvvm_bf2h_rn",
+ "llvm.nvvm.bf2h.rn.ftz" => "__nvvm_bf2h_rn_ftz",
"llvm.nvvm.bitcast.d2ll" => "__nvvm_bitcast_d2ll",
"llvm.nvvm.bitcast.f2i" => "__nvvm_bitcast_f2i",
"llvm.nvvm.bitcast.i2f" => "__nvvm_bitcast_i2f",
@@ -3016,8 +4506,6 @@ match name {
"llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f",
"llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d",
"llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f",
- "llvm.nvvm.ex2.approx.f16" => "__nvvm_ex2_approx_f16",
- "llvm.nvvm.ex2.approx.f16x2" => "__nvvm_ex2_approx_f16x2",
"llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f",
"llvm.nvvm.f2bf16.rn" => "__nvvm_f2bf16_rn",
"llvm.nvvm.f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu",
@@ -3079,11 +4567,17 @@ match name {
"llvm.nvvm.fma.rn.bf16x2" => "__nvvm_fma_rn_bf16x2",
"llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d",
"llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f",
- "llvm.nvvm.fma.rn.f16" => "__nvvm_fma_rn_f16",
- "llvm.nvvm.fma.rn.f16x2" => "__nvvm_fma_rn_f16x2",
+ "llvm.nvvm.fma.rn.ftz.bf16" => "__nvvm_fma_rn_ftz_bf16",
+ "llvm.nvvm.fma.rn.ftz.bf16x2" => "__nvvm_fma_rn_ftz_bf16x2",
"llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f",
+ "llvm.nvvm.fma.rn.ftz.relu.bf16" => "__nvvm_fma_rn_ftz_relu_bf16",
+ "llvm.nvvm.fma.rn.ftz.relu.bf16x2" => "__nvvm_fma_rn_ftz_relu_bf16x2",
+ "llvm.nvvm.fma.rn.ftz.sat.bf16" => "__nvvm_fma_rn_ftz_sat_bf16",
+ "llvm.nvvm.fma.rn.ftz.sat.bf16x2" => "__nvvm_fma_rn_ftz_sat_bf16x2",
"llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16",
"llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2",
+ "llvm.nvvm.fma.rn.sat.bf16" => "__nvvm_fma_rn_sat_bf16",
+ "llvm.nvvm.fma.rn.sat.bf16x2" => "__nvvm_fma_rn_sat_bf16x2",
"llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d",
"llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f",
"llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f",
@@ -3094,11 +4588,17 @@ match name {
"llvm.nvvm.fmax.bf16x2" => "__nvvm_fmax_bf16x2",
"llvm.nvvm.fmax.d" => "__nvvm_fmax_d",
"llvm.nvvm.fmax.f" => "__nvvm_fmax_f",
- "llvm.nvvm.fmax.f16" => "__nvvm_fmax_f16",
- "llvm.nvvm.fmax.f16x2" => "__nvvm_fmax_f16x2",
+ "llvm.nvvm.fmax.ftz.bf16" => "__nvvm_fmax_ftz_bf16",
+ "llvm.nvvm.fmax.ftz.bf16x2" => "__nvvm_fmax_ftz_bf16x2",
"llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f",
+ "llvm.nvvm.fmax.ftz.nan.bf16" => "__nvvm_fmax_ftz_nan_bf16",
+ "llvm.nvvm.fmax.ftz.nan.bf16x2" => "__nvvm_fmax_ftz_nan_bf16x2",
"llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f",
+ "llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16" => "__nvvm_fmax_ftz_nan_xorsign_abs_bf16",
+ "llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_bf16x2",
"llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f",
+ "llvm.nvvm.fmax.ftz.xorsign.abs.bf16" => "__nvvm_fmax_ftz_xorsign_abs_bf16",
+ "llvm.nvvm.fmax.ftz.xorsign.abs.bf16x2" => "__nvvm_fmax_ftz_xorsign_abs_bf16x2",
"llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f",
"llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16",
"llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2",
@@ -3113,11 +4613,17 @@ match name {
"llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2",
"llvm.nvvm.fmin.d" => "__nvvm_fmin_d",
"llvm.nvvm.fmin.f" => "__nvvm_fmin_f",
- "llvm.nvvm.fmin.f16" => "__nvvm_fmin_f16",
- "llvm.nvvm.fmin.f16x2" => "__nvvm_fmin_f16x2",
+ "llvm.nvvm.fmin.ftz.bf16" => "__nvvm_fmin_ftz_bf16",
+ "llvm.nvvm.fmin.ftz.bf16x2" => "__nvvm_fmin_ftz_bf16x2",
"llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f",
+ "llvm.nvvm.fmin.ftz.nan.bf16" => "__nvvm_fmin_ftz_nan_bf16",
+ "llvm.nvvm.fmin.ftz.nan.bf16x2" => "__nvvm_fmin_ftz_nan_bf16x2",
"llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f",
+ "llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16" => "__nvvm_fmin_ftz_nan_xorsign_abs_bf16",
+ "llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_bf16x2",
"llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f",
+ "llvm.nvvm.fmin.ftz.xorsign.abs.bf16" => "__nvvm_fmin_ftz_xorsign_abs_bf16",
+ "llvm.nvvm.fmin.ftz.xorsign.abs.bf16x2" => "__nvvm_fmin_ftz_xorsign_abs_bf16x2",
"llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f",
"llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16",
"llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2",
@@ -3979,6 +5485,7 @@ match name {
"llvm.ppc.maddhd" => "__builtin_ppc_maddhd",
"llvm.ppc.maddhdu" => "__builtin_ppc_maddhdu",
"llvm.ppc.maddld" => "__builtin_ppc_maddld",
+ "llvm.ppc.mffsl" => "__builtin_ppc_mffsl",
"llvm.ppc.mfmsr" => "__builtin_ppc_mfmsr",
"llvm.ppc.mftbu" => "__builtin_ppc_mftbu",
"llvm.ppc.mtfsb0" => "__builtin_ppc_mtfsb0",
@@ -4213,6 +5720,28 @@ match name {
"llvm.r600.read.tgid.x" => "__builtin_r600_read_tgid_x",
"llvm.r600.read.tgid.y" => "__builtin_r600_read_tgid_y",
"llvm.r600.read.tgid.z" => "__builtin_r600_read_tgid_z",
+ // riscv
+ "llvm.riscv.aes32dsi" => "__builtin_riscv_aes32dsi",
+ "llvm.riscv.aes32dsmi" => "__builtin_riscv_aes32dsmi",
+ "llvm.riscv.aes32esi" => "__builtin_riscv_aes32esi",
+ "llvm.riscv.aes32esmi" => "__builtin_riscv_aes32esmi",
+ "llvm.riscv.aes64ds" => "__builtin_riscv_aes64ds",
+ "llvm.riscv.aes64dsm" => "__builtin_riscv_aes64dsm",
+ "llvm.riscv.aes64es" => "__builtin_riscv_aes64es",
+ "llvm.riscv.aes64esm" => "__builtin_riscv_aes64esm",
+ "llvm.riscv.aes64im" => "__builtin_riscv_aes64im",
+ "llvm.riscv.aes64ks1i" => "__builtin_riscv_aes64ks1i",
+ "llvm.riscv.aes64ks2" => "__builtin_riscv_aes64ks2",
+ "llvm.riscv.sha512sig0" => "__builtin_riscv_sha512sig0",
+ "llvm.riscv.sha512sig0h" => "__builtin_riscv_sha512sig0h",
+ "llvm.riscv.sha512sig0l" => "__builtin_riscv_sha512sig0l",
+ "llvm.riscv.sha512sig1" => "__builtin_riscv_sha512sig1",
+ "llvm.riscv.sha512sig1h" => "__builtin_riscv_sha512sig1h",
+ "llvm.riscv.sha512sig1l" => "__builtin_riscv_sha512sig1l",
+ "llvm.riscv.sha512sum0" => "__builtin_riscv_sha512sum0",
+ "llvm.riscv.sha512sum0r" => "__builtin_riscv_sha512sum0r",
+ "llvm.riscv.sha512sum1" => "__builtin_riscv_sha512sum1",
+ "llvm.riscv.sha512sum1r" => "__builtin_riscv_sha512sum1r",
// s390
"llvm.s390.efpc" => "__builtin_s390_efpc",
"llvm.s390.etnd" => "__builtin_tx_nesting_depth",
@@ -5912,6 +7441,18 @@ match name {
"llvm.x86.avx2.vpdpbuud.256" => "__builtin_ia32_vpdpbuud256",
"llvm.x86.avx2.vpdpbuuds.128" => "__builtin_ia32_vpdpbuuds128",
"llvm.x86.avx2.vpdpbuuds.256" => "__builtin_ia32_vpdpbuuds256",
+ "llvm.x86.avx2.vpdpwsud.128" => "__builtin_ia32_vpdpwsud128",
+ "llvm.x86.avx2.vpdpwsud.256" => "__builtin_ia32_vpdpwsud256",
+ "llvm.x86.avx2.vpdpwsuds.128" => "__builtin_ia32_vpdpwsuds128",
+ "llvm.x86.avx2.vpdpwsuds.256" => "__builtin_ia32_vpdpwsuds256",
+ "llvm.x86.avx2.vpdpwusd.128" => "__builtin_ia32_vpdpwusd128",
+ "llvm.x86.avx2.vpdpwusd.256" => "__builtin_ia32_vpdpwusd256",
+ "llvm.x86.avx2.vpdpwusds.128" => "__builtin_ia32_vpdpwusds128",
+ "llvm.x86.avx2.vpdpwusds.256" => "__builtin_ia32_vpdpwusds256",
+ "llvm.x86.avx2.vpdpwuud.128" => "__builtin_ia32_vpdpwuud128",
+ "llvm.x86.avx2.vpdpwuud.256" => "__builtin_ia32_vpdpwuud256",
+ "llvm.x86.avx2.vpdpwuuds.128" => "__builtin_ia32_vpdpwuuds128",
+ "llvm.x86.avx2.vpdpwuuds.256" => "__builtin_ia32_vpdpwuuds256",
"llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256",
"llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512",
"llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512",
@@ -7882,6 +9423,8 @@ match name {
"llvm.x86.tpause" => "__builtin_ia32_tpause",
"llvm.x86.umonitor" => "__builtin_ia32_umonitor",
"llvm.x86.umwait" => "__builtin_ia32_umwait",
+ "llvm.x86.urdmsr" => "__builtin_ia32_urdmsr",
+ "llvm.x86.uwrmsr" => "__builtin_ia32_uwrmsr",
"llvm.x86.vbcstnebf162ps128" => "__builtin_ia32_vbcstnebf162ps128",
"llvm.x86.vbcstnebf162ps256" => "__builtin_ia32_vbcstnebf162ps256",
"llvm.x86.vbcstnesh2ps128" => "__builtin_ia32_vbcstnesh2ps128",
@@ -7909,6 +9452,16 @@ match name {
"llvm.x86.vgf2p8mulb.128" => "__builtin_ia32_vgf2p8mulb_v16qi",
"llvm.x86.vgf2p8mulb.256" => "__builtin_ia32_vgf2p8mulb_v32qi",
"llvm.x86.vgf2p8mulb.512" => "__builtin_ia32_vgf2p8mulb_v64qi",
+ "llvm.x86.vsha512msg1" => "__builtin_ia32_vsha512msg1",
+ "llvm.x86.vsha512msg2" => "__builtin_ia32_vsha512msg2",
+ "llvm.x86.vsha512rnds2" => "__builtin_ia32_vsha512rnds2",
+ "llvm.x86.vsm3msg1" => "__builtin_ia32_vsm3msg1",
+ "llvm.x86.vsm3msg2" => "__builtin_ia32_vsm3msg2",
+ "llvm.x86.vsm3rnds2" => "__builtin_ia32_vsm3rnds2",
+ "llvm.x86.vsm4key4128" => "__builtin_ia32_vsm4key4128",
+ "llvm.x86.vsm4key4256" => "__builtin_ia32_vsm4key4256",
+ "llvm.x86.vsm4rnds4128" => "__builtin_ia32_vsm4rnds4128",
+ "llvm.x86.vsm4rnds4256" => "__builtin_ia32_vsm4rnds4256",
"llvm.x86.wbinvd" => "__builtin_ia32_wbinvd",
"llvm.x86.wbnoinvd" => "__builtin_ia32_wbnoinvd",
"llvm.x86.wrfsbase.32" => "__builtin_ia32_wrfsbase32",
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
index f28348380..35eb4a110 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -236,11 +236,17 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
let arg2 = builder.context.new_cast(None, arg2, arg2_type);
args = vec![new_args[0], arg2].into();
},
+            // These builtins are passed one more argument than they need.
"__builtin_prefetch" => {
let mut new_args = args.to_vec();
new_args.pop();
args = new_args.into();
},
+ // The GCC version returns one value of the tuple through a pointer.
+ "__builtin_ia32_rdrand64_step" => {
+ let arg = builder.current_func().new_local(None, builder.ulonglong_type, "return_rdrand_arg");
+ args = vec![arg.get_address(None)].into();
+ },
_ => (),
}
}
@@ -361,6 +367,19 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc,
// builtin twice, we overwrite the return value with a dummy value.
return_value = builder.context.new_rvalue_zero(builder.int_type);
},
+ "__builtin_ia32_rdrand64_step" => {
+ let random_number = args[0].dereference(None).to_rvalue();
+ let success_variable = builder.current_func().new_local(None, return_value.get_type(), "success");
+ builder.llbb().add_assignment(None, success_variable, return_value);
+
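+                // Rebuild the (random_number, success) pair that the LLVM intrinsic returns by value.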
+ let field1 = builder.context.new_field(None, random_number.get_type(), "random_number");
+ let field2 = builder.context.new_field(None, return_value.get_type(), "success");
+ let struct_type = builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]);
+ return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[
+ random_number,
+ success_variable.to_rvalue(),
+ ]);
+ },
_ => (),
}
@@ -413,15 +432,21 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
#[cfg(not(feature="master"))]
pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
- match name {
- "llvm.x86.xgetbv" | "llvm.x86.sse2.pause" => {
- let gcc_name = "__builtin_trap";
- let func = cx.context.get_builtin_function(gcc_name);
- cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
- return func;
- },
- _ => unimplemented!("unsupported LLVM intrinsic {}", name),
- }
+ let gcc_name =
+ match name {
+ "llvm.x86.sse2.pause" => {
+ // NOTE: pause is only a hint, so we use a dummy built-in because target built-ins
+ // are not supported in libgccjit 12.
+ "__builtin_inff"
+ },
+ "llvm.x86.xgetbv" => {
+ "__builtin_trap"
+ },
+ _ => unimplemented!("unsupported LLVM intrinsic {}", name),
+ };
+ let func = cx.context.get_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ return func;
}
#[cfg(feature="master")]
@@ -613,6 +638,7 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
"llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi",
"llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd3",
"llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss3",
+ "llvm.x86.rdrand.64" => "__builtin_ia32_rdrand64_step",
// The above doc points to unknown builtins for the following, so override them:
"llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si",
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 68a087a1d..ba1cae03f 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -4,15 +4,17 @@ mod simd;
#[cfg(feature="master")]
use std::iter;
-use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
+#[cfg(feature="master")]
+use gccjit::FunctionType;
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::IntPredicate;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
-use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
+use rustc_codegen_ssa::traits::{ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
#[cfg(feature="master")]
-use rustc_codegen_ssa::traits::MiscMethods;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, MiscMethods};
use rustc_codegen_ssa::errors::InvalidMonomorphization;
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty};
@@ -143,11 +145,15 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
- let mut ptr = args[0].immediate();
- if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
- ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
- }
- let load = self.volatile_load(ptr.get_type(), ptr);
+ let ptr = args[0].immediate();
+ let load =
+ if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
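+                    // The return value is ABI-cast, so load with the cast type rather than the pointee type.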
+ let gcc_ty = ty.gcc_type(self);
+ self.volatile_load(gcc_ty, ptr)
+ }
+ else {
+ self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
+ };
// TODO(antoyo): set alignment.
self.to_immediate(load, self.layout_of(tp_ty))
}
@@ -819,75 +825,58 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
value
};
- if value_type.is_u128(&self.cx) {
- // TODO(antoyo): implement in the normal algorithm below to have a more efficient
- // implementation (that does not require a call to __popcountdi2).
- let popcount = self.context.get_builtin_function("__builtin_popcountll");
+ // only break apart 128-bit ints if they're not natively supported
+ // TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
+ if value_type.is_u128(&self.cx) && !self.cx.supports_128bit_integers {
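+            // popcount(x) == popcount(high 64 bits) + popcount(low 64 bits).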
let sixty_four = self.gcc_int(value_type, 64);
let right_shift = self.gcc_lshr(value, sixty_four);
let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
- let high = self.context.new_call(None, popcount, &[high]);
+ let high = self.pop_count(high);
let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
- let low = self.context.new_call(None, popcount, &[low]);
+ let low = self.pop_count(low);
let res = high + low;
return self.gcc_int_cast(res, result_type);
}
- // First step.
- let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
- let left = value & mask;
- let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
- let right = shifted & mask;
- let value = left + right;
-
- // Second step.
- let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
- let left = value & mask;
- let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
- let right = shifted & mask;
- let value = left + right;
-
- // Third step.
- let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
- let left = value & mask;
- let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
- let right = shifted & mask;
- let value = left + right;
-
- if value_type.is_u8(&self.cx) {
- return self.context.new_cast(None, value, result_type);
- }
-
- // Fourth step.
- let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
- let left = value & mask;
- let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
- let right = shifted & mask;
- let value = left + right;
-
- if value_type.is_u16(&self.cx) {
- return self.context.new_cast(None, value, result_type);
- }
-
- // Fifth step.
- let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
- let left = value & mask;
- let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
- let right = shifted & mask;
- let value = left + right;
-
- if value_type.is_u32(&self.cx) {
- return self.context.new_cast(None, value, result_type);
- }
-
- // Sixth step.
- let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
- let left = value & mask;
- let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
- let right = shifted & mask;
- let value = left + right;
-
- self.context.new_cast(None, value, result_type)
+        // Use Wegner's algorithm for population count; gcc seems to play better with it:
+ // for (int counter = 0; value != 0; counter++) {
+ // value &= value - 1;
+ // }
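+        // e.g. 0b1101_0000 -> 0b1100_0000 -> 0b1000_0000 -> 0: three iterations, so the count is 3.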
+ let func = self.current_func.borrow().expect("func");
+ let loop_head = func.new_block("head");
+ let loop_body = func.new_block("body");
+ let loop_tail = func.new_block("tail");
+
+ let counter_type = self.int_type;
+ let counter = self.current_func().new_local(None, counter_type, "popcount_counter");
+ let val = self.current_func().new_local(None, value_type, "popcount_value");
+ let zero = self.gcc_zero(counter_type);
+ self.llbb().add_assignment(None, counter, zero);
+ self.llbb().add_assignment(None, val, value);
+ self.br(loop_head);
+
+ // check if value isn't zero
+ self.switch_to_block(loop_head);
+ let zero = self.gcc_zero(value_type);
+ let cond = self.gcc_icmp(IntPredicate::IntNE, val.to_rvalue(), zero);
+ self.cond_br(cond, loop_body, loop_tail);
+
+ // val &= val - 1;
+ self.switch_to_block(loop_body);
+ let one = self.gcc_int(value_type, 1);
+ let sub = self.gcc_sub(val.to_rvalue(), one);
+ let op = self.gcc_and(val.to_rvalue(), sub);
+ loop_body.add_assignment(None, val, op);
+
+ // counter += 1
+ let one = self.gcc_int(counter_type, 1);
+ let op = self.gcc_add(counter.to_rvalue(), one);
+ loop_body.add_assignment(None, counter, op);
+ self.br(loop_head);
+
+ // end of loop
+ self.switch_to_block(loop_tail);
+ self.gcc_int_cast(counter.to_rvalue(), result_type)
}
// Algorithm from: https://blog.regehr.org/archives/1063
@@ -947,15 +936,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
128 => "__rust_i128_addo",
_ => unreachable!(),
};
- let param_a = self.context.new_parameter(None, result_type, "a");
- let param_b = self.context.new_parameter(None, result_type, "b");
- let result_field = self.context.new_field(None, result_type, "result");
- let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
- let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
- let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
- let result = self.context.new_call(None, func, &[lhs, rhs]);
- let overflow = result.access_field(None, overflow_field);
- let int_result = result.access_field(None, result_field);
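+            // `operation_with_overflow` declares the extern helper and unpacks its { result, overflow } return struct.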
+ let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
self.llbb().add_assignment(None, res, int_result);
overflow
};
@@ -1017,15 +998,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
128 => "__rust_i128_subo",
_ => unreachable!(),
};
- let param_a = self.context.new_parameter(None, result_type, "a");
- let param_b = self.context.new_parameter(None, result_type, "b");
- let result_field = self.context.new_field(None, result_type, "result");
- let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
- let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
- let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
- let result = self.context.new_call(None, func, &[lhs, rhs]);
- let overflow = result.access_field(None, overflow_field);
- let int_result = result.access_field(None, result_field);
+ let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
self.llbb().add_assignment(None, res, int_result);
overflow
};
@@ -1197,7 +1170,7 @@ fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut
#[cfg(feature="master")]
fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) {
let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
- let (typ, _, _, _) = fn_abi.gcc_type(cx);
+ let return_type = fn_abi.gcc_type(cx).return_type;
// FIXME(eddyb) find a nicer way to do this.
cx.linkage.set(FunctionType::Internal);
let func = cx.declare_fn(name, fn_abi);
@@ -1207,5 +1180,5 @@ fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig
let block = Builder::append_block(cx, func_val, "entry-block");
let bx = Builder::build(cx, block);
codegen(bx);
- (typ, func)
+ (return_type, func)
}
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index ce7e31682..a530fc994 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -2,10 +2,19 @@
* TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
* TODO(antoyo): support #[inline] attributes.
* TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html).
+ * For Thin LTO, this might be helpful:
+ * In GCC 4.6, -fwhopr was removed and its behavior became the default with -flto. The non-whopr path can still be run via -flto-partition=none.
+ *
+ * Some optimizations enabled by rustc's LTO but missing here may be listed there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
+ * Like -fipa-icf (which should already be enabled) and maybe -fdevirtualize-at-ltrans.
+ * TODO: stop always emitting debug info. Perhaps this slows things down?
*
* TODO(antoyo): remove the patches.
*/
+#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
#![feature(
rustc_private,
decl_macro,
@@ -28,6 +37,7 @@ extern crate rustc_codegen_ssa;
extern crate rustc_data_structures;
extern crate rustc_errors;
extern crate rustc_fluent_macro;
+extern crate rustc_fs_util;
extern crate rustc_hir;
extern crate rustc_macros;
extern crate rustc_metadata;
@@ -35,7 +45,8 @@ extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_target;
-extern crate tempfile;
+#[macro_use]
+extern crate tracing;
// This prevents duplicating functions and statics that are already part of the host rustc process.
#[allow(unused_extern_crates)]
@@ -57,6 +68,7 @@ mod coverageinfo;
mod debuginfo;
mod declare;
mod errors;
+mod gcc_util;
mod int;
mod intrinsic;
mod mono_item;
@@ -64,18 +76,29 @@ mod type_;
mod type_of;
use std::any::Any;
-use std::sync::{Arc, Mutex};
-
-use crate::errors::LTONotSupported;
-use gccjit::{Context, OptimizationLevel, CType};
+use std::fmt::Debug;
+use std::sync::Arc;
+use std::sync::Mutex;
+#[cfg(not(feature="master"))]
+use std::sync::atomic::AtomicBool;
+#[cfg(not(feature="master"))]
+use std::sync::atomic::Ordering;
+
+use gccjit::{Context, OptimizationLevel};
+#[cfg(feature="master")]
+use gccjit::TargetInfo;
+#[cfg(not(feature="master"))]
+use gccjit::CType;
+use errors::LTONotSupported;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::target_features::supported_target_features;
-use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::sync::IntoDynSyncSend;
+use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods};
use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_metadata::EncodedMetadata;
@@ -88,6 +111,9 @@ use rustc_span::Symbol;
use rustc_span::fatal_error::FatalError;
use tempfile::TempDir;
+use crate::back::lto::ModuleBuffer;
+use crate::gcc_util::target_cpu;
+
fluent_messages! { "../messages.ftl" }
pub struct PrintOnPanic<F: Fn() -> String>(pub F);
@@ -100,9 +126,47 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
}
}
+#[cfg(not(feature="master"))]
+#[derive(Debug)]
+pub struct TargetInfo {
+ supports_128bit_integers: AtomicBool,
+}
+
+#[cfg(not(feature="master"))]
+impl TargetInfo {
+ fn cpu_supports(&self, _feature: &str) -> bool {
+ false
+ }
+
+ fn supports_128bit_int(&self) -> bool {
+ self.supports_128bit_integers.load(Ordering::SeqCst)
+ }
+}
+
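+/// Clonable, thread-safe wrapper sharing a single `TargetInfo` between backend instances.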
+#[derive(Clone)]
+pub struct LockedTargetInfo {
+ info: Arc<Mutex<IntoDynSyncSend<TargetInfo>>>,
+}
+
+impl Debug for LockedTargetInfo {
+ fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ self.info.lock().expect("lock").fmt(formatter)
+ }
+}
+
+impl LockedTargetInfo {
+ fn cpu_supports(&self, feature: &str) -> bool {
+ self.info.lock().expect("lock").cpu_supports(feature)
+ }
+
+ fn supports_128bit_int(&self) -> bool {
+ self.info.lock().expect("lock").supports_128bit_int()
+ }
+}
+
#[derive(Clone)]
pub struct GccCodegenBackend {
- supports_128bit_integers: Arc<Mutex<bool>>,
+ target_info: LockedTargetInfo,
}
impl CodegenBackend for GccCodegenBackend {
@@ -112,24 +176,40 @@ impl CodegenBackend for GccCodegenBackend {
fn init(&self, sess: &Session) {
#[cfg(feature="master")]
+ {
+ let target_cpu = target_cpu(sess);
+
+ // Get the second TargetInfo with the correct CPU features by setting the arch.
+ let context = Context::default();
+ if target_cpu != "generic" {
+ context.add_command_line_option(&format!("-march={}", target_cpu));
+ }
+
+ **self.target_info.info.lock().expect("lock") = context.get_target_info();
+ }
+
+ #[cfg(feature="master")]
gccjit::set_global_personality_function_name(b"rust_eh_personality\0");
- if sess.lto() != Lto::No {
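+        // Fat LTO is supported via back::lto::run_fat, so only warn for thin LTO.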
+ if sess.lto() == Lto::Thin {
sess.emit_warning(LTONotSupported {});
}
- let temp_dir = TempDir::new().expect("cannot create temporary directory");
- let temp_file = temp_dir.into_path().join("result.asm");
- let check_context = Context::default();
- check_context.set_print_errors_to_stderr(false);
- let _int128_ty = check_context.new_c_type(CType::UInt128t);
- // NOTE: we cannot just call compile() as this would require other files than libgccjit.so.
- check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
- *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None);
+ #[cfg(not(feature="master"))]
+ {
+ let temp_dir = TempDir::new().expect("cannot create temporary directory");
+ let temp_file = temp_dir.into_path().join("result.asm");
+ let check_context = Context::default();
+ check_context.set_print_errors_to_stderr(false);
+ let _int128_ty = check_context.new_c_type(CType::UInt128t);
+ // NOTE: we cannot just call compile() as this would require other files than libgccjit.so.
+ check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
+ self.target_info.info.lock().expect("lock").supports_128bit_integers.store(check_context.get_last_error() == Ok(None), Ordering::SeqCst);
+ }
}
fn provide(&self, providers: &mut Providers) {
- // FIXME(antoyo) compute list of enabled features from cli flags
- providers.global_backend_features = |_tcx, ()| vec![];
+ providers.global_backend_features =
+ |tcx, ()| gcc_util::global_gcc_features(tcx.sess, true)
}
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
@@ -160,7 +240,7 @@ impl CodegenBackend for GccCodegenBackend {
}
fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
- target_features(sess, allow_unstable)
+ target_features(sess, allow_unstable, &self.target_info)
}
}
@@ -168,13 +248,19 @@ impl ExtraBackendMethods for GccCodegenBackend {
fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module {
let mut mods = GccContext {
context: Context::default(),
+ should_combine_object_files: false,
+ temp_dir: None,
};
+
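+        // Rust inline asm defaults to Intel syntax on x86, so GCC must be told to expect it.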
+ if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
+ mods.context.add_command_line_option("-masm=intel");
+ }
unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); }
mods
}
fn compile_codegen_unit(&self, tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
- base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock"))
+ base::compile_codegen_unit(tcx, cgu_name, self.target_info.clone())
}
fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
@@ -185,14 +271,6 @@ impl ExtraBackendMethods for GccCodegenBackend {
}
}
-pub struct ModuleBuffer;
-
-impl ModuleBufferMethods for ModuleBuffer {
- fn data(&self) -> &[u8] {
- unimplemented!();
- }
-}
-
pub struct ThinBuffer;
impl ThinBufferMethods for ThinBuffer {
@@ -203,6 +281,9 @@ impl ThinBufferMethods for ThinBuffer {
pub struct GccContext {
context: Context<'static>,
+ should_combine_object_files: bool,
+ // Temporary directory used by LTO. We keep it here so that it's not removed before linking.
+ temp_dir: Option<TempDir>,
}
unsafe impl Send for GccContext {}
@@ -217,18 +298,8 @@ impl WriteBackendMethods for GccCodegenBackend {
type ThinData = ();
type ThinBuffer = ThinBuffer;
- fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLtoInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
- // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
- // NOTE: implemented elsewhere.
- // TODO(antoyo): what is implemented elsewhere ^ ?
- let module =
- match modules.remove(0) {
- FatLtoInput::InMemory(module) => module,
- FatLtoInput::Serialized { .. } => {
- unimplemented!();
- }
- };
- Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: vec![] })
+ fn run_fat_lto(cgcx: &CodegenContext<Self>, modules: Vec<FatLtoInput<Self>>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
+ back::lto::run_fat(cgcx, modules, cached_modules)
}
fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
@@ -277,8 +348,19 @@ impl WriteBackendMethods for GccCodegenBackend {
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ #[cfg(feature="master")]
+ let info = {
+ // Check whether the target supports 128-bit integers.
+ let context = Context::default();
+ Arc::new(Mutex::new(IntoDynSyncSend(context.get_target_info())))
+ };
+ #[cfg(not(feature="master"))]
+ let info = Arc::new(Mutex::new(IntoDynSyncSend(TargetInfo {
+ supports_128bit_integers: AtomicBool::new(false),
+ })));
+
Box::new(GccCodegenBackend {
- supports_128bit_integers: Arc::new(Mutex::new(false)),
+ target_info: LockedTargetInfo { info },
})
}
@@ -297,22 +379,7 @@ fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
}
}
-fn handle_native(name: &str) -> &str {
- if name != "native" {
- return name;
- }
-
- unimplemented!();
-}
-
-pub fn target_cpu(sess: &Session) -> &str {
- match sess.opts.cg.target_cpu {
- Some(ref name) => handle_native(name),
- None => handle_native(sess.target.cpu.as_ref()),
- }
-}
-
-pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+pub fn target_features(sess: &Session, allow_unstable: bool, target_info: &LockedTargetInfo) -> Vec<Symbol> {
supported_target_features(sess)
.iter()
.filter_map(
@@ -321,26 +388,13 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
},
)
.filter(|_feature| {
- // TODO(antoyo): implement a way to get enabled feature in libgccjit.
- // Probably using the equivalent of __builtin_cpu_supports.
- // TODO(antoyo): maybe use whatever outputs the following command:
- // gcc -march=native -Q --help=target
- #[cfg(feature="master")]
- {
- // NOTE: the CPU in the CI doesn't support sse4a, so disable it to make the stdarch tests pass in the CI.
- (_feature.contains("sse") || _feature.contains("avx")) && !_feature.contains("avx512") && !_feature.contains("sse4a")
- }
- #[cfg(not(feature="master"))]
- {
- false
- }
+ target_info.cpu_supports(_feature)
/*
adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512ifma,
avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq,
bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves
*/
- //false
})
.map(|feature| Symbol::intern(feature))
.collect()
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
index 318997405..7a89fe81d 100644
--- a/compiler/rustc_codegen_gcc/src/type_.rs
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -119,11 +119,11 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn type_f32(&self) -> Type<'gcc> {
- self.context.new_type::<f32>()
+ self.float_type
}
fn type_f64(&self) -> Type<'gcc> {
- self.context.new_type::<f64>()
+ self.double_type
}
fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
@@ -216,17 +216,17 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
value.get_type()
}
- fn type_array(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
- // TODO: remove this as well?
- /*if let Some(struct_type) = ty.is_struct() {
+ #[cfg_attr(feature="master", allow(unused_mut))]
+ fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+ #[cfg(not(feature="master"))]
+ if let Some(struct_type) = ty.is_struct() {
if struct_type.get_field_count() == 0 {
                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
                // size of usize::MAX in test_binary_search, we work around this by setting the size to
// zero for ZSTs.
- // FIXME(antoyo): fix gccjit API.
len = 0;
}
- }*/
+ }
self.context.new_array_type(None, ty, len)
}
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index cc467801b..479a81478 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -9,7 +9,7 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
-use crate::abi::{FnAbiGccExt, GccType};
+use crate::abi::{FnAbiGcc, FnAbiGccExt, GccType};
use crate::context::CodegenCx;
use crate::type_::struct_fields;
@@ -87,7 +87,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
// ty::Dynamic(..) |
- ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Coroutine(..) | ty::Str
if !cx.sess().fewer_names() =>
{
let mut name = with_no_trimmed_paths!(layout.ty.to_string());
@@ -98,10 +98,10 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
write!(&mut name, "::{}", def.variant(index).name).unwrap();
}
}
- if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ if let (&ty::Coroutine(_, _, _), &Variants::Single { index }) =
(layout.ty.kind(), &layout.variants)
{
- write!(&mut name, "::{}", ty::GeneratorArgs::variant_name(index)).unwrap();
+ write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap();
}
Some(name)
}
@@ -182,6 +182,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ use crate::rustc_middle::ty::layout::FnAbiOf;
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
@@ -191,7 +192,14 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
return ty;
}
- let ty = self.scalar_gcc_type_at(cx, scalar, Size::ZERO);
+ let ty =
+ match *self.ty.kind() {
+ // NOTE: we cannot remove this match like in the LLVM codegen because the call
+                // to fn_ptr_backend_type handles the on-stack attribute.
+                // TODO(antoyo): find a less hackish way to handle the on-stack attribute.
+ ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
+ _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+ };
cx.scalar_types.borrow_mut().insert(self.ty, ty);
return ty;
}
@@ -364,7 +372,13 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
- let (return_type, param_types, variadic, _) = fn_abi.gcc_type(self);
- self.context.new_function_pointer_type(None, return_type, &param_types, variadic)
+ // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
+ let FnAbiGcc {
+ return_type,
+ arguments_type,
+ is_c_variadic,
+ ..
+ } = fn_abi.gcc_type(self);
+ self.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic)
}
}