From d1b2d29528b7794b41e66fc2136e395a02f8529b Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 30 May 2024 05:59:35 +0200
Subject: Merging upstream version 1.73.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 compiler/rustc_codegen_gcc/src/allocator.rs | 4 +-
 compiler/rustc_codegen_gcc/src/asm.rs | 9 +-
 compiler/rustc_codegen_gcc/src/base.rs | 4 +-
 compiler/rustc_codegen_gcc/src/builder.rs | 1 -
 compiler/rustc_codegen_gcc/src/callee.rs | 6 +-
 compiler/rustc_codegen_gcc/src/common.rs | 8 +-
 compiler/rustc_codegen_gcc/src/context.rs | 2 +-
 compiler/rustc_codegen_gcc/src/errors.rs | 198 +--------
 compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 29 +-
 compiler/rustc_codegen_gcc/src/intrinsic/simd.rs | 536 ++++++++++++-----------
 compiler/rustc_codegen_gcc/src/lib.rs | 12 +-
 compiler/rustc_codegen_gcc/src/mono_item.rs | 2 +-
 compiler/rustc_codegen_gcc/src/type_.rs | 26 +-
 compiler/rustc_codegen_gcc/src/type_of.rs | 4 +-
 14 files changed, 364 insertions(+), 477 deletions(-)

(limited to 'compiler/rustc_codegen_gcc/src')

diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
index 13f88192b..edd7ab722 100644
--- a/compiler/rustc_codegen_gcc/src/allocator.rs
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -27,8 +27,8 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
     if kind == AllocatorKind::Default {
         for method in ALLOCATOR_METHODS {
             let mut types = Vec::with_capacity(method.inputs.len());
-            for ty in method.inputs.iter() {
-                match *ty {
+            for input in method.inputs.iter() {
+                match input.ty {
                     AllocatorTy::Layout => {
                         types.push(usize);
                         types.push(usize);
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 4c3b7f503..905fdac92 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -107,7 +107,7 @@ enum ConstraintOrRegister {
 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
         if options.contains(InlineAsmOptions::MAY_UNWIND) {
             self.sess()
                 .create_err(UnwindingInlineAsm { span: span[0] })
@@ -173,7 +173,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
             .any(|&(_, feature)| {
                 if let Some(feature) = feature {
-                    self.tcx.sess.target_features.contains(&feature)
+                    self.tcx.asm_target_features(instance.def_id()).contains(&feature)
                 } else {
                     true // Register class is unconditionally supported
                 }
@@ -597,6 +597,8 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d",
// more specific than "r" InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f", InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r", @@ -673,6 +675,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(), + InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::Msp430(_) => unimplemented!(), @@ -860,6 +864,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option InlineAsmRegClass::S390x(_) => None, InlineAsmRegClass::Msp430(_) => None, InlineAsmRegClass::M68k(_) => None, + InlineAsmRegClass::CSKY(_) => None, InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs index dcd560b3d..9e614ca4a 100644 --- a/compiler/rustc_codegen_gcc/src/base.rs +++ b/compiler/rustc_codegen_gcc/src/base.rs @@ -159,8 +159,8 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers); let mono_items = cgu.items_in_deterministic_order(tcx); - for &(mono_item, (linkage, visibility)) in &mono_items { - mono_item.predefine::>(&cx, linkage, visibility); + for &(mono_item, data) in &mono_items { + mono_item.predefine::>(&cx, data.linkage, data.visibility); } // ... and now that we have everything pre-defined, fill out those definitions. diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 43d0aafbd..0b1f2fe6a 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -27,7 +27,6 @@ use rustc_codegen_ssa::traits::{ BaseTypeMethods, BuilderMethods, ConstMethods, - DerivedTypeMethods, LayoutTypeMethods, HasCodegen, OverflowOp, diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs index 433b2585f..a96bd66ba 100644 --- a/compiler/rustc_codegen_gcc/src/callee.rs +++ b/compiler/rustc_codegen_gcc/src/callee.rs @@ -17,8 +17,8 @@ use crate::context::CodegenCx; pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> { let tcx = cx.tcx(); - assert!(!instance.substs.has_infer()); - assert!(!instance.substs.has_escaping_bound_vars()); + assert!(!instance.args.has_infer()); + assert!(!instance.args.has_escaping_bound_vars()); let sym = tcx.symbol_name(instance).name; @@ -100,7 +100,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) // whether we are sharing generics or not. The important thing here is // that the visibility we apply to the declaration is the same one that // has been applied to the definition (wherever that definition may be). - let is_generic = instance.substs.non_erasable_generics().next().is_some(); + let is_generic = instance.args.non_erasable_generics().next().is_some(); if is_generic { // This is a monomorphization. 
Its expected visibility depends diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index b62f4676f..5f54cb16d 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -16,6 +16,10 @@ use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { + self.context.new_cast(None, val, ty) + } + pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> { bytes_in_context(self, bytes) } @@ -242,10 +246,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { const_alloc_to_gcc(self, alloc) } - fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { - self.context.new_cast(None, val, ty) - } - fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { if value.get_type() == self.bool_type.make_pointer() { if let Some(pointee) = typ.get_pointee() { diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index 08507e196..88dcafa73 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -476,7 +476,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { - if let LayoutError::SizeOverflow(_) = err { + if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err { self.sess().emit_fatal(respan(span, err.into_diagnostic())) } else { span_bug!(span, "failed to get layout for `{}`: {}", ty, err) diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs index 9305bd1e0..693367192 100644 --- a/compiler/rustc_codegen_gcc/src/errors.rs +++ b/compiler/rustc_codegen_gcc/src/errors.rs @@ -1,7 +1,6 @@ use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg}; use rustc_macros::Diagnostic; -use rustc_middle::ty::Ty; -use rustc_span::{Span, Symbol}; +use rustc_span::Span; use std::borrow::Cow; struct ExitCode(Option); @@ -16,201 +15,6 @@ impl IntoDiagnosticArg for ExitCode { } } -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_basic_integer, code = "E0511")] -pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_invalid_float_vector, code = "E0511")] -pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub elem_ty: &'a str, - pub vec_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_not_float, code = "E0511")] -pub(crate) struct InvalidMonomorphizationNotFloat<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_unrecognized, code = "E0511")] -pub(crate) struct InvalidMonomorphizationUnrecognized { - #[primary_span] - pub span: Span, - pub name: Symbol, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_expected_signed_unsigned, code = "E0511")] -pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub elem_ty: Ty<'a>, - pub vec_ty: Ty<'a>, -} - -#[derive(Diagnostic)] 
-#[diag(codegen_gcc_invalid_monomorphization_unsupported_element, code = "E0511")] -pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_ty: Ty<'a>, - pub elem_ty: Ty<'a>, - pub ret_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_invalid_bitmask, code = "E0511")] -pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub ty: Ty<'a>, - pub expected_int_bits: u64, - pub expected_bytes: u64, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_simd_shuffle, code = "E0511")] -pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_expected_simd, code = "E0511")] -pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub position: &'a str, - pub found_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_mask_type, code = "E0511")] -pub(crate) struct InvalidMonomorphizationMaskType<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_return_length, code = "E0511")] -pub(crate) struct InvalidMonomorphizationReturnLength<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_len: u64, - pub ret_ty: Ty<'a>, - pub out_len: u64, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_return_length_input_type, code = "E0511")] -pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_len: u64, - pub in_ty: Ty<'a>, - pub ret_ty: Ty<'a>, - pub out_len: u64, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_return_element, code = "E0511")] -pub(crate) struct InvalidMonomorphizationReturnElement<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_elem: Ty<'a>, - pub in_ty: Ty<'a>, - pub ret_ty: Ty<'a>, - pub out_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_return_type, code = "E0511")] -pub(crate) struct InvalidMonomorphizationReturnType<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_elem: Ty<'a>, - pub in_ty: Ty<'a>, - pub ret_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_inserted_type, code = "E0511")] -pub(crate) struct InvalidMonomorphizationInsertedType<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_elem: Ty<'a>, - pub in_ty: Ty<'a>, - pub out_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_return_integer_type, code = "E0511")] -pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub ret_ty: Ty<'a>, - pub out_ty: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_mismatched_lengths, code = "E0511")] -pub(crate) struct InvalidMonomorphizationMismatchedLengths { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub m_len: u64, - pub v_len: u64, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_unsupported_cast, code = "E0511")] -pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> { - #[primary_span] - pub span: Span, 
- pub name: Symbol, - pub in_ty: Ty<'a>, - pub in_elem: Ty<'a>, - pub ret_ty: Ty<'a>, - pub out_elem: Ty<'a>, -} - -#[derive(Diagnostic)] -#[diag(codegen_gcc_invalid_monomorphization_unsupported_operation, code = "E0511")] -pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { - #[primary_span] - pub span: Span, - pub name: Symbol, - pub in_ty: Ty<'a>, - pub in_elem: Ty<'a>, -} - #[derive(Diagnostic)] #[diag(codegen_gcc_lto_not_supported)] pub(crate) struct LTONotSupported; diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 0b208be4e..f8c32c6db 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -12,7 +12,8 @@ use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; #[cfg(feature="master")] -use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods}; +use rustc_codegen_ssa::traits::MiscMethods; +use rustc_codegen_ssa::errors::InvalidMonomorphization; use rustc_middle::bug; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; @@ -31,7 +32,6 @@ use crate::abi::FnAbiGccExt; use crate::builder::Builder; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; -use crate::errors::InvalidMonomorphizationBasicInteger; use crate::type_of::LayoutGccExt; use crate::intrinsic::simd::generic_simd_intrinsic; @@ -92,8 +92,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let tcx = self.tcx; let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all()); - let (def_id, substs) = match *callee_ty.kind() { - ty::FnDef(def_id, substs) => (def_id, substs), + let (def_id, fn_args) = match *callee_ty.kind() { + ty::FnDef(def_id, fn_args) => (def_id, fn_args), _ => bug!("expected fn item type, found {}", callee_ty), }; @@ -142,7 +142,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } sym::volatile_load | sym::unaligned_volatile_load => { - let tp_ty = substs.type_at(0); + let tp_ty = fn_args.type_at(0); let mut ptr = args[0].immediate(); if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); @@ -256,7 +256,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { _ => bug!(), }, None => { - tcx.sess.emit_err(InvalidMonomorphizationBasicInteger { span, name, ty }); + tcx.sess.emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty }); return; } } @@ -264,7 +264,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { sym::raw_eq => { use rustc_target::abi::Abi::*; - let tp_ty = substs.type_at(0); + let tp_ty = fn_args.type_at(0); let layout = self.layout_of(tp_ty).layout; let _use_integer_compare = match layout.abi() { Scalar(_) | ScalarPair(_, _) => true, @@ -302,6 +302,21 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } } + sym::compare_bytes => { + let a = args[0].immediate(); + let b = args[1].immediate(); + let n = args[2].immediate(); + + let void_ptr_type = self.context.new_type::<*const ()>(); + let a_ptr = self.bitcast(a, void_ptr_type); + let b_ptr = self.bitcast(b, void_ptr_type); + + // Here we assume that the `memcmp` provided by the target is a NOP for size 0. 
+ let builtin = self.context.get_builtin_function("memcmp"); + let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]); + self.sext(cmp, self.type_ix(32)) + } + sym::black_box => { args[0].val.store(self, result); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index 9115cf971..85d3e7234 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -1,11 +1,11 @@ -#[cfg(feature="master")] -use gccjit::{ComparisonOp, UnaryOp}; use gccjit::ToRValue; use gccjit::{BinaryOp, RValue, Type}; +#[cfg(feature = "master")] +use gccjit::{ComparisonOp, UnaryOp}; use rustc_codegen_ssa::base::compare_simd_types; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; -#[cfg(feature="master")] +#[cfg(feature = "master")] use rustc_codegen_ssa::errors::ExpectedPointerMutability; use rustc_codegen_ssa::errors::InvalidMonomorphization; use rustc_codegen_ssa::mir::operand::OperandRef; @@ -19,21 +19,8 @@ use rustc_span::{sym, Span, Symbol}; use rustc_target::abi::Align; use crate::builder::Builder; -#[cfg(feature="master")] +#[cfg(feature = "master")] use crate::context::CodegenCx; -#[cfg(feature="master")] -use crate::errors::{InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationInsertedType}; -use crate::errors::{ - InvalidMonomorphizationExpectedSimd, - InvalidMonomorphizationInvalidBitmask, - InvalidMonomorphizationInvalidFloatVector, InvalidMonomorphizationMaskType, - InvalidMonomorphizationMismatchedLengths, InvalidMonomorphizationNotFloat, - InvalidMonomorphizationReturnElement, InvalidMonomorphizationReturnIntegerType, - InvalidMonomorphizationReturnLength, InvalidMonomorphizationReturnLengthInputType, - InvalidMonomorphizationReturnType, InvalidMonomorphizationSimdShuffle, - InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedElement, - InvalidMonomorphizationUnsupportedOperation, -}; pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bx: &mut Builder<'a, 'gcc, 'tcx>, @@ -59,16 +46,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }; } macro_rules! require_simd { - ($ty: expr, $position: expr) => { - require!( - $ty.is_simd(), - InvalidMonomorphizationExpectedSimd { - span, - name, - position: $position, - found_ty: $ty - } - ) + ($ty: expr, $diag: expr) => { + require!($ty.is_simd(), $diag) }; } @@ -78,7 +57,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let arg_tys = sig.inputs(); if name == sym::simd_select_bitmask { - require_simd!(arg_tys[1], "argument"); + require_simd!( + arg_tys[1], + InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] } + ); let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); let expected_int_bits = (len.max(8) - 1).next_power_of_two(); @@ -99,10 +81,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty)); bx.load(int_ty, ptr, Align::ONE) } - _ => return_error!(InvalidMonomorphizationInvalidBitmask { + _ => return_error!(InvalidMonomorphization::InvalidBitmask { span, name, - ty: mask_ty, + mask_ty, expected_int_bits, expected_bytes }), @@ -116,7 +98,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of // integer. 
let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8); - let vector_mask_type = bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64); + let vector_mask_type = + bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64); let mut elements = vec![]; let one = bx.context.new_rvalue_one(mask.get_type()); @@ -131,7 +114,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } // every intrinsic below takes a SIMD vector as its first argument - require_simd!(arg_tys[0], "input"); + require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] }); let in_ty = arg_tys[0]; let comparison = match name { @@ -146,12 +129,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx()); if let Some(cmp_op) = comparison { - require_simd!(ret_ty, "return"); + require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, - InvalidMonomorphizationReturnLengthInputType { + InvalidMonomorphization::ReturnLengthInputType { span, name, in_len, @@ -162,7 +145,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( ); require!( bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, - InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty } + InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty } ); let arg1 = args[0].immediate(); @@ -170,48 +153,34 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // compare them as equal, so bitcast. // FIXME(antoyo): allow comparing vector types as equal in libgccjit. let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type()); - return Ok(compare_simd_types( - bx, - arg1, - arg2, - in_elem, - llret_ty, - cmp_op, - )); + return Ok(compare_simd_types(bx, arg1, arg2, in_elem, llret_ty, cmp_op)); } - if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") { - let n: u64 = if stripped.is_empty() { - // Make sure this is actually an array, since typeck only checks the length-suffixed - // version of this intrinsic. - match args[2].layout.ty.kind() { - ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { - len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( - || span_bug!(span, "could not evaluate shuffle index array length"), - ) - } - _ => return_error!(InvalidMonomorphizationSimdShuffle { - span, - name, - ty: args[2].layout.ty - }), + if name == sym::simd_shuffle { + // Make sure this is actually an array, since typeck only checks the length-suffixed + // version of this intrinsic. 
+ let n: u64 = match args[2].layout.ty.kind() { + ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { + len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( + || span_bug!(span, "could not evaluate shuffle index array length"), + ) } - } else { - stripped.parse().unwrap_or_else(|_| { - span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") - }) + _ => return_error!(InvalidMonomorphization::SimdShuffle { + span, + name, + ty: args[2].layout.ty + }), }; - - require_simd!(ret_ty, "return"); + require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); require!( out_len == n, - InvalidMonomorphizationReturnLength { span, name, in_len: n, ret_ty, out_len } + InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len } ); require!( in_elem == out_ty, - InvalidMonomorphizationReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } + InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } ); let vector = args[2].immediate(); @@ -223,7 +192,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if name == sym::simd_insert { require!( in_elem == arg_tys[2], - InvalidMonomorphizationInsertedType { span, name, in_elem, in_ty, out_ty: arg_tys[2] } + InvalidMonomorphization::InsertedType { + span, + name, + in_elem, + in_ty, + out_ty: arg_tys[2] + } ); let vector = args[0].immediate(); let index = args[1].immediate(); @@ -240,7 +215,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if name == sym::simd_extract { require!( ret_ty == in_elem, - InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } ); let vector = args[0].immediate(); return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue()); @@ -249,26 +224,29 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if name == sym::simd_select { let m_elem_ty = in_elem; let m_len = in_len; - require_simd!(arg_tys[1], "argument"); + require_simd!( + arg_tys[1], + InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] } + ); let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); require!( m_len == v_len, - InvalidMonomorphizationMismatchedLengths { span, name, m_len, v_len } + InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } ); match m_elem_ty.kind() { ty::Int(_) => {} - _ => return_error!(InvalidMonomorphizationMaskType { span, name, ty: m_elem_ty }), + _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }), } return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate())); } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_cast || name == sym::simd_as { - require_simd!(ret_ty, "return"); + require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, - InvalidMonomorphizationReturnLengthInputType { + InvalidMonomorphization::ReturnLengthInputType { span, name, in_len, @@ -288,19 +266,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( Unsupported, } - let in_style = - match in_elem.kind() { - ty::Int(_) | ty::Uint(_) => Style::Int, - ty::Float(_) => Style::Float, - _ => Style::Unsupported, - }; + let in_style = match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => 
Style::Float, + _ => Style::Unsupported, + }; - let out_style = - match out_elem.kind() { - ty::Int(_) | ty::Uint(_) => Style::Int, - ty::Float(_) => Style::Float, - _ => Style::Unsupported, - }; + let out_style = match out_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, + }; match (in_style, out_style) { (Style::Unsupported, Style::Unsupported) => { @@ -315,7 +291,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( out_elem } ); - }, + } _ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)), } } @@ -329,7 +305,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( })* _ => {}, } - return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem }) + return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }) })* } } @@ -363,10 +339,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let mut shift = 0; for i in 0..in_len { - let elem = bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32)); + let elem = + bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32)); let shifted = elem >> sign_shift; let masked = shifted & one; - result = result | (bx.context.new_cast(None, masked, result_type) << bx.context.new_rvalue_from_int(result_type, shift)); + result = result + | (bx.context.new_cast(None, masked, result_type) + << bx.context.new_rvalue_from_int(result_type, shift)); shift += 1; } @@ -415,46 +394,50 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Err(()); }}; } - let (elem_ty_str, elem_ty) = - if let ty::Float(f) = in_elem.kind() { - let elem_ty = bx.cx.type_float_from_ty(*f); - match f.bit_width() { - 32 => ("f", elem_ty), - 64 => ("", elem_ty), - _ => { - return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); - } + let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f", elem_ty), + 64 => ("", elem_ty), + _ => { + return_error!(InvalidMonomorphization::FloatingPointVector { + span, + name, + f_ty: *f, + in_ty + }); } } - else { - return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); - }; + } else { + return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty }); + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len); - let intr_name = - match name { - sym::simd_ceil => "ceil", - sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103 - sym::simd_fcos => "cos", - sym::simd_fexp2 => "exp2", - sym::simd_fexp => "exp", - sym::simd_flog10 => "log10", - sym::simd_flog2 => "log2", - sym::simd_flog => "log", - sym::simd_floor => "floor", - sym::simd_fma => "fma", - sym::simd_fpowi => "__builtin_powi", - sym::simd_fpow => "pow", - sym::simd_fsin => "sin", - sym::simd_fsqrt => "sqrt", - sym::simd_round => "round", - sym::simd_trunc => "trunc", - _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }) - }; + let intr_name = match name { + sym::simd_ceil => "ceil", + sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fcos => "cos", + sym::simd_fexp2 => "exp2", + sym::simd_fexp => "exp", + sym::simd_flog10 => "log10", + sym::simd_flog2 => "log2", + sym::simd_flog => "log", + sym::simd_floor => "floor", + sym::simd_fma => "fma", + sym::simd_fpowi => "__builtin_powi", + sym::simd_fpow => "pow", + sym::simd_fsin 
=> "sin", + sym::simd_fsqrt => "sqrt", + sym::simd_round => "round", + sym::simd_trunc => "trunc", + _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }), + }; let builtin_name = format!("{}{}", intr_name, elem_ty_str); let funcs = bx.cx.functions.borrow(); - let function = funcs.get(&builtin_name).unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name)); + let function = funcs + .get(&builtin_name) + .unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name)); // TODO(antoyo): add platform-specific behavior here for architectures that have these // intrinsics as instructions (for instance, gpus) @@ -500,8 +483,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args); } - #[cfg(feature="master")] - fn vector_ty<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, elem_ty: Ty<'tcx>, vec_len: u64) -> Type<'gcc> { + #[cfg(feature = "master")] + fn vector_ty<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + elem_ty: Ty<'tcx>, + vec_len: u64, + ) -> Type<'gcc> { // FIXME: use cx.layout_of(ty).llvm_type() ? let elem_ty = match *elem_ty.kind() { ty::Int(v) => cx.type_int_from_ty(v), @@ -512,15 +499,22 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( cx.type_vector(elem_ty, vec_len) } - #[cfg(feature="master")] - fn gather<'a, 'gcc, 'tcx>(default: RValue<'gcc>, pointers: RValue<'gcc>, mask: RValue<'gcc>, pointer_count: usize, bx: &mut Builder<'a, 'gcc, 'tcx>, in_len: u64, underlying_ty: Ty<'tcx>, invert: bool) -> RValue<'gcc> { - let vector_type = - if pointer_count > 1 { - bx.context.new_vector_type(bx.usize_type, in_len) - } - else { - vector_ty(bx, underlying_ty, in_len) - }; + #[cfg(feature = "master")] + fn gather<'a, 'gcc, 'tcx>( + default: RValue<'gcc>, + pointers: RValue<'gcc>, + mask: RValue<'gcc>, + pointer_count: usize, + bx: &mut Builder<'a, 'gcc, 'tcx>, + in_len: u64, + underlying_ty: Ty<'tcx>, + invert: bool, + ) -> RValue<'gcc> { + let vector_type = if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } else { + vector_ty(bx, underlying_ty, in_len) + }; let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); let mut values = vec![]; @@ -551,13 +545,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if invert { bx.shuffle_vector(vector, default, mask) - } - else { + } else { bx.shuffle_vector(default, vector, mask) } } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_gather { // simd_gather(values: , pointers: , // mask: ) -> @@ -566,10 +559,16 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // * M: any integer width is supported, will be truncated to i1 // All types must be simd vector types - require_simd!(in_ty, "first"); - require_simd!(arg_tys[1], "second"); - require_simd!(arg_tys[2], "third"); - require_simd!(ret_ty, "return"); + require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty }); + require_simd!( + arg_tys[1], + InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] } + ); + require_simd!( + arg_tys[2], + InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] } + ); + require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); // Of the same length: let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); @@ -662,10 +661,19 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } } - return Ok(gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, 
underlying_ty, false)); + return Ok(gather( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + pointer_count, + bx, + in_len, + underlying_ty, + false, + )); } - #[cfg(feature="master")] + #[cfg(feature = "master")] if name == sym::simd_scatter { // simd_scatter(values: , pointers: , // mask: ) -> () @@ -674,9 +682,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // * M: any integer width is supported, will be truncated to i1 // All types must be simd vector types - require_simd!(in_ty, "first"); - require_simd!(arg_tys[1], "second"); - require_simd!(arg_tys[2], "third"); + require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty }); + require_simd!( + arg_tys[1], + InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] } + ); + require_simd!( + arg_tys[2], + InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] } + ); // Of the same length: let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); @@ -765,17 +779,24 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } } - let result = gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, true); + let result = gather( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + pointer_count, + bx, + in_len, + underlying_ty, + true, + ); let pointers = args[1].immediate(); - let vector_type = - if pointer_count > 1 { - bx.context.new_vector_type(bx.usize_type, in_len) - } - else { - vector_ty(bx, underlying_ty, in_len) - }; + let vector_type = if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } else { + vector_ty(bx, underlying_ty, in_len) + }; let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); for i in 0..in_len { @@ -815,7 +836,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( })* _ => {}, } - return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem }) + return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }) })* } } @@ -830,91 +851,97 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let rhs = args[1].immediate(); let is_add = name == sym::simd_saturating_add; let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; - let (signed, elem_width, elem_ty) = - match *in_elem.kind() { - ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)), - ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i)), - _ => { - return_error!(InvalidMonomorphizationExpectedSignedUnsigned { + let (signed, elem_width, elem_ty) = match *in_elem.kind() { + ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)), + ty::Uint(i) => { + (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i)) + } + _ => { + return_error!(InvalidMonomorphization::ExpectedVectorElementType { span, name, - elem_ty: arg_tys[0].simd_size_and_type(bx.tcx()).1, - vec_ty: arg_tys[0], + expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1, + vector_type: arg_tys[0], }); } }; - let result = - match (signed, is_add) { - (false, true) => { - let res = lhs + rhs; - let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs); - res | cmp - }, - (true, true) => { - // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition - // TODO(antoyo): improve using conditional operators if possible. 
- // TODO(antoyo): dyncast_vector should not require a call to unqualified. - let arg_type = lhs.get_type().unqualified(); - // TODO(antoyo): convert lhs and rhs to unsigned. - let sum = lhs + rhs; - let vector_type = arg_type.dyncast_vector().expect("vector type"); - let unit = vector_type.get_num_units(); - let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); - let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); - - let xor1 = lhs ^ rhs; - let xor2 = lhs ^ sum; - let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; - let mask = and >> width; - - let one = bx.context.new_rvalue_one(elem_ty); - let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); - let shift1 = ones << width; - let shift2 = sum >> width; - let mask_min = shift1 ^ shift2; - - let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; - let and2 = mask & mask_min; - - and1 + and2 - }, - (false, false) => { - let res = lhs - rhs; - let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs); - res & cmp - }, - (true, false) => { - // TODO(antoyo): dyncast_vector should not require a call to unqualified. - let arg_type = lhs.get_type().unqualified(); - // TODO(antoyo): this uses the same algorithm from saturating add, but add the - // negative of the right operand. Find a proper subtraction algorithm. - let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs); - - // TODO(antoyo): convert lhs and rhs to unsigned. - let sum = lhs + rhs; - let vector_type = arg_type.dyncast_vector().expect("vector type"); - let unit = vector_type.get_num_units(); - let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); - let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); - - let xor1 = lhs ^ rhs; - let xor2 = lhs ^ sum; - let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; - let mask = and >> width; - - let one = bx.context.new_rvalue_one(elem_ty); - let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); - let shift1 = ones << width; - let shift2 = sum >> width; - let mask_min = shift1 ^ shift2; - - let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; - let and2 = mask & mask_min; - - and1 + and2 - } - }; + let result = match (signed, is_add) { + (false, true) => { + let res = lhs + rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs); + res | cmp + } + (true, true) => { + // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition + // TODO(antoyo): improve using conditional operators if possible. + // TODO(antoyo): dyncast_vector should not require a call to unqualified. + let arg_type = lhs.get_type().unqualified(); + // TODO(antoyo): convert lhs and rhs to unsigned. 
+ let sum = lhs + rhs; + let vector_type = arg_type.dyncast_vector().expect("vector type"); + let unit = vector_type.get_num_units(); + let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); + let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); + + let xor1 = lhs ^ rhs; + let xor2 = lhs ^ sum; + let and = + bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; + let mask = and >> width; + + let one = bx.context.new_rvalue_one(elem_ty); + let ones = + bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); + let shift1 = ones << width; + let shift2 = sum >> width; + let mask_min = shift1 ^ shift2; + + let and1 = + bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; + let and2 = mask & mask_min; + + and1 + and2 + } + (false, false) => { + let res = lhs - rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs); + res & cmp + } + (true, false) => { + // TODO(antoyo): dyncast_vector should not require a call to unqualified. + let arg_type = lhs.get_type().unqualified(); + // TODO(antoyo): this uses the same algorithm from saturating add, but add the + // negative of the right operand. Find a proper subtraction algorithm. + let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs); + + // TODO(antoyo): convert lhs and rhs to unsigned. + let sum = lhs + rhs; + let vector_type = arg_type.dyncast_vector().expect("vector type"); + let unit = vector_type.get_num_units(); + let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); + let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); + + let xor1 = lhs ^ rhs; + let xor2 = lhs ^ sum; + let and = + bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; + let mask = and >> width; + + let one = bx.context.new_rvalue_one(elem_ty); + let ones = + bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); + let shift1 = ones << width; + let shift2 = sum >> width; + let mask_min = shift1 ^ shift2; + + let and1 = + bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; + let and2 = mask & mask_min; + + and1 + and2 + } + }; return Ok(result); } @@ -925,7 +952,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if name == sym::$name { require!( ret_ty == in_elem, - InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { @@ -947,11 +974,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op)) } } - _ => return_error!(InvalidMonomorphizationUnsupportedElement { + _ => return_error!(InvalidMonomorphization::UnsupportedSymbol { span, name, + symbol: sym::$name, in_ty, - elem_ty: in_elem, + in_elem, ret_ty }), }; @@ -988,18 +1016,24 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( 1.0 ); - macro_rules! 
minmax_red { ($name:ident: $int_red:ident, $float_red:ident) => { if name == sym::$name { require!( ret_ty == in_elem, - InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())), ty::Float(_) => Ok(bx.$float_red(args[0].immediate())), - _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), + _ => return_error!(InvalidMonomorphization::UnsupportedSymbol { + span, + name, + symbol: sym::$name, + in_ty, + in_elem, + ret_ty + }), }; } }; @@ -1017,17 +1051,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let input = if !$boolean { require!( ret_ty == in_elem, - InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } ); args[0].immediate() } else { match in_elem.kind() { ty::Int(_) | ty::Uint(_) => {} - _ => return_error!(InvalidMonomorphizationUnsupportedElement { + _ => return_error!(InvalidMonomorphization::UnsupportedSymbol { span, name, + symbol: sym::$name, in_ty, - elem_ty: in_elem, + in_elem, ret_ty }), } @@ -1037,13 +1072,22 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { let r = bx.vector_reduce_op(input, $op); - Ok(if !$boolean { r } else { bx.icmp(IntPredicate::IntNE, r, bx.context.new_rvalue_zero(r.get_type())) }) + Ok(if !$boolean { + r + } else { + bx.icmp( + IntPredicate::IntNE, + r, + bx.context.new_rvalue_zero(r.get_type()), + ) + }) } - _ => return_error!(InvalidMonomorphizationUnsupportedElement { + _ => return_error!(InvalidMonomorphization::UnsupportedSymbol { span, name, + symbol: sym::$name, in_ty, - elem_ty: in_elem, + in_elem, ret_ty }), }; diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 2a6b64278..697ae015f 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -71,7 +71,7 @@ use gccjit::{Context, OptimizationLevel, CType}; use rustc_ast::expand::allocator::AllocatorKind; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; use rustc_codegen_ssa::base::codegen_crate; -use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn}; +use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn}; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; use rustc_codegen_ssa::target_features::supported_target_features; use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; @@ -217,14 +217,14 @@ impl WriteBackendMethods for GccCodegenBackend { type ThinData = (); type ThinBuffer = ThinBuffer; - fn run_fat_lto(_cgcx: &CodegenContext, mut modules: Vec>, _cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result, FatalError> { + fn run_fat_lto(_cgcx: &CodegenContext, mut modules: Vec>, _cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result, FatalError> { // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. // NOTE: implemented elsewhere. // TODO(antoyo): what is implemented elsewhere ^ ? let module = match modules.remove(0) { - FatLTOInput::InMemory(module) => module, - FatLTOInput::Serialized { .. 
} => { + FatLtoInput::InMemory(module) => module, + FatLtoInput::Serialized { .. } => { unimplemented!(); } }; @@ -239,6 +239,10 @@ impl WriteBackendMethods for GccCodegenBackend { unimplemented!(); } + fn print_statistics(&self) { + unimplemented!() + } + unsafe fn optimize(_cgcx: &CodegenContext, _diag_handler: &Handler, module: &ModuleCodegen, config: &ModuleConfig) -> Result<(), FatalError> { module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); Ok(()) diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs index 342b830ce..3322d5651 100644 --- a/compiler/rustc_codegen_gcc/src/mono_item.rs +++ b/compiler/rustc_codegen_gcc/src/mono_item.rs @@ -31,7 +31,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { #[cfg_attr(not(feature="master"), allow(unused_variables))] fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) { - assert!(!instance.substs.has_infer()); + assert!(!instance.args.has_infer()); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); self.linkage.set(base::linkage_to_gcc(linkage)); diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs index 521b64ad3..318997405 100644 --- a/compiler/rustc_codegen_gcc/src/type_.rs +++ b/compiler/rustc_codegen_gcc/src/type_.rs @@ -54,6 +54,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.u128_type } + pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { + ty.make_pointer() + } + + pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { + // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? + ty.make_pointer() + } + + pub fn type_i8p(&self) -> Type<'gcc> { + self.type_ptr_to(self.type_i8()) + } + + pub fn type_i8p_ext(&self, address_space: AddressSpace) -> Type<'gcc> { + self.type_ptr_to_ext(self.type_i8(), address_space) + } + pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> { // FIXME(eddyb) We could find a better approximation if ity.align < align. let ity = Integer::approximate_align(self, align); @@ -149,13 +166,12 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } - fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { - ty.make_pointer() + fn type_ptr(&self) -> Type<'gcc> { + self.type_ptr_to(self.type_void()) } - fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { - // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? 
- ty.make_pointer() + fn type_ptr_ext(&self, address_space: AddressSpace) -> Type<'gcc> { + self.type_ptr_to_ext(self.type_void(), address_space) } fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> { diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index e0823888f..84d578385 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -101,7 +101,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout if let (&ty::Generator(_, _, _), &Variants::Single { index }) = (layout.ty.kind(), &layout.variants) { - write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap(); + write!(&mut name, "::{}", ty::GeneratorArgs::variant_name(index)).unwrap(); } Some(name) } @@ -282,7 +282,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { } // only wide pointer boxes are handled as pointers // thin pointer boxes with scalar allocators are handled by the general logic below - ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => { + ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => { let ptr_ty = Ty::new_mut_ptr(cx.tcx,self.ty.boxed_ty()); return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate); } -- cgit v1.2.3