From ef24de24a82fe681581cc130f342363c47c0969a Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 7 Jun 2024 07:48:48 +0200
Subject: Merging upstream version 1.75.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 compiler/rustc_target/src/abi/call/csky.rs | 38 +++++++++++++++++++++++-------
 compiler/rustc_target/src/abi/call/x86.rs  |  3 ++-
 2 files changed, 32 insertions(+), 9 deletions(-)

(limited to 'compiler/rustc_target/src/abi/call')

diff --git a/compiler/rustc_target/src/abi/call/csky.rs b/compiler/rustc_target/src/abi/call/csky.rs
index bbe95fa20..706493b0a 100644
--- a/compiler/rustc_target/src/abi/call/csky.rs
+++ b/compiler/rustc_target/src/abi/call/csky.rs
@@ -1,17 +1,39 @@
-// See https://github.com/llvm/llvm-project/blob/d85b94bf0080dcd780656c0f5e6342800720eba9/llvm/lib/Target/CSKY/CSKYCallingConv.td
-use crate::abi::call::{ArgAbi, FnAbi};
+// Reference: CSKY ABI Manual
+// https://occ-oss-prod.oss-cn-hangzhou.aliyuncs.com/resource//1695027452256/T-HEAD_800_Series_ABI_Standards_Manual.pdf
+//
+// Reference: Clang CSKY lowering code
+// https://github.com/llvm/llvm-project/blob/4a074f32a6914f2a8d7215d78758c24942dddc3d/clang/lib/CodeGen/Targets/CSKY.cpp#L76-L162
 
-fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
-    if ret.layout.is_aggregate() || ret.layout.size.bits() > 64 {
-        ret.make_indirect();
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+
+fn classify_ret<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+    // For return type, aggregate which <= 2*XLen will be returned in registers.
+    // Otherwise, aggregate will be returned indirectly.
+    if arg.layout.is_aggregate() {
+        let total = arg.layout.size;
+        if total.bits() > 64 {
+            arg.make_indirect();
+        } else if total.bits() > 32 {
+            arg.cast_to(Uniform { unit: Reg::i32(), total });
+        } else {
+            arg.cast_to(Reg::i32());
+        }
     } else {
-        ret.extend_integer_width_to(32);
+        arg.extend_integer_width_to(32);
     }
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
-    if arg.layout.is_aggregate() || arg.layout.size.bits() > 64 {
-        arg.make_indirect();
+    // For argument type, the first 4*XLen parts of aggregate will be passed
+    // in registers, and the rest will be passed in stack.
+    // So we can coerce to integers directly and let backend handle it correctly.
+    if arg.layout.is_aggregate() {
+        let total = arg.layout.size;
+        if total.bits() > 32 {
+            arg.cast_to(Uniform { unit: Reg::i32(), total });
+        } else {
+            arg.cast_to(Reg::i32());
+        }
     } else {
         arg.extend_integer_width_to(32);
     }
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index afa1b70ef..c27f1e6dd 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -72,7 +72,8 @@ where
         //   - For backwards compatibility, arguments with natural alignment > 4 are still passed
         //     on stack (via `byval`). For example, this includes `double`, `int64_t`,
         //     and structs containing them, provided they lack an explicit alignment attribute.
-        assert!(arg.layout.align.abi >= max_repr_align,
+        assert!(
+            arg.layout.align.abi >= max_repr_align,
             "abi alignment {:?} less than requested alignment {max_repr_align:?}",
             arg.layout.align.abi,
         );
-- 
cgit v1.2.3