
Commit a1ce91d

Unrolled build for rust-lang#133102
Rollup merge of rust-lang#133102 - RalfJung:aarch64-softfloat, r=davidtwco,wesleywiser

aarch64 softfloat target: always pass floats in int registers

This is a part of rust-lang#131058: on softfloat aarch64 targets, the float registers may be unavailable. And yet, LLVM will happily use them to pass float types if the corresponding target features are enabled. That's a problem, as it means enabling/disabling `neon` instructions can change the ABI.

Other targets have a `soft-float` target feature that forces the use of the soft-float ABI no matter whether float registers are enabled or not; aarch64 has nothing like that. So we follow the aarch64 softfloat ABI (rust-lang#131058 (comment)) and treat floats like integers for `extern "C"` functions. For the "Rust" ABI, we do the same for scalars, and then just do something reasonable for ScalarPair that avoids the pointer indirection.

Cc @workingjubilee
2 parents 743003b + 666bcbd commit a1ce91d
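
As a hedged illustration of the ABI concern described in the commit message (the function below is hypothetical and not part of the commit): on aarch64 softfloat targets, LLVM could previously lower an `extern "C"` float argument through either a float register or an integer register depending on whether the `neon` target feature was enabled; with this change the float is always passed as an equivalently-sized integer.

// Illustrative sketch only; not from the commit.
// On a softfloat aarch64 target, `x` is now always passed and returned in an
// integer register (as an i64 bit-pattern), regardless of the `neon` feature.
#[no_mangle]
pub extern "C" fn identity_f64(x: f64) -> f64 {
    x
}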

3 files changed: +106 -5 lines

compiler/rustc_target/src/callconv/aarch64.rs (+57 -5)
@@ -1,5 +1,10 @@
+use std::iter;
+
+use rustc_abi::{BackendRepr, Primitive};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
 use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::{HasTargetSpec, Target};
 
 /// Indicates the variant of the AArch64 ABI we are compiling for.
 /// Used to accommodate Apple and Microsoft's deviations from the usual AAPCS ABI.
@@ -15,7 +20,7 @@ pub(crate) enum AbiKind {
 fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
         let size = arg.layout.size;
@@ -27,18 +32,52 @@ where
 
         let valid_unit = match unit.kind {
             RegKind::Integer => false,
-            RegKind::Float => true,
+            // The softfloat ABI treats floats like integers, so they
+            // do not get homogeneous aggregate treatment.
+            RegKind::Float => cx.target_spec().abi != "softfloat",
             RegKind::Vector => size.bits() == 64 || size.bits() == 128,
         };
 
         valid_unit.then_some(Uniform::consecutive(unit, size))
     })
 }
 
+fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
+    if target.abi != "softfloat" {
+        return;
+    }
+    // Do *not* use the float registers for passing arguments, as that would make LLVM pick the ABI
+    // and its choice depends on whether `neon` instructions are enabled. Instead, we follow the
+    // AAPCS "softfloat" ABI, which specifies that floats should be passed as equivalently-sized
+    // integers. Nominally this only exists for "R" profile chips, but sometimes people don't want
+    // to use hardfloats even if the hardware supports them, so we do this for all softfloat
+    // targets.
+    if let BackendRepr::Scalar(s) = arg.layout.backend_repr
+        && let Primitive::Float(f) = s.primitive()
+    {
+        arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
+    } else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
+        && (matches!(s1.primitive(), Primitive::Float(_))
+            || matches!(s2.primitive(), Primitive::Float(_)))
+    {
+        // This case can only be reached for the Rust ABI, so we can do whatever we want here as
+        // long as it does not depend on target features (i.e., as long as we do not use float
+        // registers). So we pass small things in integer registers and large things via pointer
+        // indirection. This means we lose the nice "pass it as two arguments" optimization, but we
+        // currently just have no way to combine a `PassMode::Cast` with that optimization (and we
+        // need a cast since we want to pass the float as an int).
+        if arg.layout.size.bits() <= target.pointer_width.into() {
+            arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
+        } else {
+            arg.make_indirect();
+        }
+    }
+}
+
 fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     if !ret.layout.is_sized() {
         // Not touching this...
@@ -51,6 +90,7 @@ where
             // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
             ret.extend_integer_width_to(32)
         }
+        softfloat_float_abi(cx.target_spec(), ret);
         return;
     }
     if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
@@ -69,7 +109,7 @@ where
 fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     if !arg.layout.is_sized() {
         // Not touching this...
@@ -82,6 +122,8 @@ where
             // See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
             arg.extend_integer_width_to(32);
         }
+        softfloat_float_abi(cx.target_spec(), arg);
+
         return;
     }
     if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
@@ -112,7 +154,7 @@ where
 pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
-    C: HasDataLayout,
+    C: HasDataLayout + HasTargetSpec,
 {
     if !fn_abi.ret.is_ignore() {
         classify_ret(cx, &mut fn_abi.ret, kind);
@@ -125,3 +167,13 @@ where
         classify_arg(cx, arg, kind);
     }
 }
+
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+    Ty: TyAbiInterface<'a, C> + Copy,
+    C: HasDataLayout + HasTargetSpec,
+{
+    for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
+        softfloat_float_abi(cx.target_spec(), arg);
+    }
+}
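
A hedged sketch of what the `is_homogeneous_aggregate` change above means in practice (the struct and function below are hypothetical, not taken from the diff): under the hardfloat AAPCS, a struct of four `f32`s is a homogeneous floating-point aggregate and is passed in float registers; with `abi == "softfloat"`, the `RegKind::Float` unit is no longer accepted, so the aggregate falls through to the integer-based classification later in `classify_arg`.

// Hypothetical example type, not part of the commit.
#[repr(C)]
pub struct Quad {
    pub a: f32,
    pub b: f32,
    pub c: f32,
    pub d: f32,
}

// On hardfloat AAPCS this would be passed as an HFA in float registers; on a
// softfloat aarch64 target it is expected to be passed like any other 16-byte
// aggregate, e.g. in a pair of integer registers.
#[no_mangle]
pub extern "C" fn pass_quad(q: Quad) -> Quad {
    q
}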

compiler/rustc_target/src/callconv/mod.rs (+1)
@@ -738,6 +738,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
             "x86" => x86::compute_rust_abi_info(cx, self, abi),
             "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
             "loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
+            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
             _ => {}
         };
 
tests/codegen/aarch64-softfloat.rs (new file, +48)
@@ -0,0 +1,48 @@
+//@ compile-flags: --target aarch64-unknown-none-softfloat -Zmerge-functions=disabled
+//@ needs-llvm-components: aarch64
+#![crate_type = "lib"]
+#![feature(no_core, lang_items)]
+#![no_core]
+
+#[lang = "sized"]
+trait Sized {}
+#[lang = "copy"]
+trait Copy {}
+impl Copy for f32 {}
+impl Copy for f64 {}
+
+// CHECK: i64 @pass_f64_C(i64 {{[^,]*}})
+#[no_mangle]
+extern "C" fn pass_f64_C(x: f64) -> f64 {
+    x
+}
+
+// CHECK: i64 @pass_f32_pair_C(i64 {{[^,]*}})
+#[no_mangle]
+extern "C" fn pass_f32_pair_C(x: (f32, f32)) -> (f32, f32) {
+    x
+}
+
+// CHECK: [2 x i64] @pass_f64_pair_C([2 x i64] {{[^,]*}})
+#[no_mangle]
+extern "C" fn pass_f64_pair_C(x: (f64, f64)) -> (f64, f64) {
+    x
+}
+
+// CHECK: i64 @pass_f64_Rust(i64 {{[^,]*}})
+#[no_mangle]
+fn pass_f64_Rust(x: f64) -> f64 {
+    x
+}
+
+// CHECK: i64 @pass_f32_pair_Rust(i64 {{[^,]*}})
+#[no_mangle]
+fn pass_f32_pair_Rust(x: (f32, f32)) -> (f32, f32) {
+    x
+}
+
+// CHECK: void @pass_f64_pair_Rust(ptr {{[^,]*}}, ptr {{[^,]*}})
+#[no_mangle]
+fn pass_f64_pair_Rust(x: (f64, f64)) -> (f64, f64) {
+    x
+}
