
Commit 2b8161f

callconv: adapt mips padding logic to mips64
Similar to mips, mips64 also adds a padding register when an aggregate argument is not passed at an aligned offset, for any alignment `a` with `8 < a <= 16` bytes. The fix uses an i32 padding register, which should work just fine because i32 is aligned like i64 for argument-passing purposes.
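A minimal standalone sketch of the offset bookkeeping this patch introduces, using plain byte counts in place of rustc's `Size` and `Align` types (the helper names below are hypothetical, not compiler API):

```rust
/// Clamp the argument's ABI alignment into the 8..=16 byte range, mirroring
/// `arg.layout.align.abi.max(dl.i64_align).min(dl.i128_align)` in the diff.
fn clamped_align(arg_align: u64) -> u64 {
    arg_align.clamp(8, 16)
}

/// Decide whether an argument of `size` bytes with ABI alignment `arg_align`,
/// placed at the running byte `offset`, needs an i32 padding register, and
/// compute the running offset for the next argument.
fn classify(offset: u64, size: u64, arg_align: u64) -> (bool, u64) {
    let align = clamped_align(arg_align);
    let align_to = |x: u64| x.div_ceil(align) * align;
    let pad_i32 = offset % align != 0; // `!offset.is_aligned(align)`
    (pad_i32, align_to(offset) + align_to(size))
}

fn main() {
    // First argument: an i64 at offset 0. Aligned, so no padding; the
    // running offset advances to 8.
    let (pad, offset) = classify(0, 8, 8);
    assert!(!pad && offset == 8);

    // Second argument: a 16-byte-aligned aggregate. Offset 8 is not a
    // multiple of 16, so an i32 padding register is inserted before it.
    let (pad, _) = classify(offset, 16, 16);
    assert!(pad);
}
```

The clamp to the 8..=16 byte range is also why only alignments strictly greater than 8 bytes can produce padding: the running offset only ever advances in multiples of at least 8, so an 8-byte-aligned argument always starts at an aligned offset.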
Parent: 6a884ad

1 file changed: +62 -52

compiler/rustc_target/src/callconv/mips64.rs

@@ -34,7 +34,7 @@ where
     }
 }
 
-fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, offset: &mut Size)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
@@ -70,88 +70,98 @@ where
         ret.cast_to(Uniform::new(Reg::i64(), size));
     } else {
         ret.make_indirect();
+        *offset += cx.data_layout().pointer_size();
     }
 }
 
-fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, offset: &mut Size)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    if !arg.layout.is_aggregate() {
-        extend_integer_width_mips(arg, 64);
-        return;
-    }
-
     let dl = cx.data_layout();
     let size = arg.layout.size;
     let mut prefix = [None; 8];
     let mut prefix_index = 0;
 
-    match arg.layout.fields {
-        FieldsShape::Primitive => unreachable!(),
-        FieldsShape::Array { .. } => {
-            // Arrays are passed indirectly
-            arg.make_indirect();
-            return;
-        }
-        FieldsShape::Union(_) => {
-            // Unions and are always treated as a series of 64-bit integer chunks
-        }
-        FieldsShape::Arbitrary { .. } => {
-            // Structures are split up into a series of 64-bit integer chunks, but any aligned
-            // doubles not part of another aggregate are passed as floats.
-            let mut last_offset = Size::ZERO;
-
-            for i in 0..arg.layout.fields.count() {
-                let field = arg.layout.field(cx, i);
-                let offset = arg.layout.fields.offset(i);
-
-                // We only care about aligned doubles
-                if let BackendRepr::Scalar(scalar) = field.backend_repr {
-                    if scalar.primitive() == Primitive::Float(Float::F64) {
-                        if offset.is_aligned(dl.f64_align) {
-                            // Insert enough integers to cover [last_offset, offset)
-                            assert!(last_offset.is_aligned(dl.f64_align));
-                            for _ in 0..((offset - last_offset).bits() / 64)
-                                .min((prefix.len() - prefix_index) as u64)
-                            {
-                                prefix[prefix_index] = Some(Reg::i64());
-                                prefix_index += 1;
-                            }
+    // Detect need for padding
+    let align = arg.layout.align.abi.max(dl.i64_align).min(dl.i128_align);
+    let pad_i32 = !offset.is_aligned(align);
 
-                            if prefix_index == prefix.len() {
-                                break;
+    if !arg.layout.is_aggregate() {
+        extend_integer_width_mips(arg, 64);
+    } else {
+        match arg.layout.fields {
+            FieldsShape::Primitive => unreachable!(),
+            FieldsShape::Array { .. } => {
+                // Arrays are passed indirectly
+                arg.make_indirect();
+            }
+            FieldsShape::Union(_) => {
+                // Unions and are always treated as a series of 64-bit integer chunks
+            }
+            FieldsShape::Arbitrary { .. } => {
+                // Structures are split up into a series of 64-bit integer chunks, but any aligned
+                // doubles not part of another aggregate are passed as floats.
+                let mut last_offset = Size::ZERO;
+
+                for i in 0..arg.layout.fields.count() {
+                    let field = arg.layout.field(cx, i);
+                    let offset = arg.layout.fields.offset(i);
+
+                    // We only care about aligned doubles
+                    if let BackendRepr::Scalar(scalar) = field.backend_repr {
+                        if scalar.primitive() == Primitive::Float(Float::F64) {
+                            if offset.is_aligned(dl.f64_align) {
+                                // Insert enough integers to cover [last_offset, offset)
+                                assert!(last_offset.is_aligned(dl.f64_align));
+                                for _ in 0..((offset - last_offset).bits() / 64)
+                                    .min((prefix.len() - prefix_index) as u64)
+                                {
+                                    prefix[prefix_index] = Some(Reg::i64());
+                                    prefix_index += 1;
+                                }
+
+                                if prefix_index == prefix.len() {
+                                    break;
+                                }
+
+                                prefix[prefix_index] = Some(Reg::f64());
+                                prefix_index += 1;
+                                last_offset = offset + Reg::f64().size;
                             }
-
-                            prefix[prefix_index] = Some(Reg::f64());
-                            prefix_index += 1;
-                            last_offset = offset + Reg::f64().size;
                         }
                     }
                 }
             }
-        }
-    };
-
-    // Extract first 8 chunks as the prefix
-    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
-    arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)));
+        };
+        // Extract first 8 chunks as the prefix
+        let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+        arg.cast_to_and_pad_i32(
+            CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)),
+            pad_i32,
+        );
+    }
+    *offset = offset.align_to(align) + size.align_to(align);
 }
 
 pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    // mips64 argument passing is also affected by the alignment of aggregates.
+    // see mips.rs for how the offset is used
+    let mut offset = Size::ZERO;
+
     if !fn_abi.ret.is_ignore() {
-        classify_ret(cx, &mut fn_abi.ret);
+        classify_ret(cx, &mut fn_abi.ret, &mut offset);
     }
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg(cx, arg);
+        classify_arg(cx, arg, &mut offset);
     }
 }
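As a usage illustration (a hypothetical example, not a test from this commit): a 16-byte-aligned aggregate that follows a single 8-byte argument starts at offset 8, which is misaligned for it, so the new logic inserts an i32 padding register before it.

```rust
// Hypothetical mips64 example: `y` has 16-byte ABI alignment but follows one
// 8-byte argument, so its natural offset (8) is misaligned and the fixed
// calling convention places an i32 padding register before it.
#[repr(C, align(16))]
pub struct Aligned16(pub [u8; 16]);

pub extern "C" fn takes_aligned(x: i64, y: Aligned16) -> u8 {
    (x as u8).wrapping_add(y.0[0])
}

fn main() {
    assert_eq!(takes_aligned(1, Aligned16([2; 16])), 3);
}
```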
