|
34 | 34 | } |
35 | 35 | } |
36 | 36 |
|
37 | | -fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>) |
| 37 | +fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, offset: &mut Size) |
38 | 38 | where |
39 | 39 | Ty: TyAbiInterface<'a, C> + Copy, |
40 | 40 | C: HasDataLayout, |
@@ -70,88 +70,98 @@ where |
70 | 70 | ret.cast_to(Uniform::new(Reg::i64(), size)); |
71 | 71 | } else { |
72 | 72 | ret.make_indirect(); |
| 73 | + *offset += cx.data_layout().pointer_size(); |
73 | 74 | } |
74 | 75 | } |
75 | 76 |
|
76 | | -fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) |
| 77 | +fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, offset: &mut Size) |
77 | 78 | where |
78 | 79 | Ty: TyAbiInterface<'a, C> + Copy, |
79 | 80 | C: HasDataLayout, |
80 | 81 | { |
81 | | - if !arg.layout.is_aggregate() { |
82 | | - extend_integer_width_mips(arg, 64); |
83 | | - return; |
84 | | - } |
85 | | - |
86 | 82 | let dl = cx.data_layout(); |
87 | 83 | let size = arg.layout.size; |
88 | 84 | let mut prefix = [None; 8]; |
89 | 85 | let mut prefix_index = 0; |
90 | 86 |
|
91 | | - match arg.layout.fields { |
92 | | - FieldsShape::Primitive => unreachable!(), |
93 | | - FieldsShape::Array { .. } => { |
94 | | - // Arrays are passed indirectly |
95 | | - arg.make_indirect(); |
96 | | - return; |
97 | | - } |
98 | | - FieldsShape::Union(_) => { |
99 | | -            // Unions are always treated as a series of 64-bit integer chunks |
100 | | - } |
101 | | - FieldsShape::Arbitrary { .. } => { |
102 | | - // Structures are split up into a series of 64-bit integer chunks, but any aligned |
103 | | - // doubles not part of another aggregate are passed as floats. |
104 | | - let mut last_offset = Size::ZERO; |
105 | | - |
106 | | - for i in 0..arg.layout.fields.count() { |
107 | | - let field = arg.layout.field(cx, i); |
108 | | - let offset = arg.layout.fields.offset(i); |
109 | | - |
110 | | - // We only care about aligned doubles |
111 | | - if let BackendRepr::Scalar(scalar) = field.backend_repr { |
112 | | - if scalar.primitive() == Primitive::Float(Float::F64) { |
113 | | - if offset.is_aligned(dl.f64_align) { |
114 | | - // Insert enough integers to cover [last_offset, offset) |
115 | | - assert!(last_offset.is_aligned(dl.f64_align)); |
116 | | - for _ in 0..((offset - last_offset).bits() / 64) |
117 | | - .min((prefix.len() - prefix_index) as u64) |
118 | | - { |
119 | | - prefix[prefix_index] = Some(Reg::i64()); |
120 | | - prefix_index += 1; |
121 | | - } |
| 87 | + // Detect need for padding |
| 88 | + let align = arg.layout.align.abi.max(dl.i64_align).min(dl.i128_align); |
| 89 | + let pad_i32 = !offset.is_aligned(align); |
122 | 90 |
|
123 | | - if prefix_index == prefix.len() { |
124 | | - break; |
| 91 | + if !arg.layout.is_aggregate() { |
| 92 | + extend_integer_width_mips(arg, 64); |
| 93 | + } else { |
| 94 | + match arg.layout.fields { |
| 95 | + FieldsShape::Primitive => unreachable!(), |
| 96 | + FieldsShape::Array { .. } => { |
| 97 | + // Arrays are passed indirectly |
| 98 | + arg.make_indirect(); |
| 99 | + } |
| 100 | + FieldsShape::Union(_) => { |
| 101 | +            // Unions are always treated as a series of 64-bit integer chunks |
| 102 | + } |
| 103 | + FieldsShape::Arbitrary { .. } => { |
| 104 | + // Structures are split up into a series of 64-bit integer chunks, but any aligned |
| 105 | + // doubles not part of another aggregate are passed as floats. |
| 106 | + let mut last_offset = Size::ZERO; |
| 107 | + |
| 108 | + for i in 0..arg.layout.fields.count() { |
| 109 | + let field = arg.layout.field(cx, i); |
| 110 | + let offset = arg.layout.fields.offset(i); |
| 111 | + |
| 112 | + // We only care about aligned doubles |
| 113 | + if let BackendRepr::Scalar(scalar) = field.backend_repr { |
| 114 | + if scalar.primitive() == Primitive::Float(Float::F64) { |
| 115 | + if offset.is_aligned(dl.f64_align) { |
| 116 | + // Insert enough integers to cover [last_offset, offset) |
| 117 | + assert!(last_offset.is_aligned(dl.f64_align)); |
| 118 | + for _ in 0..((offset - last_offset).bits() / 64) |
| 119 | + .min((prefix.len() - prefix_index) as u64) |
| 120 | + { |
| 121 | + prefix[prefix_index] = Some(Reg::i64()); |
| 122 | + prefix_index += 1; |
| 123 | + } |
| 124 | + |
| 125 | + if prefix_index == prefix.len() { |
| 126 | + break; |
| 127 | + } |
| 128 | + |
| 129 | + prefix[prefix_index] = Some(Reg::f64()); |
| 130 | + prefix_index += 1; |
| 131 | + last_offset = offset + Reg::f64().size; |
125 | 132 | } |
126 | | - |
127 | | - prefix[prefix_index] = Some(Reg::f64()); |
128 | | - prefix_index += 1; |
129 | | - last_offset = offset + Reg::f64().size; |
130 | 133 | } |
131 | 134 | } |
132 | 135 | } |
133 | 136 | } |
134 | | - } |
135 | | - }; |
136 | | - |
137 | | - // Extract first 8 chunks as the prefix |
138 | | - let rest_size = size - Size::from_bytes(8) * prefix_index as u64; |
139 | | - arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size))); |
| 137 | + }; |
| 138 | + // Extract first 8 chunks as the prefix |
| 139 | + let rest_size = size - Size::from_bytes(8) * prefix_index as u64; |
| 140 | + arg.cast_to_and_pad_i32( |
| 141 | + CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)), |
| 142 | + pad_i32, |
| 143 | + ); |
| 144 | + } |
| 145 | + *offset = offset.align_to(align) + size.align_to(align); |
140 | 146 | } |
141 | 147 |
|
142 | 148 | pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>) |
143 | 149 | where |
144 | 150 | Ty: TyAbiInterface<'a, C> + Copy, |
145 | 151 | C: HasDataLayout, |
146 | 152 | { |
| 153 | + // mips64 argument passing is also affected by the alignment of aggregates. |
| 154 | + // see mips.rs for how the offset is used |
| 155 | + let mut offset = Size::ZERO; |
| 156 | + |
147 | 157 | if !fn_abi.ret.is_ignore() { |
148 | | - classify_ret(cx, &mut fn_abi.ret); |
| 158 | + classify_ret(cx, &mut fn_abi.ret, &mut offset); |
149 | 159 | } |
150 | 160 |
|
151 | 161 | for arg in fn_abi.args.iter_mut() { |
152 | 162 | if arg.is_ignore() { |
153 | 163 | continue; |
154 | 164 | } |
155 | | - classify_arg(cx, arg); |
| 165 | + classify_arg(cx, arg, &mut offset); |
156 | 166 | } |
157 | 167 | } |
0 commit comments