Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit aeda5fc

Browse files
committed Mar 16, 2024
Auto merge of rust-lang#122053 - erikdesjardins:alloca, r=nikic
Stop using LLVM struct types for alloca The alloca type has no semantic meaning, only the size (and alignment, but we specify it explicitly) matter. Using `[N x i8]` is a more direct way to specify that we want `N` bytes, and avoids relying on LLVM's struct layout. It is likely that a future LLVM version will change to an untyped alloca representation. Split out from rust-lang#121577. r? `@ghost`
2 parents c03ea3d + 8536da4 commit aeda5fc

30 files changed

+151
-128
lines changed
 

‎compiler/rustc_codegen_gcc/src/builder.rs

+4-16
Original file line numberDiff line numberDiff line change
@@ -897,26 +897,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
897897
self.gcc_checked_binop(oop, typ, lhs, rhs)
898898
}
899899

900-
fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
901-
// FIXME(antoyo): this check that we don't call get_aligned() a second time on a type.
902-
// Ideally, we shouldn't need to do this check.
903-
let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type {
904-
ty
905-
} else {
906-
ty.get_aligned(align.bytes())
907-
};
900+
fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> {
901+
let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
908902
// TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
909903
self.stack_var_count.set(self.stack_var_count.get() + 1);
910-
self.current_func()
911-
.new_local(
912-
self.location,
913-
aligned_type,
914-
&format!("stack_var_{}", self.stack_var_count.get()),
915-
)
916-
.get_address(self.location)
904+
self.current_func().new_local(None, ty, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
917905
}
918906

919-
fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
907+
fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
920908
unimplemented!();
921909
}
922910

‎compiler/rustc_codegen_gcc/src/intrinsic/mod.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -530,7 +530,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
530530
// We instead thus allocate some scratch space...
531531
let scratch_size = cast.size(bx);
532532
let scratch_align = cast.align(bx);
533-
let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
533+
let llscratch = bx.alloca(scratch_size, scratch_align);
534534
bx.lifetime_start(llscratch, scratch_size);
535535

536536
// ... where we first store the value...

‎compiler/rustc_codegen_gcc/src/intrinsic/simd.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ use rustc_middle::span_bug;
1818
use rustc_middle::ty::layout::HasTyCtxt;
1919
use rustc_middle::ty::{self, Ty};
2020
use rustc_span::{sym, Span, Symbol};
21-
use rustc_target::abi::Align;
21+
use rustc_target::abi::{Align, Size};
2222

2323
use crate::builder::Builder;
2424
#[cfg(not(feature = "master"))]
@@ -558,7 +558,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
558558
let ze = bx.zext(result, bx.type_ix(expected_bytes * 8));
559559

560560
// Convert the integer to a byte array
561-
let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
561+
let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
562562
bx.store(ze, ptr, Align::ONE);
563563
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
564564
let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));

‎compiler/rustc_codegen_llvm/src/abi.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
238238
// We instead thus allocate some scratch space...
239239
let scratch_size = cast.size(bx);
240240
let scratch_align = cast.align(bx);
241-
let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
241+
let llscratch = bx.alloca(scratch_size, scratch_align);
242242
bx.lifetime_start(llscratch, scratch_size);
243243

244244
// ... where we first store the value...

‎compiler/rustc_codegen_llvm/src/builder.rs

+4-3
Original file line numberDiff line numberDiff line change
@@ -466,20 +466,21 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
466466
val
467467
}
468468

469-
fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
469+
fn alloca(&mut self, size: Size, align: Align) -> &'ll Value {
470470
let mut bx = Builder::with_cx(self.cx);
471471
bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
472+
let ty = self.cx().type_array(self.cx().type_i8(), size.bytes());
472473
unsafe {
473474
let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
474475
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
475476
alloca
476477
}
477478
}
478479

479-
fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
480+
fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value {
480481
unsafe {
481482
let alloca =
482-
llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
483+
llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED);
483484
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
484485
alloca
485486
}

‎compiler/rustc_codegen_llvm/src/intrinsic.rs

+10-10
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
1818
use rustc_middle::ty::{self, GenericArgsRef, Ty};
1919
use rustc_middle::{bug, span_bug};
2020
use rustc_span::{sym, Span, Symbol};
21-
use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
21+
use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size};
2222
use rustc_target::spec::{HasTargetSpec, PanicStrategy};
2323

2424
use std::cmp::Ordering;
@@ -637,8 +637,9 @@ fn codegen_msvc_try<'ll>(
637637
// }
638638
//
639639
// More information can be found in libstd's seh.rs implementation.
640+
let ptr_size = bx.tcx().data_layout.pointer_size;
640641
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
641-
let slot = bx.alloca(bx.type_ptr(), ptr_align);
642+
let slot = bx.alloca(ptr_size, ptr_align);
642643
let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
643644
bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
644645

@@ -908,15 +909,14 @@ fn codegen_emcc_try<'ll>(
908909

909910
// We need to pass two values to catch_func (ptr and is_rust_panic), so
910911
// create an alloca and pass a pointer to that.
912+
let ptr_size = bx.tcx().data_layout.pointer_size;
911913
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
912914
let i8_align = bx.tcx().data_layout.i8_align.abi;
913-
let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
914-
let catch_data = bx.alloca(catch_data_type, ptr_align);
915-
let catch_data_0 =
916-
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
917-
bx.store(ptr, catch_data_0, ptr_align);
918-
let catch_data_1 =
919-
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
915+
// Required in order for there to be no padding between the fields.
916+
assert!(i8_align <= ptr_align);
917+
let catch_data = bx.alloca(2 * ptr_size, ptr_align);
918+
bx.store(ptr, catch_data, ptr_align);
919+
let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
920920
bx.store(is_rust_panic, catch_data_1, i8_align);
921921

922922
let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
@@ -1362,7 +1362,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
13621362
let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
13631363

13641364
// Convert the integer to a byte array
1365-
let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
1365+
let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
13661366
bx.store(ze, ptr, Align::ONE);
13671367
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
13681368
return Ok(bx.load(array_ty, ptr, Align::ONE));

‎compiler/rustc_codegen_ssa/src/base.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -506,7 +506,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
506506
let ptr_size = bx.tcx().data_layout.pointer_size;
507507
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
508508
let arg_argc = bx.const_int(cx.type_isize(), 2);
509-
let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), ptr_align);
509+
let arg_argv = bx.alloca(2 * ptr_size, ptr_align);
510510
bx.store(param_handle, arg_argv, ptr_align);
511511
let arg_argv_el1 = bx.inbounds_ptradd(arg_argv, bx.const_usize(ptr_size.bytes()));
512512
bx.store(param_system_table, arg_argv_el1, ptr_align);

‎compiler/rustc_codegen_ssa/src/mir/operand.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
324324
let llfield_ty = bx.cx().backend_type(field);
325325

326326
// Can't bitcast an aggregate, so round trip through memory.
327-
let llptr = bx.alloca(llfield_ty, field.align.abi);
327+
let llptr = bx.alloca(field.size, field.align.abi);
328328
bx.store(*llval, llptr, field.align.abi);
329329
*llval = bx.load(llfield_ty, llptr, field.align.abi);
330330
}
@@ -472,7 +472,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
472472
let align_minus_1 = bx.sub(align, one);
473473
let size_extra = bx.add(size, align_minus_1);
474474
let min_align = Align::ONE;
475-
let alloca = bx.byte_array_alloca(size_extra, min_align);
475+
let alloca = bx.dynamic_alloca(size_extra, min_align);
476476
let address = bx.ptrtoint(alloca, bx.type_isize());
477477
let neg_address = bx.neg(address);
478478
let offset = bx.and(neg_address, align_minus_1);

‎compiler/rustc_codegen_ssa/src/mir/place.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
5757
align: Align,
5858
) -> Self {
5959
assert!(layout.is_sized(), "tried to statically allocate unsized place");
60-
let tmp = bx.alloca(bx.cx().backend_type(layout), align);
60+
let tmp = bx.alloca(layout.size, align);
6161
Self::new_sized_aligned(tmp, layout, align)
6262
}
6363

‎compiler/rustc_codegen_ssa/src/traits/builder.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -141,8 +141,8 @@ pub trait BuilderMethods<'a, 'tcx>:
141141
}
142142
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
143143

144-
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
145-
fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
144+
fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
145+
fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;
146146

147147
fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
148148
fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;

‎tests/assembly/stack-protector/stack-protector-heuristics-effect.rs

+15-40
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,11 @@
1111
//@ compile-flags: -C opt-level=2 -Z merge-functions=disabled
1212
//@ min-llvm-version: 17.0.2
1313

14+
// NOTE: the heuristics for stack smash protection inappropriately rely on types in LLVM IR,
15+
// despite those types having no semantic meaning. This means that the `basic` and `strong`
16+
// settings do not behave in a coherent way. This is a known issue in LLVM.
17+
// See comments on https://github.com/rust-lang/rust/issues/114903.
18+
1419
#![crate_type = "lib"]
1520

1621
#![allow(incomplete_features)]
@@ -39,23 +44,9 @@ pub fn array_char(f: fn(*const char)) {
3944
f(&b as *const _);
4045
f(&c as *const _);
4146

42-
// Any type of local array variable leads to stack protection with the
43-
// "strong" heuristic. The 'basic' heuristic only adds stack protection to
44-
// functions with local array variables of a byte-sized type, however. Since
45-
// 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
46-
// heuristic
47-
//
48-
// (This test *also* takes the address of the local stack variables. We
49-
// cannot know that this isn't what triggers the `strong` heuristic.
50-
// However, the test strategy of passing the address of a stack array to an
51-
// external function is sufficient to trigger the `basic` heuristic (see
52-
// test `array_u8_large()`). Since the `basic` heuristic only checks for the
53-
// presence of stack-local array variables, we can be confident that this
54-
// test also captures this part of the `strong` heuristic specification.)
55-
5647
// all: __stack_chk_fail
5748
// strong: __stack_chk_fail
58-
// basic-NOT: __stack_chk_fail
49+
// basic: __stack_chk_fail
5950
// none-NOT: __stack_chk_fail
6051
// missing-NOT: __stack_chk_fail
6152
}
@@ -163,26 +154,11 @@ pub fn local_string_addr_taken(f: fn(&String)) {
163154
f(&x);
164155

165156
// Taking the address of the local variable `x` leads to stack smash
166-
// protection with the `strong` heuristic, but not with the `basic`
167-
// heuristic. It does not matter that the reference is not mut.
168-
//
169-
// An interesting note is that a similar function in C++ *would* be
170-
// protected by the `basic` heuristic, because `std::string` has a char
171-
// array internally as a small object optimization:
172-
// ```
173-
// cat <<EOF | clang++ -O2 -fstack-protector -S -x c++ - -o - | grep stack_chk
174-
// #include <string>
175-
// void f(void (*g)(const std::string&)) {
176-
// std::string x;
177-
// g(x);
178-
// }
179-
// EOF
180-
// ```
181-
//
157+
// protection. It does not matter that the reference is not mut.
182158

183159
// all: __stack_chk_fail
184160
// strong: __stack_chk_fail
185-
// basic-NOT: __stack_chk_fail
161+
// basic: __stack_chk_fail
186162
// none-NOT: __stack_chk_fail
187163
// missing-NOT: __stack_chk_fail
188164
}
@@ -233,8 +209,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
233209
// Even though the local variable conceptually doesn't have its address
234210
// taken, it's so large that the "move" is implemented with a reference to a
235211
// stack-local variable in the ABI. Consequently, this function *is*
236-
// protected by the `strong` heuristic. This is also the case for
237-
// rvalue-references in C++, regardless of struct size:
212+
// protected. This is also the case for rvalue-references in C++,
213+
// regardless of struct size:
238214
// ```
239215
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
240216
// #include <cstdint>
@@ -248,7 +224,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
248224

249225
// all: __stack_chk_fail
250226
// strong: __stack_chk_fail
251-
// basic-NOT: __stack_chk_fail
227+
// basic: __stack_chk_fail
252228
// none-NOT: __stack_chk_fail
253229
// missing-NOT: __stack_chk_fail
254230
}
@@ -261,9 +237,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
261237
// A new instance of `Gigastruct` is passed to `f()`, without any apparent
262238
// connection to this stack frame. Still, since instances of `Gigastruct`
263239
// are sufficiently large, it is allocated in the caller stack frame and
264-
// passed as a pointer. As such, this function is *also* protected by the
265-
// `strong` heuristic, just like `local_large_var_moved`. This is also the
266-
// case for pass-by-value of sufficiently large structs in C++:
240+
// passed as a pointer. As such, this function is *also* protected, just
241+
// like `local_large_var_moved`. This is also the case for pass-by-value
242+
// of sufficiently large structs in C++:
267243
// ```
268244
// cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
269245
// #include <cstdint>
@@ -275,10 +251,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
275251
// EOF
276252
// ```
277253

278-
279254
// all: __stack_chk_fail
280255
// strong: __stack_chk_fail
281-
// basic-NOT: __stack_chk_fail
256+
// basic: __stack_chk_fail
282257
// none-NOT: __stack_chk_fail
283258
// missing-NOT: __stack_chk_fail
284259
}

‎tests/codegen/align-byval-alignment-mismatch.rs

+3-3
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ extern "C" {
5656
#[no_mangle]
5757
pub unsafe fn rust_to_c_increases_alignment(x: Align1) {
5858
// i686-linux: start:
59-
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align1, align 4
59+
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 4
6060
// i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 4 {{.*}}[[ALLOCA]], ptr {{.*}}align 1 {{.*}}%x
6161
// i686-linux-NEXT: call void @extern_c_align1({{.+}} [[ALLOCA]])
6262

@@ -90,7 +90,7 @@ pub unsafe extern "C" fn c_to_rust_decreases_alignment(x: Align1) {
9090
#[no_mangle]
9191
pub unsafe extern "C" fn c_to_rust_increases_alignment(x: Align16) {
9292
// i686-linux: start:
93-
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
93+
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
9494
// i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
9595
// i686-linux-NEXT: call void @extern_rust_align16({{.+}} [[ALLOCA]])
9696

@@ -116,7 +116,7 @@ pub unsafe extern "C" fn c_to_rust_ref_decreases_alignment(x: Align1) {
116116
#[no_mangle]
117117
pub unsafe extern "C" fn c_to_rust_ref_increases_alignment(x: Align16) {
118118
// i686-linux: start:
119-
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
119+
// i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
120120
// i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
121121
// i686-linux-NEXT: call void @extern_rust_ref_align16({{.+}} [[ALLOCA]])
122122

‎tests/codegen/align-byval.rs

+6-6
Original file line numberDiff line numberDiff line change
@@ -106,20 +106,20 @@ pub struct ForceAlign16 {
106106
pub unsafe fn call_na1(x: NaturalAlign1) {
107107
// CHECK: start:
108108

109-
// m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
109+
// m68k: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
110110
// m68k: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
111111

112-
// wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
112+
// wasm: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
113113
// wasm: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
114114

115115
// x86_64-linux: call void @natural_align_1(i16
116116

117117
// x86_64-windows: call void @natural_align_1(i16
118118

119-
// i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
119+
// i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
120120
// i686-linux: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
121121

122-
// i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
122+
// i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
123123
// i686-windows: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
124124
natural_align_1(x);
125125
}
@@ -134,10 +134,10 @@ pub unsafe fn call_na2(x: NaturalAlign2) {
134134
// x86_64-linux-NEXT: call void @natural_align_2
135135
// x86_64-windows-NEXT: call void @natural_align_2
136136

137-
// i686-linux: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
137+
// i686-linux: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
138138
// i686-linux: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
139139

140-
// i686-windows: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
140+
// i686-windows: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
141141
// i686-windows: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
142142
natural_align_2(x);
143143
}

‎tests/codegen/align-enum.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ pub struct Nested64 {
1818
// CHECK-LABEL: @align64
1919
#[no_mangle]
2020
pub fn align64(a: u32) -> Align64 {
21-
// CHECK: %a64 = alloca %Align64, align 64
21+
// CHECK: %a64 = alloca [64 x i8], align 64
2222
// CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false)
2323
let a64 = Align64::A(a);
2424
a64
@@ -27,7 +27,7 @@ pub fn align64(a: u32) -> Align64 {
2727
// CHECK-LABEL: @nested64
2828
#[no_mangle]
2929
pub fn nested64(a: u8, b: u32, c: u16) -> Nested64 {
30-
// CHECK: %n64 = alloca %Nested64, align 64
30+
// CHECK: %n64 = alloca [128 x i8], align 64
3131
let n64 = Nested64 { a, b: Align64::B(b), c };
3232
n64
3333
}

0 commit comments

Comments
 (0)
Please sign in to comment.