//@ only-x86_64
//@ compile-flags: -Copt-level=3 -C no-prepopulate-passes --crate-type=lib
// On LLVM 17 and earlier, LLVM's own data layout specifies that i128 has 8-byte alignment,
// while rustc wants it to have 16-byte alignment. This test checks that we handle this
// correctly.

// CHECK: %ScalarPair = type { i32, [3 x i32], i128 }

#![feature(core_intrinsics)]

#[repr(C)]
#[derive(Clone, Copy)]
pub struct ScalarPair {
a: i32,
b: i128,
}
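
// Illustrative sketch, not part of the original test and not checked by FileCheck:
// these const assertions restate the layout described in the header comment, namely
// that rustc gives i128 a 16-byte alignment on x86_64, so `ScalarPair` gets 12 bytes
// of padding after `a` and ends up 32 bytes large. Treat them as assumptions, not as
// assertions the test relies on.
const _: () = assert!(core::mem::align_of::<i128>() == 16);
const _: () = assert!(core::mem::align_of::<ScalarPair>() == 16);
const _: () = assert!(core::mem::size_of::<ScalarPair>() == 32);
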
#[no_mangle]
pub fn load(x: &ScalarPair) -> ScalarPair {
// CHECK-LABEL: @load(
// CHECK-SAME: sret([32 x i8]) align 16
// CHECK-SAME: dereferenceable(32) %_0,
// CHECK-SAME: align 16
// CHECK-SAME: dereferenceable(32) %x
// CHECK: [[A:%.*]] = load i32, ptr %x, align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
// CHECK-NEXT: [[B:%.*]] = load i128, ptr [[GEP]], align 16
// CHECK-NEXT: store i32 [[A]], ptr %_0, align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %_0, i64 16
// CHECK-NEXT: store i128 [[B]], ptr [[GEP]], align 16
// CHECK-NEXT: ret void
*x
}

#[no_mangle]
pub fn store(x: &mut ScalarPair) {
// CHECK-LABEL: @store(
// CHECK-SAME: align 16
// CHECK-SAME: dereferenceable(32) %x
// CHECK: store i32 1, ptr %x, align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
// CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
*x = ScalarPair { a: 1, b: 2 };
}

#[no_mangle]
pub fn alloca() {
// CHECK-LABEL: @alloca(
// CHECK: [[X:%.*]] = alloca [32 x i8], align 16
// CHECK: store i32 1, ptr %x, align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %x, i64 16
// CHECK-NEXT: store i128 2, ptr [[GEP]], align 16
let mut x = ScalarPair { a: 1, b: 2 };
store(&mut x);
}

#[no_mangle]
pub fn load_volatile(x: &ScalarPair) -> ScalarPair {
// CHECK-LABEL: @load_volatile(
// CHECK-SAME: sret([32 x i8]) align 16
// CHECK-SAME: dereferenceable(32) %_0,
// CHECK-SAME: align 16
// CHECK-SAME: dereferenceable(32) %x
// CHECK: [[LOAD:%.*]] = load volatile %ScalarPair, ptr %x, align 16
// CHECK-NEXT: store %ScalarPair [[LOAD]], ptr %_0, align 16
// CHECK-NEXT: ret void
unsafe { std::intrinsics::volatile_load(x) }
}

#[no_mangle]
pub fn transmute(x: ScalarPair) -> (std::mem::MaybeUninit<i128>, i128) {
// CHECK-LABEL: @transmute(
// CHECK-SAME: sret([32 x i8]) align 16
// CHECK-SAME: dereferenceable(32) %_0,
// CHECK-SAME: i32 noundef %x.0, i128 noundef %x.1
// CHECK: store i32 %x.0, ptr %_0, align 16
// CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr %_0, i64 16
// CHECK-NEXT: store i128 %x.1, ptr [[GEP]], align 16
// CHECK-NEXT: ret void
unsafe { std::mem::transmute(x) }
}
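
// Illustrative sketch, not part of the original test: the transmute above is only
// well-formed because `ScalarPair` and `(MaybeUninit<i128>, i128)` have the same
// 32-byte size. This hypothetical assertion just restates that assumption; it is
// not exercised by any CHECK line.
const _: () = assert!(
    core::mem::size_of::<ScalarPair>()
        == core::mem::size_of::<(core::mem::MaybeUninit<i128>, i128)>()
);
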
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Struct {
a: i32,
b: i32,
c: i128,
}

#[no_mangle]
pub fn store_struct(x: &mut Struct) {
// CHECK-LABEL: @store_struct(
// CHECK-SAME: align 16
// CHECK-SAME: dereferenceable(32) %x
// CHECK: [[TMP:%.*]] = alloca [32 x i8], align 16
// CHECK: store i32 1, ptr [[TMP]], align 16
// CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
// CHECK-NEXT: store i32 2, ptr [[GEP1]], align 4
// CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 16
// CHECK-NEXT: store i128 3, ptr [[GEP2]], align 16
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 %x, ptr align 16 [[TMP]], i64 32, i1 false)
*x = Struct { a: 1, b: 2, c: 3 };
}
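
// Illustrative sketch, not part of the original test: with 16-byte i128 alignment,
// `b` fits into what would otherwise be padding after `a`, so `c` still starts at
// offset 16 and `Struct` is 32 bytes, matching the [32 x i8] alloca and the 32-byte
// memcpy checked above. Hypothetical assertions only, not relied on by FileCheck.
const _: () = assert!(core::mem::offset_of!(Struct, c) == 16);
const _: () = assert!(core::mem::size_of::<Struct>() == 32);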