0004-RISCV-Implement-RISCV-ABI-lowering.patch
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb@lowrisc.org>
Subject: [RISCV] Implement RISCV ABI lowering
RISCVABIInfo is implemented in terms of XLen, supporting both RV32 and RV64.
Unfortunately we need to count argument registers in the frontend in order to
determine when to emit signext and zeroext attributes. Integer scalars are
extended according to the sign of their type up to 32 bits and then
sign-extended to XLen when passed in registers, but are anyext when passed on
the stack. This patch
only implements the base integer (soft float) ABIs.
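As a rough sketch of the intended lowering (illustrative only; the new
riscv32-abi.c and riscv64-abi.c tests below are the authoritative checks, and
the function names here are made up):

    int8_t  f(int8_t x);  // emitted as: define signext i8 @f(i8 signext %x)
    uint8_t g(uint8_t x); // emitted as: define zeroext i8 @g(i8 zeroext %x)
    // Once the eight argument GPRs are exhausted, the same scalars are
    // passed on the stack as plain (anyext) i8 values.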
For more information on the RISC-V ABI, see [the ABI
doc](https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md),
my [golden model](https://github.com/lowRISC/riscv-calling-conv-model), and
the [LLVM RISC-V calling convention
patch](https://reviews.llvm.org/D39898#2d1595b4) (specifically the comment
documenting frontend expectations).
Differential Revision: https://reviews.llvm.org/D40023
---
lib/CodeGen/TargetInfo.cpp | 176 +++++++++++++++++
test/CodeGen/riscv32-abi.c | 423 ++++++++++++++++++++++++++++++++++++++++
test/CodeGen/riscv64-abi.c | 422 +++++++++++++++++++++++++++++++++++++++
test/Driver/riscv32-toolchain.c | 47 +++++
test/Driver/riscv64-toolchain.c | 47 +++++
5 files changed, 1115 insertions(+)
create mode 100644 test/CodeGen/riscv32-abi.c
create mode 100644 test/CodeGen/riscv64-abi.c
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 98b72c7a73..aadbd3016b 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -8762,6 +8762,182 @@ static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
return false;
}
+//===----------------------------------------------------------------------===//
+// RISCV ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RISCVABIInfo : public DefaultABIInfo {
+private:
+ unsigned XLen; // Size of the integer ('x') registers in bits.
+ static const int NumArgGPRs = 8;
+
+public:
+ RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
+ : DefaultABIInfo(CGT), XLen(XLen) {}
+
+ // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+ // non-virtual, but computeInfo is virtual, so we override it.
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed,
+ int &ArgGPRsLeft) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ ABIArgInfo extendType(QualType Ty) const;
+};
+} // end anonymous namespace
+
+void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ // IsRetIndirect is true if classifyArgumentType indicated the value should
+ // be passed indirect or if the type size is greater than 2*xlen. e.g. fp128
+ // is passed direct in LLVM IR, relying on the backend lowering code to
+ // rewrite the argument list and pass indirectly on RV32.
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect ||
+ getContext().getTypeSize(RetTy) > (2 * XLen);
+
+ // We must track the number of GPRs used in order to conform to the RISC-V
+ // ABI, as integer scalars passed in registers should have signext/zeroext
+ // when promoted, but are anyext if passed on the stack. As GPR usage is
+ // different for variadic arguments, we must also track whether we are
+ // examining a vararg or not.
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int NumFixedArgs = FI.getNumRequiredArgs();
+
+ int ArgNum = 0;
+ for (auto &ArgInfo : FI.arguments()) {
+ bool IsFixed = ArgNum < NumFixedArgs;
+ ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft);
+ ArgNum++;
+ }
+}
+
+ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+ int &ArgGPRsLeft) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+ bool MustUseStack = false;
+ // Determine the number of GPRs needed to pass the current argument
+ // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
+ // register pairs, so may consume 3 registers.
+ int NeededArgGPRs = 1;
+ if (!IsFixed && NeededAlign == 2 * XLen)
+ NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
+ else if (Size > XLen && Size <= 2 * XLen)
+ NeededArgGPRs = 2;
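+ // For example, a 2*XLen-aligned vararg reached when an odd number of
+ // argument GPRs remain must skip one register so that the value starts in
+ // an even-numbered register, consuming three GPRs in total; that is what
+ // the ArgGPRsLeft % 2 term above accounts for.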
+
+ if (NeededArgGPRs > ArgGPRsLeft) {
+ MustUseStack = true;
+ NeededArgGPRs = ArgGPRsLeft;
+ }
+
+ ArgGPRsLeft -= NeededArgGPRs;
+
+ if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width, unless passed on the
+ // stack.
+ if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
+ return extendType(Ty);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ // Aggregates which are <= 2*XLen will be passed in registers if possible,
+ // so coerce to integers.
+ if (Size <= 2 * XLen) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+
+ // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
+ // required, and a 2-element XLen array if only XLen alignment is required.
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else if (Alignment == 2 * XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), 2 * XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), 2));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft);
+}
+
+Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
+ }
+
+ std::pair<CharUnits, CharUnits> SizeAndAlign =
+ getContext().getTypeInfoInChars(Ty);
+
+ // Arguments bigger than 2*Xlen bytes are passed indirectly.
+ bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
+ SlotSize, /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+ // RV64 ABI requires unsigned 32 bit integers to be sign extended.
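+ // For example, a uint32_t argument is therefore lowered as i32 signext
+ // rather than zeroext on RV64 (see the riscv64-abi.c test below).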
+ if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+ return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
+class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
+ : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen)) {}
+};
+} // namespace
//===----------------------------------------------------------------------===//
// Driver code
diff --git a/test/CodeGen/riscv32-abi.c b/test/CodeGen/riscv32-abi.c
new file mode 100644
index 0000000000..d83a8f5d74
--- /dev/null
+++ b/test/CodeGen/riscv32-abi.c
@@ -0,0 +1,423 @@
+// RUN: %clang_cc1 -triple riscv32 -emit-llvm %s -o - | FileCheck %s
+
+#include <stddef.h>
+#include <stdint.h>
+
+// CHECK-LABEL: define void @f_void()
+void f_void(void) {}
+
+// Scalar arguments and return values smaller than the word size are extended
+// according to the sign of their type, up to 32 bits
+
+// CHECK-LABEL: define zeroext i1 @f_scalar_0(i1 zeroext %x)
+_Bool f_scalar_0(_Bool x) { return x; }
+
+// CHECK-LABEL: define signext i8 @f_scalar_1(i8 signext %x)
+int8_t f_scalar_1(int8_t x) { return x; }
+
+// CHECK-LABEL: define zeroext i8 @f_scalar_2(i8 zeroext %x)
+uint8_t f_scalar_2(uint8_t x) { return x; }
+
+// CHECK-LABEL: define i32 @f_scalar_3(i32 %x)
+int32_t f_scalar_3(int32_t x) { return x; }
+
+// CHECK-LABEL: define i64 @f_scalar_4(i64 %x)
+int64_t f_scalar_4(int64_t x) { return x; }
+
+// CHECK-LABEL: define float @f_fp_scalar_1(float %x)
+float f_fp_scalar_1(float x) { return x; }
+
+// CHECK-LABEL: define double @f_fp_scalar_2(double %x)
+double f_fp_scalar_2(double x) { return x; }
+
+// Scalars larger than 2*xlen are passed/returned indirect according to the
+// ABI. However, the RISC-V LLVM backend handles this itself, so the frontend
+// doesn't need to modify the IR function signature.
+
+// CHECK-LABEL: define fp128 @f_fp_scalar_3(fp128 %x)
+long double f_fp_scalar_3(long double x) { return x; }
+
+// Empty structs or unions are ignored.
+
+struct empty_s {};
+
+// CHECK-LABEL: define void @f_agg_empty_struct()
+struct empty_s f_agg_empty_struct(struct empty_s x) {
+ return x;
+}
+
+union empty_u {};
+
+// CHECK-LABEL: define void @f_agg_empty_union()
+union empty_u f_agg_empty_union(union empty_u x) {
+ return x;
+}
+
+// Aggregates <= 2*xlen may be passed in registers, so will be coerced to
+// integer arguments. The rules for return are the same.
+
+struct tiny {
+ uint8_t a, b, c, d;
+};
+
+// CHECK-LABEL: define void @f_agg_tiny(i32 %x.coerce)
+void f_agg_tiny(struct tiny x) {
+ x.a += x.b;
+ x.c += x.d;
+}
+
+// CHECK-LABEL: define i32 @f_agg_tiny_ret()
+struct tiny f_agg_tiny_ret() {
+ return (struct tiny){1, 2, 3, 4};
+}
+
+typedef uint8_t v4i8 __attribute__((vector_size(4)));
+typedef int32_t v1i32 __attribute__((vector_size(4)));
+
+// CHECK-LABEL: define void @f_vec_tiny_v4i8(i32 %x.coerce)
+void f_vec_tiny_v4i8(v4i8 x) {
+ x[0] = x[1];
+ x[2] = x[3];
+}
+
+// CHECK-LABEL: define i32 @f_vec_tiny_v4i8_ret()
+v4i8 f_vec_tiny_v4i8_ret() {
+ return (v4i8){1, 2, 3, 4};
+}
+
+// CHECK-LABEL: define void @f_vec_tiny_v1i32(i32 %x.coerce)
+void f_vec_tiny_v1i32(v1i32 x) {
+ x[0] = 114;
+}
+
+// CHECK-LABEL: define i32 @f_vec_tiny_v1i32_ret()
+v1i32 f_vec_tiny_v1i32_ret() {
+ return (v1i32){1};
+}
+
+struct small {
+ int32_t a, *b;
+};
+
+// CHECK-LABEL: define void @f_agg_small([2 x i32] %x.coerce)
+void f_agg_small(struct small x) {
+ x.a += *x.b;
+ x.b = &x.a;
+}
+
+// CHECK-LABEL: define [2 x i32] @f_agg_small_ret()
+struct small f_agg_small_ret() {
+ return (struct small){1, 0};
+}
+
+typedef uint8_t v8i8 __attribute__((vector_size(8)));
+typedef int64_t v1i64 __attribute__((vector_size(8)));
+
+// CHECK-LABEL: define void @f_vec_small_v8i8(i64 %x.coerce)
+void f_vec_small_v8i8(v8i8 x) {
+ x[0] = x[7];
+}
+
+// CHECK-LABEL: define i64 @f_vec_small_v8i8_ret()
+v8i8 f_vec_small_v8i8_ret() {
+ return (v8i8){1, 2, 3, 4, 5, 6, 7, 8};
+}
+
+// CHECK-LABEL: define void @f_vec_small_v1i64(i64 %x.coerce)
+void f_vec_small_v1i64(v1i64 x) {
+ x[0] = 114;
+}
+
+// CHECK-LABEL: define i64 @f_vec_small_v1i64_ret()
+v1i64 f_vec_small_v1i64_ret() {
+ return (v1i64){1};
+}
+
+// Aggregates of 2*xlen size and 2*xlen alignment should be coerced to a
+// single 2*xlen-sized argument, to ensure that alignment can be maintained if
+// passed on the stack.
+
+struct small_aligned {
+ int64_t a;
+};
+
+// CHECK-LABEL: define void @f_agg_small_aligned(i64 %x.coerce)
+void f_agg_small_aligned(struct small_aligned x) {
+ x.a += x.a;
+}
+
+// CHECK-LABEL: define i64 @f_agg_small_aligned_ret(i64 %x.coerce)
+struct small_aligned f_agg_small_aligned_ret(struct small_aligned x) {
+ return (struct small_aligned){10};
+}
+
+// Aggregates greater than 2*xlen will be passed and returned indirectly
+struct large {
+ int32_t a, b, c, d;
+};
+
+// CHECK-LABEL: define void @f_agg_large(%struct.large* %x)
+void f_agg_large(struct large x) {
+ x.a = x.b + x.c + x.d;
+}
+
+// The address where the struct should be written to will be the first
+// argument
+// CHECK-LABEL: define void @f_agg_large_ret(%struct.large* noalias sret %agg.result, i32 %i, i8 signext %j)
+struct large f_agg_large_ret(int32_t i, int8_t j) {
+ return (struct large){1, 2, 3, 4};
+}
+
+typedef unsigned char v16i8 __attribute__((vector_size(16)));
+
+// CHECK-LABEL: define void @f_vec_large_v16i8(<16 x i8>*)
+void f_vec_large_v16i8(v16i8 x) {
+ x[0] = x[7];
+}
+
+// CHECK-LABEL: define void @f_vec_large_v16i8_ret(<16 x i8>* noalias sret %agg.result)
+v16i8 f_vec_large_v16i8_ret() {
+ return (v16i8){1, 2, 3, 4, 5, 6, 7, 8};
+}
+
+// Scalars passed on the stack should not have signext/zeroext attributes
+// (they are anyext).
+
+// CHECK-LABEL: define i32 @f_scalar_stack_1(i32 %a.coerce, [2 x i32] %b.coerce, i64 %c.coerce, %struct.large* %d, i8 zeroext %e, i8 signext %f, i8 %g, i8 %h)
+int f_scalar_stack_1(struct tiny a, struct small b, struct small_aligned c,
+ struct large d, uint8_t e, int8_t f, uint8_t g, int8_t h) {
+ return g + h;
+}
+
+// CHECK-LABEL: define i32 @f_scalar_stack_2(i32 %a, i64 %b, float %c, double %d, fp128 %e, i8 zeroext %f, i8 %g, i8 %h)
+int f_scalar_stack_2(int32_t a, int64_t b, float c, double d, long double e,
+ uint8_t f, int8_t g, uint8_t h) {
+ return g + h;
+}
+
+// Ensure that scalars passed on the stack are still determined correctly in
+// the presence of large return values that consume a register due to the need
+// to pass a pointer.
+
+// CHECK-LABEL: define void @f_scalar_stack_3(%struct.large* noalias sret %agg.result, i32 %a, i64 %b, double %c, fp128 %d, i8 zeroext %e, i8 %f, i8 %g)
+struct large f_scalar_stack_3(int32_t a, int64_t b, double c, long double d,
+ uint8_t e, int8_t f, uint8_t g) {
+ return (struct large){a, e, f, g};
+}
+
+// CHECK-LABEL: define fp128 @f_scalar_stack_4(i32 %a, i64 %b, double %c, fp128 %d, i8 zeroext %e, i8 %f, i8 %g)
+long double f_scalar_stack_4(int32_t a, int64_t b, double c, long double d,
+ uint8_t e, int8_t f, uint8_t g) {
+ return d;
+}
+
+// Aggregates and >=XLen scalars passed on the stack should be lowered just as
+// they would be if passed via registers.
+
+// CHECK-LABEL: define void @f_scalar_stack_5(double %a, i64 %b, double %c, i64 %d, i32 %e, i64 %f, float %g, double %h, fp128 %i)
+void f_scalar_stack_5(double a, int64_t b, double c, int64_t d, int e,
+ int64_t f, float g, double h, long double i) {}
+
+// CHECK-LABEL: define void @f_agg_stack(double %a, i64 %b, double %c, i64 %d, i32 %e.coerce, [2 x i32] %f.coerce, i64 %g.coerce, %struct.large* %h)
+void f_agg_stack(double a, int64_t b, double c, int64_t d, struct tiny e,
+ struct small f, struct small_aligned g, struct large h) {}
+
+// Ensure that ABI lowering happens as expected for vararg calls. For RV32
+// with the base integer calling convention there will be no observable
+// differences in the lowered IR for a call with varargs vs without.
+
+int f_va_callee(int, ...);
+
+// CHECK-LABEL: define void @f_va_caller()
+// CHECK: call i32 (i32, ...) @f_va_callee(i32 1, i32 2, i64 3, double 4.000000e+00, double 5.000000e+00, i32 {{%.*}}, [2 x i32] {{%.*}}, i64 {{%.*}}, %struct.large* {{%.*}})
+void f_va_caller() {
+ f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct tiny){6, 7, 8, 9},
+ (struct small){10, NULL}, (struct small_aligned){11},
+ (struct large){12, 13, 14, 15});
+}
+
+// CHECK-LABEL: define i32 @f_va_1(i8* %fmt, ...) {{.*}} {
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
+// CHECK: [[VA:%.*]] = alloca i8*, align 4
+// CHECK: [[V:%.*]] = alloca i32, align 4
+// CHECK: store i8* %fmt, i8** [[FMT_ADDR]], align 4
+// CHECK: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK: call void @llvm.va_start(i8* [[VA1]])
+// CHECK: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 4
+// CHECK: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
+// CHECK: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32*
+// CHECK: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// CHECK: store i32 [[TMP1]], i32* [[V]], align 4
+// CHECK: [[VA2:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK: call void @llvm.va_end(i8* [[VA2]])
+// CHECK: [[TMP2:%.*]] = load i32, i32* [[V]], align 4
+// CHECK: ret i32 [[TMP2]]
+// CHECK: }
+int f_va_1(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ int v = __builtin_va_arg(va, int);
+ __builtin_va_end(va);
+
+ return v;
+}
+
+// An "aligned" register pair (where the first register is even-numbered) is
+// used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the
+// correct offsets are used.
+
+// CHECK-LABEL: @f_va_2(
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
+// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 4
+// CHECK-NEXT: [[V:%.*]] = alloca double, align 8
+// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 4
+// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 7
+// CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -8
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i32 [[TMP2]] to i8*
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i32 8
+// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to double*
+// CHECK-NEXT: [[TMP4:%.*]] = load double, double* [[TMP3]], align 8
+// CHECK-NEXT: store double [[TMP4]], double* [[V]], align 8
+// CHECK-NEXT: [[VA2:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_end(i8* [[VA2]])
+// CHECK-NEXT: [[TMP5:%.*]] = load double, double* [[V]], align 8
+// CHECK-NEXT: ret double [[TMP5]]
+double f_va_2(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ double v = __builtin_va_arg(va, double);
+ __builtin_va_end(va);
+
+ return v;
+}
+
+// Two "aligned" register pairs.
+
+// CHECK-LABEL: @f_va_3(
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
+// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 4
+// CHECK-NEXT: [[V:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[X:%.*]] = alloca double, align 8
+// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 4
+// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 7
+// CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -8
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i32 [[TMP2]] to i8*
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i32 8
+// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to double*
+// CHECK-NEXT: [[TMP4:%.*]] = load double, double* [[TMP3]], align 8
+// CHECK-NEXT: store double [[TMP4]], double* [[V]], align 8
+// CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i32 4
+// CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR2]] to i32*
+// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
+// CHECK-NEXT: store i32 [[TMP6]], i32* [[W]], align 4
+// CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARGP_CUR4]] to i32
+// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 7
+// CHECK-NEXT: [[TMP9:%.*]] = and i32 [[TMP8]], -8
+// CHECK-NEXT: [[ARGP_CUR4_ALIGNED:%.*]] = inttoptr i32 [[TMP9]] to i8*
+// CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4_ALIGNED]], i32 8
+// CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8* [[ARGP_CUR4_ALIGNED]] to double*
+// CHECK-NEXT: [[TMP11:%.*]] = load double, double* [[TMP10]], align 8
+// CHECK-NEXT: store double [[TMP11]], double* [[X]], align 8
+// CHECK-NEXT: [[VA6:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_end(i8* [[VA6]])
+// CHECK-NEXT: [[TMP12:%.*]] = load double, double* [[V]], align 8
+// CHECK-NEXT: [[TMP13:%.*]] = load double, double* [[X]], align 8
+// CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
+// CHECK-NEXT: ret double [[ADD]]
+double f_va_3(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ double v = __builtin_va_arg(va, double);
+ int w = __builtin_va_arg(va, int);
+ double x = __builtin_va_arg(va, double);
+ __builtin_va_end(va);
+
+ return v + x;
+}
+
+// CHECK-LABEL: define i32 @f_va_4(i8* %fmt, ...) {{.*}} {
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
+// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 4
+// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[LD:%.*]] = alloca fp128, align 16
+// CHECK-NEXT: [[TS:%.*]] = alloca [[STRUCT_TINY:%.*]], align 1
+// CHECK-NEXT: [[SS:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 4
+// CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
+// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 4
+// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32*
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// CHECK-NEXT: store i32 [[TMP1]], i32* [[V]], align 4
+// CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i32 4
+// CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[ARGP_CUR2]] to fp128**
+// CHECK-NEXT: [[TMP3:%.*]] = load fp128*, fp128** [[TMP2]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load fp128, fp128* [[TMP3]], align 16
+// CHECK-NEXT: store fp128 [[TMP4]], fp128* [[LD]], align 16
+// CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4]], i32 4
+// CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR4]] to %struct.tiny*
+// CHECK-NEXT: [[TMP6:%.*]] = bitcast %struct.tiny* [[TS]] to i8*
+// CHECK-NEXT: [[TMP7:%.*]] = bitcast %struct.tiny* [[TMP5]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP6]], i8* [[TMP7]], i32 4, i32 1, i1 false)
+// CHECK-NEXT: [[ARGP_CUR6:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[ARGP_NEXT7:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR6]], i32 8
+// CHECK-NEXT: store i8* [[ARGP_NEXT7]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[ARGP_CUR6]] to %struct.small*
+// CHECK-NEXT: [[TMP9:%.*]] = bitcast %struct.small* [[SS]] to i8*
+// CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct.small* [[TMP8]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP9]], i8* [[TMP10]], i32 8, i32 4, i1 false)
+// CHECK-NEXT: [[ARGP_CUR8:%.*]] = load i8*, i8** [[VA]], align 4
+// CHECK-NEXT: [[ARGP_NEXT9:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR8]], i32 4
+// CHECK-NEXT: store i8* [[ARGP_NEXT9]], i8** [[VA]], align 4
+// CHECK-NEXT: [[TMP11:%.*]] = bitcast i8* [[ARGP_CUR8]] to %struct.large**
+// CHECK-NEXT: [[TMP12:%.*]] = load %struct.large*, %struct.large** [[TMP11]], align 4
+// CHECK-NEXT: [[TMP13:%.*]] = bitcast %struct.large* [[LS]] to i8*
+// CHECK-NEXT: [[TMP14:%.*]] = bitcast %struct.large* [[TMP12]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[TMP13]], i8* [[TMP14]], i32 16, i32 4, i1 false)
+// CHECK-NEXT: [[VA10:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_end(i8* [[VA10]])
+int f_va_4(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ int v = __builtin_va_arg(va, int);
+ long double ld = __builtin_va_arg(va, long double);
+ struct tiny ts = __builtin_va_arg(va, struct tiny);
+ struct small ss = __builtin_va_arg(va, struct small);
+ struct large ls = __builtin_va_arg(va, struct large);
+ __builtin_va_end(va);
+
+ int ret = (int)((long double)v + ld);
+ ret = ret + ts.a + ts.b + ts.c + ts.d;
+ ret = ret + ss.a + (int)ss.b;
+ ret = ret + ls.a + ls.b + ls.c + ls.d;
+
+ return ret;
+}
diff --git a/test/CodeGen/riscv64-abi.c b/test/CodeGen/riscv64-abi.c
new file mode 100644
index 0000000000..321c0e3df3
--- /dev/null
+++ b/test/CodeGen/riscv64-abi.c
@@ -0,0 +1,422 @@
+// RUN: %clang_cc1 -triple riscv64 -emit-llvm %s -o - | FileCheck %s
+
+#include <stddef.h>
+#include <stdint.h>
+
+// CHECK-LABEL: define void @f_void()
+void f_void(void) {}
+
+// Scalar arguments and return values smaller than the word size are extended
+// according to the sign of their type, up to 32 bits
+
+// CHECK-LABEL: define zeroext i1 @f_scalar_0(i1 zeroext %x)
+_Bool f_scalar_0(_Bool x) { return x; }
+
+// CHECK-LABEL: define signext i8 @f_scalar_1(i8 signext %x)
+int8_t f_scalar_1(int8_t x) { return x; }
+
+// CHECK-LABEL: define zeroext i8 @f_scalar_2(i8 zeroext %x)
+uint8_t f_scalar_2(uint8_t x) { return x; }
+
+// CHECK-LABEL: define signext i32 @f_scalar_3(i32 signext %x)
+uint32_t f_scalar_3(int32_t x) { return x; }
+
+// CHECK-LABEL: define i64 @f_scalar_4(i64 %x)
+int64_t f_scalar_4(int64_t x) { return x; }
+
+// CHECK-LABEL: define float @f_fp_scalar_1(float %x)
+float f_fp_scalar_1(float x) { return x; }
+
+// CHECK-LABEL: define double @f_fp_scalar_2(double %x)
+double f_fp_scalar_2(double x) { return x; }
+
+// CHECK-LABEL: define fp128 @f_fp_scalar_3(fp128 %x)
+long double f_fp_scalar_3(long double x) { return x; }
+
+// Empty structs or unions are ignored.
+
+struct empty_s {};
+
+// CHECK-LABEL: define void @f_agg_empty_struct()
+struct empty_s f_agg_empty_struct(struct empty_s x) {
+ return x;
+}
+
+union empty_u {};
+
+// CHECK-LABEL: define void @f_agg_empty_union()
+union empty_u f_agg_empty_union(union empty_u x) {
+ return x;
+}
+
+// Aggregates <= 2*xlen may be passed in registers, so will be coerced to
+// integer arguments. The rules for return are the same.
+
+struct tiny {
+ uint16_t a, b, c, d;
+};
+
+// CHECK-LABEL: define void @f_agg_tiny(i64 %x.coerce)
+void f_agg_tiny(struct tiny x) {
+ x.a += x.b;
+ x.c += x.d;
+}
+
+// CHECK-LABEL: define i64 @f_agg_tiny_ret()
+struct tiny f_agg_tiny_ret() {
+ return (struct tiny){1, 2, 3, 4};
+}
+
+typedef uint16_t v4i16 __attribute__((vector_size(8)));
+typedef int64_t v1i64 __attribute__((vector_size(8)));
+
+// CHECK-LABEL: define void @f_vec_tiny_v4i16(i64 %x.coerce)
+void f_vec_tiny_v4i16(v4i16 x) {
+ x[0] = x[1];
+ x[2] = x[3];
+}
+
+// CHECK-LABEL: define i64 @f_vec_tiny_v4i16_ret()
+v4i16 f_vec_tiny_v4i16_ret() {
+ return (v4i16){1, 2, 3, 4};
+}
+
+// CHECK-LABEL: define void @f_vec_tiny_v1i64(i64 %x.coerce)
+void f_vec_tiny_v1i64(v1i64 x) {
+ x[0] = 114;
+}
+
+// CHECK-LABEL: define i64 @f_vec_tiny_v1i64_ret()
+v1i64 f_vec_tiny_v1i64_ret() {
+ return (v1i64){1};
+}
+
+struct small {
+ int64_t a, *b;
+};
+
+// CHECK-LABEL: define void @f_agg_small([2 x i64] %x.coerce)
+void f_agg_small(struct small x) {
+ x.a += *x.b;
+ x.b = &x.a;
+}
+
+// CHECK-LABEL: define [2 x i64] @f_agg_small_ret()
+struct small f_agg_small_ret() {
+ return (struct small){1, 0};
+}
+
+typedef uint16_t v8i16 __attribute__((vector_size(16)));
+typedef __int128_t v1i128 __attribute__((vector_size(16)));
+
+// CHECK-LABEL: define void @f_vec_small_v8i16(i128 %x.coerce)
+void f_vec_small_v8i16(v8i16 x) {
+ x[0] = x[7];
+}
+
+// CHECK-LABEL: define i128 @f_vec_small_v8i16_ret()
+v8i16 f_vec_small_v8i16_ret() {
+ return (v8i16){1, 2, 3, 4, 5, 6, 7, 8};
+}
+
+// CHECK-LABEL: define void @f_vec_small_v1i128(i128 %x.coerce)
+void f_vec_small_v1i128(v1i128 x) {
+ x[0] = 114;
+}
+
+// CHECK-LABEL: define i128 @f_vec_small_v1i128_ret()
+v1i128 f_vec_small_v1i128_ret() {
+ return (v1i128){1};
+}
+
+// Aggregates of 2*xlen size and 2*xlen alignment should be coerced to a
+// single 2*xlen-sized argument, to ensure that alignment can be maintained if
+// passed on the stack.
+
+struct small_aligned {
+ __int128_t a;
+};
+
+// CHECK-LABEL: define void @f_agg_small_aligned(i128 %x.coerce)
+void f_agg_small_aligned(struct small_aligned x) {
+ x.a += x.a;
+}
+
+// CHECK-LABEL: define i128 @f_agg_small_aligned_ret(i128 %x.coerce)
+struct small_aligned f_agg_small_aligned_ret(struct small_aligned x) {
+ return (struct small_aligned){10};
+}
+
+// Aggregates greater than 2*xlen will be passed and returned indirectly
+struct large {
+ int64_t a, b, c, d;
+};
+
+// CHECK-LABEL: define void @f_agg_large(%struct.large* %x)
+void f_agg_large(struct large x) {
+ x.a = x.b + x.c + x.d;
+}
+
+// The address where the struct should be written to will be the first
+// argument
+// CHECK-LABEL: define void @f_agg_large_ret(%struct.large* noalias sret %agg.result, i32 signext %i, i8 signext %j)
+struct large f_agg_large_ret(int32_t i, int8_t j) {
+ return (struct large){1, 2, 3, 4};
+}
+
+typedef unsigned char v32i8 __attribute__((vector_size(32)));
+
+// CHECK-LABEL: define void @f_vec_large_v32i8(<32 x i8>*)
+void f_vec_large_v32i8(v32i8 x) {
+ x[0] = x[7];
+}
+
+// CHECK-LABEL: define void @f_vec_large_v32i8_ret(<32 x i8>* noalias sret %agg.result)
+v32i8 f_vec_large_v32i8_ret() {
+ return (v32i8){1, 2, 3, 4, 5, 6, 7, 8};
+}
+
+// Scalars passed on the stack should not have signext/zeroext attributes
+// (they are anyext).
+
+// CHECK-LABEL: define signext i32 @f_scalar_stack_1(i64 %a.coerce, [2 x i64] %b.coerce, i128 %c.coerce, %struct.large* %d, i8 zeroext %e, i8 signext %f, i8 %g, i8 %h)
+int f_scalar_stack_1(struct tiny a, struct small b, struct small_aligned c,
+ struct large d, uint8_t e, int8_t f, uint8_t g, int8_t h) {
+ return g + h;
+}
+
+// CHECK-LABEL: define signext i32 @f_scalar_stack_2(i32 signext %a, i128 %b, float %c, fp128 %d, <32 x i8>*, i8 zeroext %f, i8 %g, i8 %h)
+int f_scalar_stack_2(int32_t a, __int128_t b, float c, long double d, v32i8 e,
+ uint8_t f, int8_t g, uint8_t h) {
+ return g + h;
+}
+
+// Ensure that scalars passed on the stack are still determined correctly in
+// the presence of large return values that consume a register due to the need
+// to pass a pointer.
+
+// CHECK-LABEL: define void @f_scalar_stack_3(%struct.large* noalias sret %agg.result, i32 signext %a, i128 %b, fp128 %c, <32 x i8>*, i8 zeroext %e, i8 %f, i8 %g)
+struct large f_scalar_stack_3(uint32_t a, __int128_t b, long double c, v32i8 d,
+ uint8_t e, int8_t f, uint8_t g) {
+ return (struct large){a, e, f, g};
+}
+
+// Ensure that ABI lowering happens as expected for vararg calls.
+// Specifically, ensure that signext is emitted for varargs that will be
+// passed in registers but not on the stack. Ensure this takes into account
+// the use of "aligned" register pairs for varargs with 2*xlen alignment.
+
+int f_va_callee(int, ...);
+
+// CHECK-LABEL: define void @f_va_caller()
+void f_va_caller() {
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i64 3, double 4.000000e+00, double 5.000000e+00, i64 {{%.*}}, [2 x i64] {{%.*}}, i128 {{%.*}}, %struct.large* {{%.*}})
+ f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct tiny){6, 7, 8, 9},
+ (struct small){10, NULL}, (struct small_aligned){11},
+ (struct large){12, 13, 14, 15});
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, fp128 0xL00000000000000004001400000000000, i32 signext 6, i32 signext 7, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5.0L, 6, 7, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i128 {{%.*}}, i32 signext 6, i32 signext 7, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, (struct small_aligned){5}, 6, 7, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, [2 x i64] {{%.*}}, i32 signext 6, i32 signext 7, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, (struct small){5, NULL}, 6, 7, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, fp128 0xL00000000000000004001800000000000, i32 7, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5, 6.0L, 7, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i128 {{%.*}}, i32 7, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5, (struct small_aligned){6}, 7, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, [2 x i64] {{%.*}}, i32 signext 7, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5, (struct small){6, NULL}, 7, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i32 signext 6, fp128 0xL00000000000000004001C00000000000, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5, 6, 7.0L, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i32 signext 6, i128 {{%.*}}, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5, 6, (struct small_aligned){7}, 8, 9);
+ // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i32 signext 3, i32 signext 4, i32 signext 5, i32 signext 6, [2 x i64] {{.*}}, i32 8, i32 9)
+ f_va_callee(1, 2, 3, 4, 5, 6, (struct small){7, NULL}, 8, 9);
+}
+
+// CHECK-LABEL: define signext i32 @f_va_1(i8* %fmt, ...) {{.*}} {
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8
+// CHECK: [[VA:%.*]] = alloca i8*, align 8
+// CHECK: [[V:%.*]] = alloca i32, align 4
+// CHECK: store i8* %fmt, i8** [[FMT_ADDR]], align 8
+// CHECK: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK: call void @llvm.va_start(i8* [[VA1]])
+// CHECK: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8
+// CHECK: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i64 8
+// CHECK: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8
+// CHECK: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32*
+// CHECK: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 8
+// CHECK: store i32 [[TMP1]], i32* [[V]], align 4
+// CHECK: [[VA2:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK: call void @llvm.va_end(i8* [[VA2]])
+// CHECK: [[TMP2:%.*]] = load i32, i32* [[V]], align 4
+// CHECK: ret i32 [[TMP2]]
+// CHECK: }
+int f_va_1(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ int v = __builtin_va_arg(va, int);
+ __builtin_va_end(va);
+
+ return v;
+}
+
+// An "aligned" register pair (where the first register is even-numbered) is
+// used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the
+// correct offsets are used.
+
+// CHECK-LABEL: @f_va_2(
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8
+// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 8
+// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
+// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 8
+// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i64
+// CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 15
+// CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -16
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i64 [[TMP2]] to i8*
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i64 16
+// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to fp128*
+// CHECK-NEXT: [[TMP4:%.*]] = load fp128, fp128* [[TMP3]], align 16
+// CHECK-NEXT: store fp128 [[TMP4]], fp128* [[V]], align 16
+// CHECK-NEXT: [[VA2:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_end(i8* [[VA2]])
+// CHECK-NEXT: [[TMP5:%.*]] = load fp128, fp128* [[V]], align 16
+// CHECK-NEXT: ret fp128 [[TMP5]]
+long double f_va_2(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ long double v = __builtin_va_arg(va, long double);
+ __builtin_va_end(va);
+
+ return v;
+}
+
+// Two "aligned" register pairs.
+
+// CHECK-LABEL: @f_va_3(
+// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 8
+// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 8
+// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
+// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[X:%.*]] = alloca fp128, align 16
+// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 8
+// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i64
+// CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 15
+// CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -16
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i64 [[TMP2]] to i8*
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i64 16
+// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to fp128*
+// CHECK-NEXT: [[TMP4:%.*]] = load fp128, fp128* [[TMP3]], align 16
+// CHECK-NEXT: store fp128 [[TMP4]], fp128* [[V]], align 16
+// CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 8
+// CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i64 8
+// CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR2]] to i32*
+// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 8
+// CHECK-NEXT: store i32 [[TMP6]], i32* [[W]], align 4
+// CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARGP_CUR4]] to i64
+// CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 15
+// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -16
+// CHECK-NEXT: [[ARGP_CUR4_ALIGNED:%.*]] = inttoptr i64 [[TMP9]] to i8*
+// CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4_ALIGNED]], i64 16
+// CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 8
+// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8* [[ARGP_CUR4_ALIGNED]] to fp128*
+// CHECK-NEXT: [[TMP11:%.*]] = load fp128, fp128* [[TMP10]], align 16
+// CHECK-NEXT: store fp128 [[TMP11]], fp128* [[X]], align 16
+// CHECK-NEXT: [[VA6:%.*]] = bitcast i8** [[VA]] to i8*
+// CHECK-NEXT: call void @llvm.va_end(i8* [[VA6]])
+// CHECK-NEXT: [[TMP12:%.*]] = load fp128, fp128* [[V]], align 16
+// CHECK-NEXT: [[TMP13:%.*]] = load fp128, fp128* [[X]], align 16
+// CHECK-NEXT: [[ADD:%.*]] = fadd fp128 [[TMP12]], [[TMP13]]
+// CHECK-NEXT: ret fp128 [[ADD]]
+long double f_va_3(char *fmt, ...) {
+ __builtin_va_list va;
+
+ __builtin_va_start(va, fmt);
+ long double v = __builtin_va_arg(va, long double);
+ int w = __builtin_va_arg(va, int);
+ long double x = __builtin_va_arg(va, long double);