@@ -41,7 +41,7 @@ define void @floor() {
   call <16 x double> @llvm.floor.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.floor.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -86,7 +86,7 @@ define void @ceil() {
   call <16 x double> @llvm.ceil.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.ceil.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -131,7 +131,7 @@ define void @trunc() {
   call <16 x double> @llvm.trunc.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.trunc.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -176,7 +176,7 @@ define void @rint() {
   call <16 x double> @llvm.rint.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.rint.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -221,7 +221,7 @@ define void @nearbyint() {
   call <16 x double> @llvm.nearbyint.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.nearbyint.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -266,7 +266,7 @@ define void @round() {
   call <16 x double> @llvm.round.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.round.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -311,7 +311,7 @@ define void @roundeven() {
   call <16 x double> @llvm.roundeven.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.roundeven.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -352,7 +352,7 @@ define void @vp_ceil() {
   call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.ceil.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -393,7 +393,7 @@ define void @vp_floor() {
   call <16 x double> @llvm.vp.floor.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.floor.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -434,7 +434,7 @@ define void @vp_round() {
   call <16 x double> @llvm.vp.round.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.round.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -475,7 +475,7 @@ define void @vp_roundeven() {
   call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.roundeven.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -516,7 +516,7 @@ define void @vp_roundtozero() {
   call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.roundtozero.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -557,7 +557,7 @@ define void @vp_rint() {
   call <16 x double> @llvm.vp.rint.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.rint.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -598,7 +598,7 @@ define void @vp_nearbyint() {
   call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.nearbyint.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -620,7 +620,7 @@ declare <8 x double> @llvm.floor.v8f64(<8 x double>)
 declare <16 x double> @llvm.floor.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.floor.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.ceil.f32(float)
@@ -640,7 +640,7 @@ declare <8 x double> @llvm.ceil.v8f64(<8 x double>)
 declare <16 x double> @llvm.ceil.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.ceil.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.trunc.f32(float)
@@ -660,7 +660,7 @@ declare <8 x double> @llvm.trunc.v8f64(<8 x double>)
 declare <16 x double> @llvm.trunc.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.trunc.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.rint.f32(float)
@@ -680,7 +680,7 @@ declare <8 x double> @llvm.rint.v8f64(<8 x double>)
 declare <16 x double> @llvm.rint.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.rint.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.nearbyint.f32(float)
@@ -700,7 +700,7 @@ declare <8 x double> @llvm.nearbyint.v8f64(<8 x double>)
 declare <16 x double> @llvm.nearbyint.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.nearbyint.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.round.f32(float)
@@ -720,7 +720,7 @@ declare <8 x double> @llvm.round.v8f64(<8 x double>)
 declare <16 x double> @llvm.round.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.round.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.roundeven.f32(float)
@@ -740,7 +740,7 @@ declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)
 declare <16 x double> @llvm.roundeven.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.roundeven.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double>)
 
 declare <2 x float> @llvm.vp.ceil.v2f32(<2 x float>, <2 x i1>, i32)
@@ -759,7 +759,7 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.ceil.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.floor.v2f32(<2 x float>, <2 x i1>, i32)
@@ -778,7 +778,7 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.floor.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.round.v2f32(<2 x float>, <2 x i1>, i32)
@@ -797,7 +797,7 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.round.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.roundeven.v2f32(<2 x float>, <2 x i1>, i32)
@@ -816,7 +816,7 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundeven.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float>, <2 x i1>, i32)
@@ -835,7 +835,7 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.rint.v2f32(<2 x float>, <2 x i1>, i32)
@@ -854,7 +854,7 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.rint.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float>, <2 x i1>, i32)
@@ -873,5 +873,5 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)