Commit e543650 (parent 4163136)
[AArch64][SVE] Detect MOV (imm, pred, zeroing/merging) (llvm#116032)
Add patterns to fold MOV (scalar, predicated) to MOV (imm, pred, merging) or MOV (imm, pred, zeroing) as appropriate.

This affects the `@llvm.aarch64.sve.dup` intrinsics, which currently generate MOV (scalar, predicated) instructions even when the immediate forms are possible. For example:

```c
svuint8_t mov_z_b(svbool_t p) {
  return svdup_u8_z(p, 1);
}
```

currently generates:

```asm
mov_z_b(__SVBool_t):
        mov z0.b, #0
        mov w8, #1
        mov z0.b, p0/m, w8
        ret
```

instead of:

```asm
mov_z_b(__SVBool_t):
        mov z0.b, p0/z, #1
        ret
```
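The merging form benefits in the same way; a minimal counterpart to the example above (this mirrors the `mov_m_b` case in the new test added below, so the expected output is taken from there):

```c
#include <arm_sve.h>

// Merging: inactive lanes keep their value from zd instead of being zeroed.
svuint8_t mov_m_b(svuint8_t zd, svbool_t p) {
  return svdup_u8_m(zd, p, 1);
}
```

With the new patterns this selects a single `mov z0.b, p0/m, #1` rather than materialising the immediate in a scalar register first.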

File tree

3 files changed (+105 -4):

- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td (+2 -2)
- llvm/lib/Target/AArch64/SVEInstrFormats.td (+20 -2)
- new test file (+83)

llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td (+2 -2)

```diff
@@ -839,8 +839,8 @@ let Predicates = [HasSVEorSME] in {
   defm DUPM_ZI : sve_int_dup_mask_imm<"dupm">;
 
   // Splat immediate (predicated)
-  defm CPY_ZPmI : sve_int_dup_imm_pred_merge<"cpy">;
-  defm CPY_ZPzI : sve_int_dup_imm_pred_zero<"cpy">;
+  defm CPY_ZPmI : sve_int_dup_imm_pred_merge<"cpy", AArch64dup_mt>;
+  defm CPY_ZPzI : sve_int_dup_imm_pred_zero<"cpy", AArch64dup_mt>;
   defm FCPY_ZPmI : sve_int_dup_fpimm_pred<"fcpy">;
 
   // Splat scalar register (unpredicated, GPR or vector + element index)
```
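For context, `AArch64dup_mt` is the existing DAG node that `@llvm.aarch64.sve.dup` lowers to (`AArch64ISD::DUP_MERGE_PASSTHRU`); passing it into the multiclasses is what lets them attach the new immediate patterns below. A rough sketch of its declaration elsewhere in the AArch64 backend (the type-profile name `SDT_AArch64DUP_PRED` is quoted from memory and may differ):

```tablegen
// Predicated splat: (AArch64dup_mt pg, scalar, passthru).
def AArch64dup_mt : SDNode<"AArch64ISD::DUP_MERGE_PASSTHRU", SDT_AArch64DUP_PRED>;
```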

llvm/lib/Target/AArch64/SVEInstrFormats.td (+20 -2)

```diff
@@ -5357,7 +5357,7 @@ multiclass sve_int_dup_imm_pred_merge_inst<
             (!cast<Instruction>(NAME) $Zd, $Pg, $imm, $shift)>;
 }
 
-multiclass sve_int_dup_imm_pred_merge<string asm> {
+multiclass sve_int_dup_imm_pred_merge<string asm, SDPatternOperator op> {
   defm _B : sve_int_dup_imm_pred_merge_inst<0b00, asm, ZPR8, cpy_imm8_opt_lsl_i8,
                                             nxv16i8, nxv16i1, i32, SVECpyDupImm8Pat>;
   defm _H : sve_int_dup_imm_pred_merge_inst<0b01, asm, ZPR16, cpy_imm8_opt_lsl_i16,
@@ -5386,6 +5386,15 @@ multiclass sve_int_dup_imm_pred_merge<string asm> {
             (!cast<Instruction>(NAME # _D) $Zd, $Pg, 0, 0)>;
   def : Pat<(vselect PPRAny:$Pg, (SVEDup0), (nxv2f64 ZPR:$Zd)),
             (!cast<Instruction>(NAME # _D) $Zd, $Pg, 0, 0)>;
+
+  def : Pat<(nxv16i8 (op nxv16i1:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), nxv16i8:$zd)),
+            (!cast<Instruction>(NAME # _B) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (op nxv8i1:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), nxv8i16:$zd)),
+            (!cast<Instruction>(NAME # _H) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (op nxv4i1:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), nxv4i32:$zd)),
+            (!cast<Instruction>(NAME # _S) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (op nxv2i1:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), nxv2i64:$zd)),
+            (!cast<Instruction>(NAME # _D) $zd, $pg, $a, $b)>;
 }
 
 multiclass sve_int_dup_imm_pred_zero_inst<
@@ -5407,7 +5416,7 @@ multiclass sve_int_dup_imm_pred_zero_inst<
             (!cast<Instruction>(NAME) $Pg, $imm, $shift)>;
 }
 
-multiclass sve_int_dup_imm_pred_zero<string asm> {
+multiclass sve_int_dup_imm_pred_zero<string asm, SDPatternOperator op> {
   defm _B : sve_int_dup_imm_pred_zero_inst<0b00, asm, ZPR8, cpy_imm8_opt_lsl_i8,
                                            nxv16i8, nxv16i1, i32, SVECpyDupImm8Pat>;
   defm _H : sve_int_dup_imm_pred_zero_inst<0b01, asm, ZPR16, cpy_imm8_opt_lsl_i16,
@@ -5416,6 +5425,15 @@ multiclass sve_int_dup_imm_pred_zero<string asm> {
                                            nxv4i32, nxv4i1, i32, SVECpyDupImm32Pat>;
   defm _D : sve_int_dup_imm_pred_zero_inst<0b11, asm, ZPR64, cpy_imm8_opt_lsl_i64,
                                            nxv2i64, nxv2i1, i64, SVECpyDupImm64Pat>;
+
+  def : Pat<(nxv16i8 (op nxv16i1:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _B) $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (op nxv8i1:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _H) $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (op nxv4i1:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _S) $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (op nxv2i1:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _D) $pg, $a, $b)>;
 }
 
 //===----------------------------------------------------------------------===//
```
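The `SVECpyDupImm*Pat` helpers match any immediate expressible as an 8-bit value with an optional left shift by 8 (the `cpy_imm8_opt_lsl_*` operand forms), so shifted immediates should fold as well. A hedged sketch in the style of the new test below, not part of this commit's test file (the exact CHECK output may differ):

```llvm
; Splat of 256 = 0x100, i.e. #1 with LSL #8, should still use the immediate
; form of CPY_ZPzI rather than a scalar-register move.
define <vscale x 8 x i16> @mov_z_h_shifted(<vscale x 8 x i1> %pg) {
  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 256)
  ret <vscale x 8 x i16> %r
}
```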
New test file (+83 -0)

```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; Zeroing.

define <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
; CHECK-LABEL: mov_z_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 1)
  ret <vscale x 16 x i8> %r
}

define <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
; CHECK-LABEL: mov_z_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 1)
  ret <vscale x 8 x i16> %r
}

define <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
; CHECK-LABEL: mov_z_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 1)
  ret <vscale x 4 x i32> %r
}

define <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
; CHECK-LABEL: mov_z_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 1)
  ret <vscale x 2 x i64> %r
}

; Merging.

define <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: mov_m_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.b, p0/m, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i8 1)
  ret <vscale x 16 x i8> %r
}

define <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: mov_m_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.h, p0/m, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i16 1)
  ret <vscale x 8 x i16> %r
}

define <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
; CHECK-LABEL: mov_m_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.s, p0/m, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 1)
  ret <vscale x 4 x i32> %r
}

define <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: mov_m_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.d, p0/m, #1 // =0x1
; CHECK-NEXT:    ret
  %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg, i64 1)
  ret <vscale x 2 x i64> %r
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
```
