[AArch64][SVE] Add intrinsics for gather loads with 32-bit offsets
This patch adds intrinsics for SVE gather loads for which the offsets are 32 bits wide and are:

* unscaled
  * @llvm.aarch64.sve.ld1.gather.sxtw
  * @llvm.aarch64.sve.ld1.gather.uxtw
* scaled (offsets become indices)
  * @llvm.aarch64.sve.ld1.gather.sxtw.index
  * @llvm.aarch64.sve.ld1.gather.uxtw.index

The offsets are either zero-extended (uxtw) or sign-extended (sxtw) to 64 bits.

These intrinsics map 1-1 to the corresponding SVE instructions (examples for half-words):

* unscaled
  * ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
  * ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
* scaled
  * ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
  * ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]

Committed on behalf of Andrzej Warzynski (andwar)

Reviewers: sdesmalen, kmclaughlin, eli.friedman, rengolin, rovka, huntergr, dancgr, mgudim, efriedma

Reviewed By: sdesmalen

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70782
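As a quick orientation before the diffs, here is a minimal IR sketch of how one unscaled and one scaled variant are declared and called. It only uses declarations exercised by the tests added in this commit; the function name @gather_example and the way the two results are combined are illustrative, not part of the patch.

; Minimal sketch: one unscaled and one scaled gather, both with unsigned
; (uxtw) 32-bit offsets that are zero-extended to 64 bits by the instruction.
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)

define <vscale x 4 x i16> @gather_example(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %off) {
  ; base + byte offsets        -> ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
  %unscaled = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %off)
  ; base + half-word indices   -> ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
  %scaled = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %off)
  ; Combine the two loads just so both calls are used.
  %sum = add <vscale x 4 x i16> %unscaled, %scaled
  ret <vscale x 4 x i16> %sum
}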
1 parent 8dd17a1, commit 8bf31e2. Showing 7 changed files with 573 additions and 46 deletions.
llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll (198 additions, 0 deletions)
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1H, LD1W, LD1D: base + 32-bit scaled offset, sign (sxtw) or zero (uxtw)
; extended to 64 bits
;   e.g. ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
;

; LD1H
define <vscale x 4 x i32> @gld1h_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_uxtw_index:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
            i16* %base,
            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1h_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_sxtw_index:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
            i16* %base,
            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_uxtw_index:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
            i16* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_sxtw_index:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
            i16* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_uxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_index:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
            i32* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @gld1w_s_sxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_index:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
            i32* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_uxtw_index:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
            i32* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_sxtw_index:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
            i32* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_uxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_index_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
            float* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 4 x float> @gld1w_s_sxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_index_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
            float* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_s_uxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_s_uxtw_index:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
            i64* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x i64> @gld1d_sxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_sxtw_index:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
            i64* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_uxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_uxtw_index_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
            double* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

define <vscale x 2 x double> @gld1d_sxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_sxtw_index_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
            double* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

; LD1H
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)

declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)

; LD1W
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)

declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll (259 additions, 0 deletions)
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
;

; LD1B
define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_uxtw:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
            i8* %base,
            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_sxtw:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
            i8* %base,
            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d_uxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
            i8* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d_sxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
            i8* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_uxtw:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
            i16* %base,
            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_sxtw:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
            i16* %base,
            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_uxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
            i16* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_sxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
            i16* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
            i32* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
            i32* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_uxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
            i32* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_sxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
            i32* %base,
            <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
            float* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 4 x float> @gld1w_s_sxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
            float* %base,
            <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_uxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
            i64* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_sxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
            i64* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_uxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_uxtw_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
            double* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_sxtw_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
            double* %base,
            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

; LD1B
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)

; LD1H
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)

; LD1W
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)