diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 567f1b812c180..707a1c72b5b7c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2278,17 +2278,24 @@ def : GCNPat <
   (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
 >;
 
-class ZExt_i64_i1_Pat <SDNode ext> : GCNPat <
-  (i64 (ext i1:$src)),
-  (REG_SEQUENCE VReg_64,
-    (V_CNDMASK_B32_e64 /*src0mod*/(i32 0), /*src0*/(i32 0),
-                       /*src1mod*/(i32 0), /*src1*/(i32 1), $src),
-    sub0, (S_MOV_B32 (i32 0)), sub1)
->;
+multiclass ZExt_i64_i1_Pat <SDNode ext> {
+  def: GCNPat <
+    (i64 (ext i1:$src)),
+    (REG_SEQUENCE VReg_64,
+      (V_CNDMASK_B32_e64 /*src0mod*/(i32 0), /*src0*/(i32 0),
+                         /*src1mod*/(i32 0), /*src1*/(i32 1), $src),
+      sub0, (S_MOV_B32 (i32 0)), sub1)
+  >;
+
+  def : GCNPat <
+    (i64 (UniformUnaryFrag<ext> SCC)),
+    (S_CSELECT_B64 (i64 1), (i64 0))
+  >;
+}
 
-def : ZExt_i64_i1_Pat<zext>;
-def : ZExt_i64_i1_Pat<anyext>;
+defm : ZExt_i64_i1_Pat<zext>;
+defm : ZExt_i64_i1_Pat<anyext>;
 
 // FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
 // REG_SEQUENCE patterns don't support instructions with multiple outputs.
diff --git a/llvm/test/CodeGen/AMDGPU/saddo.ll b/llvm/test/CodeGen/AMDGPU/saddo.ll
index cb3166d7a20d3..f6f3e47c3be7a 100644
--- a/llvm/test/CodeGen/AMDGPU/saddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddo.ll
@@ -29,10 +29,12 @@ define amdgpu_kernel void @saddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %b)
 ; SI-NEXT:    s_mov_b32 s0, s4
 ; SI-NEXT:    s_mov_b32 s1, s5
 ; SI-NEXT:    s_xor_b64 s[4:5], s[6:7], vcc
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v0
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; SI-NEXT:    s_add_u32 s4, s10, s4
+; SI-NEXT:    s_addc_u32 s5, s11, s5
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -45,15 +47,17 @@ define amdgpu_kernel void @saddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %b)
 ; VI-NEXT:    s_add_u32 s2, s6, s0
 ; VI-NEXT:    v_mov_b32_e32 v2, s7
 ; VI-NEXT:    s_addc_u32 s3, s7, s1
-; VI-NEXT:    v_cmp_lt_i64_e64 s[8:9], s[0:1], 0
 ; VI-NEXT:    v_cmp_lt_i64_e32 vcc, s[2:3], v[1:2]
-; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    s_xor_b64 s[0:1], s[8:9], vcc
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s2, v2
+; VI-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[0:1], 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; VI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; VI-NEXT:    s_add_u32 s0, s2, s0
+; VI-NEXT:    s_addc_u32 s1, s3, s1
+; VI-NEXT:    v_mov_b32_e32 v3, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
 ;
@@ -67,13 +71,15 @@ define amdgpu_kernel void @saddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %b)
 ; GFX9-NEXT:    s_add_u32 s0, s6, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    s_addc_u32 s1, s7, s3
-; GFX9-NEXT:    v_cmp_lt_i64_e64 s[8:9], s[2:3], 0
 ; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT:    v_cmp_lt_i64_e64 s[2:3], s[2:3], 0
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], vcc
+; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; GFX9-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX9-NEXT:    s_add_u32 s0, s0, s2
+; GFX9-NEXT:    s_addc_u32 s1, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    s_xor_b64 s[2:3], s[8:9], vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX9-NEXT:    s_endpgm
 ;
@@ -87,11 +93,14 @@ define amdgpu_kernel void @saddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %b)
 ; GFX10-NEXT:    s_add_u32 s0, s6, s2
 ; GFX10-NEXT:    s_addc_u32 s1, s7, s3
 ; GFX10-NEXT:    v_cmp_lt_i64_e64 s2, s[2:3], 0
-; GFX10-NEXT:    v_cmp_lt_i64_e64 s3, s[0:1], s[6:7]
-; GFX10-NEXT:    s_xor_b32 s2, s2, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT:    v_add_co_u32 v0, s0, s0, v0
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s0, s1, 0, s0
+; GFX10-NEXT:    v_cmp_lt_i64_e64 s6, s[0:1], s[6:7]
+; GFX10-NEXT:    s_xor_b32 s2, s2, s6
+; GFX10-NEXT:    s_and_b32 s2, s2, exec_lo
+; GFX10-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX10-NEXT:    s_add_u32 s0, s0, s2
+; GFX10-NEXT:    s_addc_u32 s1, s1, s3
+; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
@@ -100,18 +109,20 @@ define amdgpu_kernel void @saddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %b)
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x34
-; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_add_u32 s2, s6, s0
 ; GFX11-NEXT:    s_addc_u32 s3, s7, s1
 ; GFX11-NEXT:    v_cmp_lt_i64_e64 s0, s[0:1], 0
-; GFX11-NEXT:    v_cmp_lt_i64_e64 s1, s[2:3], s[6:7]
+; GFX11-NEXT:    v_cmp_lt_i64_e64 s6, s[2:3], s[6:7]
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_xor_b32 s0, s0, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v0, s0, s2, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s3, 0, s0
+; GFX11-NEXT:    s_xor_b32 s0, s0, s6
+; GFX11-NEXT:    s_and_b32 s0, s0, exec_lo
+; GFX11-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    s_add_u32 s0, s2, s0
+; GFX11-NEXT:    s_addc_u32 s1, s3, s1
+; GFX11-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
index 4363db2351e7a..0ebf3f5198203 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -7,21 +7,23 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; SI-LABEL: s_uaddo_i64_zext:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_add_u32 s0, s6, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_addc_u32 s1, s7, s1
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; SI-NEXT:    s_and_b64 s[6:7], vcc, exec
+; SI-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; SI-NEXT:    s_add_u32 s6, s0, s6
+; SI-NEXT:    s_addc_u32 s7, s1, s7
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s0, s4
 ; SI-NEXT:    s_mov_b32 s1, s5
-; SI-NEXT:    s_add_u32 s4, s6, s8
 ; SI-NEXT:    v_mov_b32_e32 v0, s6
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_addc_u32 s5, s7, s9
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -30,17 +32,19 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s6
 ; VI-NEXT:    s_add_u32 s0, s6, s0
-; VI-NEXT:    v_mov_b32_e32 v3, s7
 ; VI-NEXT:    s_addc_u32 s1, s7, s1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v2, s7
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[1:2]
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_and_b64 s[2:3], vcc, exec
+; VI-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; VI-NEXT:    s_add_u32 s0, s0, s2
+; VI-NEXT:    s_addc_u32 s1, s1, s3
+; VI-NEXT:    v_mov_b32_e32 v3, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
 ;
@@ -52,13 +56,15 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    s_add_u32 s0, s6, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    s_addc_u32 s1, s7, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT:    s_and_b64 s[2:3], vcc, exec
+; GFX9-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX9-NEXT:    s_add_u32 s0, s0, s2
+; GFX9-NEXT:    s_addc_u32 s1, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX9-NEXT:    s_endpgm
   %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
index 37b5be3b672f2..ade0616137b17 100644
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -8,21 +8,23 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; SI-LABEL: s_usubo_i64_zext:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sub_u32 s0, s6, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_subb_u32 s1, s7, s1
+; SI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; SI-NEXT:    s_and_b64 s[6:7], vcc, exec
+; SI-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; SI-NEXT:    s_add_u32 s6, s0, s6
+; SI-NEXT:    s_addc_u32 s7, s1, s7
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s0, s4
 ; SI-NEXT:    s_mov_b32 s1, s5
-; SI-NEXT:    s_sub_u32 s4, s6, s8
 ; SI-NEXT:    v_mov_b32_e32 v0, s6
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_subb_u32 s5, s7, s9
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -31,17 +33,19 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s6
 ; VI-NEXT:    s_sub_u32 s0, s6, s0
-; VI-NEXT:    v_mov_b32_e32 v3, s7
 ; VI-NEXT:    s_subb_u32 s1, s7, s1
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v2, s7
+; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[1:2]
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_and_b64 s[2:3], vcc, exec
+; VI-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; VI-NEXT:    s_add_u32 s0, s0, s2
+; VI-NEXT:    s_addc_u32 s1, s1, s3
+; VI-NEXT:    v_mov_b32_e32 v3, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
 ;
@@ -53,13 +57,15 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    s_sub_u32 s0, s6, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    s_subb_u32 s1, s7, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT:    s_and_b64 s[2:3], vcc, exec
+; GFX9-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX9-NEXT:    s_add_u32 s0, s0, s2
+; GFX9-NEXT:    s_addc_u32 s1, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX9-NEXT:    s_endpgm
   %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
diff --git a/llvm/test/CodeGen/AMDGPU/zero_extend.ll b/llvm/test/CodeGen/AMDGPU/zero_extend.ll
index 1f532f2706de7..9933cdc18e5fd 100644
--- a/llvm/test/CodeGen/AMDGPU/zero_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/zero_extend.ll
@@ -38,7 +38,7 @@ define amdgpu_kernel void @s_arg_zext_i1_to_i64(ptr addrspace(1) %out, i1 zeroex
 ; GCN-LABEL: {{^}}s_cmp_zext_i1_to_i64:
 ; GCN-DAG: s_mov_b32 s{{[0-9]+}}, 0
 ; GCN-DAG: s_cmp_eq_u32
-; GCN: v_cndmask_b32
+; GCN: s_cselect_b64 s[{{[0-9]+:[0-9]+}}], 1, 0
 define amdgpu_kernel void @s_cmp_zext_i1_to_i64(ptr addrspace(1) %out, i32 %a, i32 %b) #0 {
   %cmp = icmp eq i32 %a, %b
   %ext = zext i1 %cmp to i64