Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[RISCV] Ensure a valid vtype during copyPhysReg #118252

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -421,6 +421,36 @@ void RISCVInstrInfo::copyPhysRegVector(
auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), ActualDstReg);
bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_I;
bool UseVMV = UseVMV_V_I || RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_V;

// Address https://github.com/llvm/llvm-project/issues/114518
// Make sure each whole RVVReg move has valid vtype.
unsigned Opcode = MIB->getOpcode();
if (UseVMV || Opcode == RISCV::VMV1R_V || Opcode == RISCV::VMV2R_V ||
Opcode == RISCV::VMV4R_V || Opcode == RISCV::VMV8R_V) {

// TODO: Data-flow analysis for vtype status could help avoid the
// redundant one.
bool NeedVSETIVLI = true;

for (auto &CurrMI : MBB) {
unsigned CurrMIOpcode = CurrMI.getOpcode();
if (CurrMIOpcode == RISCV::PseudoVSETIVLI ||
CurrMIOpcode == RISCV::PseudoVSETVLI ||
CurrMIOpcode == RISCV::PseudoVSETVLIX0)
NeedVSETIVLI = false;
else if (CurrMI.isInlineAsm())
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we only check for inline assembly? Don't we need to handle calls?

NeedVSETIVLI = true;
else if (NeedVSETIVLI && &CurrMI == &*MIB) {
BuildMI(MBB, &*MIB, MIB->getDebugLoc(), get(RISCV::PseudoVSETIVLI))
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What if there's a vsetvli in an earlier basic block and a later instruction in this basic block that is using the vtype from that vsetvli? Won't this new vsetvli invalidate that?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, yes. In this case, it will invalidate the vtype coming from the predecessor basic block. Maybe we could check the RVV instructions that follow in the same basic block and have no explicit vsetvli, to confirm that a vtype set in another basic block is actually still live before deciding not to insert one.

.addReg(RISCV::X0, RegState::Define | RegState::Dead)
.addImm(0)
.addImm(RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, 32, false,
false));
break;
}
}
}

if (UseVMV)
MIB.addReg(ActualDstReg, RegState::Undef);
if (UseVMV_V_I)
Expand Down
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ define <vscale x 1 x i8> @constraint_vd(<vscale x 1 x i8> %0, <vscale x 1 x i8>
define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1) nounwind {
; RV32I-LABEL: constraint_vm:
; RV32I: # %bb.0:
; RV32I-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32I-NEXT: vmv1r.v v9, v0
; RV32I-NEXT: vmv1r.v v0, v8
; RV32I-NEXT: #APP
Expand All @@ -54,6 +55,7 @@ define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1>
;
; RV64I-LABEL: constraint_vm:
; RV64I: # %bb.0:
; RV64I-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64I-NEXT: vmv1r.v v9, v0
; RV64I-NEXT: vmv1r.v v0, v8
; RV64I-NEXT: #APP
Expand Down
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -567,6 +567,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
Expand All @@ -590,6 +591,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
Expand Down
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -3075,6 +3075,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
Expand Down Expand Up @@ -3121,6 +3122,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a3, a3, 3
Expand Down Expand Up @@ -3158,6 +3160,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16:
; CHECK-ZVBB: # %bb.0:
; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 1
Expand All @@ -3174,6 +3177,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t
Expand Down
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1584,6 +1584,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
Expand All @@ -1609,6 +1610,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
Expand All @@ -1631,6 +1633,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv64i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v24, v0
; CHECK-ZVKB-NEXT: csrr a1, vlenb
; CHECK-ZVKB-NEXT: srli a2, a1, 1
Expand All @@ -1647,6 +1650,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-ZVKB-NEXT: # %bb.1:
; CHECK-ZVKB-NEXT: mv a0, a1
; CHECK-ZVKB-NEXT: .LBB32_2:
; CHECK-ZVKB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v0, v24
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vrev8.v v8, v8, v0.t
Expand Down
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
Original file line number Diff line number Diff line change
Expand Up @@ -336,6 +336,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: li a3, 2
; RV32-NEXT: vs8r.v v16, (a1)
; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v8, v0
; RV32-NEXT: vmv8r.v v16, v24
; RV32-NEXT: call ext2
Expand Down Expand Up @@ -374,6 +375,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV64-NEXT: add a1, a3, a1
; RV64-NEXT: li a3, 2
; RV64-NEXT: vs8r.v v16, (a1)
; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v8, v0
; RV64-NEXT: vmv8r.v v16, v24
; RV64-NEXT: call ext2
Expand Down Expand Up @@ -451,6 +453,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 128
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v0
; RV32-NEXT: call ext3
; RV32-NEXT: addi sp, s0, -144
Expand Down Expand Up @@ -523,6 +526,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 128
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v0
; RV64-NEXT: call ext3
; RV64-NEXT: addi sp, s0, -144
Expand Down
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call callee_tuple_return
; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
Expand All @@ -119,6 +120,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call callee_tuple_return
; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
Expand All @@ -144,6 +146,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
Expand All @@ -160,6 +163,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
Expand Down
22 changes: 22 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.ceil.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
Expand Down Expand Up @@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.ceil.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
Expand Down Expand Up @@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.ceil.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
Expand Down Expand Up @@ -279,6 +282,7 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
Expand Down Expand Up @@ -317,6 +321,7 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB10_2:
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v24, v0.t
Expand Down Expand Up @@ -582,6 +587,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
Expand Down Expand Up @@ -649,6 +655,7 @@ declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
Expand All @@ -668,6 +675,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
Expand Down Expand Up @@ -735,6 +743,7 @@ declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv16f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
Expand All @@ -754,6 +763,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
Expand Down Expand Up @@ -821,6 +831,7 @@ declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv32f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
Expand All @@ -846,6 +857,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
Expand Down Expand Up @@ -884,6 +896,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
Expand Down Expand Up @@ -1068,6 +1081,7 @@ declare <vscale x 4 x float> @llvm.vp.ceil.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
Expand Down Expand Up @@ -1112,6 +1126,7 @@ declare <vscale x 8 x float> @llvm.vp.ceil.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
Expand Down Expand Up @@ -1156,6 +1171,7 @@ declare <vscale x 16 x float> @llvm.vp.ceil.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
Expand Down Expand Up @@ -1242,6 +1258,7 @@ declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
Expand Down Expand Up @@ -1286,6 +1303,7 @@ declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
Expand Down Expand Up @@ -1330,6 +1348,7 @@ declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv7f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
Expand Down Expand Up @@ -1374,6 +1393,7 @@ declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
Expand Down Expand Up @@ -1425,6 +1445,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
Expand Down Expand Up @@ -1458,6 +1479,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
Expand Down
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/compressstore.ll
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,7 @@ entry:
define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
; RV64-LABEL: test_compresstore_v256i8:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v7, v8
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
Expand Down Expand Up @@ -230,6 +231,7 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: sub sp, sp, a2
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: li a2, 128
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
Expand Down
Loading