From 99ef8b18ac49872e530386129be1517c84dc11fe Mon Sep 17 00:00:00 2001 From: AinsleySnow <772571228@qq.com> Date: Mon, 4 Mar 2024 09:49:28 +0800 Subject: [PATCH] [LLVM][XTHeadVector] Implement intrinsics for `vmerge` and `vmv.v.x/i`. (#72) * [LLVM][XTHeadVector] Define intrinsic functions for vmerge and vmv.v.{x,i}. * [LLVM][XTHeadVector] Define pseudos and pats for vmerge. * [LLVM][XTHeadVector] Add test cases for vmerge. * [LLVM][XTHeadVector] Define policy-free pseudo nodes for vmv.v.{v/x/i}. Define pats for vmv.v.v. * [LLVM][XTHeadVector] Define ISD node for vmv.v.x and map it to pseudo nodes. * [LLVM][XTHeadVector] Select vmv.v.x using logic shared with its 1.0 version. * [LLVM][XTHeadVector] Don't add policy for xthead pseudo nodes. * [LLVM][XTHeadVector] Add test cases for vmv.v.x. * [LLVM][XTHeadVector] Update test cases since now pseudo vmv do not accept policy fields any more. * [NFC][XTHeadVector] Update readme. --- README.md | 1 + .../include/llvm/IR/IntrinsicsRISCVXTHeadV.td | 14 +- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 32 +- llvm/lib/Target/RISCV/RISCVISelLowering.h | 1 + llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 6 +- .../RISCV/RISCVInstrInfoXTHeadVPseudos.td | 82 +- .../RISCV/RISCVInstrInfoXTHeadVVLPatterns.td | 32 + llvm/test/CodeGen/RISCV/rvv0p71/vmerge.ll | 2097 +++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmv-copy.mir | 16 +- llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.v.ll | 677 ++++++ .../CodeGen/RISCV/rvv0p71/vmv.v.x-rv32.ll | 83 + llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x.ll | 609 +++++ 12 files changed, 3613 insertions(+), 37 deletions(-) create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVVLPatterns.td create mode 100644 llvm/test/CodeGen/RISCV/rvv0p71/vmerge.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.v.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x-rv32.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x.ll diff --git a/README.md b/README.md index 55eef8575bc68c..540b0e8b625233 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,7 @@ Any feature not listed below but present in the specification should be consider - (Done) `12.11. Vector Widening Integer Multiply Instructions` - (Done) `12.12. Vector Single-Width Integer Multiply-Add Instructions` - (Done) `12.13. Vector Widening Integer Multiply-Add Instructions` + - (Done) `12.14. Vector Integer Merge and Move Instructions` - (WIP) Clang intrinsics related to the `XTHeadVector` extension: - (WIP) `6. Configuration-Setting and Utility` - (Done) `6.1. Set vl and vtype` diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td index 1cef3ec470b45e..5b0f9d0d70ef99 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td @@ -770,10 +770,10 @@ let TargetPrefix = "riscv" in { defm th_vwmacc : XVTernaryWide; defm th_vwmaccus : XVTernaryWide; defm th_vwmaccsu : XVTernaryWide; -} // TargetPrefix = "riscv" -let TargetPrefix = "riscv" in { // 12.14. 
Vector Integer Merge and Move Instructions + defm th_vmerge : RISCVBinaryWithV0; + // Output: (vector) // Input: (passthru, vector_in, vl) def int_riscv_th_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty], @@ -783,4 +783,14 @@ let TargetPrefix = "riscv" in { [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 2; } + // Output: (vector) + // Input: (passthru, scalar, vl) + def int_riscv_th_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, + LLVMVectorElementType<0>, + llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic { + let ScalarOperand = 1; + let VLOperand = 2; + } } // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 7c66bf96017b72..989c19c8d56cfa 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -3554,7 +3554,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Lo, SDValue Hi, SDValue VL, - SelectionDAG &DAG) { + SelectionDAG &DAG, bool HasVendorXTHeadV) { if (!Passthru) Passthru = DAG.getUNDEF(VT); if (isa(Lo) && isa(Hi)) { @@ -3563,7 +3563,9 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, // If Hi constant is all the same sign bit as Lo, lower this as a custom // node in order to try and match RVV vector/scalar instructions. if ((LoC >> 31) == HiC) - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL); + return DAG.getNode(HasVendorXTHeadV ? + RISCVISD::TH_VMV_V_X_VL : RISCVISD::VMV_V_X_VL, + DL, VT, Passthru, Lo, VL); // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use // vmv.v.x whose EEW = 32 to lower it. @@ -3572,8 +3574,8 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, // TODO: if vl <= min(VLMAX), we can also do this. But we could not // access the subtarget here now. auto InterVec = DAG.getNode( - RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo, - DAG.getRegister(RISCV::X0, MVT::i32)); + HasVendorXTHeadV ? RISCVISD::TH_VMV_V_X_VL : RISCVISD::VMV_V_X_VL, + DL, InterVT, DAG.getUNDEF(InterVT), Lo, DAG.getRegister(RISCV::X0, MVT::i32)); return DAG.getNode(ISD::BITCAST, DL, VT, InterVec); } } @@ -3588,11 +3590,11 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, // of the halves. static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Scalar, SDValue VL, - SelectionDAG &DAG) { + SelectionDAG &DAG, bool HasVendorXTHeadV) { assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!"); SDValue Lo, Hi; std::tie(Lo, Hi) = DAG.SplitScalar(Scalar, DL, MVT::i32, MVT::i32); - return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG); + return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG, HasVendorXTHeadV); } // This function lowers a splat of a scalar operand Splat with the vector @@ -3628,7 +3630,9 @@ static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL, if (isOneConstant(VL) && (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue()))) return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL); - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL); + return DAG.getNode( + Subtarget.hasVendorXTHeadV() ? 
RISCVISD::TH_VMV_V_X_VL : RISCVISD::VMV_V_X_VL, + DL, VT, Passthru, Scalar, VL); } assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 && @@ -3639,7 +3643,8 @@ static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL, DAG.getConstant(0, DL, XLenVT), VL); // Otherwise use the more complicated splatting algorithm. - return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG); + return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, + DAG, Subtarget.hasVendorXTHeadV()); } static MVT getLMUL1VT(MVT VT) { @@ -6637,7 +6642,8 @@ SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op, auto VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second; SDValue Res = - splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG); + splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, + DAG, Subtarget.hasVendorXTHeadV()); return convertFromScalableVector(VecVT, Res, DAG, Subtarget); } @@ -7369,7 +7375,8 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, // We need to convert the scalar to a splat vector. SDValue VL = getVLOperand(Op); assert(VL.getValueType() == XLenVT); - ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG); + ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, + DAG, Subtarget.hasVendorXTHeadV()); return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); } @@ -7483,6 +7490,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(), Op.getOperand(1), DAG.getConstant(0, DL, XLenVT)); case Intrinsic::riscv_vmv_v_x: + case Intrinsic::riscv_th_vmv_v_x: return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), Op.getSimpleValueType(), DL, DAG, Subtarget); @@ -7519,7 +7527,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SDValue Vec = Op.getOperand(1); SDValue VL = getVLOperand(Op); - SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG); + SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, + DAG, Subtarget.hasVendorXTHeadV()); if (Op.getOperand(1).isUndef()) return SplattedVal; SDValue SplattedIdx = @@ -16429,6 +16438,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(TH_SDD) NODE_NAME_CASE(VMV_V_V_VL) NODE_NAME_CASE(VMV_V_X_VL) + NODE_NAME_CASE(TH_VMV_V_X_VL) NODE_NAME_CASE(VFMV_V_F_VL) NODE_NAME_CASE(VMV_X_S) NODE_NAME_CASE(VMV_S_X_VL) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 164ded95a1b5c3..97ceef74368b51 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -151,6 +151,7 @@ enum NodeType : unsigned { // for the VL value to be used for the operation. The first operand is // passthru operand. VMV_V_X_VL, + TH_VMV_V_X_VL, // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand // for the VL value to be used for the operation. The first operand is // passthru operand. 
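For reference, a minimal IR sketch of how the new `int_riscv_th_vmv_v_x` intrinsic and the `TH_VMV_V_X_VL` node are expected to be exercised (the mangled intrinsic suffix and the i64 XLEN type below are assumptions for illustration; the vmv.v.x.ll tests added later in this patch are the authoritative form):

; Splat a scalar GPR into a scalable vector through the new intrinsic.
; This should lower via RISCVISD::TH_VMV_V_X_VL and select a PseudoTH_VMV_V_X_*
; pseudo, i.e. a plain `th.vmv.v.x` with no tail/mask policy operand.
declare <vscale x 8 x i8> @llvm.riscv.th.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8>, i8, i64)

define <vscale x 8 x i8> @splat_byte(i8 %x, i64 %vl) {
entry:
  %v = call <vscale x 8 x i8> @llvm.riscv.th.vmv.v.x.nxv8i8.i64(
      <vscale x 8 x i8> undef,   ; passthru
      i8 %x,                     ; scalar to splat
      i64 %vl)                   ; AVL
  ret <vscale x 8 x i8> %v
}

The absence of a policy operand on the XTHeadVector pseudos is also why the copyPhysReg change in the next hunk only appends the `tu, mu` immediate when not in XTHeadVector mode.
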
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 363d5df254887b..8ab50299818732 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -488,7 +488,8 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB, const MCInstrDesc &Desc = DefMBBI->getDesc(); MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW - MIB.addImm(0); // tu, mu + if (!XTHeadV) + MIB.addImm(0); // tu, mu MIB.addReg(RISCV::VL, RegState::Implicit); MIB.addReg(RISCV::VTYPE, RegState::Implicit); } @@ -522,7 +523,8 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB, const MCInstrDesc &Desc = DefMBBI->getDesc(); MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW - MIB.addImm(0); // tu, mu + if (!XTHeadV) + MIB.addImm(0); // tu, mu MIB.addReg(RISCV::VL, RegState::Implicit); MIB.addReg(RISCV::VTYPE, RegState::Implicit); } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td index 7a8a3796e325a3..daa8e3222ee563 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td @@ -1601,6 +1601,19 @@ class XVPseudoBinaryMaskNoPolicy : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = !interleave([Constraint, "$rd = $merge"], ","); + let HasVLOp = 1; + let HasSEWOp = 1; +} + multiclass XVPseudoBinary("WriteVIMergeV_" # mx); + defvar WriteVIMergeX_MX = !cast("WriteVIMergeX_" # mx); + defvar WriteVIMergeI_MX = !cast("WriteVIMergeI_" # mx); + defvar ReadVIMergeV_MX = !cast("ReadVIMergeV_" # mx); + defvar ReadVIMergeX_MX = !cast("ReadVIMergeX_" # mx); + + def "_VVM" # "_" # m.MX: + VPseudoTiedBinaryCarryIn.R, + m.vrclass, m.vrclass, m, 1, "">, + Sched<[WriteVIMergeV_MX, ReadVIMergeV_MX, ReadVIMergeV_MX, ReadVMask]>; + def "_VXM" # "_" # m.MX: + VPseudoTiedBinaryCarryIn.R, + m.vrclass, GPR, m, 1, "">, + Sched<[WriteVIMergeX_MX, ReadVIMergeV_MX, ReadVIMergeX_MX, ReadVMask]>; + def "_VIM" # "_" # m.MX: + VPseudoTiedBinaryCarryIn.R, + m.vrclass, simm5, m, 1, "">, + Sched<[WriteVIMergeI_MX, ReadVIMergeV_MX, ReadVMask]>; + } +} + multiclass XVPseudoUnaryVMV_V_X_I { foreach m = MxListXTHeadV in { let VLMul = m.value in { @@ -2918,11 +2955,11 @@ multiclass XVPseudoUnaryVMV_V_X_I { defvar ReadVIMovX_MX = !cast("ReadVIMovX_" # mx); let VLMul = m.value in { - def "_V_" # mx : VPseudoUnaryNoMask, + def "_V_" # mx : XVPseudoUnaryNoMask, Sched<[WriteVIMovV_MX, ReadVIMovV_MX]>; - def "_X_" # mx : VPseudoUnaryNoMask, + def "_X_" # mx : XVPseudoUnaryNoMask, Sched<[WriteVIMovX_MX, ReadVIMovX_MX]>; - def "_I_" # mx : VPseudoUnaryNoMask, + def "_I_" # mx : XVPseudoUnaryNoMask, Sched<[WriteVIMovI_MX]>; } } @@ -2930,22 +2967,37 @@ multiclass XVPseudoUnaryVMV_V_X_I { } let Predicates = [HasVendorXTHeadV] in { + defm PseudoTH_VMERGE : XVPseudoVMRG_VM_XM_IM; defm PseudoTH_VMV_V : XVPseudoUnaryVMV_V_X_I; } // Predicates = [HasVendorXTHeadV] -// Patterns for `int_riscv_vmv_v_v` -> `PseudoTH_VMV_V_V_` -foreach vti = AllXVectors in { - let Predicates = GetXVTypePredicates.Predicates in { - // vmv.v.v - def : Pat<(vti.Vector (int_riscv_th_vmv_v_v (vti.Vector vti.RegClass:$passthru), - (vti.Vector vti.RegClass:$rs1), - 
VLOpFrag)), - (!cast("PseudoTH_VMV_V_V_"#vti.LMul.MX) - $passthru, $rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; +let Predicates = [HasVendorXTHeadV] in { + defm : XVPatBinaryV_VM_XM_IM<"int_riscv_th_vmerge", "PseudoTH_VMERGE">; + // Define patterns for vmerge intrinsics with float-point arguments. + foreach vti = AllFloatXVectors in { + let Predicates = GetXVTypePredicates.Predicates in { + defm : VPatBinaryCarryInTAIL<"int_riscv_th_vmerge", "PseudoTH_VMERGE", "VVM", + vti.Vector, + vti.Vector, vti.Vector, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, vti.RegClass>; + } + } - // TODO: vmv.v.x, vmv.v.i + // Patterns for `int_riscv_vmv_v_v` -> `PseudoTH_VMV_V_V_` + foreach vti = AllXVectors in { + let Predicates = GetXVTypePredicates.Predicates in { + // vmv.v.v + def : Pat<(vti.Vector (int_riscv_th_vmv_v_v (vti.Vector vti.RegClass:$passthru), + (vti.Vector vti.RegClass:$rs1), + VLOpFrag)), + (!cast("PseudoTH_VMV_V_V_"#vti.LMul.MX) + $passthru, $rs1, GPR:$vl, vti.Log2SEW)>; + // Patterns for vmv.v.x and vmv.v.i are defined + // in RISCVInstrInfoXTHeadVVLPatterns.td + } } -} +} // Predicates = [HasVendorXTHeadV] //===----------------------------------------------------------------------===// // 12.14. Vector Integer Merge and Move Instructions @@ -2967,3 +3019,5 @@ let Predicates = [HasVendorXTHeadV] in { def PseudoTH_VMV8R_V : XVPseudoWholeMove; } } // Predicates = [HasVendorXTHeadV] + +include "RISCVInstrInfoXTHeadVVLPatterns.td" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVVLPatterns.td new file mode 100644 index 00000000000000..1ac8249db4fd6d --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVVLPatterns.td @@ -0,0 +1,32 @@ +//===-- RISCVInstrInfoXTHeadVVLPatterns.td - RVV VL patterns -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------------===//
+///
+/// This file contains the required infrastructure and VL patterns to support
+/// code generation for the vendor XTHeadVector extension, which is based on
+/// version 0.7.1 of the RISC-V 'V' (Vector) extension.
+///
+/// This file is included from RISCVInstrInfoXTHeadVPseudos.td
+//===---------------------------------------------------------------------------===//
+
+def riscv_th_vmv_v_x_vl : SDNode<"RISCVISD::TH_VMV_V_X_VL",
+                                 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
+                                                      SDTCisSameAs<0, 1>,
+                                                      SDTCisVT<2, XLenVT>,
+                                                      SDTCisVT<3, XLenVT>]>>;
+
+foreach vti = AllIntegerXVectors in {
+  def : Pat<(vti.Vector (riscv_th_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)),
+            (!cast<Instruction>("PseudoTH_VMV_V_X_"#vti.LMul.MX)
+               vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+  defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
+  def : Pat<(vti.Vector (riscv_th_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5),
+                                              VLOpFrag)),
+            (!cast<Instruction>("PseudoTH_VMV_V_I_"#vti.LMul.MX)
+               vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW)>;
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmerge.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmerge.ll
new file mode 100644
index 00000000000000..211ff74947f9ed
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmerge.ll
@@ -0,0 +1,2097 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \
+; RUN:   -verify-machineinstrs | FileCheck %s
+
+declare <vscale x 8 x i8> @llvm.riscv.th.vmerge.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1
+; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vmerge.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.th.vmerge.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT:
th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i8.nxv16i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv32i8.nxv32i8( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32i8.nxv32i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv64i8.nxv64i8( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv64i8.nxv64i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4i16.nxv4i16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i16.nxv4i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i16.nxv8i16( + , + , + , + , + iXLen); + +define 
@intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i16.nxv8i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16i16.nxv16i16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i16.nxv16i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv32i16.nxv32i16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32i16.nxv32i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv2i32.nxv2i32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, 
vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2i32.nxv2i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4i32.nxv4i32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i32.nxv4i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i32.nxv8i32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i32.nxv8i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16i32.nxv16i32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, 
a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i32.nxv16i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv1i64.nxv1i64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv1i64.nxv1i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv2i64.nxv2i64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2i64.nxv2i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4i64.nxv4i64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i64.nxv4i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i64.nxv8i64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) 
nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i64.nxv8i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv32i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32i8.i8( + undef, + %0, + i8 
%1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv64i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv64i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + 
%a = call @llvm.riscv.th.vmerge.nxv16i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv32i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv2i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, 
d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv1i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.th.vmerge.nxv1i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv2i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.th.vmerge.nxv2i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i8.i8( + undef, + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, 
zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i8.i8( + undef, + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32i8.i8( + undef, + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv64i8.i8( + undef, + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i16.i16( + undef, + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i16.i16( + undef, + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + 
+define @intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i16.i16( + undef, + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32i16.i16( + undef, + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2i32.i32( + undef, + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i32.i32( + undef, + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, 
e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i32.i32( + undef, + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16i32.i32( + undef, + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv1i64.i64( + undef, + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2i64.i64( + undef, + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4i64.i64( + undef, + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmerge.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8i64.i64( + undef, + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4f16.nxv4f16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4f16.nxv4f16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8f16.nxv8f16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8f16.nxv8f16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16f16.nxv16f16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16f16.nxv16f16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv32f16.nxv32f16( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv32f16.nxv32f16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv2f32.nxv2f32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2f32.nxv2f32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4f32.nxv4f32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; 
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4f32.nxv4f32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8f32.nxv8f32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8f32.nxv8f32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv16f32.nxv16f32( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv16f32.nxv16f32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv1f64.nxv1f64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv1f64.nxv1f64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare 
@llvm.riscv.th.vmerge.nxv2f64.nxv2f64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv2f64.nxv2f64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv4f64.nxv4f64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv4f64.nxv4f64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmerge.nxv8f64.nxv8f64( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmerge.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmerge.nxv8f64.nxv8f64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv0p71/vmv-copy.mir index b66654523024f4..65d5f3bf58a9f2 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmv-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmv-copy.mir @@ -39,7 +39,7 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x0 = PseudoTH_VSETVLI $x14, 10, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: 
$v28m4 = PseudoTH_VLW_V_E32_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v12m4 = PseudoTH_VMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v12m4 = PseudoTH_VMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
     $x15 = PseudoTH_VSETVLI $x14, 10, implicit-def $vl, implicit-def $vtype
     $v28m4 = PseudoTH_VLW_V_E32_M4 undef $v28m4, killed $x16, $noreg, 5, implicit $vl, implicit $vtype
     $v12m4 = COPY $v28m4
@@ -54,10 +54,10 @@ body: |
     ; CHECK: liveins: $x14
     ; CHECK-NEXT: {{ $}}
     ; CHECK-NEXT: $x0 = PseudoTH_VSETVLI $x14, 10, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v12m4 = PseudoTH_VMV_V_I_M4 undef $v12m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v12m4 = PseudoTH_VMV_V_I_M4 undef $v12m4, 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
     $x15 = PseudoTH_VSETVLI $x14, 10, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5, implicit $vl, implicit $vtype
     $v12m4 = COPY $v28m4
 ...
 ---
@@ -93,7 +93,7 @@ body: |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{ $}}
     ; CHECK-NEXT: $x0 = PseudoTH_VSETVLI $x14, 10, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v4m4, $x0 = PseudoTH_VLEFF_V_E32_M4 undef $v4m4, $x16, $noreg, 5 /* e32 */, implicit-def $vl, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[CSRRS:%[0-9]+]]:gpr = CSRRS 3104, $x0
     ; CHECK-NEXT: [[CSRRS1:%[0-9]+]]:gpr = CSRRS 3105, $x0
@@ -104,7 +104,7 @@ body: |
     ; CHECK-NEXT: $v15 = TH_VMV_V_V $v31, implicit $vtype, implicit $vl
     ; CHECK-NEXT: dead $x0 = TH_VSETVL killed [[CSRRS]], killed [[CSRRS1]], implicit-def $vtype, implicit-def $vl
     $x15 = PseudoTH_VSETVLI $x14, 10, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoTH_VMV_V_I_M4 undef $v28m4, 0, $noreg, 5, implicit $vl, implicit $vtype
     $v4m4,$x0 = PseudoTH_VLEFF_V_E32_M4 undef $v4m4, $x16, $noreg, 5, implicit-def $vl
     $v12m4 = COPY $v28m4
 ...
@@ -164,8 +164,8 @@ body: |
     ; CHECK-NEXT: {{ $}}
     ; CHECK-NEXT: $x0 = PseudoTH_VSETVLI $x14, 8, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v8_v9 = PseudoTH_VLSEG2E_V_E32_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v10 = PseudoTH_VMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v11 = PseudoTH_VMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v10 = PseudoTH_VMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v11 = PseudoTH_VMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
     $x15 = PseudoTH_VSETVLI $x14, 8, implicit-def $vl, implicit-def $vtype
     $v8_v9 = PseudoTH_VLSEG2E_V_E32_M1 undef $v8_v9, killed $x16, $noreg, 5, implicit $vl, implicit $vtype
     $v10_v11 = COPY $v8_v9
diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.v.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.v.ll
new file mode 100644
index 00000000000000..b8e181300bfffe
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.v.ll
@@ -0,0 +1,677 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 8 x i8> @llvm.riscv.th.vmv.v.v.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vmv_v_v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1
+; CHECK-NEXT: th.vmv.v.v v8, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vmv.v.v.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.th.vmv.v.v.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vmv_v_v_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1
+; CHECK-NEXT: th.vmv.v.v v8, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.th.vmv.v.v.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.th.vmv.v.v.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vmv_v_v_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT: th.vsetvl zero, a1, a2
+; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1
+; CHECK-NEXT: th.vmv.v.v v8, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.th.vmv.v.v.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.th.vmv.v.v.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vmv_v_v_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: csrr a2, vtype
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
+;
CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv4i16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv4i16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv8i16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv8i16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv16i16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv16i16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv32i16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv32i16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv2i32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv2i32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv4i32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv4i32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + 
+declare @llvm.riscv.th.vmv.v.v.nxv8i32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv8i32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv16i32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv16i32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv1i64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv1i64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv2i64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv2i64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv4i64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv4i64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv8i64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv8i64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv4f16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, 
vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv8f16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv16f16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv32f16( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv32f16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv2f32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv4f32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv8f32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + 
%a = call @llvm.riscv.th.vmv.v.v.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv16f32( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv1f64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv2f64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv4f64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.v.nxv8f64( + , + , + iXLen); + +define @intrinsic_vmv_v_v_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_v_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmv.v.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.v.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x-rv32.ll new file mode 100644 index 00000000000000..2a9ed94384424c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x-rv32.ll @@ -0,0 +1,83 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: llc -mtriple=riscv32 -mattr=+xtheadvector -verify-machineinstrs \ +; RUN: < %s | FileCheck %s + +declare @llvm.riscv.th.vmv.v.x.nxv1i64( + , + i64, + i32); + +define @intrinsic_vmv_v_x_i_nxv1i64_vlmax() nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv1i64_vlmax: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli a0, zero, e32, m1, d1 
+; CHECK-NEXT: th.vmv.v.i v8, 3 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv1i64( + undef, + i64 12884901891, + i32 -1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv2i64( + , + i64, + i32); + +define @intrinsic_vmv_v_x_i_nxv2i64_vlmax() nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv2i64_vlmax: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli a0, zero, e32, m2, d1 +; CHECK-NEXT: th.vmv.v.i v8, 3 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv2i64( + undef, + i64 12884901891, + i32 -1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv4i64( + , + i64, + i32); + +define @intrinsic_vmv_v_x_i_nxv4i64_vlmax() nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv4i64_vlmax: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli a0, zero, e32, m4, d1 +; CHECK-NEXT: th.vmv.v.i v8, 3 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i64( + undef, + i64 12884901891, + i32 -1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv8i64( + , + i64, + i32); + +define @intrinsic_vmv_v_x_i_nxv8i64_vlmax() nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv8i64_vlmax: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli a0, zero, e32, m8, d1 +; CHECK-NEXT: th.vmv.v.i v8, 3 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i64( + undef, + i64 12884901891, + i32 -1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x.ll new file mode 100644 index 00000000000000..13bbda11e38754 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmv.v.x.ll @@ -0,0 +1,609 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.th.vmv.v.x.nxv8i8( + , + i8, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv8i8(i8 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i8( + undef, + i8 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv16i8( + , + i8, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv16i8(i8 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv16i8( + undef, + i8 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv32i8( + , + i8, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv32i8(i8 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv32i8( + undef, + i8 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv64i8( + , + i8, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv64i8(i8 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv64i8( + undef, 
+ i8 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv4i16( + , + i16, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv4i16(i16 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i16( + undef, + i16 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv8i16( + , + i16, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv8i16(i16 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i16( + undef, + i16 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv16i16( + , + i16, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv16i16(i16 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv16i16( + undef, + i16 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv32i16( + , + i16, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv32i16(i16 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv32i16( + undef, + i16 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv2i32( + , + i32, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv2i32(i32 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv2i32( + undef, + i32 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv4i32( + , + i32, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv4i32(i32 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i32( + undef, + i32 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv8i32( + , + i32, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv8i32(i32 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i32( + undef, + i32 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv16i32( + , + i32, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv16i32(i32 %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_x_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vmv.v.x v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv16i32( + undef, + i32 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv1i64( + , + i64, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv1i64(i64 %0, iXLen %1) nounwind { +; RV32-LABEL: intrinsic_vmv_v_x_x_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; 
RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m1, d1 +; RV32-NEXT: th.vlse.v v8, (a0), zero +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv_v_x_x_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; RV64-NEXT: th.vmv.v.x v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv1i64( + undef, + i64 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv2i64( + , + i64, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv2i64(i64 %0, iXLen %1) nounwind { +; RV32-LABEL: intrinsic_vmv_v_x_x_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m2, d1 +; RV32-NEXT: th.vlse.v v8, (a0), zero +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv_v_x_x_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; RV64-NEXT: th.vmv.v.x v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv2i64( + undef, + i64 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv4i64( + , + i64, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv4i64(i64 %0, iXLen %1) nounwind { +; RV32-LABEL: intrinsic_vmv_v_x_x_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m4, d1 +; RV32-NEXT: th.vlse.v v8, (a0), zero +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv_v_x_x_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; RV64-NEXT: th.vmv.v.x v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i64( + undef, + i64 %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.th.vmv.v.x.nxv8i64( + , + i64, + iXLen); + +define @intrinsic_vmv_v_x_x_nxv8i64(i64 %0, iXLen %1) nounwind { +; RV32-LABEL: intrinsic_vmv_v_x_x_nxv8i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m8, d1 +; RV32-NEXT: th.vlse.v v8, (a0), zero +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv_v_x_x_nxv8i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; RV64-NEXT: th.vmv.v.x v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i64( + undef, + i64 %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv8i8(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i8( + undef, + i8 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv16i8(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv16i8( + undef, + i8 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv32i8(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv32i8( + undef, + i8 9, + iXLen %0) + + 
ret %a +} + +define @intrinsic_vmv_v_x_i_nxv64i8(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv64i8( + undef, + i8 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv4i16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i16( + undef, + i16 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv8i16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i16( + undef, + i16 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv16i16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv16i16( + undef, + i16 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv32i16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv32i16( + undef, + i16 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv2i32(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv2i32( + undef, + i32 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv4i32(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i32( + undef, + i32 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv8i32(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i32( + undef, + i32 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv16i32(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv16i32( + undef, + i32 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv1i64(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv1i64( + undef, + i64 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv2i64(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vmv.v.x.nxv2i64( + undef, + i64 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv4i64(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv4i64( + undef, + i64 9, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv_v_x_i_nxv8i64(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv_v_x_i_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmv.v.i v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmv.v.x.nxv8i64( + undef, + i64 9, + iXLen %0) + + ret %a +}