29 changes: 20 additions & 9 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2580,8 +2580,15 @@ static SDValue getAllOnesMask(MVT VecVT, SDValue VL, const SDLoc &DL,
return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
}

static SDValue getVLOp(uint64_t NumElts, const SDLoc &DL, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
static SDValue getVLOp(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL,
SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
// If we know the exact VLEN, our VL is exactly equal to VLMAX, and
// we can't encode the AVL as an immediate, use the VLMAX encoding.
const auto [MinVLMAX, MaxVLMAX] =
RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
if (MinVLMAX == MaxVLMAX && NumElts == MinVLMAX && NumElts > 31)
return DAG.getRegister(RISCV::X0, Subtarget.getXLenVT());

return DAG.getConstant(NumElts, DL, Subtarget.getXLenVT());
}
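
The early return above is the crux of this change: when the exact VLEN is known, an AVL that happens to equal VLMAX but is too large for vsetivli's 5-bit immediate can use the `vsetvli rd, x0, ...` (VLMAX) form instead of a `li` + `vsetvli` pair. A minimal standalone sketch of that decision, assuming a hypothetical helper `pickAVLEncoding` with LMUL expressed in eighths (this is not LLVM's API, just a model of the logic):

```cpp
// Standalone sketch (not LLVM's API) of the AVL-selection logic added to
// getVLOp above. pickAVLEncoding/AVLEncoding are hypothetical names.
#include <cassert>
#include <cstdint>
#include <iostream>

enum class AVLEncoding { Immediate, Register, VLMax };

// VLMAX for an RVV container: (VLEN / SEW) * LMUL. LMUL is given in eighths
// so fractional LMULs work (e.g. mf2 == 4, m4 == 32).
uint64_t computeVLMax(uint64_t VLenBits, uint64_t SEW, uint64_t LMulEighths) {
  return (VLenBits / SEW) * LMulEighths / 8;
}

AVLEncoding pickAVLEncoding(uint64_t NumElts, uint64_t ExactVLen, uint64_t SEW,
                            uint64_t LMulEighths) {
  // vsetivli carries the AVL as a 5-bit unsigned immediate, so 0..31 is free.
  if (NumElts <= 31)
    return AVLEncoding::Immediate;
  // With an exactly known VLEN, an AVL equal to VLMAX can use the
  // "vsetvli rd, x0, ..." form instead of materializing the constant.
  if (ExactVLen && NumElts == computeVLMax(ExactVLen, SEW, LMulEighths))
    return AVLEncoding::VLMax;
  // Otherwise the AVL must go through a scratch register (li + vsetvli).
  return AVLEncoding::Register;
}

int main() {
  // 64 elements at e8/m4 with VLEN known to be 128: 128/8*4 == 64 == AVL.
  assert(pickAVLEncoding(64, 128, 8, 32) == AVLEncoding::VLMax);
  // The same AVL with unknown VLEN still needs a register.
  assert(pickAVLEncoding(64, 0, 8, 32) == AVLEncoding::Register);
  // Small AVLs fit vsetivli's immediate.
  assert(pickAVLEncoding(8, 128, 8, 4) == AVLEncoding::Immediate);
  std::cout << "ok\n";
  return 0;
}
```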

@@ -2598,7 +2605,7 @@ static std::pair<SDValue, SDValue>
getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL,
SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
SDValue VL = getVLOp(NumElts, DL, DAG, Subtarget);
SDValue VL = getVLOp(NumElts, ContainerVT, DL, DAG, Subtarget);
SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
return {Mask, VL};
}
@@ -8650,7 +8657,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
MVT VT = Op->getSimpleValueType(0);
MVT ContainerVT = getContainerForFixedLengthVector(VT);

SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
Subtarget);
SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
auto *Load = cast<MemIntrinsicSDNode>(Op);
SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
@@ -8785,7 +8793,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
MVT VT = Op->getOperand(2).getSimpleValueType();
MVT ContainerVT = getContainerForFixedLengthVector(VT);

SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
Subtarget);
SDValue IntID = DAG.getTargetConstant(VssegInts[NF - 2], DL, XLenVT);
SDValue Ptr = Op->getOperand(NF + 2);

@@ -9244,7 +9253,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
// Set the vector length to only the number of elements we care about. Note
// that for slideup this includes the offset.
unsigned EndIndex = OrigIdx + SubVecVT.getVectorNumElements();
SDValue VL = getVLOp(EndIndex, DL, DAG, Subtarget);
SDValue VL = getVLOp(EndIndex, ContainerVT, DL, DAG, Subtarget);

// Use tail agnostic policy if we're inserting over Vec's tail.
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
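
As a side note on the surrounding (pre-existing) logic: for a slideup the VL must cover the destination offset plus the subvector length, since the slid-up elements land at OrigIdx onwards. A toy illustration of that bookkeeping in plain C++ (hypothetical values, not tied to this diff):

```cpp
// Toy model of why the slideup VL is OrigIdx + SubVecNumElts: the inserted
// elements occupy [OrigIdx, EndIndex) and everything past EndIndex is tail.
#include <array>
#include <cstdio>

int main() {
  std::array<int, 8> Vec    = {0, 1, 2, 3, 4, 5, 6, 7}; // destination vector
  std::array<int, 2> SubVec = {90, 91};                 // inserted at index 4
  const unsigned OrigIdx  = 4;
  const unsigned EndIndex = OrigIdx + SubVec.size();    // VL = 6

  // [0, OrigIdx) is left undisturbed, [OrigIdx, EndIndex) takes the
  // subvector, and [EndIndex, 8) is the tail governed by the policy.
  for (unsigned I = OrigIdx; I < EndIndex; ++I)
    Vec[I] = SubVec[I - OrigIdx];

  for (int V : Vec)
    std::printf("%d ", V); // prints: 0 1 2 3 90 91 6 7
  std::printf("\n");
  return 0;
}
```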
@@ -9421,7 +9430,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
// Set the vector length to only the number of elements we care about. This
// avoids sliding down elements we're going to discard straight away.
SDValue VL = getVLOp(SubVecVT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue VL = getVLOp(SubVecVT.getVectorNumElements(), ContainerVT, DL, DAG,
Subtarget);
SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
SDValue Slidedown =
getVSlidedown(DAG, Subtarget, DL, ContainerVT,
@@ -9828,7 +9838,7 @@ RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
MVT XLenVT = Subtarget.getXLenVT();
MVT ContainerVT = getContainerForFixedLengthVector(VT);

SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG, Subtarget);

bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
SDValue IntID = DAG.getTargetConstant(
@@ -9872,7 +9882,8 @@ RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,

MVT ContainerVT = getContainerForFixedLengthVector(VT);

SDValue VL = getVLOp(VT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
Subtarget);

SDValue NewValue =
convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
264 changes: 174 additions & 90 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -284,48 +284,76 @@ define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
}

define void @extract_v8i1_v64i1_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_v64i1_0:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
; CHECK-V-LABEL: extract_v8i1_v64i1_0:
; CHECK-V: # %bb.0:
; CHECK-V-NEXT: li a2, 64
; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-V-NEXT: vlm.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vsm.v v8, (a1)
; CHECK-V-NEXT: ret
;
; CHECK-KNOWNVLEN128-LABEL: extract_v8i1_v64i1_0:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vlm.v v8, (a0)
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0)
store <8 x i1> %c, ptr %y
ret void
}
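
For reference, the KNOWNVLEN128 prefix appears to correspond to a run line that pins VLEN to exactly 128 bits. For the e8/m4 mask load above, VLMAX = 128 / 8 * 4 = 64, which matches the AVL of 64 and exceeds vsetivli's 5-bit limit of 31; that is why `li a2, 64` disappears and the `vsetvli a2, zero, e8, m4` form is used. A short sketch spelling out that arithmetic (constants are illustrative):

```cpp
// Worked numbers for extract_v8i1_v64i1_0 under the CHECK-KNOWNVLEN128 run
// (assumed VLEN of exactly 128 bits).
#include <cstdint>

constexpr uint64_t VLen = 128; // exact VLEN assumed for this run line
constexpr uint64_t SEW  = 8;   // e8
constexpr uint64_t LMul = 4;   // m4
constexpr uint64_t AVL  = 64;  // 64 x i1 mask load

constexpr uint64_t VLMax = VLen / SEW * LMul; // 128 / 8 * 4 == 64
static_assert(VLMax == AVL, "AVL is exactly VLMAX");
static_assert(AVL > 31, "too large for vsetivli's 5-bit immediate");
// Hence "vsetvli a2, zero, e8, m4, ta, ma" instead of "li a2, 64" + vsetvli.

int main() { return 0; }
```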

define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_v64i1_8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
; CHECK-V-LABEL: extract_v8i1_v64i1_8:
; CHECK-V: # %bb.0:
; CHECK-V-NEXT: li a2, 64
; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-V-NEXT: vlm.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vsm.v v8, (a1)
; CHECK-V-NEXT: ret
;
; CHECK-KNOWNVLEN128-LABEL: extract_v8i1_v64i1_8:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vlm.v v8, (a0)
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 1
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8)
store <8 x i1> %c, ptr %y
ret void
}

define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_v64i1_48:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
; CHECK-V-LABEL: extract_v8i1_v64i1_48:
; CHECK-V: # %bb.0:
; CHECK-V-NEXT: li a2, 64
; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-V-NEXT: vlm.v v8, (a0)
; CHECK-V-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 6
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vsm.v v8, (a1)
; CHECK-V-NEXT: ret
;
; CHECK-KNOWNVLEN128-LABEL: extract_v8i1_v64i1_48:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vlm.v v8, (a0)
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 6
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48)
store <8 x i1> %c, ptr %y
@@ -407,79 +435,138 @@ define void @extract_v8i1_nxv64i1_192(<vscale x 64 x i1> %x, ptr %y) {
}

define void @extract_v2i1_v64i1_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_v64i1_0:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v9, v8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
; CHECK-V-LABEL: extract_v2i1_v64i1_0:
; CHECK-V: # %bb.0:
; CHECK-V-NEXT: li a2, 64
; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-V-NEXT: vlm.v v0, (a0)
; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-V-NEXT: vmv.v.i v8, 0
; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vmv.v.i v9, 0
; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-V-NEXT: vmv.v.v v9, v8
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vmsne.vi v8, v9, 0
; CHECK-V-NEXT: vsm.v v8, (a1)
; CHECK-V-NEXT: ret
;
; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_v64i1_0:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0)
store <2 x i1> %c, ptr %y
ret void
}

define void @extract_v2i1_v64i1_2(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_v64i1_2:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v9, v8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
; CHECK-V-LABEL: extract_v2i1_v64i1_2:
; CHECK-V: # %bb.0:
; CHECK-V-NEXT: li a2, 64
; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-V-NEXT: vlm.v v0, (a0)
; CHECK-V-NEXT: vmv.v.i v8, 0
; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-V-NEXT: vsetivli zero, 2, e8, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 2
; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-V-NEXT: vmsne.vi v0, v8, 0
; CHECK-V-NEXT: vmv.v.i v8, 0
; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vmv.v.i v9, 0
; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-V-NEXT: vmv.v.v v9, v8
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vmsne.vi v8, v9, 0
; CHECK-V-NEXT: vsm.v v8, (a1)
; CHECK-V-NEXT: ret
;
; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_v64i1_2:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 2
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2)
store <2 x i1> %c, ptr %y
ret void
}

define void @extract_v2i1_v64i1_42(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_v64i1_42:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 64
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v9, v8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
; CHECK-V-LABEL: extract_v2i1_v64i1_42:
; CHECK-V: # %bb.0:
; CHECK-V-NEXT: li a2, 64
; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-V-NEXT: vlm.v v0, (a0)
; CHECK-V-NEXT: vmv.v.i v8, 0
; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-V-NEXT: li a0, 42
; CHECK-V-NEXT: vsetivli zero, 2, e8, m4, ta, ma
; CHECK-V-NEXT: vslidedown.vx v8, v8, a0
; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-V-NEXT: vmsne.vi v0, v8, 0
; CHECK-V-NEXT: vmv.v.i v8, 0
; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vmv.v.i v9, 0
; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-V-NEXT: vmv.v.v v9, v8
; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-V-NEXT: vmsne.vi v8, v9, 0
; CHECK-V-NEXT: vsm.v v8, (a1)
; CHECK-V-NEXT: ret
;
; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_v64i1_42:
; CHECK-KNOWNVLEN128: # %bb.0:
; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-KNOWNVLEN128-NEXT: li a0, 42
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m4, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vslidedown.vx v8, v8, a0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
; CHECK-KNOWNVLEN128-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42)
store <2 x i1> %c, ptr %y
@@ -660,6 +747,3 @@ declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %vec, i64 %i

declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-KNOWNVLEN128: {{.*}}
; CHECK-V: {{.*}}