[GlobalISel] Fold G_ICMP if possible
This patch folds `G_ICMP` to a constant when both of its operands are constant. The fold lives in `CSEMIRBuilder::buildInstr`, backed by a new `ConstantFoldICmp` helper in GlobalISel's Utils, and the `LegalizerHelper` lowerings that emit an intermediate `G_ICMP` are updated accordingly.
shiltian committed Mar 28, 2024
1 parent 96c8e2e commit f462e85
Showing 9 changed files with 226 additions and 65 deletions.
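As a sketch of the effect (the builder setup and values here are illustrative, not part of the patch): building a G_ICMP whose operands are G_CONSTANTs through the CSE-aware builder now yields the folded constant directly.

  // Minimal sketch, assuming a CSEMIRBuilder `Builder` configured for the
  // current MachineFunction; the constants are made up for illustration.
  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);
  auto LHS = Builder.buildConstant(S32, 3); // %lhs:_(s32) = G_CONSTANT i32 3
  auto RHS = Builder.buildConstant(S32, 5); // %rhs:_(s32) = G_CONSTANT i32 5
  // With this patch, Res is defined by G_CONSTANT i1 true rather than by a
  // G_ICMP intpred(slt) instruction.
  auto Res = Builder.buildICmp(CmpInst::ICMP_SLT, S1, LHS, RHS);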
4 changes: 4 additions & 0 deletions llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -315,6 +315,10 @@ std::optional<SmallVector<unsigned>>
ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
                       std::function<unsigned(APInt)> CB);

std::optional<SmallVector<APInt>>
ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                 const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
14 changes: 14 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -174,6 +174,20 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_ICMP: {
    assert(SrcOps.size() == 3 && "Invalid sources");
    assert(DstOps.size() == 1 && "Invalid dsts");
    LLT SrcTy = SrcOps[1].getLLTTy(*getMRI());

    if (std::optional<SmallVector<APInt>> Cst =
            ConstantFoldICmp(SrcOps[0].getPredicate(), SrcOps[1].getReg(),
                             SrcOps[2].getReg(), *getMRI())) {
      if (SrcTy.isVector())
        return buildBuildVectorConstant(DstOps[0], *Cst);
      return buildConstant(DstOps[0], Cst->front());
    }
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_AND:
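For vectors, the fold proceeds lane by lane through G_BUILD_VECTOR sources and the result is materialized with buildBuildVectorConstant. A hedged sketch, with the same hypothetical Builder as above and illustrative values:

  LLT S32 = LLT::scalar(32);
  LLT V2S32 = LLT::fixed_vector(2, 32);
  LLT V2S1 = LLT::fixed_vector(2, 1);
  Register One = Builder.buildConstant(S32, 1).getReg(0);
  Register Two = Builder.buildConstant(S32, 2).getReg(0);
  auto LHS = Builder.buildBuildVector(V2S32, {One, Two});
  auto RHS = Builder.buildBuildVector(V2S32, {Two, Two});
  // Folds to a G_BUILD_VECTOR of i1 constants: <true, false> for ult.
  auto Res = Builder.buildICmp(CmpInst::ICMP_ULT, V2S1, LHS, RHS);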
23 changes: 17 additions & 6 deletions llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3768,9 +3768,13 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
  }
  case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
    auto [OldValRes, SuccessRes, Addr, CmpVal, NewVal] = MI.getFirst5Regs();
    MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
    Register NewOldValRes = MRI.cloneVirtualRegister(OldValRes);
    Register NewSuccessRes = MRI.cloneVirtualRegister(SuccessRes);
    MIRBuilder.buildAtomicCmpXchg(NewOldValRes, Addr, CmpVal, NewVal,
                                  **MI.memoperands_begin());
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, NewSuccessRes, NewOldValRes, CmpVal);
    MIRBuilder.buildCopy(OldValRes, NewOldValRes);
    MIRBuilder.buildCopy(SuccessRes, NewSuccessRes);
    MI.eraseFromParent();
    return Legalized;
  }
@@ -7657,10 +7661,13 @@ LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) {
  LLT Ty = Dst0Ty;
  LLT BoolTy = Dst1Ty;

  Register NewDst0 = MRI.cloneVirtualRegister(Dst0);
  Register NewDst1 = MRI.cloneVirtualRegister(Dst1);

  if (IsAdd)
    MIRBuilder.buildAdd(Dst0, LHS, RHS);
    MIRBuilder.buildAdd(NewDst0, LHS, RHS);
  else
    MIRBuilder.buildSub(Dst0, LHS, RHS);
    MIRBuilder.buildSub(NewDst0, LHS, RHS);

  // TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow.

@@ -7673,12 +7680,16 @@ LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) {
  // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
  // otherwise there will be overflow.
  auto ResultLowerThanLHS =
      MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, Dst0, LHS);
      MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, NewDst0, LHS);
  auto ConditionRHS = MIRBuilder.buildICmp(
      IsAdd ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGT, BoolTy, RHS, Zero);

  MIRBuilder.buildXor(Dst1, ConditionRHS, ResultLowerThanLHS);
  MIRBuilder.buildXor(NewDst1, ConditionRHS, ResultLowerThanLHS);

  MIRBuilder.buildCopy(Dst0, NewDst0);
  MIRBuilder.buildCopy(Dst1, NewDst1);
  MI.eraseFromParent();

  return Legalized;
}

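A plausible reading of the clone-and-copy changes above (the commit message does not spell it out): the lowerings now write their intermediate results to fresh registers and copy into the original destinations at the end, so the original result registers keep a straightforward definition even when the builder folds or CSEs one of the intermediate instructions; the extra COPYs in the updated tests below are the visible cost. The idiom, reduced to a sketch with illustrative names:

  Register NewDst = MRI.cloneVirtualRegister(Dst); // same type as Dst
  // The compare may come back folded to a G_CONSTANT or as a CSE hit;
  // either way the lowering no longer defines Dst directly.
  MIRBuilder.buildICmp(CmpInst::ICMP_EQ, NewDst, LHSReg, RHSReg);
  MIRBuilder.buildCopy(Dst, NewDst); // re-establish the original def
  MI.eraseFromParent();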
68 changes: 68 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -997,6 +997,74 @@ llvm::ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
  return std::nullopt;
}

std::optional<SmallVector<APInt>>
llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                       const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Op1);
  if (Ty != MRI.getType(Op2))
    return std::nullopt;

  auto TryFoldScalar = [&MRI, Pred](Register LHS,
                                    Register RHS) -> std::optional<APInt> {
    auto LHSCst = getIConstantVRegVal(LHS, MRI);
    auto RHSCst = getIConstantVRegVal(RHS, MRI);
    if (!LHSCst || !RHSCst)
      return std::nullopt;

    switch (Pred) {
    case CmpInst::Predicate::ICMP_EQ:
      return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
    case CmpInst::Predicate::ICMP_NE:
      return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
    case CmpInst::Predicate::ICMP_UGT:
      return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
    case CmpInst::Predicate::ICMP_UGE:
      return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
    case CmpInst::Predicate::ICMP_ULT:
      return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
    case CmpInst::Predicate::ICMP_ULE:
      return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
    case CmpInst::Predicate::ICMP_SGT:
      return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
    case CmpInst::Predicate::ICMP_SGE:
      return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
    case CmpInst::Predicate::ICMP_SLT:
      return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
    case CmpInst::Predicate::ICMP_SLE:
      return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
    default:
      return std::nullopt;
    }
  };

  SmallVector<APInt> FoldedICmps;

  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
    auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
    if (!BV1 || !BV2)
      return std::nullopt;
    assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
    for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
      if (auto MaybeFold =
              TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
        FoldedICmps.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedICmps;
  }

  if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
    FoldedICmps.emplace_back(*MaybeCst);
    return FoldedICmps;
  }

  return std::nullopt;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
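A short usage sketch for the new helper (LHSReg and RHSReg are hypothetical registers assumed to be defined by G_CONSTANTs in MRI):

  if (std::optional<SmallVector<APInt>> Res =
          ConstantFoldICmp(CmpInst::ICMP_SLT, LHSReg, RHSReg, MRI)) {
    // A scalar compare yields a single 1-bit APInt; a vector compare
    // yields one APInt per lane.
    for (const APInt &Bit : *Res)
      dbgs() << (Bit.isOne() ? "true " : "false ");
  }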
llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
@@ -16,7 +16,9 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s1) = COPY [[ICMP]](s1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[COPY4]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -40,7 +42,9 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p0), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32))
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s1) = COPY [[ICMP]](s1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[COPY4]](s1)
%0:_(p0) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -63,7 +67,9 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[COPY1]]
; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s1) = COPY [[ICMP]](s1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[COPY4]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
@@ -87,7 +93,9 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY2]](s64), [[COPY1]](s64)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s64), [[COPY1]]
; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s64)
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s1) = COPY [[ICMP]](s1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[COPY4]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = COPY $vgpr4_vgpr5
@@ -110,7 +118,9 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr3_vgpr4
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[COPY1]]
; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s1) = COPY [[ICMP]](s1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[COPY4]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s64) = COPY $vgpr1_vgpr2
%2:_(s64) = COPY $vgpr3_vgpr4
27 changes: 18 additions & 9 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
@@ -21,9 +21,10 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SEXT_INREG2]](s32), [[COPY2]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s1) = COPY [[XOR]](s1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C1]]
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY3]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -56,7 +57,8 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SEXT_INREG2]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s1) = COPY [[XOR]](s1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY2]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[ADD]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -86,8 +88,10 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[ADD]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s1) = COPY [[XOR]](s1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY3]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +121,10 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s1) = COPY [[XOR]](s1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY3]](s1)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +178,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +367,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +411,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3