[GlobalISel] Handle div-by-pow2
This patch adds handling of div-by-pow2 similar to that in `SelectionDAG`.
shiltian committed Mar 22, 2024
1 parent bbcfe6f commit dfe9416
Showing 10 changed files with 2,708 additions and 743 deletions.
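As context for the diffs below (a sketch under my own assumptions, not code from this patch): an unsigned division by a power of two reduces to a single logical shift right, which is what applyUDivByPow2 emits, while the signed case needs an extra rounding bias, shown in a worked example after applySDivByPow2 further down.

#include <cassert>
#include <cstdint>

// udiv x, (1 << k) == lshr x, k for unsigned values; no rounding fixup needed.
static uint32_t udivByPow2(uint32_t X, unsigned K) { return X >> K; }

int main() {
  assert(udivByPow2(29, 3) == 29u / 8u); // both are 3
  return 0;
}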
8 changes: 8 additions & 0 deletions llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -673,6 +673,14 @@ class CombinerHelper {
bool matchSDivByConst(MachineInstr &MI);
void applySDivByConst(MachineInstr &MI);

/// Given a G_SDIV \p MI expressing a signed division by a pow2 constant,
/// return expressions that implement it by shifting.
bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
void applySDivByPow2(MachineInstr &MI);
/// Given a G_UDIV \p MI expressing an unsigned division by a pow2 constant,
/// return expressions that implement it by shifting.
void applyUDivByPow2(MachineInstr &MI);

// G_UMULH x, (1 << c) -> x >> (bitwidth - c)
bool matchUMulHToLShr(MachineInstr &MI);
void applyUMulHToLShr(MachineInstr &MI);
9 changes: 9 additions & 0 deletions llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -308,11 +308,20 @@ std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
Register Src,
const MachineRegisterInfo &MRI);

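/// Tries to constant fold a G_ICMP with the given predicate on \p Op1 and
/// \p Op2. If the operands are vectors then it tries to do an element-wise
/// constant fold.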
std::optional<SmallVector<APInt>>
ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTTZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTTZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
16 changes: 15 additions & 1 deletion llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -179,6 +179,7 @@ def FmArcp : MIFlagEnum<"FmArcp">;
def FmContract : MIFlagEnum<"FmContract">;
def FmAfn : MIFlagEnum<"FmAfn">;
def FmReassoc : MIFlagEnum<"FmReassoc">;
def IsExact : MIFlagEnum<"IsExact">;

def MIFlags;
// def not; -> Already defined as a SDNode
@@ -1036,7 +1037,20 @@ def sdiv_by_const : GICombineRule<
[{ return Helper.matchSDivByConst(*${root}); }]),
(apply [{ Helper.applySDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;
def sdiv_by_pow2 : GICombineRule<
(defs root:$root),
(match (G_SDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
[{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/true); }]),
(apply [{ Helper.applySDivByPow2(*${root}); }])>;

def udiv_by_pow2 : GICombineRule<
(defs root:$root),
(match (G_UDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
[{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/false); }]),
(apply [{ Helper.applyUDivByPow2(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const,
sdiv_by_pow2, udiv_by_pow2]>;

def reassoc_ptradd : GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
30 changes: 30 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -174,6 +174,20 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
switch (Opc) {
default:
break;
case TargetOpcode::G_ICMP: {
assert(SrcOps.size() == 3 && "Invalid sources");
assert(DstOps.size() == 1 && "Invalid dsts");
LLT SrcTy = SrcOps[1].getLLTTy(*getMRI());

if (std::optional<SmallVector<APInt>> Cst =
ConstantFoldICmp(SrcOps[0].getPredicate(), SrcOps[1].getReg(),
SrcOps[2].getReg(), *getMRI())) {
if (SrcTy.isVector())
return buildBuildVectorConstant(DstOps[0], *Cst);
return buildConstant(DstOps[0], Cst->front());
}
break;
}
case TargetOpcode::G_ADD:
case TargetOpcode::G_PTR_ADD:
case TargetOpcode::G_AND:
@@ -272,6 +286,22 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
buildConstant(VecTy.getScalarType(), Cst).getReg(0));
return buildBuildVector(DstOps[0], ConstantRegs);
}
case TargetOpcode::G_CTTZ: {
assert(SrcOps.size() == 1 && "Expected one source");
assert(DstOps.size() == 1 && "Expected one dest");
auto MaybeCsts = ConstantFoldCTTZ(SrcOps[0].getReg(), *getMRI());
if (!MaybeCsts)
break;
if (MaybeCsts->size() == 1)
return buildConstant(DstOps[0], (*MaybeCsts)[0]);
// This was a vector constant. Build a G_BUILD_VECTOR for them.
SmallVector<Register> ConstantRegs;
LLT VecTy = DstOps[0].getLLTTy(*getMRI());
for (unsigned Cst : *MaybeCsts)
ConstantRegs.emplace_back(
buildConstant(VecTy.getScalarType(), Cst).getReg(0));
return buildBuildVector(DstOps[0], ConstantRegs);
}
}
bool CanCopy = checkCopyToDefsPossible(DstOps);
if (!canPerformCSEForOpc(Opc))
90 changes: 90 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5270,6 +5270,96 @@ MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
return MIB.buildMul(Ty, Res, Factor);
}

bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) {
assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
MI.getOpcode() == TargetOpcode::G_UDIV) &&
"Expected SDIV or UDIV");
auto &Div = cast<GenericMachineInstr>(MI);
Register RHS = Div.getReg(2);
auto MatchPow2 = [&](const Constant *C) {
auto *CI = dyn_cast<ConstantInt>(C);
return CI && (CI->getValue().isPowerOf2() ||
(IsSigned && CI->getValue().isNegatedPowerOf2()));
};
return matchUnaryPredicate(MRI, RHS, MatchPow2, /*AllowUndefs=*/false);
}
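
For reference, a self-contained sketch (my illustration, not part of the patch) of which divisor constants the predicate above accepts, using LLVM's APInt:

#include "llvm/ADT/APInt.h"
#include <cassert>

// Mirrors the MatchPow2 lambda: a positive power of two always qualifies; a
// negated power of two qualifies only for signed division.
static bool isDivByPow2Candidate(const llvm::APInt &Divisor, bool IsSigned) {
  return Divisor.isPowerOf2() || (IsSigned && Divisor.isNegatedPowerOf2());
}

int main() {
  using llvm::APInt;
  assert(isDivByPow2Candidate(APInt(32, 8), /*IsSigned=*/false));
  assert(isDivByPow2Candidate(APInt(32, -8, /*isSigned=*/true), /*IsSigned=*/true));
  assert(!isDivByPow2Candidate(APInt(32, -8, /*isSigned=*/true), /*IsSigned=*/false));
  assert(!isDivByPow2Candidate(APInt(32, 6), /*IsSigned=*/false));
  return 0;
}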

void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
auto &SDiv = cast<GenericMachineInstr>(MI);
Register Dst = SDiv.getReg(0);
Register LHS = SDiv.getReg(1);
Register RHS = SDiv.getReg(2);
LLT Ty = MRI.getType(Dst);
LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);

Builder.setInstrAndDebugLoc(MI);

// Effectively we want to lower G_SDIV %lhs, %rhs, where %rhs is a power of 2,
// to the following sequence:
//
// %c1 = G_CTTZ %rhs
// %inexact = G_SUB $bitwidth, %c1
// %sign = G_ASHR %lhs, $(bitwidth - 1)
// %srl = G_LSHR %sign, %inexact
// %add = G_ADD %lhs, %srl
// %sra = G_ASHR %add, %c1
// %sra = G_SELECT %isoneorallones, %lhs, %sra
// %zero = G_CONSTANT $0
// %neg = G_NEG %sra
// %isneg = G_ICMP SLT %rhs, %zero
// %res = G_SELECT %isneg, %neg, %sra

unsigned Bitwidth = Ty.getScalarSizeInBits();
auto Zero = Builder.buildConstant(Ty, 0);

auto Bits = Builder.buildConstant(ShiftAmtTy, Bitwidth);
auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
auto Inexact = Builder.buildSub(ShiftAmtTy, Bits, C1);
auto Sign = Builder.buildAShr(
Ty, LHS, Builder.buildConstant(ShiftAmtTy, Bitwidth - 1));

// Add (LHS < 0) ? abs(RHS) - 1 : 0 so the arithmetic shift rounds toward zero.
auto Lshr = Builder.buildLShr(Ty, Sign, Inexact);
auto Add = Builder.buildAdd(Ty, LHS, Lshr);
auto Shr = Builder.buildAShr(Ty, Add, C1);

LLT CCVT =
Ty.isVector() ? LLT::vector(Ty.getElementCount(), 1) : LLT::scalar(1);

auto One = Builder.buildConstant(Ty, 1);
auto AllOnes =
Builder.buildConstant(Ty, APInt::getAllOnes(Ty.getScalarSizeInBits()));
auto IsOne = Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, One);
auto IsAllOnes =
Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, AllOnes);
auto IsOneOrAllOnes = Builder.buildOr(CCVT, IsOne, IsAllOnes);
Shr = Builder.buildSelect(Ty, IsOneOrAllOnes, LHS, Shr);

// If dividing by a positive value, we're done. Otherwise, the result must
// be negated.
auto Neg = Builder.buildNeg(Ty, Shr);
auto IsNeg = Builder.buildICmp(CmpInst::Predicate::ICMP_SLT, CCVT, RHS, Zero);
Builder.buildSelect(MI.getOperand(0).getReg(), IsNeg, Neg, Shr);
MI.eraseFromParent();
}
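
A standalone sketch in plain C++ (my illustration, not LLVM code) of the same shift sequence for a positive power-of-two divisor of at least 2; it assumes arithmetic right shift for negative signed values, which the generated G_ASHR provides by definition, and makes the rounding correction easy to check against C's round-toward-zero division:

#include <cassert>
#include <cstdint>

static int32_t sdivByPow2(int32_t LHS, unsigned Log2Divisor) {
  const unsigned BitWidth = 32;
  unsigned C1 = Log2Divisor;                                     // G_CTTZ of the divisor
  int32_t Sign = LHS >> (BitWidth - 1);                          // 0, or all ones if LHS < 0
  uint32_t Srl = static_cast<uint32_t>(Sign) >> (BitWidth - C1); // divisor-1 if LHS < 0, else 0
  int32_t Add = LHS + static_cast<int32_t>(Srl);                 // bias negative dividends toward zero
  return Add >> C1;                                              // arithmetic shift gives the quotient
}

int main() {
  assert(sdivByPow2(-7, 2) == -7 / 4); // both are -1; a bare shift would give -2
  assert(sdivByPow2(7, 2) == 7 / 4);   // both are 1
  assert(sdivByPow2(-8, 2) == -8 / 4); // both are -2
  return 0;
}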

void CombinerHelper::applyUDivByPow2(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_UDIV && "Expected UDIV");
auto &UDiv = cast<GenericMachineInstr>(MI);
Register Dst = UDiv.getReg(0);
Register LHS = UDiv.getReg(1);
Register RHS = UDiv.getReg(2);
LLT Ty = MRI.getType(Dst);
LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);

Builder.setInstrAndDebugLoc(MI);

auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
Builder.buildLShr(MI.getOperand(0).getReg(), LHS, C1);
MI.eraseFromParent();
}

bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_UMULH);
Register RHS = MI.getOperand(2).getReg();
99 changes: 99 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -996,6 +996,105 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
return std::nullopt;
}

std::optional<SmallVector<unsigned>>
llvm::ConstantFoldCTTZ(Register Src, const MachineRegisterInfo &MRI) {
LLT Ty = MRI.getType(Src);
SmallVector<unsigned> FoldedCTTZs;
auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
auto MaybeCst = getIConstantVRegVal(R, MRI);
if (!MaybeCst)
return std::nullopt;
return MaybeCst->countTrailingZeros();
};
if (Ty.isVector()) {
// Try to constant fold each element.
auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
if (!BV)
return std::nullopt;
for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
FoldedCTTZs.emplace_back(*MaybeFold);
continue;
}
return std::nullopt;
}
return FoldedCTTZs;
}
if (auto MaybeCst = tryFoldScalar(Src)) {
FoldedCTTZs.emplace_back(*MaybeCst);
return FoldedCTTZs;
}
return std::nullopt;
}

std::optional<SmallVector<APInt>>
llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
const MachineRegisterInfo &MRI) {
LLT Ty = MRI.getType(Op1);
if (Ty != MRI.getType(Op2))
return std::nullopt;

auto TryFoldScalar = [&MRI, Pred](Register LHS,
Register RHS) -> std::optional<APInt> {
auto LHSCst = getIConstantVRegVal(LHS, MRI);
auto RHSCst = getIConstantVRegVal(RHS, MRI);
if (!LHSCst || !RHSCst)
return std::nullopt;

switch (Pred) {
case CmpInst::Predicate::ICMP_EQ:
return APInt(1, LHSCst->eq(*RHSCst));
case CmpInst::Predicate::ICMP_NE:
return APInt(1, LHSCst->ne(*RHSCst));
case CmpInst::Predicate::ICMP_UGT:
return APInt(1, LHSCst->ugt(*RHSCst));
case CmpInst::Predicate::ICMP_UGE:
return APInt(1, LHSCst->uge(*RHSCst));
case CmpInst::Predicate::ICMP_ULT:
return APInt(1, LHSCst->ult(*RHSCst));
case CmpInst::Predicate::ICMP_ULE:
return APInt(1, LHSCst->ule(*RHSCst));
case CmpInst::Predicate::ICMP_SGT:
return APInt(1, LHSCst->sgt(*RHSCst));
case CmpInst::Predicate::ICMP_SGE:
return APInt(1, LHSCst->sge(*RHSCst));
case CmpInst::Predicate::ICMP_SLT:
return APInt(1, LHSCst->slt(*RHSCst));
case CmpInst::Predicate::ICMP_SLE:
return APInt(1, LHSCst->sle(*RHSCst));
default:
return std::nullopt;
}
};

SmallVector<APInt> FoldedICmps;

if (Ty.isVector()) {
// Try to constant fold each element.
auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
if (!BV1 || !BV2)
return std::nullopt;
assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
if (auto MaybeFold =
TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
FoldedICmps.emplace_back(*MaybeFold);
continue;
}
return std::nullopt;
}
return FoldedICmps;
}

if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
FoldedICmps.emplace_back(*MaybeCst);
return FoldedICmps;
}

return std::nullopt;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
GISelKnownBits *KB) {
std::optional<DefinitionAndSourceRegister> DefSrcReg =
41 changes: 12 additions & 29 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
@@ -670,36 +670,19 @@ define amdgpu_kernel void @bfe_sext_in_reg_i24(ptr addrspace(1) %out, ptr addrsp
define amdgpu_kernel void @simplify_demanded_bfe_sdiv(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; GFX6-LABEL: simplify_demanded_bfe_sdiv:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, 2.0
; GFX6-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_load_dword s0, s[6:7], 0x0
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: v_mul_lo_u32 v1, v0, -2
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_bfe_i32 s0, s0, 0x100001
; GFX6-NEXT: s_ashr_i32 s2, s0, 31
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: s_add_i32 s0, s0, s2
; GFX6-NEXT: s_xor_b32 s0, s0, s2
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT: v_subrev_i32_e64 v2, s[0:1], 2, v1
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT: v_xor_b32_e32 v0, s2, v0
; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_load_dword s3, s[2:3], 0x0
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_bfe_i32 s3, s3, 0x100001
; GFX6-NEXT: s_ashr_i32 s4, s3, 31
; GFX6-NEXT: s_lshr_b32 s4, s4, 31
; GFX6-NEXT: s_add_i32 s3, s3, s4
; GFX6-NEXT: s_ashr_i32 s3, s3, 1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
%src = load i32, ptr addrspace(1) %in, align 4
%bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16)