LV: fix style after cursory reading (NFC) #105830
Conversation
@llvm/pr-subscribers-llvm-transforms
Author: Ramkumar Ramachandra (artagnon)
Changes: Patch is 46.05 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/105830.diff
1 file affected:
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 2145bb8c9ca872..3ae727b8fd5865 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -70,7 +70,6 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
@@ -103,7 +102,6 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
-#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
@@ -125,12 +123,10 @@
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
-#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/VectorBuilder.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
@@ -147,12 +143,10 @@
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
-#include <cmath>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
-#include <map>
#include <memory>
#include <string>
#include <tuple>
@@ -360,7 +354,7 @@ cl::opt<bool> EnableVPlanNativePath(
"enable-vplan-native-path", cl::Hidden,
cl::desc("Enable VPlan-native vectorization path with "
"support for outer loop vectorization."));
-}
+} // namespace llvm
// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjuction with
@@ -1189,8 +1183,8 @@ class LoopVectorizationCostModel {
assert(VF.isVector() && "Expected VF >=2");
/// Broadcast this decicion to all instructions inside the group.
/// But the cost will be assigned to one instruction only.
- for (unsigned i = 0; i < Grp->getFactor(); ++i) {
- if (auto *I = Grp->getMember(i)) {
+ for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
+ if (auto *I = Grp->getMember(Idx)) {
if (Grp->getInsertPos() == I)
WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
else
@@ -1833,7 +1827,7 @@ class GeneratedRTChecks {
/// un-linked from the IR and is added back during vector code generation. If
/// there is no vector code generation, the check blocks are removed
/// completely.
- void Create(Loop *L, const LoopAccessInfo &LAI,
+ void create(Loop *L, const LoopAccessInfo &LAI,
const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
// Hard cutoff to limit compile-time increase in case a very large number of
@@ -2609,7 +2603,7 @@ PHINode *InnerLoopVectorizer::createInductionResumeValue(
IRBuilder<> B(LoopVectorPreHeader->getTerminator());
// Fast-math-flags propagate from the original induction instruction.
- if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
+ if (isa_and_nonnull<FPMathOperator>(II.getInductionBinOp()))
B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
EndValue = emitTransformedIndex(B, VectorTripCount, II.getStartValue(),
@@ -2790,7 +2784,7 @@ void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
IRBuilder<> B(MiddleBlock->getTerminator());
// Fast-math-flags propagate from the original induction instruction.
- if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
+ if (isa_and_nonnull<FPMathOperator>(II.getInductionBinOp()))
B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
Value *CountMinusOne = B.CreateSub(
@@ -2902,7 +2896,7 @@ LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
return ScalarCallCost;
}
-static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
+static Type *maybeVectorizeType(Type *Elt, ElementCount VF) {
if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
return Elt;
return VectorType::get(Elt, VF);
@@ -2913,7 +2907,7 @@ LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
ElementCount VF) const {
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
assert(ID && "Expected intrinsic call!");
- Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
+ Type *RetTy = maybeVectorizeType(CI->getType(), VF);
FastMathFlags FMF;
if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
FMF = FPMO->getFastMathFlags();
@@ -2923,7 +2917,7 @@ LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
SmallVector<Type *> ParamTys;
std::transform(FTy->param_begin(), FTy->param_end(),
std::back_inserter(ParamTys),
- [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
+ [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
dyn_cast<IntrinsicInst>(CI));
@@ -3015,7 +3009,7 @@ void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
// Returns true if a given use occurs in the predicated block. Phi nodes use
// their operands in their corresponding predecessor blocks.
- auto isBlockOfUsePredicated = [&](Use &U) -> bool {
+ auto IsBlockOfUsePredicated = [&](Use &U) -> bool {
auto *I = cast<Instruction>(U.getUser());
BasicBlock *BB = I->getParent();
if (auto *Phi = dyn_cast<PHINode>(I))
@@ -3059,7 +3053,7 @@ void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
// It's legal to sink the instruction if all its uses occur in the
// predicated block. Otherwise, there's nothing to do yet, and we may
// need to reanalyze the instruction.
- if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
+ if (!llvm::all_of(I->uses(), IsBlockOfUsePredicated)) {
InstsToReanalyze.push_back(I);
continue;
}
@@ -3087,9 +3081,9 @@ void InnerLoopVectorizer::fixNonInductionPHIs(VPlan &Plan,
PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
// Make sure the builder has a valid insert point.
Builder.SetInsertPoint(NewPhi);
- for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
- VPValue *Inc = VPPhi->getIncomingValue(i);
- VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
+ for (unsigned Idx = 0; Idx < VPPhi->getNumOperands(); ++Idx) {
+ VPValue *Inc = VPPhi->getIncomingValue(Idx);
+ VPBasicBlock *VPBB = VPPhi->getIncomingBlock(Idx);
NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
}
}
@@ -3123,7 +3117,7 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
// The pointer operands of loads and stores will be scalar as long as the
// memory access is not a gather or scatter operation. The value operand of a
// store will remain scalar if the store is scalarized.
- auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
+ auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
assert(WideningDecision != CM_Unknown &&
"Widening decision should be ready at this moment");
@@ -3137,7 +3131,7 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
// A helper that returns true if the given value is a getelementptr
// instruction contained in the loop.
- auto isLoopVaryingGEP = [&](Value *V) {
+ auto IsLoopVaryingGEP = [&](Value *V) {
return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
};
@@ -3145,10 +3139,10 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
// be a scalar use and the pointer is only used by memory accesses, we place
// the pointer in ScalarPtrs. Otherwise, the pointer is placed in
// PossibleNonScalarPtrs.
- auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
+ auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
// We only care about bitcast and getelementptr instructions contained in
// the loop.
- if (!isLoopVaryingGEP(Ptr))
+ if (!IsLoopVaryingGEP(Ptr))
return;
// If the pointer has already been identified as scalar (e.g., if it was
@@ -3160,7 +3154,7 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
// If the use of the pointer will be a scalar use, and all users of the
// pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
// place the pointer in PossibleNonScalarPtrs.
- if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
+ if (IsScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
return isa<LoadInst>(U) || isa<StoreInst>(U);
}))
ScalarPtrs.insert(I);
@@ -3185,10 +3179,10 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
for (auto *BB : TheLoop->blocks())
for (auto &I : *BB) {
if (auto *Load = dyn_cast<LoadInst>(&I)) {
- evaluatePtrUse(Load, Load->getPointerOperand());
+ EvaluatePtrUse(Load, Load->getPointerOperand());
} else if (auto *Store = dyn_cast<StoreInst>(&I)) {
- evaluatePtrUse(Store, Store->getPointerOperand());
- evaluatePtrUse(Store, Store->getValueOperand());
+ EvaluatePtrUse(Store, Store->getPointerOperand());
+ EvaluatePtrUse(Store, Store->getValueOperand());
}
}
for (auto *I : ScalarPtrs)
@@ -3214,14 +3208,14 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
unsigned Idx = 0;
while (Idx != Worklist.size()) {
Instruction *Dst = Worklist[Idx++];
- if (!isLoopVaryingGEP(Dst->getOperand(0)))
+ if (!IsLoopVaryingGEP(Dst->getOperand(0)))
continue;
auto *Src = cast<Instruction>(Dst->getOperand(0));
if (llvm::all_of(Src->users(), [&](User *U) -> bool {
auto *J = cast<Instruction>(U);
return !TheLoop->contains(J) || Worklist.count(J) ||
((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
- isScalarUse(J, Src));
+ IsScalarUse(J, Src));
})) {
Worklist.insert(Src);
LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
@@ -3246,7 +3240,7 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
return Induction.second.getKind() ==
InductionDescriptor::IK_PtrInduction &&
(isa<LoadInst>(I) || isa<StoreInst>(I)) &&
- Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
+ Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
};
// Determine if all users of the induction variable are scalar after
@@ -3465,21 +3459,20 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
// losslessly cast all values to a common type.
unsigned InterleaveFactor = Group->getFactor();
bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
- for (unsigned i = 0; i < InterleaveFactor; i++) {
- Instruction *Member = Group->getMember(i);
+ for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
+ Instruction *Member = Group->getMember(Idx);
if (!Member)
continue;
auto *MemberTy = getLoadStoreType(Member);
bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
// Don't coerce non-integral pointers to integers or vice versa.
- if (MemberNI != ScalarNI) {
+ if (MemberNI != ScalarNI)
// TODO: Consider adding special nullptr value case here
return false;
- } else if (MemberNI && ScalarNI &&
- ScalarTy->getPointerAddressSpace() !=
- MemberTy->getPointerAddressSpace()) {
+ if (MemberNI && ScalarNI &&
+ ScalarTy->getPointerAddressSpace() !=
+ MemberTy->getPointerAddressSpace())
return false;
- }
}
// Check if masking is required.
@@ -3560,7 +3553,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// Global values, params and instructions outside of current loop are out of
// scope.
- auto isOutOfScope = [&](Value *V) -> bool {
+ auto IsOutOfScope = [&](Value *V) -> bool {
Instruction *I = dyn_cast<Instruction>(V);
return (!I || !TheLoop->contains(I));
};
@@ -3572,8 +3565,8 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// that require predication must not be considered uniform after
// vectorization, because that would create an erroneous replicating region
// where only a single instance out of VF should be formed.
- auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
- if (isOutOfScope(I)) {
+ auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
+ if (IsOutOfScope(I)) {
LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
<< *I << "\n");
return;
@@ -3596,13 +3589,13 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
for (BasicBlock *E : Exiting) {
auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
- addToWorklistIfAllowed(Cmp);
+ AddToWorklistIfAllowed(Cmp);
}
auto PrevVF = VF.divideCoefficientBy(2);
// Return true if all lanes perform the same memory operation, and we can
// thus chose to execute only one.
- auto isUniformMemOpUse = [&](Instruction *I) {
+ auto IsUniformMemOpUse = [&](Instruction *I) {
// If the value was already known to not be uniform for the previous
// (smaller VF), it cannot be uniform for the larger VF.
if (PrevVF.isVector()) {
@@ -3620,12 +3613,12 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
};
- auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
+ auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
InstWidening WideningDecision = getWideningDecision(I, VF);
assert(WideningDecision != CM_Unknown &&
"Widening decision should be ready at this moment");
- if (isUniformMemOpUse(I))
+ if (IsUniformMemOpUse(I))
return true;
return (WideningDecision == CM_Widen ||
@@ -3636,11 +3629,11 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// Returns true if Ptr is the pointer operand of a memory access instruction
// I, I is known to not require scalarization, and the pointer is not also
// stored.
- auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
+ auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
return false;
return getLoadStorePointerOperand(I) == Ptr &&
- (isUniformDecision(I, VF) || Legal->isInvariant(Ptr));
+ (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
};
// Holds a list of values which are known to have at least one uniform use.
@@ -3662,7 +3655,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
if (TheLoop->hasLoopInvariantOperands(&I))
- addToWorklistIfAllowed(&I);
+ AddToWorklistIfAllowed(&I);
break;
default:
break;
@@ -3672,9 +3665,9 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// ExtractValue instructions must be uniform, because the operands are
// known to be loop-invariant.
if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
- assert(isOutOfScope(EVI->getAggregateOperand()) &&
+ assert(IsOutOfScope(EVI->getAggregateOperand()) &&
"Expected aggregate value to be loop invariant");
- addToWorklistIfAllowed(EVI);
+ AddToWorklistIfAllowed(EVI);
continue;
}
@@ -3683,10 +3676,10 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
if (!Ptr)
continue;
- if (isUniformMemOpUse(&I))
- addToWorklistIfAllowed(&I);
+ if (IsUniformMemOpUse(&I))
+ AddToWorklistIfAllowed(&I);
- if (isVectorizedMemAccessUse(&I, Ptr))
+ if (IsVectorizedMemAccessUse(&I, Ptr))
HasUniformUse.insert(Ptr);
}
@@ -3694,28 +3687,28 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// demanding) users. Since loops are assumed to be in LCSSA form, this
// disallows uses outside the loop as well.
for (auto *V : HasUniformUse) {
- if (isOutOfScope(V))
+ if (IsOutOfScope(V))
continue;
auto *I = cast<Instruction>(V);
auto UsersAreMemAccesses =
llvm::all_of(I->users(), [&](User *U) -> bool {
auto *UI = cast<Instruction>(U);
- return TheLoop->contains(UI) && isVectorizedMemAccessUse(UI, V);
+ return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
});
if (UsersAreMemAccesses)
- addToWorklistIfAllowed(I);
+ AddToWorklistIfAllowed(I);
}
// Expand Worklist in topological order: whenever a new instruction
// is added , its users should be already inside Worklist. It ensures
// a uniform instruction will only be used by uniform instructions.
- unsigned idx = 0;
- while (idx != Worklist.size()) {
- Instruction *I = Worklist[idx++];
+ unsigned Idx = 0;
+ while (Idx != Worklist.size()) {
+ Instruction *I = Worklist[Idx++];
for (auto *OV : I->operand_values()) {
// isOutOfScope operands cannot be uniform instructions.
- if (isOutOfScope(OV))
+ if (IsOutOfScope(OV))
continue;
// First order recurrence Phi's should typically be considered
// non-uniform.
@@ -3727,9 +3720,9 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
auto *OI = cast<Instruction>(OV);
if (llvm::all_of(OI->users(), [&](User *U) -> bool {
auto *J = cast<Instruction>(U);
- return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
+ return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
}))
- addToWorklistIfAllowed(OI);
+ AddToWorklistIfAllowed(OI);
}
}
@@ -3749,7 +3742,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
auto *I = cast<Instruction>(U);
return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
- isVectorizedMemAccessUse(I, Ind);
+ IsVectorizedMemAccessUse(I, Ind);
});
if (!UniformInd)
continue;
@@ -3760,14 +3753,14 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
auto *I = cast<Instruction>(U);
return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
- isVectorizedMemAccessUse(I, IndUpdate);
+ IsVectorizedMemAccessUse(I, IndUpdate);
});
if (!UniformIndUpdate)
continue;
// The induction variable and its update instruction will remain uniform.
- addToWorklistIfAllowed(Ind);
- addToWorklistIfAllowed(IndUpdate);
+ AddToWorklistIfAllowed(Ind);
+ AddToWorklistIfAllowed(IndUpdate);
}
Uniforms[VF].insert(Worklist.begin(), Worklist.end());
@@ -3917,8 +3910,8 @@ FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
if (UserVF.isScalable())
return FixedScalableVFPair(
ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
- else
- return UserVF;
+
+ return UserVF;
}
assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
@@ -4705,11 +4698,10 @@ VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
if...
[truncated]
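Most of the patch renames locals and lambdas to the CamelCase convention, drops now-unused includes, and removes an else-after-return; the one simplification that changes the shape of the code is the isa_and_nonnull substitution in the induction hunks. A minimal sketch of the equivalence those hunks rely on is below (it assumes the LLVM headers are available; the hasFPSemantics wrapper is hypothetical and only there to make the snippet self-contained):

#include "llvm/IR/Operator.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical wrapper, for illustration only.
bool hasFPSemantics(Value *BinOp) {
  // Before the patch: explicit null check, then the type check.
  bool Before = BinOp && isa<FPMathOperator>(BinOp);
  // After the patch: isa_and_nonnull<> returns false for a null
  // pointer and otherwise behaves exactly like isa<>.
  bool After = isa_and_nonnull<FPMathOperator>(BinOp);
  return Before == After; // Holds for any BinOp.
}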
Force-pushed from 4e32478 to be7152f.
This looks fine to me, but I'll say that I don't appreciate this kind of PR. Yes, it brings the code more in line with the letter of the coding standard, but I don't think the code has become any easier to read as a result. It just takes up reviewer time, makes git blame less useful, and invites bikeshedding (for example, whether to omit braces on an if whose body is a comment plus a single statement is not universally agreed on).
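To make the contested point concrete, here is the pattern from the interleaved-access hunk above, distilled into a standalone sketch (the condition and the TODO comment come from the diff; the wrapper function and its name are invented for illustration). With the braces dropped, only the comment sits between the if and its single statement:

// Standalone illustration of the brace-omission question raised above.
static bool typesAreCoercible(bool MemberNI, bool ScalarNI) {
  if (MemberNI != ScalarNI)
    // TODO: Consider adding special nullptr value case here
    return false;
  return true;
}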
Thanks for letting me know; I won't put up such PRs in the future. The yellow squigglies in VSCode (from clang-tidy) have been irritating me for a long time, and I thought I could finally fix them in a couple of minutes. However, I realize this has unnecessarily taken up reviewer time, and I apologize for that.