From 0bb1b0ac8508220712dbeb7087dae9a34286b12c Mon Sep 17 00:00:00 2001 From: David Sherwood Date: Wed, 13 Nov 2024 17:20:54 +0000 Subject: [PATCH] [NFC][LoopVectorize] Cache result of requiresScalarEpilogue Caching the decision returned by requiresScalarEpilogue means that we can avoid printing out the same debug many times, and also avoids repeating the same calculation. This function will get more complex when we start to reason about more early exit loops, such as in PR #88385. The only problem with this is we sometimes have to invalidate the previous result due to changes in the scalar epilogue status or interleave groups. --- .../Transforms/Vectorize/LoopVectorize.cpp | 78 ++++++++++++++----- .../RISCV/riscv-vector-reverse.ll | 12 +-- 2 files changed, 61 insertions(+), 29 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 1ebc62f984390..3051ad18f163c 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1347,27 +1347,46 @@ class LoopVectorizationCostModel { return InterleaveInfo.getInterleaveGroup(Instr); } + /// Calculate in advance whether a scalar epilogue is required when + /// vectorizing and not vectorizing. If \p Invalidate is true then + /// invalidate a previous decision. + void collectScalarEpilogueRequirements(bool Invalidate) { + auto NeedsScalarEpilogue = [&](bool IsVectorizing) -> bool { + if (!isScalarEpilogueAllowed()) { + LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue"); + return false; + } + // If we might exit from anywhere but the latch, must run the exiting + // iteration in scalar form. 
+      if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
+        LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
+                             "from latch block\n");
+        return true;
+      }
+      if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
+        LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
+                             "interleaved group requires scalar epilogue");
+        return true;
+      }
+      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue");
+      return false;
+    };
+
+    assert((Invalidate || !RequiresScalarEpilogue) &&
+           "Already determined scalar epilogue requirements!");
+    std::pair<bool, bool> Result;
+    Result.first = NeedsScalarEpilogue(true);
+    LLVM_DEBUG(dbgs() << ", when vectorizing\n");
+    Result.second = NeedsScalarEpilogue(false);
+    LLVM_DEBUG(dbgs() << ", when not vectorizing\n");
+    RequiresScalarEpilogue = Result;
+  }
+
   /// Returns true if we're required to use a scalar epilogue for at least
   /// the final iteration of the original loop.
   bool requiresScalarEpilogue(bool IsVectorizing) const {
-    if (!isScalarEpilogueAllowed()) {
-      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
-      return false;
-    }
-    // If we might exit from anywhere but the latch, must run the exiting
-    // iteration in scalar form.
-    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
-      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
-                           "from latch block\n");
-      return true;
-    }
-    if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
-      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
-                           "interleaved group requires scalar epilogue\n");
-      return true;
-    }
-    LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
-    return false;
+    auto &CachedResult = *RequiresScalarEpilogue;
+    return IsVectorizing ?
+        CachedResult.first : CachedResult.second;
   }
 
   /// Returns true if we're required to use a scalar epilogue for at least
@@ -1391,6 +1410,15 @@ class LoopVectorizationCostModel {
     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
   }
 
+  /// Update the ScalarEpilogueStatus to a new value, potentially triggering a
+  /// recalculation of the scalar epilogue requirements.
+  void setScalarEpilogueStatus(ScalarEpilogueLowering Status) {
+    bool Changed = ScalarEpilogueStatus != Status;
+    ScalarEpilogueStatus = Status;
+    if (Changed)
+      collectScalarEpilogueRequirements(/*Invalidate=*/true);
+  }
+
   /// Returns the TailFoldingStyle that is best for the current loop.
   TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
     if (!ChosenTailFoldingStyle)
@@ -1771,6 +1799,9 @@ class LoopVectorizationCostModel {
 
   /// All element types found in the loop.
   SmallPtrSet<Type *, 16> ElementTypesInLoop;
+
+  /// Keeps track of whether we require a scalar epilogue.
+  std::optional<std::pair<bool, bool>> RequiresScalarEpilogue;
 };
 } // end namespace llvm
 
@@ -4058,7 +4089,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                            "scalar epilogue instead.\n");
-      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
+      setScalarEpilogueStatus(CM_ScalarEpilogueAllowed);
       return computeFeasibleMaxVF(MaxTC, UserVF, false);
     }
     return FixedScalableVFPair::getNone();
@@ -4074,6 +4105,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
     // Note: There is no need to invalidate any cost modeling decisions here, as
     // none were taken so far.
InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); + collectScalarEpilogueRequirements(/*Invalidate=*/true); } FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(MaxTC, UserVF, true); @@ -4145,7 +4177,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " "scalar epilogue instead.\n"); - ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; + setScalarEpilogueStatus(CM_ScalarEpilogueAllowed); return MaxFactors; } @@ -7058,6 +7090,7 @@ LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { if (!OrigLoop->isInnermost()) { // If the user doesn't provide a vectorization factor, determine a // reasonable one. + CM.collectScalarEpilogueRequirements(/*Invalidate=*/false); if (UserVF.isZero()) { VF = determineVPlanVF(TTI, CM); LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); @@ -7102,6 +7135,7 @@ LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { assert(OrigLoop->isInnermost() && "Inner loop expected."); + CM.collectScalarEpilogueRequirements(/*Invalidate=*/false); CM.collectValuesToIgnore(); CM.collectElementTypesForWidening(); @@ -7116,11 +7150,13 @@ void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { dbgs() << "LV: Invalidate all interleaved groups due to fold-tail by masking " "which requires masked-interleaved support.\n"); - if (CM.InterleaveInfo.invalidateGroups()) + if (CM.InterleaveInfo.invalidateGroups()) { // Invalidating interleave groups also requires invalidating all decisions // based on them, which includes widening decisions and uniform and scalar // values. 
CM.invalidateCostModelingDecisions(); + CM.collectScalarEpilogueRequirements(/*Invalidate=*/true); + } } if (CM.foldTailByMasking()) diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index d68556fca4774..8c46406917cc1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -17,7 +17,8 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV: Found an induction variable. ; CHECK-NEXT: LV: Did not find one integer induction var. ; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)! -; CHECK-NEXT: LV: Loop does not require scalar epilogue +; CHECK-NEXT: LV: Loop does not require scalar epilogue, when vectorizing +; CHECK-NEXT: LV: Loop does not require scalar epilogue, when not vectorizing ; CHECK-NEXT: LV: Found trip count: 0 ; CHECK-NEXT: LV: Found maximum trip count: 4294967295 ; CHECK-NEXT: LV: Scalable vectorization is available @@ -45,7 +46,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1 ; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0 ; CHECK-NEXT: LV: Using user VF vscale x 4. 
-; CHECK-NEXT: LV: Loop does not require scalar epilogue ; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 ; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64 ; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom @@ -134,7 +134,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers ; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class ; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class -; CHECK-NEXT: LV: Loop does not require scalar epilogue ; CHECK-NEXT: LV: Loop cost is 32 ; CHECK-NEXT: LV: IC is 1 ; CHECK-NEXT: LV: VF is vscale x 4 @@ -194,7 +193,6 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK: IR %indvars.iv.next = add nsw i64 %indvars.iv, -1 ; CHECK-NEXT: No successors ; CHECK-NEXT: } -; CHECK: LV: Loop does not require scalar epilogue ; entry: %cmp7 = icmp sgt i32 %n, 0 @@ -231,7 +229,8 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV: Found FP op with unsafe algebra. ; CHECK-NEXT: LV: Did not find one integer induction var. ; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)! 
-; CHECK-NEXT: LV: Loop does not require scalar epilogue +; CHECK-NEXT: LV: Loop does not require scalar epilogue, when vectorizing +; CHECK-NEXT: LV: Loop does not require scalar epilogue, when not vectorizing ; CHECK-NEXT: LV: Found trip count: 0 ; CHECK-NEXT: LV: Found maximum trip count: 4294967295 ; CHECK-NEXT: LV: Scalable vectorization is available @@ -259,7 +258,6 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1 ; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0 ; CHECK-NEXT: LV: Using user VF vscale x 4. -; CHECK-NEXT: LV: Loop does not require scalar epilogue ; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 ; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64 ; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom @@ -348,7 +346,6 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers ; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class ; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class -; CHECK-NEXT: LV: Loop does not require scalar epilogue ; CHECK-NEXT: LV: Loop cost is 34 ; CHECK-NEXT: LV: IC is 1 ; CHECK-NEXT: LV: VF is vscale x 4 @@ -408,7 +405,6 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK: IR %indvars.iv.next = add nsw i64 %indvars.iv, -1 ; CHECK-NEXT: No successors ; CHECK-NEXT: } -; CHECK: LV: Loop does not require scalar epilogue ; entry: %cmp7 = icmp sgt i32 %n, 0