From 3a4bdf5a6a006b795003e27a838ca10d3ce13824 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 17:29:33 -0400 Subject: [PATCH 1/5] Make BasicBlock::bbJumpKind private --- src/coreclr/jit/assertionprop.cpp | 6 +- src/coreclr/jit/block.cpp | 8 +- src/coreclr/jit/block.h | 20 ++- src/coreclr/jit/codegenarm.cpp | 6 +- src/coreclr/jit/codegenarm64.cpp | 6 +- src/coreclr/jit/codegenarmarch.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 6 +- src/coreclr/jit/codegenlinear.cpp | 12 +- src/coreclr/jit/codegenloongarch64.cpp | 6 +- src/coreclr/jit/codegenriscv64.cpp | 6 +- src/coreclr/jit/codegenxarch.cpp | 8 +- src/coreclr/jit/compiler.cpp | 5 +- src/coreclr/jit/compiler.hpp | 12 +- src/coreclr/jit/emitarm.cpp | 4 +- src/coreclr/jit/emitarm64.cpp | 4 +- src/coreclr/jit/emitloongarch64.cpp | 2 +- src/coreclr/jit/emitriscv64.cpp | 2 +- src/coreclr/jit/emitxarch.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 88 ++++++------ src/coreclr/jit/fgdiagnostic.cpp | 48 +++---- src/coreclr/jit/fgehopt.cpp | 57 ++++---- src/coreclr/jit/fgflow.cpp | 12 +- src/coreclr/jit/fginline.cpp | 14 +- src/coreclr/jit/fgopt.cpp | 145 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 46 +++---- src/coreclr/jit/fgprofilesynthesis.cpp | 21 +-- src/coreclr/jit/flowgraph.cpp | 38 ++--- src/coreclr/jit/gschecks.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 8 +- src/coreclr/jit/importer.cpp | 100 +++++++------- src/coreclr/jit/importercalls.cpp | 6 +- src/coreclr/jit/indirectcalltransformer.cpp | 12 +- src/coreclr/jit/jiteh.cpp | 16 +-- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/lir.cpp | 2 +- src/coreclr/jit/liveness.cpp | 8 +- src/coreclr/jit/loopcloning.cpp | 32 ++--- src/coreclr/jit/lower.cpp | 28 ++-- src/coreclr/jit/lsra.cpp | 14 +- src/coreclr/jit/morph.cpp | 60 ++++---- src/coreclr/jit/objectalloc.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 20 +-- src/coreclr/jit/optimizer.cpp | 68 ++++----- src/coreclr/jit/patchpoint.cpp | 4 +- src/coreclr/jit/redundantbranchopts.cpp | 27 ++-- src/coreclr/jit/switchrecognition.cpp | 2 +- 46 files changed, 513 insertions(+), 488 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 6c353a6a238ef..26f1a3a4d71ec 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5260,7 +5260,7 @@ class AssertionPropFlowCallback { ASSERT_TP pAssertionOut; - if (predBlock->bbJumpKind == BBJ_COND && (predBlock->bbJumpDest == block)) + if (predBlock->getBBJumpKind() == BBJ_COND && (predBlock->bbJumpDest == block)) { pAssertionOut = mJumpDestOut[predBlock->bbNum]; @@ -5460,7 +5460,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen() printf(FMT_BB " valueGen = ", block->bbNum); optPrintAssertionIndices(block->bbAssertionGen); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { printf(" => " FMT_BB " valueGen = ", block->bbJumpDest->bbNum); optPrintAssertionIndices(jumpDestGen[block->bbNum]); @@ -6020,7 +6020,7 @@ PhaseStatus Compiler::optAssertionPropMain() printf(FMT_BB ":\n", block->bbNum); optDumpAssertionIndices(" in = ", block->bbAssertionIn, "\n"); optDumpAssertionIndices(" out = ", block->bbAssertionOut, "\n"); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { printf(" " FMT_BB " = ", block->bbJumpDest->bbNum); optDumpAssertionIndices(bbJtrueAssertionOut[block->bbNum], "\n"); diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 8b5cef28a71a8..742025a619e73 100644 --- a/src/coreclr/jit/block.cpp +++ 
b/src/coreclr/jit/block.cpp @@ -1419,7 +1419,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) /* Record the jump kind in the block */ - block->bbJumpKind = jumpKind; + block->setBBJumpKind(jumpKind DEBUG_ARG(this)); if (jumpKind == BBJ_THROW) { @@ -1499,9 +1499,9 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) - if (this->bbJumpKind == BBJ_CALLFINALLY) + if (this->getBBJumpKind() == BBJ_CALLFINALLY) #else - if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) + if ((this->getBBJumpKind() == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1510,7 +1510,7 @@ bool BasicBlock::isBBCallAlwaysPair() const #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. assert(this->bbNext != nullptr); - assert(this->bbNext->bbJumpKind == BBJ_ALWAYS); + assert(this->bbNext->getBBJumpKind() == BBJ_ALWAYS); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 9c7953a12b9e5..9a390d35eb46e 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -702,8 +702,26 @@ struct BasicBlock : private LIR::Range // a block corresponding to an exit from the try of a try/finally. bool isBBCallAlwaysPairTail() const; +private: BBjumpKinds bbJumpKind; // jump (if any) at the end of this block +public: + BBjumpKinds getBBJumpKind() const + { + return bbJumpKind; + } + + void setBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + { +#ifdef DEBUG + // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout + // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE + // (right now, this assertion does the null check to avoid unused variable warnings) + assert((kind != BBJ_NONE) || (comp != nullptr)); +#endif // DEBUG + bbJumpKind = kind; + } + /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) @@ -1556,7 +1574,7 @@ inline BBArrayIterator BBSwitchTargetList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index e8ebf46272fc4..3c8e8cdad6128 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -124,7 +124,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) assert(block->isBBCallAlwaysPair()); assert(block->bbNext != NULL); - assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); + assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); assert(block->bbNext->bbJumpDest != NULL); assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); @@ -630,7 +630,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -1294,7 +1294,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - 
assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 092a031f27048..c2a0823a09179 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3745,7 +3745,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4646,7 +4646,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4837,7 +4837,7 @@ void CodeGen::genCodeForSelect(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index a0a9967b24e04..6c0f23d4f488d 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -5515,7 +5515,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 27bdb1e62a8e2..916ac7854a33a 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -376,7 +376,7 @@ void CodeGen::genMarkLabelsForCodegen() for (BasicBlock* const block : compiler->Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
case BBJ_COND: @@ -2256,7 +2256,7 @@ void CodeGen::genReportEH() { for (BasicBlock* const block : compiler->Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { ++clonedFinallyCount; } @@ -2582,7 +2582,7 @@ void CodeGen::genReportEH() unsigned reportedClonedFinallyCount = 0; for (BasicBlock* const block : compiler->Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { UNATIVE_OFFSET hndBeg, hndEnd; diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index d36eeb32210f9..fdb473fe29ed7 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,7 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->bbPrev != nullptr) && (block->bbPrev->bbJumpKind == BBJ_COND) && + if ((block->bbPrev != nullptr) && (block->bbPrev->getBBJumpKind() == BBJ_COND) && (block->bbWeight != block->bbPrev->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT @@ -619,7 +619,7 @@ void CodeGen::genCodeForBBlist() { // We only need the NOP if we're not going to generate any more code as part of the block end. - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_ALWAYS: case BBJ_THROW: @@ -662,7 +662,7 @@ void CodeGen::genCodeForBBlist() /* Do we need to generate a jump or return? */ - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_RETURN: genExitCode(block); @@ -812,10 +812,10 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallAlwaysPairTail()); #if FEATURE_EH_CALLFINALLY_THUNKS - assert(block->bbJumpKind != BBJ_CALLFINALLY); + assert(block->getBBJumpKind() != BBJ_CALLFINALLY); #endif // FEATURE_EH_CALLFINALLY_THUNKS - GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->bbJumpKind == BBJ_ALWAYS)); + GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->getBBJumpKind() == BBJ_ALWAYS)); } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) @@ -2615,7 +2615,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) // void CodeGen::genCodeForJcc(GenTreeCC* jcc) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); assert(jcc->OperIs(GT_JCC)); inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 6ce58fed53318..26bbc218fc1a7 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1217,7 +1217,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2928,7 +2928,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4136,7 +4136,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // A GT_JCMP node is created for an integer-comparison's conditional branch. 
void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 4a64ebb374a19..7d8f3a8233d0d 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -886,7 +886,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2574,7 +2574,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -3780,7 +3780,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 16ffe5a8d7711..cc959b33e344a 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -369,7 +369,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } else { - assert(block->bbJumpKind == BBJ_EHFILTERRET); + assert(block->getBBJumpKind() == BBJ_EHFILTERRET); // The return value has already been computed. 
instGen_Return(0); @@ -1441,7 +1441,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4263,7 +4263,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -10241,7 +10241,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) if (jmpEpilog) { - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->getBBJumpKind() == BBJ_RETURN); noway_assert(block->GetFirstLIRNode()); // figure out what jump we have diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index e8c146a21707f..65d01e701d2e6 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5275,7 +5275,8 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } // If there is an unconditional jump (which is not part of callf/always pair) - if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) + if (opts.compJitHideAlignBehindJmp && (block->getBBJumpKind() == BBJ_ALWAYS) && + !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) @@ -5300,7 +5301,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() bool unmarkedLoopAlign = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { // It must be a retless BBJ_CALLFINALLY if we get here. 
assert(!block->isBBCallAlwaysPair()); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index a786b56edc29d..39c5ecd33681e 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -635,7 +635,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -649,7 +649,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -769,7 +769,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -3125,7 +3125,7 @@ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) return false; } - if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW) + if (!(block->bbFlags & BBF_INTERNAL) || block->getBBJumpKind() != BBJ_THROW) { return false; } @@ -3224,7 +3224,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // Any block with a throw is rare block->bbSetRunRarely(); @@ -3236,7 +3236,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); // leaveBlk is now unreachable, so scrub the pred lists. 
leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index c1dc431c93728..10a1beadf139f 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -4379,7 +4379,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 #ifdef DEBUG // Mark the finally call - if (ins == INS_b && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_b && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) { id->idDebugOnlyInfo()->idFinallyCall = true; } @@ -4523,7 +4523,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index f0428d222fc6c..82131ee325dd4 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -8495,7 +8495,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -8670,7 +8670,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount) #ifdef DEBUG // Mark the finally call - if (ins == INS_bl_local && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_bl_local && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 73f2dffebada8..d6004451fcb87 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -2046,7 +2046,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index edfe30a3026f6..bfc91a3561572 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -1030,7 +1030,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 65789413500cd..3e2afe7a830c1 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -7614,7 +7614,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -9221,7 +9221,7 @@ void emitter::emitIns_J(instruction ins, #ifdef DEBUG // Mark the finally call - if (ins == INS_call && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_call && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git 
a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 00925dcf12c2b..3573a015de385 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -206,7 +206,7 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) /* Allocate the block descriptor */ block = bbNewBasicBlock(jumpKind); - noway_assert(block->bbJumpKind == jumpKind); + noway_assert(block->getBBJumpKind() == jumpKind); /* Append the block to the end of the global basic block list */ @@ -395,7 +395,7 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); - noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH); + noway_assert(oldSwitchBlock->getBBJumpKind() == BBJ_SWITCH); assert(fgPredsComputed); // Walk the switch's jump table, updating the predecessor for each branch. @@ -457,7 +457,7 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); - noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); + noway_assert(blockSwitch->getBBJumpKind() == BBJ_SWITCH); assert(fgPredsComputed); // For the jump targets values that match oldTarget of our BBJ_SWITCH @@ -537,7 +537,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas assert(block != nullptr); assert(fgPredsComputed); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_COND: @@ -911,7 +911,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed } // Determine if the call site is in a no-return block - if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW)) + if (isInlining && (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } @@ -2721,7 +2721,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlo for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { - if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN)) + if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->getBBJumpKind() != BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; @@ -2771,7 +2771,7 @@ void Compiler::fgLinkBasicBlocks() for (BasicBlock* const curBBdesc : Blocks()) { - switch (curBBdesc->bbJumpKind) + switch (curBBdesc->getBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -3675,7 +3675,7 @@ void Compiler::fgFindBasicBlocks() // Still inside the filter block->setHndIndex(XTnum); - if (block->bbJumpKind == BBJ_EHFILTERRET) + if (block->getBBJumpKind() == BBJ_EHFILTERRET) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; @@ -3808,7 +3808,7 @@ void Compiler::fgFindBasicBlocks() // BBJ_EHFINALLYRET that were imported to BBJ_EHFAULTRET. if ((hndBegBB->bbCatchTyp == BBCT_FAULT) && block->KindIs(BBJ_EHFINALLYRET)) { - block->bbJumpKind = BBJ_EHFAULTRET; + block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } @@ -4015,9 +4015,9 @@ void Compiler::fgFixEntryFlowForOSR() // Now branch from method start to the OSR entry. 
    //
    fgEnsureFirstBBisScratch();
-   assert(fgFirstBB->bbJumpKind == BBJ_NONE);
+   assert(fgFirstBB->getBBJumpKind() == BBJ_NONE);
    fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB);
-   fgFirstBB->bbJumpKind = BBJ_ALWAYS;
+   fgFirstBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
    fgFirstBB->bbJumpDest = fgOSREntryBB;
    FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB);
    edge->setLikelihood(1.0);
@@ -4057,7 +4057,7 @@ void Compiler::fgCheckBasicBlockControlFlow()
            continue;
        }
-       switch (blk->bbJumpKind)
+       switch (blk->getBBJumpKind())
        {
            case BBJ_NONE: // block flows into the next one (no jump)
@@ -4099,14 +4099,14 @@ void Compiler::fgCheckBasicBlockControlFlow()
                HBtab = ehGetDsc(blk->getHndIndex());
                // Endfilter allowed only in a filter block
-               if (blk->bbJumpKind == BBJ_EHFILTERRET)
+               if (blk->getBBJumpKind() == BBJ_EHFILTERRET)
                {
                    if (!HBtab->HasFilter())
                    {
                        BADCODE("Unexpected endfilter");
                    }
                }
-               else if (blk->bbJumpKind == BBJ_EHFINALLYRET)
+               else if (blk->getBBJumpKind() == BBJ_EHFINALLYRET)
                {
                    // endfinally allowed only in a finally block
                    if (!HBtab->HasFinallyHandler())
                    {
                        BADCODE("Unexpected endfinally");
                    }
                }
-               else if (blk->bbJumpKind == BBJ_EHFAULTRET)
+               else if (blk->getBBJumpKind() == BBJ_EHFAULTRET)
                {
                    // 'endfault' (alias of IL 'endfinally') allowed only in a fault block
                    if (!HBtab->HasFaultHandler())
                    {
                        BADCODE("Unexpected endfault");
                    }
                }
@@ -4560,7 +4560,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
{
    // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block.
    // (We need the successors of 'curr' to be correct when we do this.)
-   BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind);
+   BasicBlock* newBlock = bbNewBasicBlock(curr->getBBJumpKind());
    // Start the new block with no refs. When we set the preds below, this will get updated correctly.
    newBlock->bbRefs = 0;
@@ -4568,7 +4568,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
    // For each successor of the original block, set the new block as their predecessor.
    // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs.
    // Without these arcs, a block 'b' may not be a member of succs(preds(b))
-   if (curr->bbJumpKind != BBJ_SWITCH)
+   if (curr->getBBJumpKind() != BBJ_SWITCH)
    {
        for (BasicBlock* const succ : curr->Succs(this))
        {
@@ -4628,7 +4628,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
    curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL);
    // Default to fallthru, and add the arc for that.
-   curr->bbJumpKind = BBJ_NONE;
+   curr->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
    fgAddRefPred(newBlock, curr);
    return newBlock;
@@ -4874,7 +4874,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
    JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum,
            newBlock->bbNum);
-   if (curr->bbJumpKind == BBJ_COND)
+   if (curr->getBBJumpKind() == BBJ_COND)
    {
        fgReplacePred(succ, curr, newBlock);
        if (curr->bbJumpDest == succ)
        {
@@ -4884,7 +4884,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
        }
        fgAddRefPred(newBlock, curr);
    }
-   else if (curr->bbJumpKind == BBJ_SWITCH)
+   else if (curr->getBBJumpKind() == BBJ_SWITCH)
    {
        // newBlock replaces 'succ' in the switch.
fgReplaceSwitchJumpTarget(curr, newBlock, succ); @@ -4894,7 +4894,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } else { - assert(curr->bbJumpKind == BBJ_ALWAYS); + assert(curr->getBBJumpKind() == BBJ_ALWAYS); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); @@ -4907,7 +4907,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. // - if (curr->bbJumpKind != BBJ_ALWAYS) + if (curr->getBBJumpKind() != BBJ_ALWAYS) { newBlock->inheritWeightPercentage(curr, 50); } @@ -5054,7 +5054,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } #endif // FEATURE_EH_FUNCLETS - if (bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; @@ -5063,7 +5063,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && + else if (bPrev->getBBJumpKind() == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { @@ -5071,7 +5071,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. 
- bPrev->bbJumpKind = BBJ_NONE; + bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // If this is the first Cold basic block update fgFirstColdBlock @@ -5092,7 +5092,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; @@ -5104,7 +5104,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (block->bbJumpKind == BBJ_RETURN) + else if (block->getBBJumpKind() == BBJ_RETURN) { fgRemoveReturnBlock(block); } @@ -5129,7 +5129,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) #ifdef DEBUG /* Some extra checks for the empty case */ - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_NONE: break; @@ -5139,7 +5139,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); + noway_assert(bPrev && bPrev->getBBJumpKind() == BBJ_NONE); break; default: @@ -5154,7 +5154,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) BasicBlock* succBlock; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } @@ -5207,7 +5207,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* Must be a fall through to next block */ - noway_assert(block->bbJumpKind == BBJ_NONE); + noway_assert(block->getBBJumpKind() == BBJ_NONE); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; @@ -5235,7 +5235,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ - if (predBlock->bbJumpKind != BBJ_SWITCH) + if (predBlock->getBBJumpKind() != BBJ_SWITCH) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds @@ -5246,7 +5246,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } /* change all jumps to the removed block */ - switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); @@ -5257,10 +5257,10 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { /* bPrev now becomes a BBJ_ALWAYS */ - bPrev->bbJumpKind = BBJ_ALWAYS; + bPrev->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bPrev->bbJumpDest = succBlock; } break; @@ -5313,7 +5313,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (bPrev != nullptr) { - switch (bPrev->bbJumpKind) + switch (bPrev->getBBJumpKind()) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS @@ -5333,7 +5333,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type - bPrev->bbJumpKind = BBJ_NONE; + bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } break; @@ -5378,11 +5378,11 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { - switch (bSrc->bbJumpKind) + switch (bSrc->getBBJumpKind()) { case BBJ_NONE: - bSrc->bbJumpKind = BBJ_ALWAYS; + bSrc->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bSrc->bbJumpDest = bDst; JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); @@ -5459,10 +5459,10 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && + if ((bSrc->getBBJumpKind() == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { - bSrc->bbJumpKind = BBJ_NONE; + bSrc->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); @@ -6273,14 +6273,14 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } else { - if (bAlt->bbJumpKind == BBJ_ALWAYS) + if (bAlt->getBBJumpKind() == BBJ_ALWAYS) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { - noway_assert(bAlt->bbJumpKind == BBJ_COND); + noway_assert(bAlt->getBBJumpKind() == BBJ_COND); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } @@ -6570,7 +6570,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, { goodBlk = blk; } - else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND)) + else if ((goodBlk->getBBJumpKind() == BBJ_COND) || (blk->getBBJumpKind() != BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 
afc1bbc1db73e..edf64aeccdd37 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -101,7 +101,7 @@ void Compiler::fgDebugCheckUpdate()
        if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE))
        {
-           switch (block->bbJumpKind)
+           switch (block->getBBJumpKind())
            {
                case BBJ_CALLFINALLY:
                case BBJ_EHFINALLYRET:
@@ -143,13 +143,13 @@ void Compiler::fgDebugCheckUpdate()
        // Check for an unnecessary jumps to the next block
        bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert
-       if (block->bbJumpKind == BBJ_COND)
+       if (block->getBBJumpKind() == BBJ_COND)
        {
            // A conditional branch should never jump to the next block
            // as it can be folded into a BBJ_NONE;
            doAssertOnJumpToNextBlock = true;
        }
-       else if (block->bbJumpKind == BBJ_ALWAYS)
+       else if (block->getBBJumpKind() == BBJ_ALWAYS)
        {
            // Generally we will want to assert if a BBJ_ALWAYS branches to the next block
            doAssertOnJumpToNextBlock = true;
@@ -184,7 +184,7 @@ void Compiler::fgDebugCheckUpdate()
        /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */
-       if ((block->bbJumpKind == BBJ_ALWAYS) && prevIsCallAlwaysPair)
+       if ((block->getBBJumpKind() == BBJ_ALWAYS) && prevIsCallAlwaysPair)
        {
            noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS);
        }
@@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate()
        /* For a BBJ_CALLFINALLY block we make sure that we are followed by */
        /* an BBJ_ALWAYS block with BBF_INTERNAL set */
        /* or that it's a BBF_RETLESS_CALL */
-       if (block->bbJumpKind == BBJ_CALLFINALLY)
+       if (block->getBBJumpKind() == BBJ_CALLFINALLY)
        {
            assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair());
        }
@@ -984,7 +984,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
            }
        }
-       if (block->bbJumpKind == BBJ_COND)
+       if (block->getBBJumpKind() == BBJ_COND)
        {
            fprintf(fgxFile, "\\n");
@@ -1015,11 +1015,11 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
        {
            fprintf(fgxFile, ", shape = \"house\"");
        }
-       else if (block->bbJumpKind == BBJ_RETURN)
+       else if (block->getBBJumpKind() == BBJ_RETURN)
        {
            fprintf(fgxFile, ", shape = \"invhouse\"");
        }
-       else if (block->bbJumpKind == BBJ_THROW)
+       else if (block->getBBJumpKind() == BBJ_THROW)
        {
            fprintf(fgxFile, ", shape = \"trapezium\"");
        }
@@ -1035,7 +1035,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
            fprintf(fgxFile, "\n <block");
            fprintf(fgxFile, "\n id=\"%d\"", block->bbNum);
            fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal);
-           fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->bbJumpKind]);
+           fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->getBBJumpKind()]);
            if (block->hasTryIndex())
            {
                fprintf(fgxFile, "\n inTry=\"%s\"", "true");
@@ -1152,7 +1152,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
                fprintf(fgxFile, "\n id=\"%d\"", edgeNum);
                fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum);
                fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum);
-               if (bSource->bbJumpKind == BBJ_SWITCH)
+               if (bSource->getBBJumpKind() == BBJ_SWITCH)
                {
                    if (edge->getDupCount() >= 2)
                    {
@@ -2004,7 +2004,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 *
    }
    else
    {
-       switch (block->bbJumpKind)
+       switch (block->getBBJumpKind())
        {
            case BBJ_COND:
                printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum,
@@ -2606,8 +2606,8 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb
    // block that does a local call to the finally. This BBJ_ALWAYS is within
    // the try region protected by the finally (for x86, ARM), but that's ok.
BasicBlock* prevBlock = block->bbPrev; - if (prevBlock->bbJumpKind == BBJ_CALLFINALLY && block->bbJumpKind == BBJ_ALWAYS && - blockPred->bbJumpKind == BBJ_EHFINALLYRET) + if (prevBlock->getBBJumpKind() == BBJ_CALLFINALLY && block->getBBJumpKind() == BBJ_ALWAYS && + blockPred->getBBJumpKind() == BBJ_EHFINALLYRET) { return true; } @@ -2634,7 +2634,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb } // Our try block can call our finally block - if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->bbJumpKind == BBJ_CALLFINALLY) && + if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->getBBJumpKind() == BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; @@ -2660,7 +2660,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { - switch (blockPred->bbJumpKind) + switch (blockPred->getBBJumpKind()) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); @@ -2734,7 +2734,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -2756,7 +2756,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -2878,12 +2878,12 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // if (compPostImportationCleanupDone || ((block->bbFlags & BBF_IMPORTED) != 0)) { - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && block->lastNode()->OperIsConditionalJump()); } - else if (block->bbJumpKind == BBJ_SWITCH) + else if (block->getBBJumpKind() == BBJ_SWITCH) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); @@ -2987,7 +2987,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // Don't depend on predecessors list for the check. for (BasicBlock* const succBlock : block->Succs()) { - if (succBlock->bbJumpKind == BBJ_CALLFINALLY) + if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); @@ -3729,7 +3729,7 @@ void Compiler::fgDebugCheckBlockLinks() // If this is a switch, check that the tables are consistent. // Note that we don't call GetSwitchDescMap(), because it has the side-effect // of allocating it if it is not present. - if (block->bbJumpKind == BBJ_SWITCH && m_switchDescMap != nullptr) + if (block->getBBJumpKind() == BBJ_SWITCH && m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) @@ -4792,13 +4792,13 @@ void Compiler::fgDebugCheckLoopTable() // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. 
BasicBlock* e = loop.lpEntry; - if (h->bbJumpKind == BBJ_ALWAYS) + if (h->getBBJumpKind() == BBJ_ALWAYS) { assert(h->bbJumpDest == e); } else { - assert(h->bbJumpKind == BBJ_NONE); + assert(h->getBBJumpKind() == BBJ_NONE); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); @@ -4907,7 +4907,7 @@ void Compiler::fgDebugCheckLoopTable() // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. - // assert(block->bbJumpKind != BBJ_RETURN); + // assert(block->getBBJumpKind() != BBJ_RETURN); } else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 0d6fedf24ce3e..f6549f3b538df 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -100,7 +100,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() } // If the finally's block jumps back to itself, then it is not empty. - if ((firstBlock->bbJumpKind == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) + if ((firstBlock->getBBJumpKind() == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) { JITDUMP("EH#%u finally has basic block that jumps to itself; skipping.\n", XTnum); XTnum++; @@ -142,7 +142,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() { BasicBlock* nextBlock = currentBlock->bbNext; - if ((currentBlock->bbJumpKind == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) + if ((currentBlock->getBBJumpKind() == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { // Retarget the call finally to jump to the return // point. @@ -160,10 +160,10 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() JITDUMP("so that " FMT_BB " jumps to " FMT_BB "; then remove " FMT_BB "\n", currentBlock->bbNum, postTryFinallyBlock->bbNum, leaveBlock->bbNum); - noway_assert(leaveBlock->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlock->getBBJumpKind() == BBJ_ALWAYS); currentBlock->bbJumpDest = postTryFinallyBlock; - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(postTryFinallyBlock, currentBlock); @@ -373,7 +373,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Look for blocks that are always jumps to a call finally // pair that targets the finally - if (firstTryBlock->bbJumpKind != BBJ_ALWAYS) + if (firstTryBlock->getBBJumpKind() != BBJ_ALWAYS) { JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); @@ -437,7 +437,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) { - if ((block->bbJumpKind == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) + if ((block->getBBJumpKind() == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { assert(block->isBBCallAlwaysPair()); @@ -463,7 +463,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Time to optimize. 
// // (1) Convert the callfinally to a normal jump to the handler - callFinally->bbJumpKind = BBJ_ALWAYS; + callFinally->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation BasicBlock* const leave = callFinally->bbNext; @@ -536,13 +536,13 @@ PhaseStatus Compiler::fgRemoveEmptyTry() block->clearHndIndex(); } - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { Statement* finallyRet = block->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = continuation; fgAddRefPred(continuation, block); fgRemoveRefPred(leave, block); @@ -738,7 +738,7 @@ PhaseStatus Compiler::fgCloneFinally() for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) { - if (block->bbJumpKind == BBJ_SWITCH) + if (block->getBBJumpKind() == BBJ_SWITCH) { hasSwitch = true; break; @@ -753,7 +753,7 @@ PhaseStatus Compiler::fgCloneFinally() regionStmtCount++; } - hasFinallyRet = hasFinallyRet || (block->bbJumpKind == BBJ_EHFINALLYRET); + hasFinallyRet = hasFinallyRet || (block->getBBJumpKind() == BBJ_EHFINALLYRET); isAllRare = isAllRare && block->isRunRarely(); } @@ -821,11 +821,11 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->bbJumpKind == BBJ_NONE) && (block == lastTryBlock)) + if ((block->getBBJumpKind() == BBJ_NONE) && (block == lastTryBlock)) { jumpDest = block->bbNext; } - else if (block->bbJumpKind == BBJ_ALWAYS) + else if (block->getBBJumpKind() == BBJ_ALWAYS) { jumpDest = block->bbJumpDest; } @@ -989,7 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; - if ((placeToMoveAfter->bbJumpKind == BBJ_ALWAYS) && + if ((placeToMoveAfter->getBBJumpKind() == BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n", @@ -1050,7 +1050,8 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = (block->bbJumpKind == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->bbJumpKind; + BBjumpKinds bbNewJumpKind = + (block->getBBJumpKind() == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->getBBJumpKind(); if (block == firstBlock) { @@ -1132,13 +1133,13 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* newBlock = blockMap[block]; - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { Statement* finallyRet = newBlock->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - assert(newBlock->bbJumpKind == BBJ_ALWAYS); // we mapped this above already + assert(newBlock->getBBJumpKind() == BBJ_ALWAYS); // we mapped this above already newBlock->bbJumpDest = normalCallFinallyReturn; fgAddRefPred(normalCallFinallyReturn, newBlock); @@ -1181,7 +1182,7 @@ PhaseStatus Compiler::fgCloneFinally() // This call returns to the expected spot, so // retarget it to branch to the clone. 
currentBlock->bbJumpDest = firstCloneBlock; - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(firstCloneBlock, currentBlock); @@ -1195,7 +1196,7 @@ PhaseStatus Compiler::fgCloneFinally() // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) { - assert(leavePred->bbJumpKind == BBJ_EHFINALLYRET); + assert(leavePred->getBBJumpKind() == BBJ_EHFINALLYRET); assert(leavePred->getHndIndex() == XTnum); } @@ -1241,9 +1242,9 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext; for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext) { - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { - block->bbJumpKind = BBJ_EHFAULTRET; + block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } } @@ -1407,7 +1408,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { continue; } @@ -1433,13 +1434,13 @@ void Compiler::fgDebugCheckTryFinallyExits() bool isCallToFinally = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->bbJumpKind == BBJ_CALLFINALLY) + if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) { // case (a1) isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock); } #else - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { // case (a2) isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock); @@ -1453,7 +1454,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // case (b) isJumpToClonedFinally = true; } - else if (succBlock->bbJumpKind == BBJ_ALWAYS) + else if (succBlock->getBBJumpKind() == BBJ_ALWAYS) { if (succBlock->isEmpty()) { @@ -1466,7 +1467,7 @@ void Compiler::fgDebugCheckTryFinallyExits() } } } - else if (succBlock->bbJumpKind == BBJ_NONE) + else if (succBlock->getBBJumpKind() == BBJ_NONE) { if (succBlock->isEmpty()) { @@ -1899,7 +1900,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, { // We expect callfinallys to be invoked by a BBJ_ALWAYS at this // stage in compilation. - if (block->bbJumpKind != BBJ_ALWAYS) + if (block->getBBJumpKind() != BBJ_ALWAYS) { // Possible paranoia assert here -- no flow successor of // this block should be a callfinally for this try. 
@@ -2195,7 +2196,7 @@ PhaseStatus Compiler::fgTailMergeThrows()
        BasicBlock* const predBlock = predEdge->getSourceBlock();
        nextPredEdge = predEdge->getNextPredEdge();
-       switch (predBlock->bbJumpKind)
+       switch (predBlock->getBBJumpKind())
        {
            case BBJ_NONE:
            {
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index 040cd378ac9c0..14f42c83254c5 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -343,7 +343,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
    BasicBlock* bNext;
-   switch (block->bbJumpKind)
+   switch (block->getBBJumpKind())
    {
        case BBJ_CALLFINALLY:
            if (!(block->bbFlags & BBF_RETLESS_CALL))
            {
@@ -354,7 +354,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
                bNext = block->bbNext;
                /* bNext is an unreachable BBJ_ALWAYS block */
-               noway_assert(bNext->bbJumpKind == BBJ_ALWAYS);
+               noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS);
                while (bNext->countOfInEdges() > 0)
                {
@@ -403,7 +403,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
                for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
                {
-                   if ((bcall->bbFlags & BBF_REMOVED) || bcall->bbJumpKind != BBJ_CALLFINALLY ||
+                   if ((bcall->bbFlags & BBF_REMOVED) || bcall->getBBJumpKind() != BBJ_CALLFINALLY ||
                        bcall->bbJumpDest != finBeg)
                    {
                        continue;
                    }
@@ -470,7 +470,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
        for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
        {
-           if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+           if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
            {
                continue;
            }
@@ -491,7 +491,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switchBlk)
{
-   assert(switchBlk->bbJumpKind == BBJ_SWITCH);
+   assert(switchBlk->getBBJumpKind() == BBJ_SWITCH);
    BlockToSwitchDescMap* switchMap = GetSwitchDescMap();
    SwitchUniqueSuccSet res;
    if (switchMap->Lookup(switchBlk, &res))
    {
@@ -546,7 +546,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc,
                                                 BasicBlock* from,
                                                 BasicBlock* to)
{
-   assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition.
+   assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); // Precondition.
    // Is "from" still in the switch table (because it had more than one entry before?)
    bool fromStillPresent = false;
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index f29293c6b8c96..fd880a2d00348 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -675,12 +675,12 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker>
                if (condTree->IsIntegralConst(0))
                {
-                   block->bbJumpKind = BBJ_ALWAYS;
+                   block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
                    m_compiler->fgRemoveRefPred(block->bbNext, block);
                }
                else
                {
-                   block->bbJumpKind = BBJ_NONE;
+                   block->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler));
                    m_compiler->fgRemoveRefPred(block->bbJumpDest, block);
                }
            }
@@ -1444,7 +1444,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
    // DDB 91389: Don't throw away the (only) inlinee block
    // when its return type is not BBJ_RETURN.
    // In other words, we need its BBJ_ to perform the right thing.
-   if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN)
+   if (InlineeCompiler->fgFirstBB->getBBJumpKind() == BBJ_RETURN)
    {
        // Inlinee contains just one BB. So just insert its statement list to topBlock.
if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) @@ -1523,20 +1523,20 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) block->bbFlags |= BBF_INTERNAL; } - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } fgAddRefPred(bottomBlock, block); @@ -1945,7 +1945,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 7d4c0f9b11ac4..9814f8b9e6b0d 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -292,7 +292,7 @@ void Compiler::fgComputeReturnBlocks() { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } @@ -362,7 +362,7 @@ void Compiler::fgComputeEnterBlocksSet() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { assert(block->isBBCallAlwaysPair()); @@ -466,7 +466,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -474,7 +474,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { - noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -638,7 +638,7 @@ bool Compiler::fgRemoveDeadBlocks() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { assert(block->isBBCallAlwaysPair()); @@ -1650,7 +1650,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. 
if (bbIsHandlerBeg(newTryEntry->bbNext)) { - newTryEntry->bbJumpKind = BBJ_THROW; + newTryEntry->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { @@ -1787,7 +1787,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->bbJumpKind = BBJ_COND; + fromBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); @@ -1827,7 +1827,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // it can be reached directly from "outside". // assert(fgFirstBB->bbJumpDest == osrEntry); - assert(fgFirstBB->bbJumpKind == BBJ_ALWAYS); + assert(fgFirstBB->getBBJumpKind() == BBJ_ALWAYS); if (entryJumpTarget != osrEntry) { @@ -1918,7 +1918,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert(block->bbNext == bNext); - if (block->bbJumpKind != BBJ_NONE) + if (block->getBBJumpKind() != BBJ_NONE) { return false; } @@ -2002,7 +2002,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_SWITCH) + if (predBlock->getBBJumpKind() == BBJ_SWITCH) { return false; } @@ -2027,7 +2027,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); - noway_assert(block->bbJumpKind == BBJ_NONE); + noway_assert(block->getBBJumpKind() == BBJ_NONE); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); @@ -2234,7 +2234,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) // or if both block and bNext have non-zero weights // then we will use the max weight for the block. // - if (bNext->bbJumpKind == BBJ_THROW) + if (bNext->getBBJumpKind() == BBJ_THROW) { block->bbSetRunRarely(); } @@ -2268,7 +2268,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* set the right links */ - block->bbJumpKind = bNext->bbJumpKind; + block->setBBJumpKind(bNext->getBBJumpKind() DEBUG_ARG(this)); VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). 
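Every converted write site in this patch threads the owning Compiler through DEBUG_ARG, as in block->setBBJumpKind(bNext->getBBJumpKind() DEBUG_ARG(this)) just above. For readers following the hunks, a minimal sketch of the DEBUG_ARG idiom; the definition below is an illustrative assumption, since the JIT's own macro lives outside this patch:

    #ifdef DEBUG
    #define DEBUG_ARG(x) , x // debug build: splices ", x" into the argument list
    #else
    #define DEBUG_ARG(x)     // release build: the extra argument vanishes entirely
    #endif

    // Hypothetical function using the idiom. One signature serves both builds,
    // which is why call sites write no comma before DEBUG_ARG:
    //   declaration: void setKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp));
    //   call site:   block->setKind(BBJ_THROW DEBUG_ARG(this));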
@@ -2328,7 +2328,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Set the jump targets */ - switch (bNext->bbJumpKind) + switch (bNext->getBBJumpKind()) { case BBJ_CALLFINALLY: // Propagate RETLESS property @@ -2345,7 +2345,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) + if (bNext->getBBJumpKind() == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } @@ -2375,7 +2375,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } @@ -2627,14 +2627,14 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext); + noway_assert(block->getBBJumpKind() == BBJ_COND && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->bbNext, block); noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); --block->bbNext->bbRefs; flow->decrementDupCount(); @@ -2735,7 +2735,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc bool optimizeJump = true; assert(bDest->isEmpty()); - assert(bDest->bbJumpKind == BBJ_ALWAYS); + assert(bDest->getBBJumpKind() == BBJ_ALWAYS); // We do not optimize jumps between two different try regions. 
// However jumping to a block that is not in any try region is OK @@ -2886,7 +2886,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool madeChanges = false; BasicBlock* bPrev = block->bbPrev; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_SWITCH: @@ -2930,7 +2930,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - if (bPrev->bbJumpKind != BBJ_NONE) + if (bPrev->getBBJumpKind() != BBJ_NONE) { break; } @@ -2957,7 +2957,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ - if (bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) { break; } @@ -2980,7 +2980,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { BasicBlock* succBlock; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } @@ -2997,7 +2997,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_EHCATCHRET) + if (predBlock->getBBJumpKind() == BBJ_EHCATCHRET) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block @@ -3119,7 +3119,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { - assert(block->bbJumpKind == BBJ_SWITCH); + assert(block->getBBJumpKind() == BBJ_SWITCH); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; @@ -3134,7 +3134,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? - if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && + if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; @@ -3312,7 +3312,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) @@ -3377,7 +3377,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -3502,7 +3502,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. 
- if (target->bbJumpKind != BBJ_COND) + if (target->getBBJumpKind() != BBJ_COND) { return false; } @@ -3741,7 +3741,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // if (opts.IsOSR()) { - assert(target->bbJumpKind == BBJ_COND); + assert(target->getBBJumpKind() == BBJ_COND); if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { @@ -3788,7 +3788,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); @@ -3829,7 +3829,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) @@ -3841,7 +3841,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); #ifdef DEBUG if (verbose) { @@ -3859,7 +3859,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi else { /* remove the conditional statement at the end of block */ - noway_assert(block->bbJumpKind == BBJ_COND); + noway_assert(block->getBBJumpKind() == BBJ_COND); noway_assert(block->isValid()); #ifdef DEBUG @@ -3967,7 +3967,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi /* Conditional is gone - simply fall into the next block */ - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ @@ -4002,7 +4002,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - if (bJump->bbJumpKind != BBJ_ALWAYS) + if (bJump->getBBJumpKind() != BBJ_ALWAYS) { return false; } @@ -4021,7 +4021,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) BasicBlock* bDest = bJump->bbJumpDest; - if (bDest->bbJumpKind != BBJ_COND) + if (bDest->getBBJumpKind() != BBJ_COND) { return false; } @@ -4232,7 +4232,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; - bJump->bbJumpKind = BBJ_COND; + bJump->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ @@ -4324,7 +4324,7 @@ bool Compiler::fgOptimizeSwitchJumps() // assert(!block->IsLIR()); - if (block->bbJumpKind != BBJ_SWITCH) + if (block->getBBJumpKind() != BBJ_SWITCH) { continue; } @@ -4393,7 +4393,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. 
// - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = dominantTarget; FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; @@ -4516,7 +4516,7 @@ bool Compiler::fgExpandRarelyRunBlocks() noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else - if (tmpbb->bbJumpKind == BBJ_CALLFINALLY) + if (tmpbb->getBBJumpKind() == BBJ_CALLFINALLY) { bPrevPrev = tmpbb; } @@ -4610,7 +4610,7 @@ bool Compiler::fgExpandRarelyRunBlocks() const char* reason = nullptr; - switch (bPrev->bbJumpKind) + switch (bPrev->getBBJumpKind()) { case BBJ_ALWAYS: @@ -4742,7 +4742,7 @@ bool Compiler::fgExpandRarelyRunBlocks() } /* COMPACT blocks if possible */ - if (bPrev->bbJumpKind == BBJ_NONE) + if (bPrev->getBBJumpKind() == BBJ_NONE) { if (fgCanCompactBlocks(bPrev, block)) { @@ -4934,7 +4934,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (forwardBranch) { - if (bPrev->bbJumpKind == BBJ_ALWAYS) + if (bPrev->getBBJumpKind() == BBJ_ALWAYS) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block @@ -5017,9 +5017,9 @@ bool Compiler::fgReorderBlocks(bool useProfile) } } } - else // (bPrev->bbJumpKind == BBJ_COND) + else // (bPrev->getBBJumpKind() == BBJ_COND) { - noway_assert(bPrev->bbJumpKind == BBJ_COND); + noway_assert(bPrev->getBBJumpKind() == BBJ_COND); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 'takenRatio') // is more than 51% @@ -5211,7 +5211,8 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->bbJumpKind == BBJ_RETURN) && (bPrev->bbJumpKind == BBJ_ALWAYS)) + if ((bDest == block->bbNext) && (block->getBBJumpKind() == BBJ_RETURN) && + (bPrev->getBBJumpKind() == BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. 
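Several hunks in this section (fginline.cpp earlier, importer.cpp later) pair a jump-kind change with a matching fgRemoveRefPred call. A condensed sketch of the invariant they maintain, using only the accessors this patch introduces; keepFallThrough is a placeholder name, not a real JIT variable:

    // A BBJ_COND block owns a predecessor edge into both of its successors:
    // bbNext (fall through) and bbJumpDest (taken). Folding the condition
    // leaves a single successor, so the edge to the dropped successor must be
    // removed in the same step to keep the bbRefs bookkeeping consistent.
    if (keepFallThrough)
    {
        block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));   // keep only bbNext
        fgRemoveRefPred(block->bbJumpDest, block);        // drop the taken edge
    }
    else
    {
        block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // keep only bbJumpDest
        fgRemoveRefPred(block->bbNext, block);            // drop the fall-through edge
    }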
@@ -5245,7 +5246,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { - noway_assert(bPrev->bbJumpKind == BBJ_COND); + noway_assert(bPrev->getBBJumpKind() == BBJ_COND); optimizedBranches = true; } continue; @@ -5422,7 +5423,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (bEnd2->isBBCallAlwaysPair()) { - noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; @@ -5501,12 +5502,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) { if (bDest != nullptr) { - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->getBBJumpKind() == BBJ_COND) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } - else if (bPrev->bbJumpKind == BBJ_ALWAYS) + else if (bPrev->getBBJumpKind() == BBJ_ALWAYS) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); @@ -5576,7 +5577,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call - noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); + noway_assert((bEnd->getBBJumpKind() != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); @@ -5715,7 +5716,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; - if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && + if ((bEnd->getBBJumpKind() == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] @@ -5843,7 +5844,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) printf("block " FMT_BB, bStart->bbNum); } - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->getBBJumpKind() == BBJ_COND) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } @@ -5854,7 +5855,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } #endif // DEBUG - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->getBBJumpKind() == BBJ_COND) { /* Reverse the bPrev jump condition */ Statement* const condTestStmt = bPrev->lastStmt(); @@ -6102,7 +6103,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext = block->bbNext; bDest = nullptr; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) @@ -6114,7 +6115,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } } - if (block->bbJumpKind == BBJ_NONE) + if (block->getBBJumpKind() == BBJ_NONE) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) @@ -6146,7 +6147,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? 
- if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && + if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) @@ -6165,12 +6166,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // - if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block - bNext->isEmpty() && // and it is an empty block - (bNext != bNext->bbJumpDest) && // special case for self jumps + if ((block->getBBJumpKind() == BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + (bNext->getBBJumpKind() == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + bNext->isEmpty() && // and it is an empty block + (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections { @@ -6383,7 +6384,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // // Update the switch jump table such that it follows jumps to jumps: // - if (block->bbJumpKind == BBJ_SWITCH) + if (block->getBBJumpKind() == BBJ_SWITCH) { if (fgOptimizeSwitchBranches(block)) { @@ -6418,11 +6419,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. - if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (block->countOfInEdges() == 0 && bPrev->getBBJumpKind() == BBJ_CALLFINALLY) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); - noway_assert(block->bbJumpKind == BBJ_ALWAYS); + noway_assert(block->getBBJumpKind() == BBJ_ALWAYS); bPrev = block; continue; } @@ -6454,7 +6455,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->countOfInEdges() == 1) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -6551,7 +6552,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_NONE: costSz = 0; @@ -6899,7 +6900,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) } bool const isNoSplit = stmt == predBlock->firstStmt(); - bool const isFallThrough = (predBlock->bbJumpKind == BBJ_NONE); + bool const isFallThrough = (predBlock->getBBJumpKind() == BBJ_NONE); // Is this block possibly better than what we have? // @@ -6976,7 +6977,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->bbJumpKind = BBJ_ALWAYS; + predBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); predBlock->bbJumpDest = crossJumpTarget; fgRemoveRefPred(block, predBlock); @@ -7067,7 +7068,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. 
- if ((block->bbJumpKind != BBJ_COND) || (block->bbNext == block->bbJumpDest)) + if ((block->getBBJumpKind() != BBJ_COND) || (block->bbNext == block->bbJumpDest)) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 16d0b0e307010..317dd4a25bca2 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -473,7 +473,7 @@ void BlockCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->getBBJumpKind() == BBJ_RETURN); // Scan for critical preds, and add relocated probes to non-critical preds. // @@ -499,12 +499,12 @@ void BlockCountInstrumentor::RelocateProbes() { // Ensure this pred is not a fall through. // - if (pred->bbJumpKind == BBJ_NONE) + if (pred->getBBJumpKind() == BBJ_NONE) { - pred->bbJumpKind = BBJ_ALWAYS; + pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->bbJumpKind == BBJ_ALWAYS); + assert(pred->getBBJumpKind() == BBJ_ALWAYS); } } @@ -945,7 +945,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) visitor->VisitBlock(block); nBlocks++; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_CALLFINALLY: { @@ -1028,7 +1028,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) JITDUMP("No jump dest for " FMT_BB ", suspect bad code\n", block->bbNum); visitor->Badcode(); } - else if (block->bbJumpKind != BBJ_LEAVE) + else if (block->getBBJumpKind() != BBJ_LEAVE) { JITDUMP("EH RET in " FMT_BB " most-nested in try, suspect bad code\n", block->bbNum); visitor->Badcode(); @@ -1552,9 +1552,9 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // Importer folding may have changed the block jump kind // to BBJ_NONE. If so, warp it back to BBJ_ALWAYS. // - if (block->bbJumpKind == BBJ_NONE) + if (block->getBBJumpKind() == BBJ_NONE) { - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; } @@ -1657,7 +1657,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->getBBJumpKind() == BBJ_RETURN); // This block should have just one probe, which we no longer need. // @@ -1695,12 +1695,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // Ensure this pred is not a fall through. // - if (pred->bbJumpKind == BBJ_NONE) + if (pred->getBBJumpKind() == BBJ_NONE) { - pred->bbJumpKind = BBJ_ALWAYS; + pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->bbJumpKind == BBJ_ALWAYS); + assert(pred->getBBJumpKind() == BBJ_ALWAYS); } } @@ -3166,7 +3166,7 @@ void EfficientEdgeCountReconstructor::Prepare() m_unknownBlocks++; #ifdef DEBUG - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { nReturns++; } @@ -3233,7 +3233,7 @@ void EfficientEdgeCountReconstructor::Prepare() CLRRandom* const random = m_comp->impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomEdgeCounts()); - const bool isReturn = sourceBlock->bbJumpKind == BBJ_RETURN; + const bool isReturn = sourceBlock->getBBJumpKind() == BBJ_RETURN; // We simulate the distribution of counts seen in StdOptimizationData.Mibc. 
// @@ -3922,7 +3922,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, BlockInfo* info) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_SWITCH: MarkInterestingSwitches(block, info); @@ -3949,7 +3949,7 @@ void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, B // void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, BlockInfo* info) { - assert(block->bbJumpKind == BBJ_SWITCH); + assert(block->getBBJumpKind() == BBJ_SWITCH); // Thresholds for detecting a dominant switch case. // @@ -4429,11 +4429,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) bSrc = bDst->bbPreds->getSourceBlock(); // Does this block flow into only one other block - if (bSrc->bbJumpKind == BBJ_NONE) + if (bSrc->getBBJumpKind() == BBJ_NONE) { bOnlyNext = bSrc->bbNext; } - else if (bSrc->bbJumpKind == BBJ_ALWAYS) + else if (bSrc->getBBJumpKind() == BBJ_ALWAYS) { bOnlyNext = bSrc->bbJumpDest; } @@ -4450,11 +4450,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } // Does this block flow into only one other block - if (bDst->bbJumpKind == BBJ_NONE) + if (bDst->getBBJumpKind() == BBJ_NONE) { bOnlyNext = bDst->bbNext; } - else if (bDst->bbJumpKind == BBJ_ALWAYS) + else if (bDst->getBBJumpKind() == BBJ_ALWAYS) { bOnlyNext = bDst->bbJumpDest; } @@ -4485,7 +4485,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // To minimize asmdiffs for now, modify weights only if splitting. if (fgFirstColdBlock != nullptr) { - if (bSrc->bbJumpKind == BBJ_CALLFINALLY) + if (bSrc->getBBJumpKind() == BBJ_CALLFINALLY) { newWeight = bSrc->bbWeight; } @@ -4687,7 +4687,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - switch (bSrc->bbJumpKind) + switch (bSrc->getBBJumpKind()) { case BBJ_ALWAYS: case BBJ_EHCATCHRET: @@ -4756,7 +4756,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() bSrc = edge->getSourceBlock(); slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - if (bSrc->bbJumpKind == BBJ_COND) + if (bSrc->getBBJumpKind() == BBJ_COND) { weight_t diff; FlowEdge* otherEdge; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 286510cf71d60..722f5f8cadfdd 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -132,7 +132,7 @@ void ProfileSynthesis::AssignLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->bbJumpKind == BBJ_THROW); - bool const isNextThrow = (next->bbJumpKind == BBJ_THROW); + bool const isJumpThrow = (jump->getBBJumpKind() == BBJ_THROW); + bool const isNextThrow = (next->getBBJumpKind() == BBJ_THROW); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->bbJumpKind == BBJ_RETURN); - bool const isNextReturn = (next->bbJumpKind == BBJ_RETURN); + bool const isJumpReturn = (jump->getBBJumpKind() == BBJ_RETURN); + bool const isNextReturn = (next->getBBJumpKind() == BBJ_RETURN); if (isJumpReturn != isNextReturn) { @@ -499,7 +499,7 @@ void 
ProfileSynthesis::RepairLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -551,7 +551,7 @@ void ProfileSynthesis::RepairLikelihoods() } JITDUMP("\n"); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { AssignLikelihoodCond(block); } @@ -591,7 +591,7 @@ void ProfileSynthesis::BlendLikelihoods() { weight_t sum = SumOutgoingLikelihoods(block, &likelihoods); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -627,7 +627,7 @@ void ProfileSynthesis::BlendLikelihoods() bool const consistent = Compiler::fgProfileWeightsEqual(sum, 1.0, epsilon); bool const zero = Compiler::fgProfileWeightsEqual(block->bbWeight, 0.0, epsilon); - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { AssignLikelihoodCond(block); } @@ -1214,7 +1214,8 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. // - if ((exitBlock->bbJumpKind == BBJ_COND) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if ((exitBlock->getBBJumpKind() == BBJ_COND) && + (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 3b157483cd75f..2d5c2b3fd68a3 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -120,7 +120,7 @@ PhaseStatus Compiler::fgInsertGCPolls() JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); pollType = GCPOLL_CALL; } - else if (BBJ_SWITCH == block->bbJumpKind) + else if (BBJ_SWITCH == block->getBBJumpKind()) { // We don't want to deal with all the outgoing edges of a switch block. // @@ -254,15 +254,15 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) BasicBlock* topFallThrough = nullptr; unsigned char lpIndexFallThrough = BasicBlock::NOT_IN_LOOP; - if (top->bbJumpKind == BBJ_COND) + if (top->getBBJumpKind() == BBJ_COND) { topFallThrough = top->bbNext; lpIndexFallThrough = topFallThrough->bbNatLoopNum; } BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); - bottom = fgNewBBafter(top->bbJumpKind, poll, true); - BBjumpKinds oldJumpKind = top->bbJumpKind; + bottom = fgNewBBafter(top->getBBJumpKind(), poll, true); + BBjumpKinds oldJumpKind = top->getBBJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; // Update block flags @@ -372,7 +372,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) #endif top->bbJumpDest = bottom; - top->bbJumpKind = BBJ_COND; + top->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. 
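    // Sketch of the resulting flow (names as in this hunk; 'bottom' inherits
    // 'top's original kind via getBBJumpKind(), while 'top' is rewritten to
    // BBJ_COND so the poll can be skipped on the fast path):
    //
    //    top (BBJ_COND) ------taken------> bottom (oldJumpKind)
    //     |                                   ^
    //     +--fall through--> poll (BBJ_NONE)--+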
fgAddRefPred(bottom, poll); @@ -1287,7 +1287,7 @@ void Compiler::fgLoopCallMark() for (BasicBlock* const block : Blocks()) { - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -1728,7 +1728,7 @@ void Compiler::fgAddSyncMethodEnterExit() // non-exceptional cases for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, block, false /*exit*/); } @@ -1772,7 +1772,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis } #endif - if (block->bbJumpKind == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) { GenTreeUnOp* retNode = block->lastStmt()->GetRootNode()->AsUnOp(); GenTree* retExpr = retNode->gtOp1; @@ -1821,7 +1821,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(genReturnBB != nullptr); assert(genReturnBB != block); assert(fgReturnCount <= 1); // We have a single return for synchronized methods - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->getBBJumpKind() == BBJ_RETURN); assert((block->bbFlags & BBF_HAS_JMP) == 0); assert(block->hasTryIndex()); assert(!block->hasHndIndex()); @@ -1837,7 +1837,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); @@ -1949,7 +1949,7 @@ bool Compiler::fgMoreThanOneReturnBlock() for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { retCnt++; if (retCnt > 1) @@ -2309,7 +2309,7 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->bbJumpKind = BBJ_ALWAYS; + returnBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); returnBlock->bbJumpDest = constReturnBlock; comp->fgAddRefPred(constReturnBlock, returnBlock); @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3125,7 +3125,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // It's a jump from outside the handler; add it to the newHead preds list and remove // it from the block preds list. 
- switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { case BBJ_CALLFINALLY: noway_assert(predBlock->bbJumpDest == block); @@ -3451,7 +3451,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // so the code size for block needs be large // enough to make it worth our while // - if ((lblk == nullptr) || (lblk->bbJumpKind != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) + if ((lblk == nullptr) || (lblk->getBBJumpKind() != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) { // This block is now a candidate for first cold block // Also remember the predecessor to this block @@ -3503,7 +3503,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // if (prevToFirstColdBlock->bbFallsThrough()) { - switch (prevToFirstColdBlock->bbJumpKind) + switch (prevToFirstColdBlock->getBBJumpKind()) { default: noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()"); @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. // - if (firstColdBlock->isEmpty() && (firstColdBlock->bbJumpKind == BBJ_ALWAYS)) + if (firstColdBlock->isEmpty() && (firstColdBlock->getBBJumpKind() == BBJ_ALWAYS)) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; @@ -3548,7 +3548,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // convert it to BBJ_ALWAYS to force an explicit jump. prevToFirstColdBlock->bbJumpDest = firstColdBlock; - prevToFirstColdBlock->bbJumpKind = BBJ_ALWAYS; + prevToFirstColdBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); break; } } @@ -3981,7 +3981,7 @@ PhaseStatus Compiler::fgSetBlockOrder() (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) bool partiallyInterruptible = true; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index e0937b8975fbb..404d86e3abc0e 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -529,7 +529,7 @@ void Compiler::gsParamsToShadows() // We would have to insert assignments in all such blocks, just before GT_JMP stmnt. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind != BBJ_RETURN) + if (block->getBBJumpKind() != BBJ_RETURN) { continue; } diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index f51417453225a..da0683be95ab4 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -83,7 +83,7 @@ class OptIfConversionDsc bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block) { // Block should have a single successor or be a return. - if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->bbJumpKind == BBJ_RETURN)))) + if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->getBBJumpKind() == BBJ_RETURN)))) { return false; } @@ -137,7 +137,7 @@ bool OptIfConversionDsc::IfConvertCheckThenFlow() { // All the Then blocks up to m_finalBlock are in a valid flow. m_flowFound = true; - if (thenBlock->bbJumpKind == BBJ_RETURN) + if (thenBlock->getBBJumpKind() == BBJ_RETURN) { assert(m_finalBlock == nullptr); m_mainOper = GT_RETURN; @@ -553,7 +553,7 @@ void OptIfConversionDsc::IfConvertDump() bool OptIfConversionDsc::optIfConvert() { // Does the block end by branching via a JTRUE after a compare? 
- if (m_startBlock->bbJumpKind != BBJ_COND || m_startBlock->NumSucc() != 2) + if (m_startBlock->getBBJumpKind() != BBJ_COND || m_startBlock->NumSucc() != 2) { return false; } @@ -743,7 +743,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock); - m_startBlock->bbJumpKind = BBJ_ALWAYS; + m_startBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index b0c2a37f7eaab..704536165d584 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2455,7 +2455,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; @@ -4101,7 +4101,7 @@ bool Compiler::impIsImplicitTailCallCandidate( // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. - if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) + if (!isRecursive && (compCurBB->getBBJumpKind() != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN @@ -4250,7 +4250,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->bbJumpKind == BBJ_LEAVE); + assert(block->getBBJumpKind() == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); @@ -4321,8 +4321,8 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - callBlock = block; - callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock = block; + callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) { @@ -4344,7 +4344,7 @@ void Compiler::impImportLeave(BasicBlock* block) /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); if (step->bbJumpDest != nullptr) { fgRemoveRefPred(step->bbJumpDest, step); @@ -4419,7 +4419,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) { @@ -4523,7 +4523,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->bbJumpKind == BBJ_LEAVE); + assert(block->getBBJumpKind() == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; @@ -4572,9 +4572,9 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - step = block; - step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET - stepType = ST_Catch; + step = block; + step->setBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); 
// convert the BBJ_LEAVE to BBJ_EHCATCHRET + stepType = ST_Catch; #ifdef DEBUG if (verbose) @@ -4606,7 +4606,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4651,7 +4651,7 @@ void Compiler::impImportLeave(BasicBlock* block) // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgRemoveRefPred(block->bbJumpDest, block); block->bbJumpDest = callBlock; fgAddRefPred(callBlock, block); @@ -4672,8 +4672,8 @@ void Compiler::impImportLeave(BasicBlock* block) #else // !FEATURE_EH_CALLFINALLY_THUNKS - callBlock = block; - callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock = block; + callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) @@ -4708,7 +4708,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS - if (step->bbJumpKind == BBJ_EHCATCHRET) + if (step->getBBJumpKind() == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4758,7 +4758,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4850,12 +4850,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); - assert(step->bbJumpKind == BBJ_EHCATCHRET); + assert(step->getBBJumpKind() == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ @@ -4908,7 +4908,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) @@ -4931,7 +4931,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->getBBJumpKind() == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4992,9 +4992,9 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
- if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->getBBJumpKind() == BBJ_CALLFINALLY) { - BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); + BasicBlock* dupBlock = bbNewBasicBlock(block->getBBJumpKind()); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; fgAddRefPred(dupBlock->bbJumpDest, dupBlock); @@ -5024,7 +5024,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif // FEATURE_EH_FUNCLETS - block->bbJumpKind = BBJ_LEAVE; + block->setBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); fgInitBBLookup(); fgRemoveRefPred(block->bbJumpDest, block); @@ -6002,7 +6002,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. @@ -6715,7 +6715,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); - assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); + assert(compCurBB->getBBJumpKind() == BBJ_EHFILTERRET); /* Mark catch handler as successor */ @@ -7256,7 +7256,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } JITDUMP(" %04X", jmpAddr); - if (block->bbJumpKind != BBJ_LEAVE) + if (block->getBBJumpKind() != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } @@ -7302,16 +7302,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { - assert(block->bbJumpKind == BBJ_NONE); + assert(block->getBBJumpKind() == BBJ_NONE); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7363,11 +7363,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); - assertImp((block->bbJumpKind == BBJ_COND) // normal case - || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the - // block for the second time + assertImp((block->getBBJumpKind() == BBJ_COND) // normal case + || + (block->getBBJumpKind() == foldedJumpKind)); // this can happen if we are reimporting the + // block for the second time - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { if (foldedJumpKind == BBJ_NONE) { @@ -7380,7 +7381,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) block->bbJumpDest->bbNum); fgRemoveRefPred(block->bbNext, block); } - block->bbJumpKind = foldedJumpKind; + block->setBBJumpKind(foldedJumpKind DEBUG_ARG(this)); } break; @@ -7548,16 +7549,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. 
// - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { - assert(block->bbJumpKind == BBJ_NONE); + assert(block->getBBJumpKind() == BBJ_NONE); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7633,13 +7634,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -7657,8 +7658,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) { printf("\nSwitch folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->getBBJumpKind() == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -8531,9 +8532,10 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } - bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && - (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); + bool bbInALoop = impBlockIsInALoop(block); + bool bbIsReturn = + (block->getBBJumpKind() == BBJ_RETURN) && + (!compIsForInlining() || (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { @@ -11279,7 +11281,7 @@ void Compiler::impImportBlock(BasicBlock* block) unsigned multRef = impCanReimport ? 
unsigned(~0) : 0; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: @@ -12117,11 +12119,11 @@ void Compiler::impImport() JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; - if (entryBlock->bbJumpKind == BBJ_NONE) + if (entryBlock->getBBJumpKind() == BBJ_NONE) { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) + else if (opts.IsOSR() && (entryBlock->getBBJumpKind() == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } @@ -12239,7 +12241,7 @@ void Compiler::impFixPredLists() continue; } - if (finallyBlock->bbJumpKind != BBJ_EHFINALLYRET) + if (finallyBlock->getBBJumpKind() != BBJ_EHFINALLYRET) { continue; } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index d340354d34ef1..fbe0978f2514b 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1095,7 +1095,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); - assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); + assert(!isExplicitTailCall || compCurBB->getBBJumpKind() == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) @@ -1271,10 +1271,10 @@ var_types Compiler::impImportCall(OPCODE opcode, // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // - if (compCurBB->bbJumpKind != BBJ_RETURN) + if (compCurBB->getBBJumpKind() != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); - assert(successor->bbJumpKind == BBJ_RETURN); + assert(successor->getBBJumpKind() == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 37f0d626cbbc3..15cee342aa603 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -572,8 +572,8 @@ class IndirectCallTransformer { // There's no need for a new block here. We can just append to currBlock. // - checkBlock = currBlock; - checkBlock->bbJumpKind = BBJ_COND; + checkBlock = currBlock; + checkBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); } else { @@ -652,7 +652,7 @@ class IndirectCallTransformer if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0)) { checkBlock->bbJumpDest = nullptr; - checkBlock->bbJumpKind = BBJ_NONE; + checkBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); return; } @@ -1073,7 +1073,7 @@ class IndirectCallTransformer // BasicBlock* const coldBlock = checkBlock->bbPrev; - if (coldBlock->bbJumpKind != BBJ_NONE) + if (coldBlock->getBBJumpKind() != BBJ_NONE) { JITDUMP("Unexpected flow from cold path " FMT_BB "\n", coldBlock->bbNum); return; @@ -1081,7 +1081,7 @@ class IndirectCallTransformer BasicBlock* const hotBlock = coldBlock->bbPrev; - if ((hotBlock->bbJumpKind != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) + if ((hotBlock->getBBJumpKind() != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) { JITDUMP("Unexpected flow from hot path " FMT_BB "\n", hotBlock->bbNum); return; @@ -1126,7 +1126,7 @@ class IndirectCallTransformer // not fall through to the check block. 
// compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->bbJumpKind = BBJ_ALWAYS; + coldBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); coldBlock->bbJumpDest = elseBlock; compiler->fgAddRefPred(elseBlock, coldBlock); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a257ebc173502..888058d133b62 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -960,7 +960,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { - assert(blockCallFinally->bbJumpKind == BBJ_CALLFINALLY); + assert(blockCallFinally->getBBJumpKind() == BBJ_CALLFINALLY); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); @@ -2276,7 +2276,7 @@ bool Compiler::fgNormalizeEHCase2() // Change pred branches. // - if (predBlock->bbJumpKind != BBJ_NONE) + if (predBlock->getBBJumpKind() != BBJ_NONE) { fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); } @@ -3506,7 +3506,7 @@ void Compiler::fgVerifyHandlerTab() } // Check for legal block types - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_EHFINALLYRET: { @@ -4056,12 +4056,12 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block) + if (predBlock->getBBJumpKind() == BBJ_ALWAYS && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { - if (pPrev->bbJumpKind == BBJ_CALLFINALLY) + if (pPrev->getBBJumpKind() == BBJ_CALLFINALLY) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; @@ -4113,7 +4113,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. - if (predBlock->bbJumpKind == BBJ_CALLFINALLY) + if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) { assert(predBlock->bbJumpDest == block); @@ -4184,7 +4184,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) // The block is a handler. Check if the pred block is from its filter. We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. 
- if (predBlock->bbJumpKind == BBJ_EHFILTERRET) + if (predBlock->getBBJumpKind() == BBJ_EHFILTERRET) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; @@ -4413,7 +4413,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); - assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET); + assert(bFilterLast->getBBJumpKind() == BBJ_EHFILTERRET); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 57b4f164fd444..820545508968e 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -4098,7 +4098,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; // TODO: Zero-inits in LSRA are created with below condition. But if filter out based on that condition // we filter a lot of interesting variables that would benefit otherwise with EH var enregistration. // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem || diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 4389b6d6c4d8e..7edb0515ae323 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -1770,7 +1770,7 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range) assert(insertionPoint != nullptr); #if DEBUG - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: assert(insertionPoint->OperIsConditionalJump()); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 62f0e1784a1d3..9c9aafe0686b6 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -378,7 +378,7 @@ void Compiler::fgPerBlockLocalVarLiveness() block->bbMemoryLiveIn = fullMemoryKindSet; block->bbMemoryLiveOut = fullMemoryKindSet; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: @@ -491,7 +491,7 @@ void Compiler::fgPerBlockLocalVarLiveness() // Mark the FrameListRoot as used, if applicable. - if (block->bbJumpKind == BBJ_RETURN && compMethodRequiresPInvokeFrame()) + if (block->getBBJumpKind() == BBJ_RETURN && compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) @@ -886,7 +886,7 @@ void Compiler::fgExtendDbgLifetimes() { VarSetOps::ClearD(this, initVars); - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_NONE: PREFIX_ASSUME(block->bbNext != nullptr); @@ -2451,7 +2451,7 @@ void Compiler::fgInterBlockLocalVarLiveness() { // Get the set of live variables on exit from an exception region. VarSetOps::UnionD(this, exceptVars, block->bbLiveOut); - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { // Live on exit from finally. 
// We track these separately because, in addition to having EH live-out semantics, diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index c17c4cdd29527..c6e6dc91c3d88 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1766,7 +1766,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { - if (blk->bbJumpKind == BBJ_RETURN) + if (blk->getBBJumpKind() == BBJ_RETURN) { loopRetCount++; } @@ -1855,7 +1855,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; - if (bottom->bbJumpKind != BBJ_COND) + if (bottom->getBBJumpKind() != BBJ_COND) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Couldn't find termination test.\n", loopInd); return false; @@ -1945,7 +1945,7 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); - assert(insertAfter->bbJumpKind == BBJ_NONE); + assert(insertAfter->getBBJumpKind() == BBJ_NONE); if (context->HasBlockConditions(loopNum)) { @@ -2043,11 +2043,11 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) h2->bbNatLoopNum = ambientLoop; h2->bbFlags |= BBF_LOOP_PREHEADER; - if (h->bbJumpKind != BBJ_NONE) + if (h->getBBJumpKind() != BBJ_NONE) { - assert(h->bbJumpKind == BBJ_ALWAYS); + assert(h->getBBJumpKind() == BBJ_ALWAYS); assert(h->bbJumpDest == loop.lpEntry); - h2->bbJumpKind = BBJ_ALWAYS; + h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; } @@ -2062,16 +2062,16 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make 'h' fall through to 'h2' (if it didn't already). // Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and // optInsertLoopChoiceConditions() will add the edge. - h->bbJumpKind = BBJ_NONE; + h->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); h->bbJumpDest = nullptr; // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; - if (b->bbJumpKind != BBJ_ALWAYS) + if (b->getBBJumpKind() != BBJ_ALWAYS) { - assert(b->bbJumpKind == BBJ_COND); + assert(b->getBBJumpKind() == BBJ_COND); BasicBlock* x = b->bbNext; if (x != nullptr) @@ -2116,7 +2116,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { - BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true); + BasicBlock* newBlk = fgNewBBafter(blk->getBBJumpKind(), newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it @@ -2175,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->bbJumpKind == newblk->bbJumpKind); + assert(blk->getBBJumpKind() == newblk->getBBJumpKind()); // First copy the jump destination(s) from "blk". 
optCopyBlkDest(blk, newblk); @@ -2184,7 +2184,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. - switch (newblk->bbJumpKind) + switch (newblk->getBBJumpKind()) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); @@ -2243,7 +2243,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // We should always have block conditions. assert(context->HasBlockConditions(loopInd)); - assert(h->bbJumpKind == BBJ_NONE); + assert(h->getBBJumpKind() == BBJ_NONE); assert(h->bbNext == h2); // If any condition is false, go to slowHead (which branches or falls through to e2). @@ -2254,8 +2254,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. - assert(slowHead->bbJumpKind == BBJ_NONE); // This is how we created it above. - slowHead->bbJumpKind = BBJ_ALWAYS; + assert(slowHead->getBBJumpKind() == BBJ_NONE); // This is how we created it above. + slowHead->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } @@ -2266,7 +2266,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). - assert(condLast->bbJumpKind == BBJ_COND); + assert(condLast->getBBJumpKind() == BBJ_COND); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 313354d107826..b985a5a8b1229 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -801,12 +801,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { - originalSwitchBB->bbJumpKind = BBJ_NONE; + originalSwitchBB->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = nullptr; } else { - originalSwitchBB->bbJumpKind = BBJ_ALWAYS; + originalSwitchBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. @@ -891,16 +891,16 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. - assert(originalSwitchBB->bbJumpKind == BBJ_NONE); + assert(originalSwitchBB->getBBJumpKind() == BBJ_NONE); assert(originalSwitchBB->bbNext == afterDefaultCondBlock); - assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH); + assert(afterDefaultCondBlock->getBBJumpKind() == BBJ_SWITCH); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). // Turn originalSwitchBB into a BBJ_COND. 
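// [Annotation, not part of the patch. At this point LowerSwitch has peeled off the default
//  case, so the flow being assembled is roughly:
//      originalSwitchBB:       if ((unsigned)switchValue > lastCaseIndex) goto defaultTarget;  // the BBJ_COND set just below
//      afterDefaultCondBlock:  switch (switchValue) { /* remaining cases */ }                  // the BBJ_SWITCH asserted above
//  The bound-check shape is paraphrased from the surrounding code, not quoted from it.]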
- originalSwitchBB->bbJumpKind = BBJ_COND; + originalSwitchBB->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1]; // Fix the pred for the default case: the default block target still has originalSwitchBB @@ -957,12 +957,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } if (afterDefaultCondBlock->bbNext == uniqueSucc) { - afterDefaultCondBlock->bbJumpKind = BBJ_NONE; + afterDefaultCondBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = nullptr; } else { - afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS; + afterDefaultCondBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = uniqueSucc; } } @@ -1036,13 +1036,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->bbJumpKind = BBJ_COND; + currentBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1074,8 +1074,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); - assert(currentBlock->bbJumpKind == BBJ_SWITCH); - currentBlock->bbJumpKind = BBJ_NONE; + assert(currentBlock->getBBJumpKind() == BBJ_SWITCH); + currentBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } @@ -1159,7 +1159,7 @@ bool Lowering::TryLowerSwitchToBitTest( { assert(jumpCount >= 2); assert(targetCount >= 2); - assert(bbSwitch->bbJumpKind == BBJ_SWITCH); + assert(bbSwitch->getBBJumpKind() == BBJ_SWITCH); assert(switchValue->OperIs(GT_LCL_VAR)); // @@ -1247,7 +1247,7 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition bbSwitchCondition; - bbSwitch->bbJumpKind = BBJ_COND; + bbSwitch->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); @@ -5296,7 +5296,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || + assert(((returnBB == comp->genReturnBB) && (returnBB->getBBJumpKind() == BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index ec19a65c13464..1b7aebaea1997 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -964,7 +964,7 @@ void LinearScan::setBlockSequence() blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } - else if (predBlock->bbJumpKind == BBJ_SWITCH) + else if (predBlock->getBBJumpKind() == BBJ_SWITCH) { assert(!"Switch with single successor"); } @@ -993,7 +993,7 @@ void LinearScan::setBlockSequence() // according to the desired order. We will handle the EH successors below. 
const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); - if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH) + if (!checkForCriticalOutEdge && block->getBBJumpKind() == BBJ_SWITCH) { assert(!"Switch with single successor"); } @@ -1549,7 +1549,7 @@ void LinearScan::identifyCandidatesExceptionDataflow() if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->getBBJumpKind() == BBJ_EHFINALLYRET) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, @@ -2513,7 +2513,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, // IG08: // ... // ... - if (block->bbJumpKind == BBJ_THROW) + if (block->getBBJumpKind() == BBJ_THROW) { JITDUMP(" - throw block; "); return nullptr; @@ -2544,7 +2544,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { - if (predBlock->bbJumpKind == BBJ_COND) + if (predBlock->getBBJumpKind() == BBJ_COND) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext; @@ -8177,7 +8177,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: Only switches and JCMP/JTEST (for Arm4) have input regs (and so can be fed by copies), so those // are the only block-ending branches that need special handling. regMaskTP consumedRegs = RBM_NONE; - if (block->bbJumpKind == BBJ_SWITCH) + if (block->getBBJumpKind() == BBJ_SWITCH) { // At this point, Lowering has transformed any non-switch-table blocks into // cascading ifs. @@ -8216,7 +8216,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: GT_COPY has special handling in codegen and its generation is merged with the // node that consumes its result. So both, the input and output regs of GT_COPY must be // excluded from the set available for resolution. - else if (block->bbJumpKind == BBJ_COND) + else if (block->getBBJumpKind() == BBJ_COND) { GenTree* lastNode = LIR::AsRange(block).LastNode(); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 8cc25ba6b68bc..0342221537d57 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6126,7 +6126,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // No unique successor. compCurBB should be a return. // - assert(compCurBB->bbJumpKind == BBJ_RETURN); + assert(compCurBB->getBBJumpKind() == BBJ_RETURN); } else { @@ -6190,7 +6190,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. - compCurBB->bbJumpKind = BBJ_RETURN; + compCurBB->setBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); @@ -6329,7 +6329,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. 
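// [Annotation, not part of the patch. The two tail-call shapes handled below: a fast tail
//  call keeps the block as BBJ_RETURN and adds BBF_HAS_JMP (jmp-style epilog), while a tail
//  call dispatched via the CORINFO_HELP_TAILCALL helper turns the block into BBJ_THROW,
//  since that helper never returns to the caller.]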
- noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); + noway_assert(compCurBB->getBBJumpKind() == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; @@ -6338,7 +6338,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->bbJumpKind = BBJ_THROW; + compCurBB->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } if (isRootReplaced) @@ -7490,7 +7490,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa } // Finish hooking things up. - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } @@ -8032,7 +8032,7 @@ GenTree* Compiler::fgMorphConst(GenTree* tree) // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; - if (compCurBB->bbJumpKind == BBJ_THROW) + if (compCurBB->getBBJumpKind() == BBJ_THROW) { useLazyStrCns = true; } @@ -13120,7 +13120,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) return result; } - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13183,9 +13183,9 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ - block->bbJumpKind = BBJ_ALWAYS; - bTaken = block->bbJumpDest; - bNotTaken = block->bbNext; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + bTaken = block->bbJumpDest; + bNotTaken = block->bbNext; } else { @@ -13199,9 +13199,9 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ - block->bbJumpKind = BBJ_NONE; - bTaken = block->bbNext; - bNotTaken = block->bbJumpDest; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bTaken = block->bbNext; + bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) @@ -13254,7 +13254,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) FlowEdge* edge; // Now fix the weights of the edges out of 'bUpdated' - switch (bUpdated->bbJumpKind) + switch (bUpdated->getBBJumpKind()) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); @@ -13294,8 +13294,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->getBBJumpKind() == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->getBBJumpKind() == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13356,7 +13356,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } } } - else if (block->bbJumpKind == BBJ_SWITCH) + else if (block->getBBJumpKind() == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13429,13 +13429,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -13453,8 +13453,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->getBBJumpKind() == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13727,10 +13727,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && + noway_assert((call->IsFastTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || - (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); + (call->IsTailCallViaJitHelper() && (compCurBB->getBBJumpKind() == BBJ_THROW)) || + (!call->IsTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN))); } #ifdef DEBUG @@ -13806,7 +13806,7 @@ void Compiler::fgMorphStmts(BasicBlock* block) if (fgRemoveRestOfBlock) { - if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) + if ((block->getBBJumpKind() == BBJ_COND) || (block->getBBJumpKind() == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); @@ -13814,8 +13814,8 @@ void Compiler::fgMorphStmts(BasicBlock* block) noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); - if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || - ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) + if (((block->getBBJumpKind() == BBJ_COND) && (last->gtOper == GT_JTRUE)) || + ((block->getBBJumpKind() == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; @@ -13923,7 +13923,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? 
- if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13979,7 +13979,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. @@ -14004,7 +14004,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else #endif // !TARGET_X86 { - block->bbJumpKind = BBJ_ALWAYS; + block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index e589bb9f92d85..473fe3c1c0cad 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -510,7 +510,7 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a // Initialize the object memory if necessary. bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; LclVarDsc* const lclDsc = comp->lvaGetDesc(lclNum); if (comp->fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 2efbf40b6d535..68191baedd2e5 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -587,7 +587,7 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() // Update the flow. m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); - m_b1->bbJumpKind = BBJ_NONE; + m_b1->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp)); // Fixup flags. m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE); @@ -877,18 +877,18 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() if (optReturnBlock) { m_b1->bbJumpDest = nullptr; - m_b1->bbJumpKind = BBJ_RETURN; + m_b1->setBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp)); #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif - assert(m_b2->bbJumpKind == BBJ_RETURN); + assert(m_b2->getBBJumpKind() == BBJ_RETURN); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { - assert(m_b1->bbJumpKind == BBJ_COND); - assert(m_b2->bbJumpKind == BBJ_COND); + assert(m_b1->getBBJumpKind() == BBJ_COND); + assert(m_b2->getBBJumpKind() == BBJ_COND); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); @@ -1180,7 +1180,7 @@ void OptBoolsDsc::optOptimizeBoolsGcStress() return; } - assert(m_b1->bbJumpKind == BBJ_COND); + assert(m_b1->getBBJumpKind() == BBJ_COND); Statement* const stmt = m_b1->lastStmt(); GenTree* const cond = stmt->GetRootNode(); @@ -1469,7 +1469,7 @@ PhaseStatus Compiler::optOptimizeBools() // We're only interested in conditional jumps here - if (b1->bbJumpKind != BBJ_COND) + if (b1->getBBJumpKind() != BBJ_COND) { continue; } @@ -1492,7 +1492,7 @@ PhaseStatus Compiler::optOptimizeBools() // The next block needs to be a condition or return block. 
- if (b2->bbJumpKind == BBJ_COND) + if (b2->getBBJumpKind() == BBJ_COND) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { @@ -1517,7 +1517,7 @@ PhaseStatus Compiler::optOptimizeBools() } #endif } - else if (b2->bbJumpKind == BBJ_RETURN) + else if (b2->getBBJumpKind() == BBJ_RETURN) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; @@ -1531,7 +1531,7 @@ PhaseStatus Compiler::optOptimizeBools() // b3 must be RETURN type - if (b3->bbJumpKind != BBJ_RETURN) + if (b3->getBBJumpKind() != BBJ_RETURN) { continue; } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index a583db4b3562c..59d50c6850197 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->bbJumpKind == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if ((predBlock->getBBJumpKind() == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,8 +1150,8 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. - if ((initBlock->bbJumpKind == BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && - (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) + if ((initBlock->getBBJumpKind() == BBJ_NONE) && (initBlock->bbNext == top) && + (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; phdrStmt = initBlock->firstStmt(); @@ -1305,7 +1305,7 @@ bool Compiler::optRecordLoop( // 5. Finding a constant initializer is optional; if the initializer is not found, or is not constant, // it is still considered a for-like loop. // - if (bottom->bbJumpKind == BBJ_COND) + if (bottom->getBBJumpKind() == BBJ_COND) { GenTree* init; GenTree* test; @@ -1385,7 +1385,7 @@ void Compiler::optCheckPreds() } } noway_assert(bb); - switch (bb->bbJumpKind) + switch (bb->getBBJumpKind()) { case BBJ_COND: if (bb->bbJumpDest == block) @@ -1801,7 +1801,7 @@ class LoopSearch // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { - if (head->bbJumpKind == BBJ_ALWAYS) + if (head->getBBJumpKind() == BBJ_ALWAYS) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext)) + if ((block->getBBJumpKind() == BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -2321,7 +2321,7 @@ class LoopSearch noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } - else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) + else if ((block->getBBJumpKind() == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. 
if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) @@ -2398,7 +2398,7 @@ class LoopSearch { BasicBlock* exitPoint; - switch (block->bbJumpKind) + switch (block->getBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -2416,7 +2416,7 @@ class LoopSearch // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered a loop exit block, as catch handlers don't have predecessor lists and don't // show up as might be expected in the dominator tree. - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->getBBJumpKind() == BBJ_ALWAYS) { if (!BasicBlock::sameHndRegion(block, exitPoint)) { @@ -2738,7 +2738,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R BasicBlock* newJumpDest = nullptr; - switch (blk->bbJumpKind) + switch (blk->getBBJumpKind()) { case BBJ_NONE: case BBJ_THROW: @@ -2818,10 +2818,10 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { - assert(from->bbJumpKind == to->bbJumpKind); // Precondition. + assert(from->getBBJumpKind() == to->getBBJumpKind()); // Precondition. // copy the jump destination(s) from "from" to "to". - switch (to->bbJumpKind) + switch (to->getBBJumpKind()) { case BBJ_ALWAYS: case BBJ_LEAVE: @@ -2936,7 +2936,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through // head block. See FindEntry() for the logic that allows this. - if ((h->bbJumpKind == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if ((h->getBBJumpKind() == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // Insert new head @@ -3030,7 +3030,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // not keeping pred lists in good shape. 
// BasicBlock* const t = optLoopTable[loopInd].lpTop; - assert(siblingB->bbJumpKind == BBJ_COND); + assert(siblingB->getBBJumpKind() == BBJ_COND); assert(siblingB->bbNext == t); JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); @@ -3207,8 +3207,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->bbJumpKind == BBJ_NONE) || (h->bbJumpKind == BBJ_COND)); - if (h->bbJumpKind == BBJ_COND) + assert((h->getBBJumpKind() == BBJ_NONE) || (h->getBBJumpKind() == BBJ_COND)); + if (h->getBBJumpKind() == BBJ_COND) { BasicBlock* const hj = h->bbJumpDest; assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->bbJumpKind == BBJ_NONE) && (newT->bbNext == origE)) + (newT->getBBJumpKind() == BBJ_NONE) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -4280,7 +4280,7 @@ PhaseStatus Compiler::optUnrollLoops() goto DONE_LOOP; } - if (block->bbJumpKind == BBJ_RETURN) + if (block->getBBJumpKind() == BBJ_RETURN) { ++loopRetCount; } @@ -4361,7 +4361,7 @@ PhaseStatus Compiler::optUnrollLoops() for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext) { BasicBlock* newBlock = insertAfter = - fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true); + fgNewBBafter(block->getBBJumpKind(), insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) @@ -4415,7 +4415,7 @@ PhaseStatus Compiler::optUnrollLoops() { testCopyStmt->SetRootNode(sideEffList); } - newBlock->bbJumpKind = BBJ_NONE; + newBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } @@ -4486,8 +4486,8 @@ PhaseStatus Compiler::optUnrollLoops() fgRemoveAllRefPreds(succ, block); } + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbStmtList = nullptr; - block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; block->bbNatLoopNum = newLoopNum; @@ -4524,21 +4524,21 @@ PhaseStatus Compiler::optUnrollLoops() // // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_NONE block). // - if (initBlock->bbJumpKind == BBJ_COND) + if (initBlock->getBBJumpKind() == BBJ_COND) { assert(dupCond); Statement* initBlockBranchStmt = initBlock->lastStmt(); noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE)); fgRemoveStmt(initBlock, initBlockBranchStmt); fgRemoveRefPred(initBlock->bbJumpDest, initBlock); - initBlock->bbJumpKind = BBJ_NONE; + initBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { /* the loop must execute */ assert(!dupCond); assert(totalIter > 0); - noway_assert(initBlock->bbJumpKind == BBJ_NONE); + noway_assert(initBlock->getBBJumpKind() == BBJ_NONE); } // The loop will be removed, so no need to fix up the pre-header. @@ -4548,7 +4548,7 @@ PhaseStatus Compiler::optUnrollLoops() // For unrolled loops, all the unrolling preconditions require the pre-header block to fall // through into TOP. - assert(head->bbJumpKind == BBJ_NONE); + assert(head->getBBJumpKind() == BBJ_NONE); } // If we actually unrolled, tail is now reached @@ -4840,7 +4840,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Does the BB end with an unconditional jump? 
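// [Annotation, not part of the patch. optInvertWhileLoop duplicates the loop's entry test so
//  that
//      while (cond) { body; }
//  becomes
//      if (cond) { do { body; } while (cond); }
//  leaving one conditional branch per iteration; the checks below first verify the candidate
//  really has the "unconditional jump to a BBJ_COND test" shape.]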
- if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (block->getBBJumpKind() != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; @@ -4850,7 +4850,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) BasicBlock* const bTest = block->bbJumpDest; // Does the bTest consist of 'jtrue(cond) block' ? - if (bTest->bbJumpKind != BBJ_COND) + if (bTest->getBBJumpKind() != BBJ_COND) { return false; } @@ -5077,7 +5077,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. - block->bbJumpKind = BBJ_NONE; + block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); @@ -5434,7 +5434,7 @@ void Compiler::optMarkLoopHeads() { if (blockNum <= predBlock->bbNum) { - if (predBlock->bbJumpKind == BBJ_CALLFINALLY) + if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". continue; @@ -5539,7 +5539,7 @@ void Compiler::optFindAndScaleGeneralLoopBlocks() } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. - if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS)) + if ((bottom->getBBJumpKind() != BBJ_COND) && (bottom->getBBJumpKind() != BBJ_ALWAYS)) { continue; } @@ -8198,7 +8198,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND)) + if (fgIsUsingProfileWeights() && (head->getBBJumpKind() == BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { @@ -8306,7 +8306,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) continue; } - switch (predBlock->bbJumpKind) + switch (predBlock->getBBJumpKind()) { case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, @@ -9181,7 +9181,7 @@ void Compiler::optRemoveRedundantZeroInits() if (tree->Data()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; if (!bbInALoop || bbIsReturn) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index a2d6cb5633537..2423a6d9da47a 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -145,7 +145,7 @@ class PatchpointTransformer BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block); // Update flow and flags - block->bbJumpKind = BBJ_COND; + block->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); block->bbJumpDest = remainderBlock; block->bbFlags |= BBF_INTERNAL; @@ -233,7 +233,7 @@ class PatchpointTransformer } // Update flow - block->bbJumpKind = BBJ_THROW; + block->setBBJumpKind(BBJ_THROW DEBUG_ARG(compiler)); block->bbJumpDest = nullptr; // Add helper call diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 32369d303d206..cdf76a4e5a6b6 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -44,7 +44,7 @@ PhaseStatus Compiler::optRedundantBranches() // We currently can optimize some BBJ_CONDs. 
// - if (block->bbJumpKind == BBJ_COND) + if (block->getBBJumpKind() == BBJ_COND) { bool madeChangesThisBlock = m_compiler->optRedundantRelop(block); @@ -57,7 +57,7 @@ PhaseStatus Compiler::optRedundantBranches() // a BBJ_COND, retry; perhaps one of the later optimizations // we can do has enabled one of the earlier optimizations. // - if (madeChangesThisBlock && (block->bbJumpKind == BBJ_COND)) + if (madeChangesThisBlock && (block->getBBJumpKind() == BBJ_COND)) { JITDUMP("Will retry RBO in " FMT_BB " after partial optimization\n", block->bbNum); madeChangesThisBlock |= m_compiler->optRedundantBranch(block); @@ -508,7 +508,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // Check the current dominator // - if (domBlock->bbJumpKind == BBJ_COND) + if (domBlock->getBBJumpKind() == BBJ_COND) { Statement* const domJumpStmt = domBlock->lastStmt(); GenTree* const domJumpTree = domJumpStmt->GetRootNode(); @@ -971,8 +971,8 @@ bool Compiler::optJumpThreadCheck(BasicBlock* const block, BasicBlock* const dom // bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop) { - assert(block->bbJumpKind == BBJ_COND); - assert(domBlock->bbJumpKind == BBJ_COND); + assert(block->getBBJumpKind() == BBJ_COND); + assert(domBlock->getBBJumpKind() == BBJ_COND); // If the dominating block is not the immediate dominator // we might need to duplicate a lot of code to thread @@ -990,7 +990,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl BasicBlock* idomBlock = block->bbIDom; while ((idomBlock != nullptr) && (idomBlock != domBlock)) { - if (idomBlock->bbJumpKind == BBJ_COND) + if (idomBlock->getBBJumpKind() == BBJ_COND) { JITDUMP(" -- " FMT_BB " not closest branching dom, so no threading\n", idomBlock->bbNum); return false; @@ -1082,7 +1082,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Treat switch preds as ambiguous for now. // - if (predBlock->bbJumpKind == BBJ_SWITCH) + if (predBlock->getBBJumpKind() == BBJ_SWITCH) { JITDUMP(FMT_BB " is a switch pred\n", predBlock->bbNum); BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); @@ -1450,8 +1450,9 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->bbJumpKind == BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if ((jti.m_fallThroughPred->getBBJumpKind() == BBJ_NONE) && + ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1460,7 +1461,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // Possibly defer this until after early out below. 
// - jti.m_fallThroughPred->bbJumpKind = BBJ_ALWAYS; + jti.m_fallThroughPred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); jti.m_fallThroughPred->bbJumpDest = jti.m_block; modifiedFlow = true; } @@ -1532,7 +1533,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) fgRemoveStmt(jti.m_block, lastStmt); JITDUMP(" repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_trueTarget->bbNum); fgRemoveRefPred(jti.m_falseTarget, jti.m_block); - jti.m_block->bbJumpKind = BBJ_ALWAYS; + jti.m_block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); } else if (falsePredsWillReuseBlock) { @@ -1541,7 +1542,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum, jti.m_falseTarget->bbNum); fgRemoveRefPred(jti.m_trueTarget, jti.m_block); - jti.m_block->bbJumpKind = BBJ_NONE; + jti.m_block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // Now reroute the flow from the predecessors. @@ -1623,7 +1624,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->bbJumpKind == BBJ_COND) && + if ((ambBlock != nullptr) && (jti.m_block->getBBJumpKind() == BBJ_COND) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 5052e6ff57411..90bfa43142e75 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -319,7 +319,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert(isTest); // Convert firstBlock to a switch block - firstBlock->bbJumpKind = BBJ_SWITCH; + firstBlock->setBBJumpKind(BBJ_SWITCH DEBUG_ARG(this)); firstBlock->bbJumpDest = nullptr; firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd; firstBlock->lastStmt()->GetRootNode()->ChangeOper(GT_SWITCH); From 23787976a10045d7fc3f751a7962c800d82588ee Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 19:15:21 -0400 Subject: [PATCH 2/5] Convert bbJumpKind comparisons to KindIs() --- src/coreclr/jit/assertionprop.cpp | 6 +- src/coreclr/jit/block.cpp | 6 +- src/coreclr/jit/codegenarm.cpp | 6 +- src/coreclr/jit/codegenarm64.cpp | 6 +- src/coreclr/jit/codegenarmarch.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 4 +- src/coreclr/jit/codegenlinear.cpp | 8 +- src/coreclr/jit/codegenloongarch64.cpp | 6 +- src/coreclr/jit/codegenriscv64.cpp | 6 +- src/coreclr/jit/codegenxarch.cpp | 8 +- src/coreclr/jit/compiler.cpp | 5 +- src/coreclr/jit/compiler.hpp | 10 +- src/coreclr/jit/emitarm.cpp | 4 +- src/coreclr/jit/emitarm64.cpp | 4 +- src/coreclr/jit/emitloongarch64.cpp | 2 +- src/coreclr/jit/emitriscv64.cpp | 2 +- src/coreclr/jit/emitxarch.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 56 +-- src/coreclr/jit/fgdiagnostic.cpp | 39 +- src/coreclr/jit/fgehopt.cpp | 46 +-- src/coreclr/jit/fgflow.cpp | 10 +- src/coreclr/jit/fginline.cpp | 6 +- src/coreclr/jit/fgopt.cpp | 428 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 34 +- src/coreclr/jit/fgprofilesynthesis.cpp | 15 +- src/coreclr/jit/flowgraph.cpp | 16 +- src/coreclr/jit/gschecks.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 6 +- src/coreclr/jit/importer.cpp | 60 ++- src/coreclr/jit/importercalls.cpp | 6 +- src/coreclr/jit/indirectcalltransformer.cpp | 4 +- 
src/coreclr/jit/jiteh.cpp | 14 +- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/liveness.cpp | 4 +- src/coreclr/jit/loopcloning.cpp | 22 +- src/coreclr/jit/lower.cpp | 10 +- src/coreclr/jit/lsra.cpp | 14 +- src/coreclr/jit/morph.cpp | 36 +- src/coreclr/jit/objectalloc.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 16 +- src/coreclr/jit/optimizer.cpp | 48 +-- src/coreclr/jit/redundantbranchopts.cpp | 22 +- 42 files changed, 498 insertions(+), 509 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 26f1a3a4d71ec..044d1dc0679a1 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5260,7 +5260,7 @@ class AssertionPropFlowCallback { ASSERT_TP pAssertionOut; - if (predBlock->getBBJumpKind() == BBJ_COND && (predBlock->bbJumpDest == block)) + if (predBlock->KindIs(BBJ_COND) && (predBlock->bbJumpDest == block)) { pAssertionOut = mJumpDestOut[predBlock->bbNum]; @@ -5460,7 +5460,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen() printf(FMT_BB " valueGen = ", block->bbNum); optPrintAssertionIndices(block->bbAssertionGen); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { printf(" => " FMT_BB " valueGen = ", block->bbJumpDest->bbNum); optPrintAssertionIndices(jumpDestGen[block->bbNum]); @@ -6020,7 +6020,7 @@ PhaseStatus Compiler::optAssertionPropMain() printf(FMT_BB ":\n", block->bbNum); optDumpAssertionIndices(" in = ", block->bbAssertionIn, "\n"); optDumpAssertionIndices(" out = ", block->bbAssertionOut, "\n"); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { printf(" " FMT_BB " = ", block->bbJumpDest->bbNum); optDumpAssertionIndices(bbJtrueAssertionOut[block->bbNum], "\n"); diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 742025a619e73..a5798928b5959 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1499,9 +1499,9 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) - if (this->getBBJumpKind() == BBJ_CALLFINALLY) + if (this->KindIs(BBJ_CALLFINALLY)) #else - if ((this->getBBJumpKind() == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) + if (this->KindIs(BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1510,7 +1510,7 @@ bool BasicBlock::isBBCallAlwaysPair() const #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. 
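// [Annotation, not part of the patch. A finally is invoked through a two-block pair:
//      BBJ_CALLFINALLY  -> calls the finally handler
//      BBJ_ALWAYS       -> the continuation, pinned by BBF_KEEP_BBJ_ALWAYS
//  except for "retless" calls (BBF_RETLESS_CALL), where the finally never returns and no
//  BBJ_ALWAYS follows; isBBCallAlwaysPair() recognizes exactly that pair, as the asserts
//  below spell out.]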
assert(this->bbNext != nullptr); - assert(this->bbNext->getBBJumpKind() == BBJ_ALWAYS); + assert(this->bbNext->KindIs(BBJ_ALWAYS)); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 3c8e8cdad6128..54c4b7e20dcd5 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -124,7 +124,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) assert(block->isBBCallAlwaysPair()); assert(block->bbNext != NULL); - assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); + assert(block->bbNext->KindIs(BBJ_ALWAYS)); assert(block->bbNext->bbJumpDest != NULL); assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); @@ -630,7 +630,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -1294,7 +1294,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index c2a0823a09179..daf56c5a8654b 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3745,7 +3745,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4646,7 +4646,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4837,7 +4837,7 @@ void CodeGen::genCodeForSelect(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 6c0f23d4f488d..9a3698627fac5 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -5515,7 +5515,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 916ac7854a33a..6a1e1cecbc0e7 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2256,7 +2256,7 @@ void CodeGen::genReportEH() { for (BasicBlock* const block : compiler->Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { 
++clonedFinallyCount; } @@ -2582,7 +2582,7 @@ void CodeGen::genReportEH() unsigned reportedClonedFinallyCount = 0; for (BasicBlock* const block : compiler->Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { UNATIVE_OFFSET hndBeg, hndEnd; diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index fdb473fe29ed7..f9d5d1c7cfc04 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,7 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->bbPrev != nullptr) && (block->bbPrev->getBBJumpKind() == BBJ_COND) && + if ((block->bbPrev != nullptr) && block->bbPrev->KindIs(BBJ_COND) && (block->bbWeight != block->bbPrev->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT @@ -812,10 +812,10 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallAlwaysPairTail()); #if FEATURE_EH_CALLFINALLY_THUNKS - assert(block->getBBJumpKind() != BBJ_CALLFINALLY); + assert(!block->KindIs(BBJ_CALLFINALLY)); #endif // FEATURE_EH_CALLFINALLY_THUNKS - GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->getBBJumpKind() == BBJ_ALWAYS)); + GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) @@ -2615,7 +2615,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) // void CodeGen::genCodeForJcc(GenTreeCC* jcc) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(jcc->OperIs(GT_JCC)); inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 26bbc218fc1a7..6c2d7b26b0eab 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1217,7 +1217,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2928,7 +2928,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4136,7 +4136,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // A GT_JCMP node is created for an integer-comparison's conditional branch. 
void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 7d8f3a8233d0d..d411cc292f4bd 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -886,7 +886,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2574,7 +2574,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -3780,7 +3780,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index cc959b33e344a..3ac1a84d9307c 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -369,7 +369,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } else { - assert(block->getBBJumpKind() == BBJ_EHFILTERRET); + assert(block->KindIs(BBJ_EHFILTERRET)); // The return value has already been computed. 
instGen_Return(0); @@ -1441,7 +1441,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->getBBJumpKind() == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4263,7 +4263,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->getBBJumpKind() == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -10241,7 +10241,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) if (jmpEpilog) { - noway_assert(block->getBBJumpKind() == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode()); // figure out what jump we have diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 65d01e701d2e6..c3f63b48e4ab4 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5275,8 +5275,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } // If there is an unconditional jump (which is not part of callf/always pair) - if (opts.compJitHideAlignBehindJmp && (block->getBBJumpKind() == BBJ_ALWAYS) && - !block->isBBCallAlwaysPairTail()) + if (opts.compJitHideAlignBehindJmp && block->KindIs(BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) @@ -5301,7 +5300,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() bool unmarkedLoopAlign = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { // It must be a retless BBJ_CALLFINALLY if we get here. 
assert(!block->isBBCallAlwaysPair()); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 39c5ecd33681e..8ac6d7bdf47b7 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -635,7 +635,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -649,7 +649,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -769,7 +769,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->getBBJumpKind() != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -3125,7 +3125,7 @@ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) return false; } - if (!(block->bbFlags & BBF_INTERNAL) || block->getBBJumpKind() != BBJ_THROW) + if (!(block->bbFlags & BBF_INTERNAL) || !block->KindIs(BBJ_THROW)) { return false; } @@ -3236,7 +3236,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); // leaveBlk is now unreachable, so scrub the pred lists. 
leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index 10a1beadf139f..33ae40ee208ef 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -4379,7 +4379,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 #ifdef DEBUG // Mark the finally call - if (ins == INS_b && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) + if (ins == INS_b && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } @@ -4523,7 +4523,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index 82131ee325dd4..5e0b4f2e78a95 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -8495,7 +8495,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -8670,7 +8670,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount) #ifdef DEBUG // Mark the finally call - if (ins == INS_bl_local && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) + if (ins == INS_bl_local && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index d6004451fcb87..40c4937fe3b6f 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -2046,7 +2046,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index bfc91a3561572..fd99e65dc7373 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -1030,7 +1030,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 3e2afe7a830c1..d03bb82ea9cc5 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -7614,7 +7614,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->getBBJumpKind() == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -9221,7 +9221,7 @@ void emitter::emitIns_J(instruction ins, #ifdef DEBUG // Mark the finally call - if (ins == INS_call && emitComp->compCurBB->getBBJumpKind() == BBJ_CALLFINALLY) + if (ins == INS_call && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/fgbasic.cpp 
b/src/coreclr/jit/fgbasic.cpp index 3573a015de385..254372e770c3e 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -206,7 +206,7 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) /* Allocate the block descriptor */ block = bbNewBasicBlock(jumpKind); - noway_assert(block->getBBJumpKind() == jumpKind); + noway_assert(block->KindIs(jumpKind)); /* Append the block to the end of the global basic block list */ @@ -395,7 +395,7 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); - noway_assert(oldSwitchBlock->getBBJumpKind() == BBJ_SWITCH); + noway_assert(oldSwitchBlock->KindIs(BBJ_SWITCH)); assert(fgPredsComputed); // Walk the switch's jump table, updating the predecessor for each branch. @@ -457,7 +457,7 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); - noway_assert(blockSwitch->getBBJumpKind() == BBJ_SWITCH); + noway_assert(blockSwitch->KindIs(BBJ_SWITCH)); assert(fgPredsComputed); // For the jump targets values that match oldTarget of our BBJ_SWITCH @@ -911,7 +911,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed } // Determine if the call site is in a no-return block - if (isInlining && (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_THROW)) + if (isInlining && impInlineInfo->iciBlock->KindIs(BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } @@ -2721,7 +2721,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlo for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { - if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->getBBJumpKind() != BBJ_RETURN)) + if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && !block->KindIs(BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; @@ -3675,7 +3675,7 @@ void Compiler::fgFindBasicBlocks() // Still inside the filter block->setHndIndex(XTnum); - if (block->getBBJumpKind() == BBJ_EHFILTERRET) + if (block->KindIs(BBJ_EHFILTERRET)) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; @@ -4015,7 +4015,7 @@ void Compiler::fgFixEntryFlowForOSR() // Now branch from method start to the OSR entry. 
// fgEnsureFirstBBisScratch(); - assert(fgFirstBB->getBBJumpKind() == BBJ_NONE); + assert(fgFirstBB->KindIs(BBJ_NONE)); fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); fgFirstBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; @@ -4099,14 +4099,14 @@ void Compiler::fgCheckBasicBlockControlFlow() HBtab = ehGetDsc(blk->getHndIndex()); // Endfilter allowed only in a filter block - if (blk->getBBJumpKind() == BBJ_EHFILTERRET) + if (blk->KindIs(BBJ_EHFILTERRET)) { if (!HBtab->HasFilter()) { BADCODE("Unexpected endfilter"); } } - else if (blk->getBBJumpKind() == BBJ_EHFINALLYRET) + else if (blk->KindIs(BBJ_EHFINALLYRET)) { // endfinally allowed only in a finally block if (!HBtab->HasFinallyHandler()) @@ -4114,7 +4114,7 @@ void Compiler::fgCheckBasicBlockControlFlow() BADCODE("Unexpected endfinally"); } } - else if (blk->getBBJumpKind() == BBJ_EHFAULTRET) + else if (blk->KindIs(BBJ_EHFAULTRET)) { // 'endfault' (alias of IL 'endfinally') allowed only in a fault block if (!HBtab->HasFaultHandler()) @@ -4568,7 +4568,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. // Without these arcs, a block 'b' may not be a member of succs(preds(b)) - if (curr->getBBJumpKind() != BBJ_SWITCH) + if (!curr->KindIs(BBJ_SWITCH)) { for (BasicBlock* const succ : curr->Succs(this)) { @@ -4874,7 +4874,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); - if (curr->getBBJumpKind() == BBJ_COND) + if (curr->KindIs(BBJ_COND)) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) { @@ -4884,7 +4884,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } fgAddRefPred(newBlock, curr); } - else if (curr->getBBJumpKind() == BBJ_SWITCH) + else if (curr->KindIs(BBJ_SWITCH)) { // newBlock replaces 'succ' in the switch. fgReplaceSwitchJumpTarget(curr, newBlock, succ); @@ -4894,7 +4894,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } else { - assert(curr->getBBJumpKind() == BBJ_ALWAYS); + assert(curr->KindIs(BBJ_ALWAYS)); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); @@ -4907,7 +4907,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the
// - if (curr->getBBJumpKind() != BBJ_ALWAYS) + if (!curr->KindIs(BBJ_ALWAYS)) { newBlock->inheritWeightPercentage(curr, 50); } @@ -5054,7 +5054,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } #endif // FEATURE_EH_FUNCLETS - if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (bPrev->KindIs(BBJ_CALLFINALLY)) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; @@ -5063,7 +5063,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->getBBJumpKind() == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && + else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { @@ -5092,7 +5092,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; @@ -5104,7 +5104,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (block->getBBJumpKind() == BBJ_RETURN) + else if (block->KindIs(BBJ_RETURN)) { fgRemoveReturnBlock(block); } @@ -5139,7 +5139,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - noway_assert(bPrev && bPrev->getBBJumpKind() == BBJ_NONE); + noway_assert(bPrev && bPrev->KindIs(BBJ_NONE)); break; default: @@ -5154,7 +5154,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) BasicBlock* succBlock; - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { succBlock = block->bbJumpDest; } @@ -5207,7 +5207,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* Must be a fall through to next block */ - noway_assert(block->getBBJumpKind() == BBJ_NONE); + noway_assert(block->KindIs(BBJ_NONE)); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; @@ -5235,7 +5235,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ - if (predBlock->getBBJumpKind() != BBJ_SWITCH) + if (!predBlock->KindIs(BBJ_SWITCH)) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds @@ -5257,7 +5257,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { /* bPrev now becomes a BBJ_ALWAYS */ bPrev->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); @@ -5459,7 +5459,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if ((bSrc->getBBJumpKind() == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && + if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { bSrc->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); @@ -6273,14 +6273,14 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } else { - if (bAlt->getBBJumpKind() == BBJ_ALWAYS) + if (bAlt->KindIs(BBJ_ALWAYS)) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { - noway_assert(bAlt->getBBJumpKind() == BBJ_COND); + noway_assert(bAlt->KindIs(BBJ_COND)); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } @@ -6570,7 +6570,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, { goodBlk = blk; } - else if ((goodBlk->getBBJumpKind() == BBJ_COND) || (blk->getBBJumpKind() != BBJ_COND)) + else if (goodBlk->KindIs(BBJ_COND) || !blk->KindIs(BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp index edf64aeccdd37..b8b868214ae9c 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -143,13 +143,13 @@ void Compiler::fgDebugCheckUpdate() // Check for an unnecessary jumps to the next block bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { // A conditional branch should never jump to the next block // as it can be folded into a BBJ_NONE; doAssertOnJumpToNextBlock = true; } - else if (block->getBBJumpKind() == BBJ_ALWAYS) + else if (block->KindIs(BBJ_ALWAYS)) { // Generally we will want to assert if a BBJ_ALWAYS branches to the next block doAssertOnJumpToNextBlock = true; @@ -184,7 +184,7 @@ void Compiler::fgDebugCheckUpdate() /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ - if ((block->getBBJumpKind() == BBJ_ALWAYS) && prevIsCallAlwaysPair) + if (block->KindIs(BBJ_ALWAYS) && prevIsCallAlwaysPair) { noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); } @@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate() /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } @@ -984,7 +984,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) } } - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { fprintf(fgxFile, "\\n"); @@
-1015,11 +1015,11 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(fgxFile, ", shape = \"house\""); } - else if (block->getBBJumpKind() == BBJ_RETURN) + else if (block->KindIs(BBJ_RETURN)) { fprintf(fgxFile, ", shape = \"invhouse\""); } - else if (block->getBBJumpKind() == BBJ_THROW) + else if (block->KindIs(BBJ_THROW)) { fprintf(fgxFile, ", shape = \"trapezium\""); } @@ -1152,7 +1152,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n id=\"%d\"", edgeNum); fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum); fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum); - if (bSource->getBBJumpKind() == BBJ_SWITCH) + if (bSource->KindIs(BBJ_SWITCH)) { if (edge->getDupCount() >= 2) { @@ -2606,8 +2606,7 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb // block that does a local call to the finally. This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. BasicBlock* prevBlock = block->bbPrev; - if (prevBlock->getBBJumpKind() == BBJ_CALLFINALLY && block->getBBJumpKind() == BBJ_ALWAYS && - blockPred->getBBJumpKind() == BBJ_EHFINALLYRET) + if (prevBlock->KindIs(BBJ_CALLFINALLY) && block->KindIs(BBJ_ALWAYS) && blockPred->KindIs(BBJ_EHFINALLYRET)) { return true; } @@ -2634,7 +2633,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb } // Our try block can call our finally block - if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->getBBJumpKind() == BBJ_CALLFINALLY) && + if ((block->bbCatchTyp == BBCT_FINALLY) && blockPred->KindIs(BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; @@ -2734,7 +2733,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2756,7 +2755,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2878,12 +2877,12 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // if (compPostImportationCleanupDone || ((block->bbFlags & BBF_IMPORTED) != 0)) { - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && block->lastNode()->OperIsConditionalJump()); } - else if (block->getBBJumpKind() == BBJ_SWITCH) + else if (block->KindIs(BBJ_SWITCH)) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); @@ -2987,7 +2986,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // Don't depend on predecessors list for the check. for (BasicBlock* const succBlock : block->Succs()) { - if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); @@ -3729,7 +3728,7 @@ void Compiler::fgDebugCheckBlockLinks() // If this is a switch, check that the tables are consistent. 
// Note that we don't call GetSwitchDescMap(), because it has the side-effect // of allocating it if it is not present. - if (block->getBBJumpKind() == BBJ_SWITCH && m_switchDescMap != nullptr) + if (block->KindIs(BBJ_SWITCH) && m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) @@ -4792,13 +4791,13 @@ void Compiler::fgDebugCheckLoopTable() // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. BasicBlock* e = loop.lpEntry; - if (h->getBBJumpKind() == BBJ_ALWAYS) + if (h->KindIs(BBJ_ALWAYS)) { assert(h->bbJumpDest == e); } else { - assert(h->getBBJumpKind() == BBJ_NONE); + assert(h->KindIs(BBJ_NONE)); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); @@ -4907,7 +4906,7 @@ void Compiler::fgDebugCheckLoopTable() // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. - // assert(block->getBBJumpKind() != BBJ_RETURN); + // assert(!block->KindIs(BBJ_RETURN)); } else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index f6549f3b538df..782a92c92b645 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -100,7 +100,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() } // If the finally's block jumps back to itself, then it is not empty. - if ((firstBlock->getBBJumpKind() == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) + if (firstBlock->KindIs(BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) { JITDUMP("EH#%u finally has basic block that jumps to itself; skipping.\n", XTnum); XTnum++; @@ -142,7 +142,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() { BasicBlock* nextBlock = currentBlock->bbNext; - if ((currentBlock->getBBJumpKind() == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) + if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { // Retarget the call finally to jump to the return // point. 
@@ -160,7 +160,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() JITDUMP("so that " FMT_BB " jumps to " FMT_BB "; then remove " FMT_BB "\n", currentBlock->bbNum, postTryFinallyBlock->bbNum, leaveBlock->bbNum); - noway_assert(leaveBlock->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(leaveBlock->KindIs(BBJ_ALWAYS)); currentBlock->bbJumpDest = postTryFinallyBlock; currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); @@ -373,7 +373,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Look for blocks that are always jumps to a call finally // pair that targets the finally - if (firstTryBlock->getBBJumpKind() != BBJ_ALWAYS) + if (!firstTryBlock->KindIs(BBJ_ALWAYS)) { JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); @@ -437,7 +437,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) { - if ((block->getBBJumpKind() == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) + if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { assert(block->isBBCallAlwaysPair()); @@ -536,7 +536,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() block->clearHndIndex(); } - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { Statement* finallyRet = block->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); @@ -738,7 +738,7 @@ PhaseStatus Compiler::fgCloneFinally() for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) { - if (block->getBBJumpKind() == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { hasSwitch = true; break; @@ -753,7 +753,7 @@ PhaseStatus Compiler::fgCloneFinally() regionStmtCount++; } - hasFinallyRet = hasFinallyRet || (block->getBBJumpKind() == BBJ_EHFINALLYRET); + hasFinallyRet = hasFinallyRet || (block->KindIs(BBJ_EHFINALLYRET)); isAllRare = isAllRare && block->isRunRarely(); } @@ -821,11 +821,11 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->getBBJumpKind() == BBJ_NONE) && (block == lastTryBlock)) + if ((block->KindIs(BBJ_NONE)) && (block == lastTryBlock)) { jumpDest = block->bbNext; } - else if (block->getBBJumpKind() == BBJ_ALWAYS) + else if (block->KindIs(BBJ_ALWAYS)) { jumpDest = block->bbJumpDest; } @@ -989,8 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; - if ((placeToMoveAfter->getBBJumpKind() == BBJ_ALWAYS) && - (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) + if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n", normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum); @@ -1050,8 +1049,7 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = - (block->getBBJumpKind() == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->getBBJumpKind(); + BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? 
BBJ_ALWAYS : block->getBBJumpKind(); if (block == firstBlock) { @@ -1133,13 +1131,13 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* newBlock = blockMap[block]; - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { Statement* finallyRet = newBlock->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - assert(newBlock->getBBJumpKind() == BBJ_ALWAYS); // we mapped this above already + assert(newBlock->KindIs(BBJ_ALWAYS)); // we mapped this above already newBlock->bbJumpDest = normalCallFinallyReturn; fgAddRefPred(normalCallFinallyReturn, newBlock); @@ -1196,7 +1194,7 @@ PhaseStatus Compiler::fgCloneFinally() // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) { - assert(leavePred->getBBJumpKind() == BBJ_EHFINALLYRET); + assert(leavePred->KindIs(BBJ_EHFINALLYRET)); assert(leavePred->getHndIndex() == XTnum); } @@ -1242,7 +1240,7 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext; for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext) { - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } @@ -1408,7 +1406,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { continue; } @@ -1434,13 +1432,13 @@ void Compiler::fgDebugCheckTryFinallyExits() bool isCallToFinally = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { // case (a1) isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock); } #else - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { // case (a2) isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock); @@ -1454,7 +1452,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // case (b) isJumpToClonedFinally = true; } - else if (succBlock->getBBJumpKind() == BBJ_ALWAYS) + else if (succBlock->KindIs(BBJ_ALWAYS)) { if (succBlock->isEmpty()) { @@ -1467,7 +1465,7 @@ void Compiler::fgDebugCheckTryFinallyExits() } } } - else if (succBlock->getBBJumpKind() == BBJ_NONE) + else if (succBlock->KindIs(BBJ_NONE)) { if (succBlock->isEmpty()) { @@ -1900,7 +1898,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, { // We expect callfinallys to be invoked by a BBJ_ALWAYS at this // stage in compilation. - if (block->getBBJumpKind() != BBJ_ALWAYS) + if (!block->KindIs(BBJ_ALWAYS)) { // Possible paranoia assert here -- no flow successor of // this block should be a callfinally for this try. 
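// NOTE (hypothetical helper, not part of this patch): the filter
// `!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)` recurs across
// compiler.hpp, fgdiagnostic.cpp, and fgflow.cpp in this patch. Its shared shape:
//
//     // Visit every BBJ_CALLFINALLY in [begBlk, endBlk) that invokes the finally at finBeg.
//     template <typename TFunc>
//     void VisitCallFinallies(BasicBlock* begBlk, BasicBlock* endBlk, BasicBlock* finBeg, TFunc func)
//     {
//         for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
//         {
//             if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
//             {
//                 continue; // not a call to this finally
//             }
//             func(bcall);
//         }
//     }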
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 14f42c83254c5..fd6ef7a356776 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -354,7 +354,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) bNext = block->bbNext; /* bNext is an unreachable BBJ_ALWAYS block */ - noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(bNext->KindIs(BBJ_ALWAYS)); while (bNext->countOfInEdges() > 0) { @@ -403,7 +403,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbFlags & BBF_REMOVED) || bcall->getBBJumpKind() != BBJ_CALLFINALLY || + if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; @@ -470,7 +470,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -491,7 +491,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switchBlk) { - assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); + assert(switchBlk->KindIs(BBJ_SWITCH)); BlockToSwitchDescMap* switchMap = GetSwitchDescMap(); SwitchUniqueSuccSet res; if (switchMap->Lookup(switchBlk, &res)) @@ -546,7 +546,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc, BasicBlock* from, BasicBlock* to) { - assert(switchBlk->getBBJumpKind() == BBJ_SWITCH); // Precondition. + assert(switchBlk->KindIs(BBJ_SWITCH)); // Precondition. // Is "from" still in the switch table (because it had more than one entry before?) bool fromStillPresent = false; diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index fd880a2d00348..51f77ccc3a5f9 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1444,7 +1444,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. - if (InlineeCompiler->fgFirstBB->getBBJumpKind() == BBJ_RETURN) + if (InlineeCompiler->fgFirstBB->KindIs(BBJ_RETURN)) { // Inlinee contains just one BB. So just insert its statement list to topBlock. if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) @@ -1523,7 +1523,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) block->bbFlags |= BBF_INTERNAL; } - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) @@ -1945,7 +1945,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. 
Otherwise we can rely on the diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 9814f8b9e6b0d..bcf25c9d01ef1 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -292,7 +292,7 @@ void Compiler::fgComputeReturnBlocks() { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } @@ -362,7 +362,7 @@ void Compiler::fgComputeEnterBlocksSet() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert(block->isBBCallAlwaysPair()); @@ -474,7 +474,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { - noway_assert(block->bbNext->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(block->bbNext->KindIs(BBJ_ALWAYS)); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -638,7 +638,7 @@ bool Compiler::fgRemoveDeadBlocks() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert(block->isBBCallAlwaysPair()); @@ -1827,7 +1827,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // it can be reached directly from "outside". // assert(fgFirstBB->bbJumpDest == osrEntry); - assert(fgFirstBB->getBBJumpKind() == BBJ_ALWAYS); + assert(fgFirstBB->KindIs(BBJ_ALWAYS)); if (entryJumpTarget != osrEntry) { @@ -1918,7 +1918,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert(block->bbNext == bNext); - if (block->getBBJumpKind() != BBJ_NONE) + if (!block->KindIs(BBJ_NONE)) { return false; } @@ -2002,7 +2002,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { - if (predBlock->getBBJumpKind() == BBJ_SWITCH) + if (predBlock->KindIs(BBJ_SWITCH)) { return false; } @@ -2027,7 +2027,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); - noway_assert(block->getBBJumpKind() == BBJ_NONE); + noway_assert(block->KindIs(BBJ_NONE)); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); @@ -2234,7 +2234,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) // or if both block and bNext have non-zero weights // then we will use the max weight for the block. 
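// NOTE (sketch, not patch code; simplified from the surrounding fgCompactBlocks logic):
// the weight-merge rule described above reduces to roughly:
//
//     if (bNext->KindIs(BBJ_THROW))
//     {
//         block->bbSetRunRarely(); // a block ending in a throw is assumed rarely run
//     }
//     else if ((block->bbWeight != 0) && (bNext->bbWeight != 0))
//     {
//         block->bbWeight = max(block->bbWeight, bNext->bbWeight); // keep the larger weight
//     }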
// - if (bNext->getBBJumpKind() == BBJ_THROW) + if (bNext->KindIs(BBJ_THROW)) { block->bbSetRunRarely(); } @@ -2345,7 +2345,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->getBBJumpKind() == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) + if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } @@ -2375,7 +2375,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->getBBJumpKind() != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2627,7 +2627,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->getBBJumpKind() == BBJ_COND && block->bbJumpDest == block->bbNext); + noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->bbNext, block); @@ -2735,7 +2735,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc bool optimizeJump = true; assert(bDest->isEmpty()); - assert(bDest->getBBJumpKind() == BBJ_ALWAYS); + assert(bDest->KindIs(BBJ_ALWAYS)); // We do not optimize jumps between two different try regions. // However jumping to a block that is not in any try region is OK @@ -2930,7 +2930,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - if (bPrev->getBBJumpKind() != BBJ_NONE) + if (!bPrev->KindIs(BBJ_NONE)) { break; } @@ -2957,7 +2957,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ - if (bPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (bPrev->KindIs(BBJ_CALLFINALLY)) { break; } @@ -2980,7 +2980,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { BasicBlock* succBlock; - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { succBlock = block->bbJumpDest; } @@ -2997,7 +2997,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->getBBJumpKind() == BBJ_EHCATCHRET) + if (predBlock->KindIs(BBJ_EHCATCHRET)) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block @@ -3119,7 +3119,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { - assert(block->getBBJumpKind() == BBJ_SWITCH); + assert(block->KindIs(BBJ_SWITCH)); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; @@ -3134,7 +3134,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? 
- if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; @@ -3502,7 +3502,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. - if (target->getBBJumpKind() != BBJ_COND) + if (!target->KindIs(BBJ_COND)) { return false; } @@ -3741,7 +3741,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // if (opts.IsOSR()) { - assert(target->getBBJumpKind() == BBJ_COND); + assert(target->KindIs(BBJ_COND)); if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { @@ -3829,7 +3829,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) @@ -3859,7 +3859,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi else { /* remove the conditional statement at the end of block */ - noway_assert(block->getBBJumpKind() == BBJ_COND); + noway_assert(block->KindIs(BBJ_COND)); noway_assert(block->isValid()); #ifdef DEBUG @@ -4002,7 +4002,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - if (bJump->getBBJumpKind() != BBJ_ALWAYS) + if (!bJump->KindIs(BBJ_ALWAYS)) { return false; } @@ -4021,7 +4021,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) BasicBlock* bDest = bJump->bbJumpDest; - if (bDest->getBBJumpKind() != BBJ_COND) + if (!bDest->KindIs(BBJ_COND)) { return false; } @@ -4324,7 +4324,7 @@ bool Compiler::fgOptimizeSwitchJumps() // assert(!block->IsLIR()); - if (block->getBBJumpKind() != BBJ_SWITCH) + if (!block->KindIs(BBJ_SWITCH)) { continue; } @@ -4516,7 +4516,7 @@ bool Compiler::fgExpandRarelyRunBlocks() noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else - if (tmpbb->getBBJumpKind() == BBJ_CALLFINALLY) + if (tmpbb->KindIs(BBJ_CALLFINALLY)) { bPrevPrev = tmpbb; } @@ -4742,7 +4742,7 @@ bool Compiler::fgExpandRarelyRunBlocks() } /* COMPACT blocks if possible */ - if (bPrev->getBBJumpKind() == BBJ_NONE) + if (bPrev->KindIs(BBJ_NONE)) { if (fgCanCompactBlocks(bPrev, block)) { @@ -4934,7 +4934,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (forwardBranch) { - if (bPrev->getBBJumpKind() == BBJ_ALWAYS) + if (bPrev->KindIs(BBJ_ALWAYS)) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block @@ -5017,9 +5017,9 @@ bool Compiler::fgReorderBlocks(bool useProfile) } } } - else // (bPrev->getBBJumpKind() == BBJ_COND) + else // (bPrev->KindIs(BBJ_COND)) { - noway_assert(bPrev->getBBJumpKind() == BBJ_COND); + noway_assert(bPrev->KindIs(BBJ_COND)); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 
'takenRatio') // is more than 51% @@ -5211,8 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->getBBJumpKind() == BBJ_RETURN) && - (bPrev->getBBJumpKind() == BBJ_ALWAYS)) + if ((bDest == block->bbNext) && (block->KindIs(BBJ_RETURN)) && (bPrev->KindIs(BBJ_ALWAYS))) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. @@ -5246,7 +5245,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { - noway_assert(bPrev->getBBJumpKind() == BBJ_COND); + noway_assert(bPrev->KindIs(BBJ_COND)); optimizedBranches = true; } continue; @@ -5423,7 +5422,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (bEnd2->isBBCallAlwaysPair()) { - noway_assert(bNext->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(bNext->KindIs(BBJ_ALWAYS)); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; @@ -5502,12 +5501,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) { if (bDest != nullptr) { - if (bPrev->getBBJumpKind() == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } - else if (bPrev->getBBJumpKind() == BBJ_ALWAYS) + else if (bPrev->KindIs(BBJ_ALWAYS)) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); @@ -5577,7 +5576,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call - noway_assert((bEnd->getBBJumpKind() != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); + noway_assert(!bEnd->KindIs(BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); @@ -5716,7 +5715,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; - if ((bEnd->getBBJumpKind() == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && + if (bEnd->KindIs(BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] @@ -5844,7 +5843,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) printf("block " FMT_BB, bStart->bbNum); } - if (bPrev->getBBJumpKind() == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } @@ -5855,7 +5854,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } #endif // DEBUG - if (bPrev->getBBJumpKind() == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { /* Reverse the bPrev jump condition */ Statement* const condTestStmt = bPrev->lastStmt(); @@ -6103,7 +6102,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext = block->bbNext; bDest = nullptr; - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) @@ -6115,7 +6114,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } } - if (block->getBBJumpKind() == BBJ_NONE) + if (block->KindIs(BBJ_NONE)) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) @@ -6147,7 +6146,7 @@ bool 
Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? - if (bDest->isEmpty() && (bDest->getBBJumpKind() == BBJ_ALWAYS) && + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) { @@ -6166,225 +6165,226 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // - if ((block->getBBJumpKind() == BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - (bNext->getBBJumpKind() == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block bNext->isEmpty() && // and it is an empty block (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections - { - // case (a) - // - const bool isJumpAroundEmpty = (bNext->bbNext == bDest); - - // case (b) - // - // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we - // differentiate the roles played by bDest and bNextJumpDest. We need some - // sense of which arrangement is preferable to avoid getting stuck in a loop - // reversing and re-reversing. - // - // Other tiebreaking criteria could be considered.
+ // + // Pragmatic constraints: + // + // * don't consider lexical predecessors, or we may confuse loop recognition + // * don't consider blocks of different rarities + // + BasicBlock* const bNextJumpDest = bNext->bbJumpDest; + const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && + (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && + (block->isRunRarely() == bDest->isRunRarely()); - // If we are optimizing using real profile weights - // then don't optimize a conditional jump to an unconditional jump - // until after we have computed the edge weights - // - if (fgIsUsingProfileWeights()) - { - // if block and bdest are in different hot/cold regions we can't do this optimization - // because we can't allow fall-through into the cold region. - if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) + bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; + + // We do not optimize jumps between two different try regions. + // However jumping to a block that is not in any try region is OK + // + if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } - } - if (optimizeJump && isJumpToJoinFree) - { - // In the join free case, we also need to move bDest right after bNext - // to create same flow as in the isJumpAroundEmpty case. + // Also consider bNext's try region // - if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) + if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) { optimizeJump = false; } - else - { - // We don't expect bDest to already be right after bNext. - // - assert(bDest != bNext->bbNext); - - JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, - bNext->bbNum); - // If bDest can fall through we'll need to create a jump - // block after it too. Remember where to jump to. - // - BasicBlock* const bDestNext = bDest->bbNext; + // If we are optimizing using real profile weights + // then don't optimize a conditional jump to an unconditional jump + // until after we have computed the edge weights + // + if (fgIsUsingProfileWeights()) + { + // if block and bdest are in different hot/cold regions we can't do this optimization + // because we can't allow fall-through into the cold region. + if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) + { + optimizeJump = false; + } + } - // Move bDest + if (optimizeJump && isJumpToJoinFree) + { + // In the join free case, we also need to move bDest right after bNext + // to create same flow as in the isJumpAroundEmpty case. // - if (ehIsBlockEHLast(bDest)) + if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) { - ehUpdateLastBlocks(bDest, bDest->bbPrev); + optimizeJump = false; } + else + { + // We don't expect bDest to already be right after bNext. + // + assert(bDest != bNext->bbNext); - fgUnlinkBlock(bDest); - fgInsertBBafter(bNext, bDest); + JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, + bNext->bbNum); - if (ehIsBlockEHLast(bNext)) - { - ehUpdateLastBlocks(bNext, bDest); - } + // If bDest can fall through we'll need to create a jump + // block after it too. Remember where to jump to. + // + BasicBlock* const bDestNext = bDest->bbNext; - // Add fall through fixup block, if needed. 
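// NOTE (illustrative, not part of this patch): the two patterns this region reverses:
//
//     case (a), jump around an empty block (bNext->bbNext == bDest):
//         block: BBJ_COND   -> bDest          =>  block: BBJ_COND (condition reversed) -> bNextJumpDest
//         bNext: BBJ_ALWAYS -> bNextJumpDest      bNext removed
//
//     case (b), jump to a join-free target: the same rewrite, after first moving bDest to
//     follow bNext (guarded by fgEhAllowsMoveBlock, with the fall-through fixup block below).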
- // - if (bDest->KindIs(BBJ_NONE, BBJ_COND)) - { - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); - bFixup->inheritWeight(bDestNext); - bFixup->bbJumpDest = bDestNext; + // Move bDest + // + if (ehIsBlockEHLast(bDest)) + { + ehUpdateLastBlocks(bDest, bDest->bbPrev); + } - fgRemoveRefPred(bDestNext, bDest); - fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestNext, bFixup); - } - } - } + fgUnlinkBlock(bDest); + fgInsertBBafter(bNext, bDest); - if (optimizeJump) - { - JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB - ", " FMT_BB " -> " FMT_BB ")\n", - block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); + if (ehIsBlockEHLast(bNext)) + { + ehUpdateLastBlocks(bNext, bDest); + } - // Reverse the jump condition - // - GenTree* test = block->lastNode(); - noway_assert(test->OperIsConditionalJump()); + // Add fall through fixup block, if needed. + // + if (bDest->KindIs(BBJ_NONE, BBJ_COND)) + { + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); + bFixup->inheritWeight(bDestNext); + bFixup->bbJumpDest = bDestNext; - if (test->OperGet() == GT_JTRUE) - { - GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); - assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. - test->AsOp()->gtOp1 = cond; + fgRemoveRefPred(bDestNext, bDest); + fgAddRefPred(bFixup, bDest); + fgAddRefPred(bDestNext, bFixup); + } + } } - else + + if (optimizeJump) { - gtReverseCond(test); - } + JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB + ", " FMT_BB " -> " FMT_BB ")\n", + block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); - // Optimize the Conditional JUMP to go to the new target - block->bbJumpDest = bNext->bbJumpDest; + // Reverse the jump condition + // + GenTree* test = block->lastNode(); + noway_assert(test->OperIsConditionalJump()); - fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); + if (test->OperGet() == GT_JTRUE) + { + GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); + assert(cond == + test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. + test->AsOp()->gtOp1 = cond; + } + else + { + gtReverseCond(test); + } - /* - Unlink bNext from the BasicBlock list; note that we can - do this even though other blocks could jump to it - the - reason is that elsewhere in this function we always - redirect jumps to jumps to jump to the final label, - so even if another block jumps to bNext it won't matter - once we're done since any such jump will be redirected - to the final target by the time we're done here. - */ + // Optimize the Conditional JUMP to go to the new target + block->bbJumpDest = bNext->bbJumpDest; - fgRemoveRefPred(bNext, block); - fgUnlinkBlock(bNext); + fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); - /* Mark the block as removed */ - bNext->bbFlags |= BBF_REMOVED; + /* + Unlink bNext from the BasicBlock list; note that we can + do this even though other blocks could jump to it - the + reason is that elsewhere in this function we always + redirect jumps to jumps to jump to the final label, + so even if another block jumps to bNext it won't matter + once we're done since any such jump will be redirected + to the final target by the time we're done here. + */ - // Update the loop table if we removed the bottom of a loop, for example. 
- fgUpdateLoopsAfterCompacting(block, bNext); + fgRemoveRefPred(bNext, block); + fgUnlinkBlock(bNext); - // If this block was aligned, unmark it - bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); + /* Mark the block as removed */ + bNext->bbFlags |= BBF_REMOVED; - // If this is the first Cold basic block update fgFirstColdBlock - if (bNext == fgFirstColdBlock) - { - fgFirstColdBlock = bNext->bbNext; - } + // Update the loop table if we removed the bottom of a loop, for example. + fgUpdateLoopsAfterCompacting(block, bNext); - // - // If we removed the end of a try region or handler region - // we will need to update ebdTryLast or ebdHndLast. - // + // If this block was aligned, unmark it + bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); - for (EHblkDsc* const HBtab : EHClauses(this)) - { - if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) + // If this is the first Cold basic block update fgFirstColdBlock + if (bNext == fgFirstColdBlock) { - fgSkipRmvdBlocks(HBtab); + fgFirstColdBlock = bNext->bbNext; } - } - // we optimized this JUMP - goto REPEAT to catch similar cases - change = true; - modified = true; + // + // If we removed the end of a try region or handler region + // we will need to update ebdTryLast or ebdHndLast. + // + + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) + { + fgSkipRmvdBlocks(HBtab); + } + } + + // we optimized this JUMP - goto REPEAT to catch similar cases + change = true; + modified = true; #ifdef DEBUG - if (verbose) - { - printf("\nAfter reversing the jump:\n"); - fgDispBasicBlocks(verboseTrees); - } + if (verbose) + { + printf("\nAfter reversing the jump:\n"); + fgDispBasicBlocks(verboseTrees); + } #endif // DEBUG - /* - For a rare special case we cannot jump to REPEAT - as jumping to REPEAT will cause us to delete 'block' - because it currently appears to be unreachable. As - it is a self loop that only has a single bbRef (itself) - However since the unlinked bNext has additional bbRefs - (that we will later connect to 'block'), it is not really - unreachable. - */ - if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) - { - continue; - } + /* + For a rare special case we cannot jump to REPEAT + as jumping to REPEAT will cause us to delete 'block' + because it currently appears to be unreachable. As + it is a self loop that only has a single bbRef (itself) + However since the unlinked bNext has additional bbRefs + (that we will later connect to 'block'), it is not really + unreachable. + */ + if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) + { + continue; + } - goto REPEAT; + goto REPEAT; + } } - } } // // Update the switch jump table such that it follows jumps to jumps: // - if (block->getBBJumpKind() == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { if (fgOptimizeSwitchBranches(block)) { @@ -6419,11 +6419,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
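// NOTE (illustrative, not part of this patch): the invariant guarded here is the
// BBJ_CALLFINALLY/BBJ_ALWAYS pair layout checked by isBBCallAlwaysPair:
//
//     bPrev: BBJ_CALLFINALLY -> finally handler    (not BBF_RETLESS_CALL)
//     block: BBJ_ALWAYS      -> continuation       (BBF_KEEP_BBJ_ALWAYS)
//
// On ARM with EH funclets the BBJ_ALWAYS half must be kept even when it has no in-edges,
// since its jump target is the point the finally returns to.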
- if (block->countOfInEdges() == 0 && bPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->countOfInEdges() == 0 && bPrev->KindIs(BBJ_CALLFINALLY)) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); - noway_assert(block->getBBJumpKind() == BBJ_ALWAYS); + noway_assert(block->KindIs(BBJ_ALWAYS)); bPrev = block; continue; } @@ -6900,7 +6900,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) } bool const isNoSplit = stmt == predBlock->firstStmt(); - bool const isFallThrough = (predBlock->getBBJumpKind() == BBJ_NONE); + bool const isFallThrough = (predBlock->KindIs(BBJ_NONE)); // Is this block possibly better than what we have? // @@ -7068,7 +7068,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. - if ((block->getBBJumpKind() != BBJ_COND) || (block->bbNext == block->bbJumpDest)) + if (!block->KindIs(BBJ_COND) || (block->bbNext == block->bbJumpDest)) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 317dd4a25bca2..26c9afc1fbcf0 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -473,7 +473,7 @@ void BlockCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->getBBJumpKind() == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); // Scan for critical preds, and add relocated probes to non-critical preds. // @@ -499,12 +499,12 @@ void BlockCountInstrumentor::RelocateProbes() { // Ensure this pred is not a fall through. // - if (pred->getBBJumpKind() == BBJ_NONE) + if (pred->KindIs(BBJ_NONE)) { pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->getBBJumpKind() == BBJ_ALWAYS); + assert(pred->KindIs(BBJ_ALWAYS)); } } @@ -1028,7 +1028,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) JITDUMP("No jump dest for " FMT_BB ", suspect bad code\n", block->bbNum); visitor->Badcode(); } - else if (block->getBBJumpKind() != BBJ_LEAVE) + else if (!block->KindIs(BBJ_LEAVE)) { JITDUMP("EH RET in " FMT_BB " most-nested in try, suspect bad code\n", block->bbNum); visitor->Badcode(); @@ -1552,7 +1552,7 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // Importer folding may have changed the block jump kind // to BBJ_NONE. If so, warp it back to BBJ_ALWAYS. // - if (block->getBBJumpKind() == BBJ_NONE) + if (block->KindIs(BBJ_NONE)) { block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; @@ -1657,7 +1657,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->getBBJumpKind() == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); // This block should have just one probe, which we no longer need. // @@ -1695,12 +1695,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // Ensure this pred is not a fall through. 
// - if (pred->getBBJumpKind() == BBJ_NONE) + if (pred->KindIs(BBJ_NONE)) { pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->getBBJumpKind() == BBJ_ALWAYS); + assert(pred->KindIs(BBJ_ALWAYS)); } } @@ -3166,7 +3166,7 @@ void EfficientEdgeCountReconstructor::Prepare() m_unknownBlocks++; #ifdef DEBUG - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { nReturns++; } @@ -3233,7 +3233,7 @@ void EfficientEdgeCountReconstructor::Prepare() CLRRandom* const random = m_comp->impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomEdgeCounts()); - const bool isReturn = sourceBlock->getBBJumpKind() == BBJ_RETURN; + const bool isReturn = sourceBlock->KindIs(BBJ_RETURN); // We simulate the distribution of counts seen in StdOptimizationData.Mibc. // @@ -3949,7 +3949,7 @@ void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, B // void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, BlockInfo* info) { - assert(block->getBBJumpKind() == BBJ_SWITCH); + assert(block->KindIs(BBJ_SWITCH)); // Thresholds for detecting a dominant switch case. // @@ -4429,11 +4429,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) bSrc = bDst->bbPreds->getSourceBlock(); // Does this block flow into only one other block - if (bSrc->getBBJumpKind() == BBJ_NONE) + if (bSrc->KindIs(BBJ_NONE)) { bOnlyNext = bSrc->bbNext; } - else if (bSrc->getBBJumpKind() == BBJ_ALWAYS) + else if (bSrc->KindIs(BBJ_ALWAYS)) { bOnlyNext = bSrc->bbJumpDest; } @@ -4450,11 +4450,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } // Does this block flow into only one other block - if (bDst->getBBJumpKind() == BBJ_NONE) + if (bDst->KindIs(BBJ_NONE)) { bOnlyNext = bDst->bbNext; } - else if (bDst->getBBJumpKind() == BBJ_ALWAYS) + else if (bDst->KindIs(BBJ_ALWAYS)) { bOnlyNext = bDst->bbJumpDest; } @@ -4485,7 +4485,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // To minimize asmdiffs for now, modify weights only if splitting. 
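// NOTE (sketch copied from the instrumentor hunks above): both probe-relocation paths use
// the same fixup when a return block's predecessor falls through:
//
//     if (pred->KindIs(BBJ_NONE))
//     {
//         pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); // make the edge an explicit jump
//         pred->bbJumpDest = block;
//     }
//     assert(pred->KindIs(BBJ_ALWAYS));
//
// An explicit BBJ_ALWAYS edge gives the relocated probe a block of its own to land in.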
if (fgFirstColdBlock != nullptr) { - if (bSrc->getBBJumpKind() == BBJ_CALLFINALLY) + if (bSrc->KindIs(BBJ_CALLFINALLY)) { newWeight = bSrc->bbWeight; } @@ -4756,7 +4756,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() bSrc = edge->getSourceBlock(); slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - if (bSrc->getBBJumpKind() == BBJ_COND) + if (bSrc->KindIs(BBJ_COND)) { weight_t diff; FlowEdge* otherEdge; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 722f5f8cadfdd..f256ca73846c8 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->getBBJumpKind() == BBJ_THROW); - bool const isNextThrow = (next->getBBJumpKind() == BBJ_THROW); + bool const isJumpThrow = (jump->KindIs(BBJ_THROW)); + bool const isNextThrow = (next->KindIs(BBJ_THROW)); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->getBBJumpKind() == BBJ_RETURN); - bool const isNextReturn = (next->getBBJumpKind() == BBJ_RETURN); + bool const isJumpReturn = (jump->KindIs(BBJ_RETURN)); + bool const isNextReturn = (next->KindIs(BBJ_RETURN)); if (isJumpReturn != isNextReturn) { @@ -551,7 +551,7 @@ void ProfileSynthesis::RepairLikelihoods() } JITDUMP("\n"); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { AssignLikelihoodCond(block); } @@ -627,7 +627,7 @@ void ProfileSynthesis::BlendLikelihoods() bool const consistent = Compiler::fgProfileWeightsEqual(sum, 1.0, epsilon); bool const zero = Compiler::fgProfileWeightsEqual(block->bbWeight, 0.0, epsilon); - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { AssignLikelihoodCond(block); } @@ -1214,8 +1214,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. 
// - if ((exitBlock->getBBJumpKind() == BBJ_COND) && - (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if ((exitBlock->KindIs(BBJ_COND)) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 2d5c2b3fd68a3..2ef7dbc9d38a3 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -254,7 +254,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) BasicBlock* topFallThrough = nullptr; unsigned char lpIndexFallThrough = BasicBlock::NOT_IN_LOOP; - if (top->getBBJumpKind() == BBJ_COND) + if (top->KindIs(BBJ_COND)) { topFallThrough = top->bbNext; lpIndexFallThrough = topFallThrough->bbNatLoopNum; @@ -1728,7 +1728,7 @@ void Compiler::fgAddSyncMethodEnterExit() // non-exceptional cases for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, block, false /*exit*/); } @@ -1772,7 +1772,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis } #endif - if (block->getBBJumpKind() == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) + if (block->KindIs(BBJ_RETURN) && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) { GenTreeUnOp* retNode = block->lastStmt()->GetRootNode()->AsUnOp(); GenTree* retExpr = retNode->gtOp1; @@ -1821,7 +1821,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(genReturnBB != nullptr); assert(genReturnBB != block); assert(fgReturnCount <= 1); // We have a single return for synchronized methods - assert(block->getBBJumpKind() == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); assert((block->bbFlags & BBF_HAS_JMP) == 0); assert(block->hasTryIndex()); assert(!block->hasHndIndex()); @@ -1949,7 +1949,7 @@ bool Compiler::fgMoreThanOneReturnBlock() for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { retCnt++; if (retCnt > 1) @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3451,7 +3451,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // so the code size for block needs be large // enough to make it worth our while // - if ((lblk == nullptr) || (lblk->getBBJumpKind() != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) + if ((lblk == nullptr) || !lblk->KindIs(BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) { // This block is now a candidate for first cold block // Also remember the predecessor to this block @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. 
// - if (firstColdBlock->isEmpty() && (firstColdBlock->getBBJumpKind() == BBJ_ALWAYS)) + if (firstColdBlock->isEmpty() && (firstColdBlock->KindIs(BBJ_ALWAYS))) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index 404d86e3abc0e..0953920d6192e 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -529,7 +529,7 @@ void Compiler::gsParamsToShadows() // We would have to insert assignments in all such blocks, just before GT_JMP stmnt. for (BasicBlock* const block : Blocks()) { - if (block->getBBJumpKind() != BBJ_RETURN) + if (!block->KindIs(BBJ_RETURN)) { continue; } diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index da0683be95ab4..7b50f5428458b 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -83,7 +83,7 @@ class OptIfConversionDsc bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block) { // Block should have a single successor or be a return. - if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->getBBJumpKind() == BBJ_RETURN)))) + if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->KindIs(BBJ_RETURN))))) { return false; } @@ -137,7 +137,7 @@ bool OptIfConversionDsc::IfConvertCheckThenFlow() { // All the Then blocks up to m_finalBlock are in a valid flow. m_flowFound = true; - if (thenBlock->getBBJumpKind() == BBJ_RETURN) + if (thenBlock->KindIs(BBJ_RETURN)) { assert(m_finalBlock == nullptr); m_mainOper = GT_RETURN; @@ -553,7 +553,7 @@ void OptIfConversionDsc::IfConvertDump() bool OptIfConversionDsc::optIfConvert() { // Does the block end by branching via a JTRUE after a compare? - if (m_startBlock->getBBJumpKind() != BBJ_COND || m_startBlock->NumSucc() != 2) + if (!m_startBlock->KindIs(BBJ_COND) || m_startBlock->NumSucc() != 2) { return false; } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 704536165d584..f0e46f2100a25 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4101,7 +4101,7 @@ bool Compiler::impIsImplicitTailCallCandidate( // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. 
- if (!isRecursive && (compCurBB->getBBJumpKind() != BBJ_RETURN)) + if (!isRecursive && !compCurBB->KindIs(BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN @@ -4250,7 +4250,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->getBBJumpKind() == BBJ_LEAVE); + assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); @@ -4344,7 +4344,7 @@ void Compiler::impImportLeave(BasicBlock* block) /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); if (step->bbJumpDest != nullptr) { fgRemoveRefPred(step->bbJumpDest, step); @@ -4523,7 +4523,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->getBBJumpKind() == BBJ_LEAVE); + assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; @@ -4606,7 +4606,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4708,7 +4708,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS - if (step->getBBJumpKind() == BBJ_EHCATCHRET) + if (step->KindIs(BBJ_EHCATCHRET)) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4758,7 +4758,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4850,12 +4850,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); } else { assert(stepType == ST_Catch); - assert(step->getBBJumpKind() == BBJ_EHCATCHRET); + assert(step->KindIs(BBJ_EHCATCHRET)); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ @@ -4931,7 +4931,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->getBBJumpKind() == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4992,7 +4992,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
- if (block->getBBJumpKind() == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { BasicBlock* dupBlock = bbNewBasicBlock(block->getBBJumpKind()); dupBlock->bbFlags = block->bbFlags; @@ -6715,7 +6715,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); - assert(compCurBB->getBBJumpKind() == BBJ_EHFILTERRET); + assert(compCurBB->KindIs(BBJ_EHFILTERRET)); /* Mark catch handler as successor */ @@ -7256,7 +7256,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } JITDUMP(" %04X", jmpAddr); - if (block->getBBJumpKind() != BBJ_LEAVE) + if (!block->KindIs(BBJ_LEAVE)) { impResetLeaveBlock(block, jmpAddr); } @@ -7302,7 +7302,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); @@ -7311,7 +7311,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - assert(block->getBBJumpKind() == BBJ_NONE); + assert(block->KindIs(BBJ_NONE)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7363,12 +7363,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); - assertImp((block->getBBJumpKind() == BBJ_COND) // normal case - || - (block->getBBJumpKind() == foldedJumpKind)); // this can happen if we are reimporting the - // block for the second time + // BBJ_COND: normal case + // foldedJumpKind: this can happen if we are reimporting the block for the second time + assertImp(block->KindIs(BBJ_COND, foldedJumpKind)); // normal case - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { if (foldedJumpKind == BBJ_NONE) { @@ -7549,7 +7548,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); @@ -7558,7 +7557,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - assert(block->getBBJumpKind() == BBJ_NONE); + assert(block->KindIs(BBJ_NONE)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7658,8 +7657,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) { printf("\nSwitch folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->getBBJumpKind() == BBJ_ALWAYS) + block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -8532,10 +8531,9 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } - bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = - (block->getBBJumpKind() == BBJ_RETURN) && - (!compIsForInlining() || (impInlineInfo->iciBlock->getBBJumpKind() == BBJ_RETURN)); + bool bbInALoop = impBlockIsInALoop(block); + bool bbIsReturn = (block->KindIs(BBJ_RETURN)) && + (!compIsForInlining() || (impInlineInfo->iciBlock->KindIs(BBJ_RETURN))); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { @@ -12119,11 +12117,11 @@ void Compiler::impImport() JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; - if (entryBlock->getBBJumpKind() == BBJ_NONE) + if (entryBlock->KindIs(BBJ_NONE)) { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->getBBJumpKind() == BBJ_ALWAYS)) + else if (opts.IsOSR() && (entryBlock->KindIs(BBJ_ALWAYS))) { entryBlock = entryBlock->bbJumpDest; } @@ -12241,7 +12239,7 @@ void Compiler::impFixPredLists() continue; } - if (finallyBlock->getBBJumpKind() != BBJ_EHFINALLYRET) + if (!finallyBlock->KindIs(BBJ_EHFINALLYRET)) { continue; } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index fbe0978f2514b..deff9be27e4c0 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1095,7 +1095,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); - assert(!isExplicitTailCall || compCurBB->getBBJumpKind() == BBJ_RETURN); + assert(!isExplicitTailCall || compCurBB->KindIs(BBJ_RETURN)); // Ask VM for permission to tailcall if (canTailCall) @@ -1271,10 +1271,10 @@ var_types Compiler::impImportCall(OPCODE opcode, // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. 
// - if (compCurBB->getBBJumpKind() != BBJ_RETURN) + if (!compCurBB->KindIs(BBJ_RETURN)) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); - assert(successor->getBBJumpKind() == BBJ_RETURN); + assert(successor->KindIs(BBJ_RETURN)); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 15cee342aa603..ab67048abbbc6 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -1073,7 +1073,7 @@ class IndirectCallTransformer // BasicBlock* const coldBlock = checkBlock->bbPrev; - if (coldBlock->getBBJumpKind() != BBJ_NONE) + if (!coldBlock->KindIs(BBJ_NONE)) { JITDUMP("Unexpected flow from cold path " FMT_BB "\n", coldBlock->bbNum); return; @@ -1081,7 +1081,7 @@ class IndirectCallTransformer BasicBlock* const hotBlock = coldBlock->bbPrev; - if ((hotBlock->getBBJumpKind() != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) + if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) { JITDUMP("Unexpected flow from hot path " FMT_BB "\n", hotBlock->bbNum); return; diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 888058d133b62..8606b18743e72 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -960,7 +960,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { - assert(blockCallFinally->getBBJumpKind() == BBJ_CALLFINALLY); + assert(blockCallFinally->KindIs(BBJ_CALLFINALLY)); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); @@ -2276,7 +2276,7 @@ bool Compiler::fgNormalizeEHCase2() // Change pred branches. // - if (predBlock->getBBJumpKind() != BBJ_NONE) + if (!predBlock->KindIs(BBJ_NONE)) { fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); } @@ -4056,12 +4056,12 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->getBBJumpKind() == BBJ_ALWAYS && predBlock->bbJumpDest == block) + if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { - if (pPrev->getBBJumpKind() == BBJ_CALLFINALLY) + if (pPrev->KindIs(BBJ_CALLFINALLY)) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; @@ -4113,7 +4113,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. - if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (predBlock->KindIs(BBJ_CALLFINALLY)) { assert(predBlock->bbJumpDest == block); @@ -4184,7 +4184,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) // The block is a handler. Check if the pred block is from its filter. We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. 
- if (predBlock->getBBJumpKind() == BBJ_EHFILTERRET) + if (predBlock->KindIs(BBJ_EHFILTERRET)) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; @@ -4413,7 +4413,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); - assert(bFilterLast->getBBJumpKind() == BBJ_EHFILTERRET); + assert(bFilterLast->KindIs(BBJ_EHFILTERRET)); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 820545508968e..8af56fa167317 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -4098,7 +4098,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); // TODO: Zero-inits in LSRA are created with below condition. But if filter out based on that condition // we filter a lot of interesting variables that would benefit otherwise with EH var enregistration. // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem || diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 9c9aafe0686b6..d66ddc05a5cdc 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -491,7 +491,7 @@ void Compiler::fgPerBlockLocalVarLiveness() // Mark the FrameListRoot as used, if applicable. - if (block->getBBJumpKind() == BBJ_RETURN && compMethodRequiresPInvokeFrame()) + if (block->KindIs(BBJ_RETURN) && compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) @@ -2451,7 +2451,7 @@ void Compiler::fgInterBlockLocalVarLiveness() { // Get the set of live variables on exit from an exception region. VarSetOps::UnionD(this, exceptVars, block->bbLiveOut); - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index c6e6dc91c3d88..e9f4df7692474 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1766,7 +1766,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { - if (blk->getBBJumpKind() == BBJ_RETURN) + if (blk->KindIs(BBJ_RETURN)) { loopRetCount++; } @@ -1855,7 +1855,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; - if (bottom->getBBJumpKind() != BBJ_COND) + if (!bottom->KindIs(BBJ_COND)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". 
Couldn't find termination test.\n", loopInd); return false; @@ -1945,7 +1945,7 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); - assert(insertAfter->getBBJumpKind() == BBJ_NONE); + assert(insertAfter->KindIs(BBJ_NONE)); if (context->HasBlockConditions(loopNum)) { @@ -2043,9 +2043,9 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) h2->bbNatLoopNum = ambientLoop; h2->bbFlags |= BBF_LOOP_PREHEADER; - if (h->getBBJumpKind() != BBJ_NONE) + if (!h->KindIs(BBJ_NONE)) { - assert(h->getBBJumpKind() == BBJ_ALWAYS); + assert(h->KindIs(BBJ_ALWAYS)); assert(h->bbJumpDest == loop.lpEntry); h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; @@ -2069,9 +2069,9 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; - if (b->getBBJumpKind() != BBJ_ALWAYS) + if (!b->KindIs(BBJ_ALWAYS)) { - assert(b->getBBJumpKind() == BBJ_COND); + assert(b->KindIs(BBJ_COND)); BasicBlock* x = b->bbNext; if (x != nullptr) @@ -2175,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->getBBJumpKind() == newblk->getBBJumpKind()); + assert(blk->KindIs(newblk->getBBJumpKind())); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); @@ -2243,7 +2243,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // We should always have block conditions. assert(context->HasBlockConditions(loopInd)); - assert(h->getBBJumpKind() == BBJ_NONE); + assert(h->KindIs(BBJ_NONE)); assert(h->bbNext == h2); // If any condition is false, go to slowHead (which branches or falls through to e2). @@ -2254,7 +2254,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. - assert(slowHead->getBBJumpKind() == BBJ_NONE); // This is how we created it above. + assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. slowHead->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } @@ -2266,7 +2266,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). - assert(condLast->getBBJumpKind() == BBJ_COND); + assert(condLast->KindIs(BBJ_COND)); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index b985a5a8b1229..26f4ed946955d 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -891,9 +891,9 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. 
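A note on the setter calls above (h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)) and friends): the trailing DEBUG_ARG threads the Compiler* through in DEBUG builds only. Assuming it matches the jit's usual conditional-argument macros (its definition is not shown in this diff), it expands roughly as:

    #ifdef DEBUG
    #define DEBUG_ARG(x) , x
    #else
    #define DEBUG_ARG(x)
    #endif

    // DEBUG build:   h2->setBBJumpKind(BBJ_ALWAYS, this);
    // Release build: h2->setBBJumpKind(BBJ_ALWAYS);
    h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));

so release builds pay nothing for the debug-only parameter.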
- assert(originalSwitchBB->getBBJumpKind() == BBJ_NONE); + assert(originalSwitchBB->KindIs(BBJ_NONE)); assert(originalSwitchBB->bbNext == afterDefaultCondBlock); - assert(afterDefaultCondBlock->getBBJumpKind() == BBJ_SWITCH); + assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. @@ -1074,7 +1074,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); - assert(currentBlock->getBBJumpKind() == BBJ_SWITCH); + assert(currentBlock->KindIs(BBJ_SWITCH)); currentBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. @@ -1159,7 +1159,7 @@ bool Lowering::TryLowerSwitchToBitTest( { assert(jumpCount >= 2); assert(targetCount >= 2); - assert(bbSwitch->getBBJumpKind() == BBJ_SWITCH); + assert(bbSwitch->KindIs(BBJ_SWITCH)); assert(switchValue->OperIs(GT_LCL_VAR)); // @@ -5296,7 +5296,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->getBBJumpKind() == BBJ_RETURN)) || + assert(((returnBB == comp->genReturnBB) && (returnBB->KindIs(BBJ_RETURN))) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 1b7aebaea1997..88af18d880898 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -964,7 +964,7 @@ void LinearScan::setBlockSequence() blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } - else if (predBlock->getBBJumpKind() == BBJ_SWITCH) + else if (predBlock->KindIs(BBJ_SWITCH)) { assert(!"Switch with single successor"); } @@ -993,7 +993,7 @@ void LinearScan::setBlockSequence() // according to the desired order. We will handle the EH successors below. const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); - if (!checkForCriticalOutEdge && block->getBBJumpKind() == BBJ_SWITCH) + if (!checkForCriticalOutEdge && block->KindIs(BBJ_SWITCH)) { assert(!"Switch with single successor"); } @@ -1549,7 +1549,7 @@ void LinearScan::identifyCandidatesExceptionDataflow() if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); - if (block->getBBJumpKind() == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, @@ -2513,7 +2513,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, // IG08: // ... // ... - if (block->getBBJumpKind() == BBJ_THROW) + if (block->KindIs(BBJ_THROW)) { JITDUMP(" - throw block; "); return nullptr; @@ -2544,7 +2544,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { - if (predBlock->getBBJumpKind() == BBJ_COND) + if (predBlock->KindIs(BBJ_COND)) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? 
predBlock->bbJumpDest : predBlock->bbNext; @@ -8177,7 +8177,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: Only switches and JCMP/JTEST (for Arm4) have input regs (and so can be fed by copies), so those // are the only block-ending branches that need special handling. regMaskTP consumedRegs = RBM_NONE; - if (block->getBBJumpKind() == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { // At this point, Lowering has transformed any non-switch-table blocks into // cascading ifs. @@ -8216,7 +8216,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: GT_COPY has special handling in codegen and its generation is merged with the // node that consumes its result. So both, the input and output regs of GT_COPY must be // excluded from the set available for resolution. - else if (block->getBBJumpKind() == BBJ_COND) + else if (block->KindIs(BBJ_COND)) { GenTree* lastNode = LIR::AsRange(block).LastNode(); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 0342221537d57..88175479c16b5 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6126,7 +6126,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // No unique successor. compCurBB should be a return. // - assert(compCurBB->getBBJumpKind() == BBJ_RETURN); + assert(compCurBB->KindIs(BBJ_RETURN)); } else { @@ -6329,7 +6329,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. - noway_assert(compCurBB->getBBJumpKind() == BBJ_RETURN); + noway_assert(compCurBB->KindIs(BBJ_RETURN)); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; @@ -8032,7 +8032,7 @@ GenTree* Compiler::fgMorphConst(GenTree* tree) // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; - if (compCurBB->getBBJumpKind() == BBJ_THROW) + if (compCurBB->KindIs(BBJ_THROW)) { useLazyStrCns = true; } @@ -13120,7 +13120,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) return result; } - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13293,9 +13293,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->getBBJumpKind() == BBJ_ALWAYS) + printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13356,7 +13355,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } } } - else if (block->getBBJumpKind() == BBJ_SWITCH) + else if (block->KindIs(BBJ_SWITCH)) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13452,9 +13451,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->getBBJumpKind() == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->getBBJumpKind() == BBJ_ALWAYS) + printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13727,10 +13725,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN) && + noway_assert((call->IsFastTailCall() && (compCurBB->KindIs(BBJ_RETURN)) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->getBBJumpKind() == BBJ_THROW)) || - (!call->IsTailCall() && (compCurBB->getBBJumpKind() == BBJ_RETURN))); + (call->IsTailCallViaJitHelper() && (compCurBB->KindIs(BBJ_THROW))) || + (!call->IsTailCall() && (compCurBB->KindIs(BBJ_RETURN)))); } #ifdef DEBUG @@ -13806,7 +13804,7 @@ void Compiler::fgMorphStmts(BasicBlock* block) if (fgRemoveRestOfBlock) { - if ((block->getBBJumpKind() == BBJ_COND) || (block->getBBJumpKind() == BBJ_SWITCH)) + if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); @@ -13814,8 +13812,8 @@ void Compiler::fgMorphStmts(BasicBlock* block) noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); - if (((block->getBBJumpKind() == BBJ_COND) && (last->gtOper == GT_JTRUE)) || - ((block->getBBJumpKind() == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) + if ((block->KindIs(BBJ_COND) && (last->gtOper == GT_JTRUE)) || + (block->KindIs(BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; @@ -13923,7 +13921,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? - if ((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13979,7 +13977,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->getBBJumpKind() == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index 473fe3c1c0cad..3694e83c248f2 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -510,7 +510,7 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a // Initialize the object memory if necessary. 
bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); LclVarDsc* const lclDsc = comp->lvaGetDesc(lclNum); if (comp->fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 68191baedd2e5..09683dde47bc8 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -881,14 +881,14 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif - assert(m_b2->getBBJumpKind() == BBJ_RETURN); + assert(m_b2->KindIs(BBJ_RETURN)); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { - assert(m_b1->getBBJumpKind() == BBJ_COND); - assert(m_b2->getBBJumpKind() == BBJ_COND); + assert(m_b1->KindIs(BBJ_COND)); + assert(m_b2->KindIs(BBJ_COND)); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); @@ -1180,7 +1180,7 @@ void OptBoolsDsc::optOptimizeBoolsGcStress() return; } - assert(m_b1->getBBJumpKind() == BBJ_COND); + assert(m_b1->KindIs(BBJ_COND)); Statement* const stmt = m_b1->lastStmt(); GenTree* const cond = stmt->GetRootNode(); @@ -1469,7 +1469,7 @@ PhaseStatus Compiler::optOptimizeBools() // We're only interested in conditional jumps here - if (b1->getBBJumpKind() != BBJ_COND) + if (!b1->KindIs(BBJ_COND)) { continue; } @@ -1492,7 +1492,7 @@ PhaseStatus Compiler::optOptimizeBools() // The next block needs to be a condition or return block. - if (b2->getBBJumpKind() == BBJ_COND) + if (b2->KindIs(BBJ_COND)) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { @@ -1517,7 +1517,7 @@ PhaseStatus Compiler::optOptimizeBools() } #endif } - else if (b2->getBBJumpKind() == BBJ_RETURN) + else if (b2->KindIs(BBJ_RETURN)) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; @@ -1531,7 +1531,7 @@ PhaseStatus Compiler::optOptimizeBools() // b3 must be RETURN type - if (b3->getBBJumpKind() != BBJ_RETURN) + if (!b3->KindIs(BBJ_RETURN)) { continue; } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 59d50c6850197..f073ce0f11fc6 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->getBBJumpKind() == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if ((predBlock->KindIs(BBJ_NONE)) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,8 +1150,8 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. 
- if ((initBlock->getBBJumpKind() == BBJ_NONE) && (initBlock->bbNext == top) && - (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) + if ((initBlock->KindIs(BBJ_NONE)) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && + (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; phdrStmt = initBlock->firstStmt(); @@ -1305,7 +1305,7 @@ bool Compiler::optRecordLoop( // 5. Finding a constant initializer is optional; if the initializer is not found, or is not constant, // it is still considered a for-like loop. // - if (bottom->getBBJumpKind() == BBJ_COND) + if (bottom->KindIs(BBJ_COND)) { GenTree* init; GenTree* test; @@ -1801,7 +1801,7 @@ class LoopSearch // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { - if (head->getBBJumpKind() == BBJ_ALWAYS) + if (head->KindIs(BBJ_ALWAYS)) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->getBBJumpKind() == BBJ_COND) && (block->bbJumpDest == newNext)) + if ((block->KindIs(BBJ_COND)) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -2321,7 +2321,7 @@ class LoopSearch noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } - else if ((block->getBBJumpKind() == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) + else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) @@ -2416,7 +2416,7 @@ class LoopSearch // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered a loop exit block, as catch handlers don't have predecessor lists and don't // show up as might be expected in the dominator tree. - if (block->getBBJumpKind() == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { if (!BasicBlock::sameHndRegion(block, exitPoint)) { @@ -2818,7 +2818,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { - assert(from->getBBJumpKind() == to->getBBJumpKind()); // Precondition. + assert(from->KindIs(to->getBBJumpKind())); // Precondition. // copy the jump destination(s) from "from" to "to". switch (to->getBBJumpKind()) @@ -2936,7 +2936,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through // head block. See FindEntry() for the logic that allows this. - if ((h->getBBJumpKind() == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (h->KindIs(BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // Insert new head @@ -3030,7 +3030,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // not keeping pred lists in good shape. 
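One subtlety worth calling out from the optCopyBlkDest hunk above: KindIs takes BBjumpKinds values, not only literal kinds, so a kind read from another block at run time works too, and the precondition rewrite is a one-for-one equivalence:

    // Two spellings of the same precondition:
    assert(from->getBBJumpKind() == to->getBBJumpKind()); // before
    assert(from->KindIs(to->getBBJumpKind()));            // after

The loopcloning.cpp hunk earlier uses the same pattern (blk->KindIs(newblk->getBBJumpKind())).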
// BasicBlock* const t = optLoopTable[loopInd].lpTop; - assert(siblingB->getBBJumpKind() == BBJ_COND); + assert(siblingB->KindIs(BBJ_COND)); assert(siblingB->bbNext == t); JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); @@ -3207,8 +3207,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->getBBJumpKind() == BBJ_NONE) || (h->getBBJumpKind() == BBJ_COND)); - if (h->getBBJumpKind() == BBJ_COND) + assert((h->KindIs(BBJ_NONE)) || (h->KindIs(BBJ_COND))); + if (h->KindIs(BBJ_COND)) { BasicBlock* const hj = h->bbJumpDest; assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->getBBJumpKind() == BBJ_NONE) && (newT->bbNext == origE)) + (newT->KindIs(BBJ_NONE)) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -4280,7 +4280,7 @@ PhaseStatus Compiler::optUnrollLoops() goto DONE_LOOP; } - if (block->getBBJumpKind() == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { ++loopRetCount; } @@ -4524,7 +4524,7 @@ PhaseStatus Compiler::optUnrollLoops() // // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_NONE block). // - if (initBlock->getBBJumpKind() == BBJ_COND) + if (initBlock->KindIs(BBJ_COND)) { assert(dupCond); Statement* initBlockBranchStmt = initBlock->lastStmt(); @@ -4538,7 +4538,7 @@ PhaseStatus Compiler::optUnrollLoops() /* the loop must execute */ assert(!dupCond); assert(totalIter > 0); - noway_assert(initBlock->getBBJumpKind() == BBJ_NONE); + noway_assert(initBlock->KindIs(BBJ_NONE)); } // The loop will be removed, so no need to fix up the pre-header. @@ -4548,7 +4548,7 @@ PhaseStatus Compiler::optUnrollLoops() // For unrolled loops, all the unrolling preconditions require the pre-header block to fall // through into TOP. - assert(head->getBBJumpKind() == BBJ_NONE); + assert(head->KindIs(BBJ_NONE)); } // If we actually unrolled, tail is now reached @@ -4840,7 +4840,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Does the BB end with an unconditional jump? - if (block->getBBJumpKind() != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (!block->KindIs(BBJ_ALWAYS) || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; @@ -4850,7 +4850,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) BasicBlock* const bTest = block->bbJumpDest; // Does the bTest consist of 'jtrue(cond) block' ? - if (bTest->getBBJumpKind() != BBJ_COND) + if (!bTest->KindIs(BBJ_COND)) { return false; } @@ -5434,7 +5434,7 @@ void Compiler::optMarkLoopHeads() { if (blockNum <= predBlock->bbNum) { - if (predBlock->getBBJumpKind() == BBJ_CALLFINALLY) + if (predBlock->KindIs(BBJ_CALLFINALLY)) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". continue; @@ -5539,7 +5539,7 @@ void Compiler::optFindAndScaleGeneralLoopBlocks() } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. - if ((bottom->getBBJumpKind() != BBJ_COND) && (bottom->getBBJumpKind() != BBJ_ALWAYS)) + if (!bottom->KindIs(BBJ_COND, BBJ_ALWAYS)) { continue; } @@ -8198,7 +8198,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). 
preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->getBBJumpKind() == BBJ_COND)) + if (fgIsUsingProfileWeights() && (head->KindIs(BBJ_COND))) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { @@ -9181,7 +9181,7 @@ void Compiler::optRemoveRedundantZeroInits() if (tree->Data()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->getBBJumpKind() == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); if (!bbInALoop || bbIsReturn) { diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index cdf76a4e5a6b6..4cf9739d6c73d 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -44,7 +44,7 @@ PhaseStatus Compiler::optRedundantBranches() // We currently can optimize some BBJ_CONDs. // - if (block->getBBJumpKind() == BBJ_COND) + if (block->KindIs(BBJ_COND)) { bool madeChangesThisBlock = m_compiler->optRedundantRelop(block); @@ -57,7 +57,7 @@ PhaseStatus Compiler::optRedundantBranches() // a BBJ_COND, retry; perhaps one of the later optimizations // we can do has enabled one of the earlier optimizations. // - if (madeChangesThisBlock && (block->getBBJumpKind() == BBJ_COND)) + if (madeChangesThisBlock && block->KindIs(BBJ_COND)) { JITDUMP("Will retry RBO in " FMT_BB " after partial optimization\n", block->bbNum); madeChangesThisBlock |= m_compiler->optRedundantBranch(block); @@ -508,7 +508,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // Check the current dominator // - if (domBlock->getBBJumpKind() == BBJ_COND) + if (domBlock->KindIs(BBJ_COND)) { Statement* const domJumpStmt = domBlock->lastStmt(); GenTree* const domJumpTree = domJumpStmt->GetRootNode(); @@ -971,8 +971,8 @@ bool Compiler::optJumpThreadCheck(BasicBlock* const block, BasicBlock* const dom // bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop) { - assert(block->getBBJumpKind() == BBJ_COND); - assert(domBlock->getBBJumpKind() == BBJ_COND); + assert(block->KindIs(BBJ_COND)); + assert(domBlock->KindIs(BBJ_COND)); // If the dominating block is not the immediate dominator // we might need to duplicate a lot of code to thread @@ -990,7 +990,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl BasicBlock* idomBlock = block->bbIDom; while ((idomBlock != nullptr) && (idomBlock != domBlock)) { - if (idomBlock->getBBJumpKind() == BBJ_COND) + if (idomBlock->KindIs(BBJ_COND)) { JITDUMP(" -- " FMT_BB " not closest branching dom, so no threading\n", idomBlock->bbNum); return false; @@ -1082,7 +1082,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Treat switch preds as ambiguous for now. 
// - if (predBlock->getBBJumpKind() == BBJ_SWITCH) + if (predBlock->KindIs(BBJ_SWITCH)) { JITDUMP(FMT_BB " is a switch pred\n", predBlock->bbNum); BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); @@ -1450,9 +1450,8 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->getBBJumpKind() == BBJ_NONE) && - ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if ((jti.m_fallThroughPred->KindIs(BBJ_NONE)) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1624,8 +1623,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->getBBJumpKind() == BBJ_COND) && - (jti.m_block->GetUniquePred(this) == ambBlock)) + if ((ambBlock != nullptr) && (jti.m_block->KindIs(BBJ_COND)) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); From a8c8a6c30722b5903b6c156870936de23c78b572 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 19:32:53 -0400 Subject: [PATCH 3/5] Style --- src/coreclr/jit/fgehopt.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 337 ++++++++++++------------ src/coreclr/jit/fgprofilesynthesis.cpp | 10 +- src/coreclr/jit/flowgraph.cpp | 4 +- src/coreclr/jit/importer.cpp | 4 +- src/coreclr/jit/lower.cpp | 3 +- src/coreclr/jit/morph.cpp | 10 +- src/coreclr/jit/optimizer.cpp | 12 +- src/coreclr/jit/redundantbranchopts.cpp | 6 +- 9 files changed, 193 insertions(+), 195 deletions(-) diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 782a92c92b645..893dae0893c47 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -821,7 +821,7 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->KindIs(BBJ_NONE)) && (block == lastTryBlock)) + if (block->KindIs(BBJ_NONE) && (block == lastTryBlock)) { jumpDest = block->bbNext; } diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index bcf25c9d01ef1..d4b36c1b723c6 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -5211,7 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->KindIs(BBJ_RETURN)) && (bPrev->KindIs(BBJ_ALWAYS))) + if ((bDest == block->bbNext) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. @@ -6165,220 +6165,219 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. 
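The two cases, sketched as mini-CFGs (illustrative; the labels match the comment above):

    // case (a): jump around an empty block
    //
    //   block:  BBJ_COND ---------------> bDest
    //   bNext:  BBJ_ALWAYS --> bNextJumpDest  (empty, bbRefs == 1)
    //   bDest:  == bNext->bbNext, laid out right after bNext
    //
    //   Reversing block's condition lets block branch straight to
    //   bNextJumpDest, fall through into bDest, and unlink bNext.
    //
    // case (b): jump to a join-free target
    //
    //   Same idea, but bDest lies further ahead and is join free
    //   (bDest->bbRefs == 1) while bNextJumpDest has a join
    //   (bNextJumpDest->bbRefs > 1); bDest is first moved up after
    //   bNext, recreating shape (a), before the same reversal runs.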
// - if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - (bNext->KindIs(BBJ_ALWAYS)) && // The next block is a BBJ_ALWAYS block - bNext->isEmpty() && // and it is an empty block - (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections - { - // case (a) - // - const bool isJumpAroundEmpty = (bNext->bbNext == bDest); + if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + bNext->isEmpty() && // and it is an empty block + (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections + { + // case (a) + // + const bool isJumpAroundEmpty = (bNext->bbNext == bDest); - // case (b) - // - // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we - // differentiate the roles played by bDest and bNextJumpDest. We need some - // sense of which arrangement is preferable to avoid getting stuck in a loop - // reversing and re-reversing. - // - // Other tiebreaking criteria could be considered. - // - // Pragmatic constraints: - // - // * don't consider lexical predecessors, or we may confuse loop recognition - // * don't consider blocks of different rarities - // - BasicBlock* const bNextJumpDest = bNext->bbJumpDest; - const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && - (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && - (block->isRunRarely() == bDest->isRunRarely()); + // case (b) + // + // Note the asymmetric checks for refs == 1 and refs > 1 ensures that we + // differentiate the roles played by bDest and bNextJumpDest. We need some + // sense of which arrangement is preferable to avoid getting stuck in a loop + // reversing and re-reversing. + // + // Other tiebreaking criteria could be considered. + // + // Pragmatic constraints: + // + // * don't consider lexical predecessors, or we may confuse loop recognition + // * don't consider blocks of different rarities + // + BasicBlock* const bNextJumpDest = bNext->bbJumpDest; + const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && + (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && + (block->isRunRarely() == bDest->isRunRarely()); - bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; + bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; - // We do not optimize jumps between two different try regions. 
+ // However jumping to a block that is not in any try region is OK + // + if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) + { + optimizeJump = false; + } - // Also consider bNext's try region - // - if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) + // Also consider bNext's try region + // + if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) + { + optimizeJump = false; + } + + // If we are optimizing using real profile weights + // then don't optimize a conditional jump to an unconditional jump + // until after we have computed the edge weights + // + if (fgIsUsingProfileWeights()) + { + // if block and bdest are in different hot/cold regions we can't do this optimization + // because we can't allow fall-through into the cold region. + if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) { optimizeJump = false; } + } - // If we are optimizing using real profile weights - // then don't optimize a conditional jump to an unconditional jump - // until after we have computed the edge weights + if (optimizeJump && isJumpToJoinFree) + { + // In the join free case, we also need to move bDest right after bNext + // to create same flow as in the isJumpAroundEmpty case. // - if (fgIsUsingProfileWeights()) + if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) { - // if block and bdest are in different hot/cold regions we can't do this optimization - // because we can't allow fall-through into the cold region. - if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) - { - optimizeJump = false; - } + optimizeJump = false; } - - if (optimizeJump && isJumpToJoinFree) + else { - // In the join free case, we also need to move bDest right after bNext - // to create same flow as in the isJumpAroundEmpty case. + // We don't expect bDest to already be right after bNext. // - if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) - { - optimizeJump = false; - } - else - { - // We don't expect bDest to already be right after bNext. - // - assert(bDest != bNext->bbNext); + assert(bDest != bNext->bbNext); - JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, - bNext->bbNum); + JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, + bNext->bbNum); - // If bDest can fall through we'll need to create a jump - // block after it too. Remember where to jump to. - // - BasicBlock* const bDestNext = bDest->bbNext; + // If bDest can fall through we'll need to create a jump + // block after it too. Remember where to jump to. + // + BasicBlock* const bDestNext = bDest->bbNext; - // Move bDest - // - if (ehIsBlockEHLast(bDest)) - { - ehUpdateLastBlocks(bDest, bDest->bbPrev); - } + // Move bDest + // + if (ehIsBlockEHLast(bDest)) + { + ehUpdateLastBlocks(bDest, bDest->bbPrev); + } - fgUnlinkBlock(bDest); - fgInsertBBafter(bNext, bDest); + fgUnlinkBlock(bDest); + fgInsertBBafter(bNext, bDest); - if (ehIsBlockEHLast(bNext)) - { - ehUpdateLastBlocks(bNext, bDest); - } + if (ehIsBlockEHLast(bNext)) + { + ehUpdateLastBlocks(bNext, bDest); + } - // Add fall through fixup block, if needed. - // - if (bDest->KindIs(BBJ_NONE, BBJ_COND)) - { - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); - bFixup->inheritWeight(bDestNext); - bFixup->bbJumpDest = bDestNext; + // Add fall through fixup block, if needed. 
+ // + if (bDest->KindIs(BBJ_NONE, BBJ_COND)) + { + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); + bFixup->inheritWeight(bDestNext); + bFixup->bbJumpDest = bDestNext; - fgRemoveRefPred(bDestNext, bDest); - fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestNext, bFixup); - } + fgRemoveRefPred(bDestNext, bDest); + fgAddRefPred(bFixup, bDest); + fgAddRefPred(bDestNext, bFixup); } } + } - if (optimizeJump) - { - JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB - ", " FMT_BB " -> " FMT_BB ")\n", - block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); + if (optimizeJump) + { + JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB + ", " FMT_BB " -> " FMT_BB ")\n", + block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); - // Reverse the jump condition - // - GenTree* test = block->lastNode(); - noway_assert(test->OperIsConditionalJump()); + // Reverse the jump condition + // + GenTree* test = block->lastNode(); + noway_assert(test->OperIsConditionalJump()); - if (test->OperGet() == GT_JTRUE) - { - GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); - assert(cond == - test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. - test->AsOp()->gtOp1 = cond; - } - else - { - gtReverseCond(test); - } + if (test->OperGet() == GT_JTRUE) + { + GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); + assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. + test->AsOp()->gtOp1 = cond; + } + else + { + gtReverseCond(test); + } - // Optimize the Conditional JUMP to go to the new target - block->bbJumpDest = bNext->bbJumpDest; + // Optimize the Conditional JUMP to go to the new target + block->bbJumpDest = bNext->bbJumpDest; - fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); + fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); - /* - Unlink bNext from the BasicBlock list; note that we can - do this even though other blocks could jump to it - the - reason is that elsewhere in this function we always - redirect jumps to jumps to jump to the final label, - so even if another block jumps to bNext it won't matter - once we're done since any such jump will be redirected - to the final target by the time we're done here. - */ + /* + Unlink bNext from the BasicBlock list; note that we can + do this even though other blocks could jump to it - the + reason is that elsewhere in this function we always + redirect jumps to jumps to jump to the final label, + so even if another block jumps to bNext it won't matter + once we're done since any such jump will be redirected + to the final target by the time we're done here. + */ - fgRemoveRefPred(bNext, block); - fgUnlinkBlock(bNext); + fgRemoveRefPred(bNext, block); + fgUnlinkBlock(bNext); - /* Mark the block as removed */ - bNext->bbFlags |= BBF_REMOVED; + /* Mark the block as removed */ + bNext->bbFlags |= BBF_REMOVED; - // Update the loop table if we removed the bottom of a loop, for example. - fgUpdateLoopsAfterCompacting(block, bNext); + // Update the loop table if we removed the bottom of a loop, for example. 
+ fgUpdateLoopsAfterCompacting(block, bNext); - // If this block was aligned, unmark it - bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); + // If this block was aligned, unmark it + bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); - // If this is the first Cold basic block update fgFirstColdBlock - if (bNext == fgFirstColdBlock) - { - fgFirstColdBlock = bNext->bbNext; - } + // If this is the first Cold basic block update fgFirstColdBlock + if (bNext == fgFirstColdBlock) + { + fgFirstColdBlock = bNext->bbNext; + } - // - // If we removed the end of a try region or handler region - // we will need to update ebdTryLast or ebdHndLast. - // + // + // If we removed the end of a try region or handler region + // we will need to update ebdTryLast or ebdHndLast. + // - for (EHblkDsc* const HBtab : EHClauses(this)) + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) { - if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) - { - fgSkipRmvdBlocks(HBtab); - } + fgSkipRmvdBlocks(HBtab); } + } - // we optimized this JUMP - goto REPEAT to catch similar cases - change = true; - modified = true; + // we optimized this JUMP - goto REPEAT to catch similar cases + change = true; + modified = true; #ifdef DEBUG - if (verbose) - { - printf("\nAfter reversing the jump:\n"); - fgDispBasicBlocks(verboseTrees); - } + if (verbose) + { + printf("\nAfter reversing the jump:\n"); + fgDispBasicBlocks(verboseTrees); + } #endif // DEBUG - /* - For a rare special case we cannot jump to REPEAT - as jumping to REPEAT will cause us to delete 'block' - because it currently appears to be unreachable. As - it is a self loop that only has a single bbRef (itself) - However since the unlinked bNext has additional bbRefs - (that we will later connect to 'block'), it is not really - unreachable. - */ - if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) - { - continue; - } - - goto REPEAT; + /* + For a rare special case we cannot jump to REPEAT + as jumping to REPEAT will cause us to delete 'block' + because it currently appears to be unreachable. As + it is a self loop that only has a single bbRef (itself) + However since the unlinked bNext has additional bbRefs + (that we will later connect to 'block'), it is not really + unreachable. 
+ */ + if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) + { + continue; } + + goto REPEAT; } + } } // diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index f256ca73846c8..4d6d549e03d26 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->KindIs(BBJ_THROW)); - bool const isNextThrow = (next->KindIs(BBJ_THROW)); + bool const isJumpThrow = jump->KindIs(BBJ_THROW); + bool const isNextThrow = next->KindIs(BBJ_THROW); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->KindIs(BBJ_RETURN)); - bool const isNextReturn = (next->KindIs(BBJ_RETURN)); + bool const isJumpReturn = jump->KindIs(BBJ_RETURN); + bool const isNextReturn = next->KindIs(BBJ_RETURN); if (isJumpReturn != isNextReturn) { @@ -1214,7 +1214,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. // - if ((exitBlock->KindIs(BBJ_COND)) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if (exitBlock->KindIs(BBJ_COND) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 2ef7dbc9d38a3..fb4399cf8618c 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. 
// - if (firstColdBlock->isEmpty() && (firstColdBlock->KindIs(BBJ_ALWAYS))) + if (firstColdBlock->isEmpty() && firstColdBlock->KindIs(BBJ_ALWAYS)) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index f0e46f2100a25..db5d2e3ecd4b7 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -8532,7 +8532,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = (block->KindIs(BBJ_RETURN)) && + bool bbIsReturn = block->KindIs(BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->KindIs(BBJ_RETURN))); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) @@ -12121,7 +12121,7 @@ void Compiler::impImport() { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->KindIs(BBJ_ALWAYS))) + else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 26f4ed946955d..66bdb7a64f52b 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -5296,8 +5296,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->KindIs(BBJ_RETURN))) || - returnBB->endsWithTailCallOrJmp(comp)); + assert(((returnBB == comp->genReturnBB) && returnBB->KindIs(BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 88175479c16b5..4a2606729253f 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -13725,10 +13725,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->KindIs(BBJ_RETURN)) && + noway_assert((call->IsFastTailCall() && compCurBB->KindIs(BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->KindIs(BBJ_THROW))) || - (!call->IsTailCall() && (compCurBB->KindIs(BBJ_RETURN)))); + (call->IsTailCallViaJitHelper() && compCurBB->KindIs(BBJ_THROW)) || + (!call->IsTailCall() && compCurBB->KindIs(BBJ_RETURN))); } #ifdef DEBUG @@ -13921,7 +13921,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? - if ((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13977,7 +13977,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->KindIs(BBJ_RETURN)) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert(block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. 
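The parenthesis cleanups above lean on BasicBlock::KindIs, which takes one or
more jump kinds and replaces chains of explicit comparisons. A minimal sketch
of such a helper, assuming a variadic member on BasicBlock (the real
definition lives in block.h):

    bool KindIs(BBjumpKinds kind) const
    {
        return bbJumpKind == kind;
    }

    template <typename... TRest>
    bool KindIs(BBjumpKinds kind, TRest... rest) const
    {
        // True if the block's kind matches any of the listed kinds.
        return KindIs(kind) || KindIs(rest...);
    }

With a helper of this shape, (h->KindIs(BBJ_NONE)) || (h->KindIs(BBJ_COND))
collapses to h->KindIs(BBJ_NONE, BBJ_COND), which is exactly the rewrite the
optimizer.cpp hunk below performs.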
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index f073ce0f11fc6..3d3baeef6a201 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->KindIs(BBJ_NONE)) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if (predBlock->KindIs(BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,7 +1150,7 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. - if ((initBlock->KindIs(BBJ_NONE)) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && + if (initBlock->KindIs(BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->KindIs(BBJ_COND)) && (block->bbJumpDest == newNext)) + if (block->KindIs(BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -3207,7 +3207,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->KindIs(BBJ_NONE)) || (h->KindIs(BBJ_COND))); + assert(h->KindIs(BBJ_NONE, BBJ_COND)); if (h->KindIs(BBJ_COND)) { BasicBlock* const hj = h->bbJumpDest; @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->KindIs(BBJ_NONE)) && (newT->bbNext == origE)) + newT->KindIs(BBJ_NONE) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -8198,7 +8198,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). 
preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->KindIs(BBJ_COND))) + if (fgIsUsingProfileWeights() && head->KindIs(BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 4cf9739d6c73d..cbc59c30d73a3 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1450,8 +1450,8 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->KindIs(BBJ_NONE)) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if (jti.m_fallThroughPred->KindIs(BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1623,7 +1623,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->KindIs(BBJ_COND)) && (jti.m_block->GetUniquePred(this) == ambBlock)) + if ((ambBlock != nullptr) && jti.m_block->KindIs(BBJ_COND) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); From 7aadbdcefdc14ad57f688d87725d05db166b3c96 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Mon, 2 Oct 2023 19:35:07 -0400 Subject: [PATCH 4/5] Convert case --- src/coreclr/jit/block.cpp | 2 +- src/coreclr/jit/block.h | 6 ++-- src/coreclr/jit/codegencommon.cpp | 2 +- src/coreclr/jit/codegenlinear.cpp | 4 +-- src/coreclr/jit/compiler.hpp | 2 +- src/coreclr/jit/fgbasic.cpp | 32 +++++++++--------- src/coreclr/jit/fgdiagnostic.cpp | 8 ++--- src/coreclr/jit/fgehopt.cpp | 14 ++++---- src/coreclr/jit/fgflow.cpp | 2 +- src/coreclr/jit/fginline.cpp | 8 ++--- src/coreclr/jit/fgopt.cpp | 36 ++++++++++----------- src/coreclr/jit/fgprofile.cpp | 12 +++---- src/coreclr/jit/fgprofilesynthesis.cpp | 6 ++-- src/coreclr/jit/flowgraph.cpp | 22 ++++++------- src/coreclr/jit/ifconversion.cpp | 2 +- src/coreclr/jit/importer.cpp | 32 +++++++++--------- src/coreclr/jit/indirectcalltransformer.cpp | 6 ++-- src/coreclr/jit/jiteh.cpp | 2 +- src/coreclr/jit/lir.cpp | 2 +- src/coreclr/jit/liveness.cpp | 4 +-- src/coreclr/jit/loopcloning.cpp | 12 +++---- src/coreclr/jit/lower.cpp | 18 +++++------ src/coreclr/jit/morph.cpp | 18 +++++------ src/coreclr/jit/optimizebools.cpp | 4 +-- src/coreclr/jit/optimizer.cpp | 22 ++++++------- src/coreclr/jit/patchpoint.cpp | 4 +-- src/coreclr/jit/redundantbranchopts.cpp | 6 ++-- src/coreclr/jit/switchrecognition.cpp | 2 +- 28 files changed, 145 insertions(+), 145 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index a5798928b5959..c2aa5ff45e3f4 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1419,7 +1419,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) /* Record the jump kind in the block */ - block->setBBJumpKind(jumpKind DEBUG_ARG(this)); + block->SetBBJumpKind(jumpKind DEBUG_ARG(this)); if (jumpKind == BBJ_THROW) { diff --git a/src/coreclr/jit/block.h 
b/src/coreclr/jit/block.h index 9a390d35eb46e..88312967936f2 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -706,12 +706,12 @@ struct BasicBlock : private LIR::Range BBjumpKinds bbJumpKind; // jump (if any) at the end of this block public: - BBjumpKinds getBBJumpKind() const + BBjumpKinds GetBBJumpKind() const { return bbJumpKind; } - void setBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) { #ifdef DEBUG // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout @@ -1574,7 +1574,7 @@ inline BBArrayIterator BBSwitchTargetList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 6a1e1cecbc0e7..190b0f418515b 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -376,7 +376,7 @@ void CodeGen::genMarkLabelsForCodegen() for (BasicBlock* const block : compiler->Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. case BBJ_COND: diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index f9d5d1c7cfc04..c1b93541c14c8 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -619,7 +619,7 @@ void CodeGen::genCodeForBBlist() { // We only need the NOP if we're not going to generate any more code as part of the block end. - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_THROW: @@ -662,7 +662,7 @@ void CodeGen::genCodeForBBlist() /* Do we need to generate a jump or return? */ - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_RETURN: genExitCode(block); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 8ac6d7bdf47b7..43d8e927c65f7 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -3224,7 +3224,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // Any block with a throw is rare block->bbSetRunRarely(); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 254372e770c3e..9853f3f47b26e 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -537,7 +537,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas assert(block != nullptr); assert(fgPredsComputed); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_COND: @@ -2771,7 +2771,7 @@ void Compiler::fgLinkBasicBlocks() for (BasicBlock* const curBBdesc : Blocks()) { - switch (curBBdesc->getBBJumpKind()) + switch (curBBdesc->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -3808,7 +3808,7 @@ void Compiler::fgFindBasicBlocks() // BBJ_EHFINALLYRET that were imported to BBJ_EHFAULTRET. 
if ((hndBegBB->bbCatchTyp == BBCT_FAULT) && block->KindIs(BBJ_EHFINALLYRET)) { - block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } @@ -4017,7 +4017,7 @@ void Compiler::fgFixEntryFlowForOSR() fgEnsureFirstBBisScratch(); assert(fgFirstBB->KindIs(BBJ_NONE)); fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); - fgFirstBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); edge->setLikelihood(1.0); @@ -4057,7 +4057,7 @@ void Compiler::fgCheckBasicBlockControlFlow() continue; } - switch (blk->getBBJumpKind()) + switch (blk->GetBBJumpKind()) { case BBJ_NONE: // block flows into the next one (no jump) @@ -4560,7 +4560,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) - BasicBlock* newBlock = bbNewBasicBlock(curr->getBBJumpKind()); + BasicBlock* newBlock = bbNewBasicBlock(curr->GetBBJumpKind()); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; @@ -4628,7 +4628,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. - curr->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + curr->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); fgAddRefPred(newBlock, curr); return newBlock; @@ -5071,7 +5071,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. 
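// Each converted call site threads a Compiler* through DEBUG_ARG so the
// setter's debug-only assert has context. A sketch of that macro pattern,
// assuming the conventional definition (the real one lives in the JIT's
// shared headers):

#ifdef DEBUG
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

// With this, bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)) compiles to
// SetBBJumpKind(BBJ_NONE, this) in DEBUG builds and to SetBBJumpKind(BBJ_NONE)
// in release builds, where the Compiler* parameter disappears from the
// signature entirely.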
- bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // If this is the first Cold basic block update fgFirstColdBlock @@ -5129,7 +5129,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) #ifdef DEBUG /* Some extra checks for the empty case */ - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_NONE: break; @@ -5246,7 +5246,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } /* change all jumps to the removed block */ - switch (predBlock->getBBJumpKind()) + switch (predBlock->GetBBJumpKind()) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); @@ -5260,7 +5260,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (block->KindIs(BBJ_ALWAYS)) { /* bPrev now becomes a BBJ_ALWAYS */ - bPrev->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + bPrev->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bPrev->bbJumpDest = succBlock; } break; @@ -5313,7 +5313,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (bPrev != nullptr) { - switch (bPrev->getBBJumpKind()) + switch (bPrev->GetBBJumpKind()) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS @@ -5333,7 +5333,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type - bPrev->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } break; @@ -5378,11 +5378,11 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { - switch (bSrc->getBBJumpKind()) + switch (bSrc->GetBBJumpKind()) { case BBJ_NONE: - bSrc->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + bSrc->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bSrc->bbJumpDest = bDst; JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); @@ -5462,7 +5462,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { - bSrc->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index b8b868214ae9c..df319152a2dd0 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -101,7 +101,7 @@ void Compiler::fgDebugCheckUpdate() if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_EHFINALLYRET: @@ -1035,7 +1035,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n bbNum); fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal); - fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->getBBJumpKind()]); + fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->GetBBJumpKind()]); if (block->hasTryIndex()) { fprintf(fgxFile, "\n inTry=\"%s\"", "true"); @@ -2004,7 +2004,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * } else { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: printf("-> 
" FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, @@ -2659,7 +2659,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { - switch (blockPred->getBBJumpKind()) + switch (blockPred->GetBBJumpKind()) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 893dae0893c47..e5fbe43e1590f 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -163,7 +163,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() noway_assert(leaveBlock->KindIs(BBJ_ALWAYS)); currentBlock->bbJumpDest = postTryFinallyBlock; - currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(postTryFinallyBlock, currentBlock); @@ -463,7 +463,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Time to optimize. // // (1) Convert the callfinally to a normal jump to the handler - callFinally->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation BasicBlock* const leave = callFinally->bbNext; @@ -542,7 +542,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = continuation; fgAddRefPred(continuation, block); fgRemoveRefPred(leave, block); @@ -1049,7 +1049,7 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->getBBJumpKind(); + BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->GetBBJumpKind(); if (block == firstBlock) { @@ -1180,7 +1180,7 @@ PhaseStatus Compiler::fgCloneFinally() // This call returns to the expected spot, so // retarget it to branch to the clone. currentBlock->bbJumpDest = firstCloneBlock; - currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. 
fgAddRefPred(firstCloneBlock, currentBlock); @@ -1242,7 +1242,7 @@ PhaseStatus Compiler::fgCloneFinally() { if (block->KindIs(BBJ_EHFINALLYRET)) { - block->setBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } } @@ -2194,7 +2194,7 @@ PhaseStatus Compiler::fgTailMergeThrows() BasicBlock* const predBlock = predEdge->getSourceBlock(); nextPredEdge = predEdge->getNextPredEdge(); - switch (predBlock->getBBJumpKind()) + switch (predBlock->GetBBJumpKind()) { case BBJ_NONE: { diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index fd6ef7a356776..d2669ccaca382 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -343,7 +343,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) BasicBlock* bNext; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: if (!(block->bbFlags & BBF_RETLESS_CALL)) diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 51f77ccc3a5f9..a844199697a60 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -675,12 +675,12 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorIsIntegralConst(0)) { - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler)); m_compiler->fgRemoveRefPred(block->bbNext, block); } else { - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler)); m_compiler->fgRemoveRefPred(block->bbJumpDest, block); } } @@ -1530,13 +1530,13 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } fgAddRefPred(bottomBlock, block); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index d4b36c1b723c6..18637ac7b49ca 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -466,7 +466,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1650,7 +1650,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. 
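// Every kind conversion in these hunks pairs the SetBBJumpKind call with pred
// edge bookkeeping, so bbRefs and the pred lists stay consistent. The
// recurring retarget pattern, as a sketch (the helper name is hypothetical):

void Compiler::fgRetargetAlwaysSketch(BasicBlock* block, BasicBlock* newDest)
{
    assert(block->KindIs(BBJ_ALWAYS));
    fgRemoveRefPred(block->bbJumpDest, block); // drop the edge to the old target
    block->bbJumpDest = newDest;               // point the branch at the new target
    fgAddRefPred(newDest, block);              // record the new edge
}

// When the edge itself must survive the retarget, the removed edge can be
// handed straight to fgAddRefPred, as the jump-reversal hunk in fgopt.cpp
// earlier does with fgAddRefPred(dest, block, fgRemoveRefPred(dest, bNext)).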
if (bbIsHandlerBeg(newTryEntry->bbNext)) { - newTryEntry->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { @@ -1787,7 +1787,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + fromBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); @@ -2268,7 +2268,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* set the right links */ - block->setBBJumpKind(bNext->getBBJumpKind() DEBUG_ARG(this)); + block->SetBBJumpKind(bNext->GetBBJumpKind() DEBUG_ARG(this)); VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). @@ -2328,7 +2328,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Set the jump targets */ - switch (bNext->getBBJumpKind()) + switch (bNext->GetBBJumpKind()) { case BBJ_CALLFINALLY: // Propagate RETLESS property @@ -2634,7 +2634,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); --block->bbNext->bbRefs; flow->decrementDupCount(); @@ -2886,7 +2886,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool madeChanges = false; BasicBlock* bPrev = block->bbPrev; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_SWITCH: @@ -3312,7 +3312,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) @@ -3377,7 +3377,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -3788,7 +3788,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); @@ -3841,7 +3841,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); #ifdef DEBUG if (verbose) { @@ -3967,7 +3967,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi /* Conditional is gone - simply fall into the next block */ - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ @@ -4232,7 +4232,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* 
bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; - bJump->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ @@ -4393,7 +4393,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. // - block->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = dominantTarget; FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; @@ -4610,7 +4610,7 @@ bool Compiler::fgExpandRarelyRunBlocks() const char* reason = nullptr; - switch (bPrev->getBBJumpKind()) + switch (bPrev->GetBBJumpKind()) { case BBJ_ALWAYS: @@ -6454,7 +6454,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->countOfInEdges() == 1) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -6551,7 +6551,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_NONE: costSz = 0; @@ -6976,7 +6976,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + predBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); predBlock->bbJumpDest = crossJumpTarget; fgRemoveRefPred(block, predBlock); diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 26c9afc1fbcf0..6444e45085db7 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -501,7 +501,7 @@ void BlockCountInstrumentor::RelocateProbes() // if (pred->KindIs(BBJ_NONE)) { - pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } assert(pred->KindIs(BBJ_ALWAYS)); @@ -945,7 +945,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) visitor->VisitBlock(block); nBlocks++; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: { @@ -1554,7 +1554,7 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // if (block->KindIs(BBJ_NONE)) { - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; } @@ -1697,7 +1697,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // if (pred->KindIs(BBJ_NONE)) { - pred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } assert(pred->KindIs(BBJ_ALWAYS)); @@ -3922,7 +3922,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, BlockInfo* info) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_SWITCH: MarkInterestingSwitches(block, info); @@ -4687,7 +4687,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - switch (bSrc->getBBJumpKind()) + switch (bSrc->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_EHCATCHRET: diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 4d6d549e03d26..90d56a835ff10 100644 --- 
a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -132,7 +132,7 @@ void ProfileSynthesis::AssignLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -499,7 +499,7 @@ void ProfileSynthesis::RepairLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -591,7 +591,7 @@ void ProfileSynthesis::BlendLikelihoods() { weight_t sum = SumOutgoingLikelihoods(block, &likelihoods); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index fb4399cf8618c..78dc4571352aa 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -120,7 +120,7 @@ PhaseStatus Compiler::fgInsertGCPolls() JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); pollType = GCPOLL_CALL; } - else if (BBJ_SWITCH == block->getBBJumpKind()) + else if (BBJ_SWITCH == block->GetBBJumpKind()) { // We don't want to deal with all the outgoing edges of a switch block. // @@ -261,8 +261,8 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); - bottom = fgNewBBafter(top->getBBJumpKind(), poll, true); - BBjumpKinds oldJumpKind = top->getBBJumpKind(); + bottom = fgNewBBafter(top->GetBBJumpKind(), poll, true); + BBjumpKinds oldJumpKind = top->GetBBJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; // Update block flags @@ -372,7 +372,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) #endif top->bbJumpDest = bottom; - top->setBBJumpKind(BBJ_COND DEBUG_ARG(this)); + top->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. fgAddRefPred(bottom, poll); @@ -1287,7 +1287,7 @@ void Compiler::fgLoopCallMark() for (BasicBlock* const block : Blocks()) { - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -1837,7 +1837,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); @@ -2309,7 +2309,7 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + returnBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); returnBlock->bbJumpDest = constReturnBlock; comp->fgAddRefPred(constReturnBlock, returnBlock); @@ -3125,7 +3125,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // It's a jump from outside the handler; add it to the newHead preds list and remove // it from the block preds list. 
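// The fgCreateGCPoll hunk above splits the polling block into top/poll/bottom
// and rewires it as a diamond; a sketch of the resulting flow, reconstructed
// from the hunk:
//
//   top    (BBJ_COND, jumps to bottom when no poll is required)
//     |  falls through when a poll is required
//   poll   (BBJ_NONE, contains the poll call, falls through)
//     v
//   bottom (inherits top's original jump kind and successors)
//
// which matches the hunk's note that bottom has top and poll as predecessors
// while poll has only top.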
- switch (predBlock->getBBJumpKind()) + switch (predBlock->GetBBJumpKind()) { case BBJ_CALLFINALLY: noway_assert(predBlock->bbJumpDest == block); @@ -3503,7 +3503,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // if (prevToFirstColdBlock->bbFallsThrough()) { - switch (prevToFirstColdBlock->getBBJumpKind()) + switch (prevToFirstColdBlock->GetBBJumpKind()) { default: noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()"); @@ -3548,7 +3548,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // convert it to BBJ_ALWAYS to force an explicit jump. prevToFirstColdBlock->bbJumpDest = firstColdBlock; - prevToFirstColdBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + prevToFirstColdBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); break; } } @@ -3981,7 +3981,7 @@ PhaseStatus Compiler::fgSetBlockOrder() (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) bool partiallyInterruptible = true; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index 7b50f5428458b..6fd420c62a3d1 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -743,7 +743,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock); - m_startBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index db5d2e3ecd4b7..b2abb048ad461 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2455,7 +2455,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; @@ -4322,7 +4322,7 @@ void Compiler::impImportLeave(BasicBlock* block) { assert(step == DUMMY_INIT(NULL)); callBlock = block; - callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) { @@ -4419,7 +4419,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) { @@ -4573,7 +4573,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { step = block; - step->setBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET + step->SetBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG @@ -4651,7 +4651,7 @@ void Compiler::impImportLeave(BasicBlock* block) // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
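// A sketch of the two impImportLeave shapes being converted above:
//
//   with FEATURE_EH_CALLFINALLY_THUNKS:
//     block     BBJ_LEAVE -> BBJ_ALWAYS, retargeted at callBlock
//     callBlock BBJ_CALLFINALLY, created in the enclosing EH region, invokes
//               the finally
//
//   without thunks (the #else arm below):
//     block     BBJ_LEAVE -> BBJ_CALLFINALLY in place; callBlock is block
//               itself and no separate block is created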
- block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgRemoveRefPred(block->bbJumpDest, block); block->bbJumpDest = callBlock; fgAddRefPred(callBlock, block); @@ -4673,7 +4673,7 @@ void Compiler::impImportLeave(BasicBlock* block) #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; - callBlock->setBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) @@ -4908,7 +4908,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) @@ -4994,7 +4994,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // will be treated as pair and handled correctly. if (block->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* dupBlock = bbNewBasicBlock(block->getBBJumpKind()); + BasicBlock* dupBlock = bbNewBasicBlock(block->GetBBJumpKind()); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; fgAddRefPred(dupBlock->bbJumpDest, dupBlock); @@ -5024,7 +5024,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif // FEATURE_EH_FUNCLETS - block->setBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); fgInitBBLookup(); fgRemoveRefPred(block->bbJumpDest, block); @@ -6002,7 +6002,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. @@ -7307,7 +7307,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { @@ -7380,7 +7380,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) block->bbJumpDest->bbNum); fgRemoveRefPred(block->bbNext, block); } - block->setBBJumpKind(foldedJumpKind DEBUG_ARG(this)); + block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this)); } break; @@ -7553,7 +7553,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { @@ -7633,13 +7633,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -11279,7 +11279,7 @@ void Compiler::impImportBlock(BasicBlock* block) unsigned multRef = impCanReimport ? 
unsigned(~0) : 0; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index ab67048abbbc6..da1fb1933b239 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -573,7 +573,7 @@ class IndirectCallTransformer // There's no need for a new block here. We can just append to currBlock. // checkBlock = currBlock; - checkBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); + checkBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); } else { @@ -652,7 +652,7 @@ class IndirectCallTransformer if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0)) { checkBlock->bbJumpDest = nullptr; - checkBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); + checkBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); return; } @@ -1126,7 +1126,7 @@ class IndirectCallTransformer // not fall through to the check block. // compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); + coldBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); coldBlock->bbJumpDest = elseBlock; compiler->fgAddRefPred(elseBlock, coldBlock); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 8606b18743e72..69658a7cd1dd2 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -3506,7 +3506,7 @@ void Compiler::fgVerifyHandlerTab() } // Check for legal block types - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_EHFINALLYRET: { diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 7edb0515ae323..44e810592a006 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -1770,7 +1770,7 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range) assert(insertionPoint != nullptr); #if DEBUG - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_COND: assert(insertionPoint->OperIsConditionalJump()); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index d66ddc05a5cdc..d32854e4224c7 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -378,7 +378,7 @@ void Compiler::fgPerBlockLocalVarLiveness() block->bbMemoryLiveIn = fullMemoryKindSet; block->bbMemoryLiveOut = fullMemoryKindSet; - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: @@ -886,7 +886,7 @@ void Compiler::fgExtendDbgLifetimes() { VarSetOps::ClearD(this, initVars); - switch (block->getBBJumpKind()) + switch (block->GetBBJumpKind()) { case BBJ_NONE: PREFIX_ASSUME(block->bbNext != nullptr); diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index e9f4df7692474..721f97b47019f 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2047,7 +2047,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { assert(h->KindIs(BBJ_ALWAYS)); assert(h->bbJumpDest == loop.lpEntry); - h2->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + h2->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; } @@ -2062,7 +2062,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make 'h' fall through to 'h2' (if it didn't already). // Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and // optInsertLoopChoiceConditions() will add the edge. 
- h->setBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + h->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); h->bbJumpDest = nullptr; // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) @@ -2116,7 +2116,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { - BasicBlock* newBlk = fgNewBBafter(blk->getBBJumpKind(), newPred, /*extendRegion*/ true); + BasicBlock* newBlk = fgNewBBafter(blk->GetBBJumpKind(), newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it @@ -2175,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->KindIs(newblk->getBBJumpKind())); + assert(blk->KindIs(newblk->GetBBJumpKind())); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); @@ -2184,7 +2184,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. - switch (newblk->getBBJumpKind()) + switch (newblk->GetBBJumpKind()) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); @@ -2255,7 +2255,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) { // We can't just fall through to the slow path entry, so make it an unconditional branch. assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. - slowHead->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + slowHead->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 66bdb7a64f52b..94d6057803ba4 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -801,12 +801,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { - originalSwitchBB->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); + originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = nullptr; } else { - originalSwitchBB->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + originalSwitchBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. @@ -900,7 +900,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). // Turn originalSwitchBB into a BBJ_COND. 
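// LowerSwitch peels the default case first; a sketch of the peeled shape for
// switch (x) with jumpCnt targets (the exact compare is an assumption here,
// the real lowering builds it from the switch value and case count):
//
//   originalSwitchBB       BBJ_COND: if (x exceeds the highest case) goto default
//   afterDefaultCondBlock  the remaining cases, lowered below either to a
//                          chain of BBJ_COND compares, a bit test, or a jump table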
- originalSwitchBB->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); + originalSwitchBB->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1]; // Fix the pred for the default case: the default block target still has originalSwitchBB @@ -957,12 +957,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } if (afterDefaultCondBlock->bbNext == uniqueSucc) { - afterDefaultCondBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); + afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = nullptr; } else { - afterDefaultCondBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + afterDefaultCondBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = uniqueSucc; } } @@ -1036,13 +1036,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); + currentBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1075,7 +1075,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->KindIs(BBJ_SWITCH)); - currentBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); + currentBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } @@ -1247,7 +1247,7 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition bbSwitchCondition; - bbSwitch->setBBJumpKind(BBJ_COND DEBUG_ARG(comp)); + bbSwitch->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 4a2606729253f..53a99febef0aa 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6190,7 +6190,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. - compCurBB->setBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); + compCurBB->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); @@ -6338,7 +6338,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->setBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + compCurBB->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } if (isRootReplaced) @@ -7490,7 +7490,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa } // Finish hooking things up. 
-    block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+    block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
     fgAddRefPred(block->bbJumpDest, block);
     block->bbFlags &= ~BBF_HAS_JMP;
 }
@@ -13183,7 +13183,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
             if (cond->AsIntCon()->gtIconVal != 0)
             {
                 /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
-                block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+                block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
                 bTaken = block->bbJumpDest;
                 bNotTaken = block->bbNext;
             }
@@ -13199,7 +13199,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                 }
 
                 /* JTRUE 0 - transform the basic block into a BBJ_NONE */
-                block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                 bTaken = block->bbNext;
                 bNotTaken = block->bbJumpDest;
             }
@@ -13254,7 +13254,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                 FlowEdge* edge;
 
                 // Now fix the weights of the edges out of 'bUpdated'
-                switch (bUpdated->getBBJumpKind())
+                switch (bUpdated->GetBBJumpKind())
                 {
                     case BBJ_NONE:
                         edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
@@ -13428,13 +13428,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                     if (curJump != block->bbNext)
                     {
                         // transform the basic block into a BBJ_ALWAYS
-                        block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+                        block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
                         block->bbJumpDest = curJump;
                     }
                     else
                     {
                         // transform the basic block into a BBJ_NONE
-                        block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                        block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                     }
                     foundVal = true;
                 }
@@ -14002,7 +14002,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block)
     else
 #endif // !TARGET_X86
     {
-        block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+        block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
         block->bbJumpDest = genReturnBB;
         fgAddRefPred(genReturnBB, block);
         fgReturnCount--;
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index 09683dde47bc8..82d2430b91445 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -587,7 +587,7 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock()
 
     // Update the flow.
     m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);
-    m_b1->setBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp));
+    m_b1->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp));
 
     // Fixup flags.
     m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE);
@@ -877,7 +877,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
     if (optReturnBlock)
     {
         m_b1->bbJumpDest = nullptr;
-        m_b1->setBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp));
+        m_b1->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp));
 #ifdef DEBUG
         m_b1->bbJumpSwt = m_b2->bbJumpSwt;
 #endif
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 3d3baeef6a201..bc54bcc3af06f 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -1385,7 +1385,7 @@ void Compiler::optCheckPreds()
             }
         }
         noway_assert(bb);
-        switch (bb->getBBJumpKind())
+        switch (bb->GetBBJumpKind())
        {
             case BBJ_COND:
                 if (bb->bbJumpDest == block)
@@ -2398,7 +2398,7 @@ class LoopSearch
        {
             BasicBlock* exitPoint;
 
-            switch (block->getBBJumpKind())
+            switch (block->GetBBJumpKind())
             {
                 case BBJ_COND:
                 case BBJ_CALLFINALLY:
@@ -2738,7 +2738,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
     BasicBlock* newJumpDest = nullptr;
 
-    switch (blk->getBBJumpKind())
+    switch (blk->GetBBJumpKind())
     {
         case BBJ_NONE:
         case BBJ_THROW:
@@ -2818,10 +2818,10 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
 // TODO-Cleanup: This should be a static member of the BasicBlock class.
 void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
 {
-    assert(from->KindIs(to->getBBJumpKind())); // Precondition.
+    assert(from->KindIs(to->GetBBJumpKind())); // Precondition.
 
     // copy the jump destination(s) from "from" to "to".
-    switch (to->getBBJumpKind())
+    switch (to->GetBBJumpKind())
     {
         case BBJ_ALWAYS:
         case BBJ_LEAVE:
@@ -4361,7 +4361,7 @@ PhaseStatus Compiler::optUnrollLoops()
                 for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext)
                 {
                     BasicBlock* newBlock = insertAfter =
-                        fgNewBBafter(block->getBBJumpKind(), insertAfter, /*extendRegion*/ true);
+                        fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true);
                     blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite);
 
                     if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval))
@@ -4415,7 +4415,7 @@ PhaseStatus Compiler::optUnrollLoops()
                         {
                             testCopyStmt->SetRootNode(sideEffList);
                         }
-                        newBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                        newBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                     }
                 }
@@ -4486,7 +4486,7 @@ PhaseStatus Compiler::optUnrollLoops()
                     fgRemoveAllRefPreds(succ, block);
                 }
 
-                block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                 block->bbStmtList = nullptr;
                 block->bbJumpDest = nullptr;
                 block->bbNatLoopNum = newLoopNum;
@@ -4531,7 +4531,7 @@ PhaseStatus Compiler::optUnrollLoops()
                     noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE));
                     fgRemoveStmt(initBlock, initBlockBranchStmt);
                     fgRemoveRefPred(initBlock->bbJumpDest, initBlock);
-                    initBlock->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                    initBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
                 }
                 else
                 {
@@ -5077,7 +5077,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
     bool foundCondTree = false;
 
     // Create a new block after `block` to put the copied condition code.
-    block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+    block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
     block->bbJumpDest = nullptr;
 
     BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true);
@@ -8306,7 +8306,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
             continue;
         }
 
-        switch (predBlock->getBBJumpKind())
+        switch (predBlock->GetBBJumpKind())
         {
             case BBJ_NONE:
                 // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop,
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 2423a6d9da47a..017509086d208 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -145,7 +145,7 @@ class PatchpointTransformer
         BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block);
 
         // Update flow and flags
-        block->setBBJumpKind(BBJ_COND DEBUG_ARG(compiler));
+        block->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler));
         block->bbJumpDest = remainderBlock;
         block->bbFlags |= BBF_INTERNAL;
@@ -233,7 +233,7 @@ class PatchpointTransformer
         }
 
         // Update flow
-        block->setBBJumpKind(BBJ_THROW DEBUG_ARG(compiler));
+        block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(compiler));
         block->bbJumpDest = nullptr;
 
         // Add helper call
diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp
index cbc59c30d73a3..dfbd1863cb4b6 100644
--- a/src/coreclr/jit/redundantbranchopts.cpp
+++ b/src/coreclr/jit/redundantbranchopts.cpp
@@ -1460,7 +1460,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti)
         // Possibly defer this until after early out below.
         //
-        jti.m_fallThroughPred->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+        jti.m_fallThroughPred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
         jti.m_fallThroughPred->bbJumpDest = jti.m_block;
         modifiedFlow = true;
     }
@@ -1532,7 +1532,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti)
         fgRemoveStmt(jti.m_block, lastStmt);
         JITDUMP(" repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_trueTarget->bbNum);
         fgRemoveRefPred(jti.m_falseTarget, jti.m_block);
-        jti.m_block->setBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+        jti.m_block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
     }
     else if (falsePredsWillReuseBlock)
     {
@@ -1541,7 +1541,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti)
         JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum,
                 jti.m_falseTarget->bbNum);
         fgRemoveRefPred(jti.m_trueTarget, jti.m_block);
-        jti.m_block->setBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+        jti.m_block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
     }
 
     // Now reroute the flow from the predecessors.
diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp
index 90bfa43142e75..125c2cf2fbebe 100644
--- a/src/coreclr/jit/switchrecognition.cpp
+++ b/src/coreclr/jit/switchrecognition.cpp
@@ -319,7 +319,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
     assert(isTest);
 
     // Convert firstBlock to a switch block
-    firstBlock->setBBJumpKind(BBJ_SWITCH DEBUG_ARG(this));
+    firstBlock->SetBBJumpKind(BBJ_SWITCH DEBUG_ARG(this));
     firstBlock->bbJumpDest = nullptr;
     firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd;
     firstBlock->lastStmt()->GetRootNode()->ChangeOper(GT_SWITCH);

From 754743d59204ce62223c4b229ba6e9790aeeb2c1 Mon Sep 17 00:00:00 2001
From: Aman Khalid
Date: Mon, 2 Oct 2023 21:23:36 -0400
Subject: [PATCH 5/5] Typo

---
 src/coreclr/jit/fgdiagnostic.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index df319152a2dd0..318e241d35ae0 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate()
         /* For a BBJ_CALLFINALLY block we make sure that we are followed by */
         /* an BBJ_ALWAYS block with BBF_INTERNAL set */
         /* or that it's a BBF_RETLESS_CALL */
-        if (block->KindIs(BBJ_CALLFINALLY) == BBJ_CALLFINALLY)
+        if (block->KindIs(BBJ_CALLFINALLY))
         {
             assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair());
         }
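Note on the final hunk, since the commit title "Typo" undersells it: KindIs returns bool, so the old condition compared a 0/1 value against the BBJ_CALLFINALLY enumerator and could never be true, which silently skipped the isBBCallAlwaysPair assert for every block. The following is a minimal standalone sketch of that failure mode, not JIT code: the enum and the Block type are simplified stand-ins for BBjumpKinds and BasicBlock, the real KindIs is variadic, and the enumerator values here are assumptions; all the example relies on is BBJ_CALLFINALLY being neither 0 nor 1.

#include <cstdio>

// Stand-in for the JIT's BBjumpKinds. The values are assumed for
// illustration; the point is only that BBJ_CALLFINALLY is neither 0 nor 1.
enum BBjumpKinds
{
    BBJ_NONE        = 0,
    BBJ_ALWAYS      = 1,
    BBJ_CALLFINALLY = 2
};

// Simplified stand-in for BasicBlock with a single-argument KindIs.
struct Block
{
    BBjumpKinds kind;
    bool KindIs(BBjumpKinds k) const
    {
        return kind == k;
    }
};

int main()
{
    Block block{BBJ_CALLFINALLY};

    // Pre-fix condition: KindIs yields a bool (0 or 1), which can never
    // equal the BBJ_CALLFINALLY enumerator (2), so this branch is dead even
    // for a genuine BBJ_CALLFINALLY block, and the assert it guards never runs.
    if (block.KindIs(BBJ_CALLFINALLY) == BBJ_CALLFINALLY)
    {
        std::printf("old check fired\n"); // never reached
    }

    // Post-fix condition, as in PATCH 5/5.
    if (block.KindIs(BBJ_CALLFINALLY))
    {
        std::printf("new check fired\n"); // reached
    }
    return 0;
}

Presumably the stray "== BBJ_CALLFINALLY" survived the mechanical conversion of direct bbJumpKind comparisons into KindIs calls earlier in the series, which is why it surfaces here as a one-line follow-up commit.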