diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index f38878e33fff8..755724e74f3ce 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -5268,7 +5268,7 @@ class AssertionPropFlowCallback
{
// Scenario where next block and conditional block, both point to the same block.
// In such case, intersect the assertions present on both the out edges of predBlock.
- assert(predBlock->bbNext == block);
+ assert(predBlock->NextIs(block));
BitVecOps::IntersectionD(apTraits, pAssertionOut, predBlock->bbAssertionOut);
if (VerboseDataflow())
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index c2aa5ff45e3f4..34d1156b0c3c9 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -133,7 +133,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
// these cannot cause transfer to the handler...)
// TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
// something like:
- // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->bbNext; bb = bb->bbNext)
+ // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->Next(); bb = bb->Next())
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
@@ -160,7 +160,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
if (enclosingDsc->HasFilter())
{
for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg;
- filterBlk = filterBlk->bbNext)
+ filterBlk = filterBlk->Next())
{
res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res);
@@ -186,6 +186,36 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
return res;
}
+//------------------------------------------------------------------------
+// IsLastHotBlock: see if this is the last block before the cold section
+//
+// Arguments:
+// compiler - current compiler instance
+//
+// Returns:
+// true if the next block is fgFirstColdBlock
+// (if fgFirstColdBlock is null, this call is equivalent to IsLast())
+//
+bool BasicBlock::IsLastHotBlock(Compiler* compiler) const
+{
+ return (bbNext == compiler->fgFirstColdBlock);
+}
+
+//------------------------------------------------------------------------
+// IsFirstColdBlock: see if this is the first block in the cold section
+//
+// Arguments:
+// compiler - current compiler instance
+//
+// Returns:
+// true if this is fgFirstColdBlock
+// (fgFirstColdBlock is null if there is no cold code)
+//
+bool BasicBlock::IsFirstColdBlock(Compiler* compiler) const
+{
+ return (this == compiler->fgFirstColdBlock);
+}
+
//------------------------------------------------------------------------
// checkPredListOrder: see if pred list is properly ordered
//
@@ -1509,10 +1539,10 @@ bool BasicBlock::isBBCallAlwaysPair() const
assert(!(this->bbFlags & BBF_RETLESS_CALL));
#endif
// Some asserts that the next block is a BBJ_ALWAYS of the proper form.
- assert(this->bbNext != nullptr);
- assert(this->bbNext->KindIs(BBJ_ALWAYS));
- assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS);
- assert(this->bbNext->isEmpty());
+ assert(!this->IsLast());
+ assert(this->Next()->KindIs(BBJ_ALWAYS));
+ assert(this->Next()->bbFlags & BBF_KEEP_BBJ_ALWAYS);
+ assert(this->Next()->isEmpty());
return true;
}
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index 88312967936f2..fb44614fec95e 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -508,10 +508,49 @@ struct BasicBlock : private LIR::Range
{
friend class LIR;
+private:
BasicBlock* bbNext; // next BB in ascending PC offset order
BasicBlock* bbPrev;
- void setNext(BasicBlock* next)
+ BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
+
+public:
+ BBjumpKinds GetBBJumpKind() const
+ {
+ return bbJumpKind;
+ }
+
+ void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* compiler))
+ {
+#ifdef DEBUG
+ // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
+ // TODO: Change assert to check if compiler is in appropriate optimization phase to use BBJ_NONE
+ // (right now, this assertion does the null check to avoid unused variable warnings)
+ assert((kind != BBJ_NONE) || (compiler != nullptr));
+#endif // DEBUG
+ bbJumpKind = kind;
+ }
+
+ BasicBlock* Prev() const
+ {
+ return bbPrev;
+ }
+
+ void SetPrev(BasicBlock* prev)
+ {
+ bbPrev = prev;
+ if (prev)
+ {
+ prev->bbNext = this;
+ }
+ }
+
+ BasicBlock* Next() const
+ {
+ return bbNext;
+ }
+
+ void SetNext(BasicBlock* next)
{
bbNext = next;
if (next)
@@ -520,6 +559,37 @@ struct BasicBlock : private LIR::Range
}
}
+ bool IsFirst() const
+ {
+ return (bbPrev == nullptr);
+ }
+
+ bool IsLast() const
+ {
+ return (bbNext == nullptr);
+ }
+
+ bool PrevIs(BasicBlock* block) const
+ {
+ return (bbPrev == block);
+ }
+
+ bool NextIs(BasicBlock* block) const
+ {
+ return (bbNext == block);
+ }
+
+ bool IsLastHotBlock(Compiler* compiler) const;
+
+ bool IsFirstColdBlock(Compiler* compiler) const;
+
+ /* The following union describes the jump target(s) of this block */
+ union {
+ unsigned bbJumpOffs; // PC offset (temporary only)
+ BasicBlock* bbJumpDest; // basic block
+ BBswtDesc* bbJumpSwt; // switch descriptor
+ };
+
BasicBlockFlags bbFlags;
static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
@@ -702,33 +772,6 @@ struct BasicBlock : private LIR::Range
// a block corresponding to an exit from the try of a try/finally.
bool isBBCallAlwaysPairTail() const;
-private:
- BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
-
-public:
- BBjumpKinds GetBBJumpKind() const
- {
- return bbJumpKind;
- }
-
- void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp))
- {
-#ifdef DEBUG
- // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
- // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE
- // (right now, this assertion does the null check to avoid unused variable warnings)
- assert((kind != BBJ_NONE) || (comp != nullptr));
-#endif // DEBUG
- bbJumpKind = kind;
- }
-
- /* The following union describes the jump target(s) of this block */
- union {
- unsigned bbJumpOffs; // PC offset (temporary only)
- BasicBlock* bbJumpDest; // basic block
- BBswtDesc* bbJumpSwt; // switch descriptor
- };
-
bool KindIs(BBjumpKinds kind) const
{
return bbJumpKind == kind;
@@ -1435,10 +1478,10 @@ class BasicBlockIterator
{
assert(m_block != nullptr);
// Check that we haven't been spliced out of the list.
- assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block));
- assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block));
+ assert(m_block->IsLast() || m_block->Next()->PrevIs(m_block));
+ assert(m_block->IsFirst() || m_block->Prev()->NextIs(m_block));
- m_block = m_block->bbNext;
+ m_block = m_block->Next();
return *this;
}
@@ -1501,7 +1544,7 @@ class BasicBlockRangeList
BasicBlockIterator end() const
{
- return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block
+ return BasicBlockIterator(m_end->Next()); // walk until we see the block *following* the `m_end` block
}
};
@@ -1596,18 +1639,18 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
break;
case BBJ_NONE:
- m_succs[0] = block->bbNext;
+ m_succs[0] = block->Next();
m_begin = &m_succs[0];
m_end = &m_succs[1];
break;
case BBJ_COND:
- m_succs[0] = block->bbNext;
+ m_succs[0] = block->Next();
m_begin = &m_succs[0];
// If both fall-through and branch successors are identical, then only include
// them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
- if (block->bbJumpDest == block->bbNext)
+ if (block->NextIs(block->bbJumpDest))
{
m_end = &m_succs[1];
}
diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis
index 6d25673d9c970..81e1503de22cf 100644
--- a/src/coreclr/jit/clrjit.natvis
+++ b/src/coreclr/jit/clrjit.natvis
@@ -105,7 +105,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
varIndex++
bbLiveInMap = bbLiveInMap >> 1
- block = block->bbNext
+ block = block->Next()
- "OutVarToRegMaps"
@@ -124,7 +124,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
varIndex++
bbLiveInMap = bbLiveInMap >> 1
- block = block->bbNext
+ block = block->Next()
- this->m_AvailableRegs
diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp
index 54c4b7e20dcd5..874284cab4dad 100644
--- a/src/coreclr/jit/codegenarm.cpp
+++ b/src/coreclr/jit/codegenarm.cpp
@@ -123,12 +123,12 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// we would have otherwise created retless calls.
assert(block->isBBCallAlwaysPair());
- assert(block->bbNext != NULL);
- assert(block->bbNext->KindIs(BBJ_ALWAYS));
- assert(block->bbNext->bbJumpDest != NULL);
- assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
+ assert(!block->IsLast());
+ assert(block->Next()->KindIs(BBJ_ALWAYS));
+ assert(block->Next()->bbJumpDest != NULL);
+ assert(block->Next()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
- bbFinallyRet = block->bbNext->bbJumpDest;
+ bbFinallyRet = block->Next()->bbJumpDest;
// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
@@ -143,7 +143,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// block is RETLESS.
assert(!(block->bbFlags & BBF_RETLESS_CALL));
assert(block->isBBCallAlwaysPair());
- return block->bbNext;
+ return block->Next();
}
//------------------------------------------------------------------------
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 6d22044c156b9..4fc3436df5155 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -2160,7 +2160,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
}
GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
- BasicBlock* const nextBlock = block->bbNext;
+ BasicBlock* const nextBlock = block->Next();
if (block->bbFlags & BBF_RETLESS_CALL)
{
@@ -2184,7 +2184,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
BasicBlock* const jumpDest = nextBlock->bbJumpDest;
// Now go to where the finally funclet needs to return to.
- if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
+ if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
{
// Fall-through.
// TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index c203223d78f95..fd0b2de289b61 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -3338,8 +3338,8 @@ void CodeGen::genCall(GenTreeCall* call)
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
- assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
- ((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
+ assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
+ (call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 8ccac405a37dd..495af5eb27158 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -402,10 +402,10 @@ void CodeGen::genMarkLabelsForCodegen()
{
// For callfinally thunks, we need to mark the block following the callfinally/always pair,
// as that's needed for identifying the range of the "duplicate finally" region in EH data.
- BasicBlock* bbToLabel = block->bbNext;
+ BasicBlock* bbToLabel = block->Next();
if (block->isBBCallAlwaysPair())
{
- bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS
+ bbToLabel = bbToLabel->Next(); // skip the BBJ_ALWAYS
}
if (bbToLabel != nullptr)
{
@@ -446,16 +446,16 @@ void CodeGen::genMarkLabelsForCodegen()
JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum);
JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum);
- if (HBtab->ebdTryLast->bbNext != nullptr)
+ if (!HBtab->ebdTryLast->IsLast())
{
- HBtab->ebdTryLast->bbNext->bbFlags |= BBF_HAS_LABEL;
- JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->bbNext->bbNum);
+ HBtab->ebdTryLast->Next()->bbFlags |= BBF_HAS_LABEL;
+ JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->Next()->bbNum);
}
- if (HBtab->ebdHndLast->bbNext != nullptr)
+ if (!HBtab->ebdHndLast->IsLast())
{
- HBtab->ebdHndLast->bbNext->bbFlags |= BBF_HAS_LABEL;
- JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->bbNext->bbNum);
+ HBtab->ebdHndLast->Next()->bbFlags |= BBF_HAS_LABEL;
+ JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->Next()->bbNum);
}
if (HBtab->HasFilter())
@@ -2302,9 +2302,9 @@ void CodeGen::genReportEH()
hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg);
tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
- : compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext);
+ : compiler->ehCodeOffset(HBtab->ebdTryLast->Next());
hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
- : compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext);
+ : compiler->ehCodeOffset(HBtab->ebdHndLast->Next());
if (HBtab->HasFilter())
{
@@ -2524,9 +2524,9 @@ void CodeGen::genReportEH()
hndBeg = compiler->ehCodeOffset(bbHndBeg);
tryEnd = (bbTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
- : compiler->ehCodeOffset(bbTryLast->bbNext);
+ : compiler->ehCodeOffset(bbTryLast->Next());
hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
- : compiler->ehCodeOffset(bbHndLast->bbNext);
+ : compiler->ehCodeOffset(bbHndLast->Next());
if (encTab->HasFilter())
{
@@ -2590,10 +2590,10 @@ void CodeGen::genReportEH()
// How big is it? The BBJ_ALWAYS has a null bbEmitCookie! Look for the block after, which must be
// a label or jump target, since the BBJ_CALLFINALLY doesn't fall through.
- BasicBlock* bbLabel = block->bbNext;
+ BasicBlock* bbLabel = block->Next();
if (block->isBBCallAlwaysPair())
{
- bbLabel = bbLabel->bbNext; // skip the BBJ_ALWAYS
+ bbLabel = bbLabel->Next(); // skip the BBJ_ALWAYS
}
if (bbLabel == nullptr)
{
@@ -3235,7 +3235,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
regArgTab[regArgNum + i].writeThru = (varDsc->lvIsInReg() && varDsc->lvLiveInOutOfHndlr);
/* mark stack arguments since we will take care of those first */
- regArgTab[regArgNum + i].stackArg = (varDsc->lvIsInReg()) ? false : true;
+ regArgTab[regArgNum + i].stackArg = varDsc->lvIsInReg() ? false : true;
/* If it goes on the stack or in a register that doesn't hold
* an argument anymore -> CANNOT form a circular dependency */
@@ -5210,8 +5210,8 @@ void CodeGen::genReserveEpilog(BasicBlock* block)
assert(block != nullptr);
const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars);
- bool last = (block->bbNext == nullptr);
- GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
+ GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg,
+ block->IsLast());
}
#if defined(FEATURE_EH_FUNCLETS)
@@ -5257,9 +5257,8 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block)
JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum);
- bool last = (block->bbNext == nullptr);
GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur, last);
+ gcInfo.gcRegByrefSetCur, block->IsLast());
}
#endif // FEATURE_EH_FUNCLETS
@@ -5812,7 +5811,7 @@ void CodeGen::genFnProlog()
{
excludeMask |= RBM_PINVOKE_FRAME;
- assert((!compiler->opts.ShouldUsePInvokeHelpers()) || (compiler->info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!compiler->opts.ShouldUsePInvokeHelpers() || (compiler->info.compLvFrameListRoot == BAD_VAR_NUM));
if (!compiler->opts.ShouldUsePInvokeHelpers())
{
excludeMask |= (RBM_PINVOKE_TCB | RBM_PINVOKE_SCRATCH);
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index c1b93541c14c8..5ff4d9cc1b6f1 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -170,7 +170,7 @@ void CodeGen::genCodeForBBlist()
BasicBlock* block;
- for (block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (block = compiler->fgFirstBB; block != nullptr; block = block->Next())
{
#ifdef DEBUG
@@ -310,7 +310,7 @@ void CodeGen::genCodeForBBlist()
//
bool needLabel = (block->bbFlags & BBF_HAS_LABEL) != 0;
- if (block == compiler->fgFirstColdBlock)
+ if (block->IsFirstColdBlock(compiler))
{
#ifdef DEBUG
if (compiler->verbose)
@@ -319,7 +319,7 @@ void CodeGen::genCodeForBBlist()
}
#endif
// We should never have a block that falls through into the Cold section
- noway_assert(!block->bbPrev->bbFallsThrough());
+ noway_assert(!block->Prev()->bbFallsThrough());
needLabel = true;
}
@@ -330,12 +330,11 @@ void CodeGen::genCodeForBBlist()
//
// Note: We need to have set compCurBB before calling emitAddLabel
//
- if ((block->bbPrev != nullptr) && block->bbPrev->KindIs(BBJ_COND) &&
- (block->bbWeight != block->bbPrev->bbWeight))
+ if (!block->IsFirst() && block->Prev()->KindIs(BBJ_COND) && (block->bbWeight != block->Prev()->bbWeight))
{
JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT
" different from " FMT_BB " with weight " FMT_WT "\n",
- block->bbPrev->bbNum, block->bbPrev->bbWeight, block->bbNum, block->bbWeight);
+ block->Prev()->bbNum, block->Prev()->bbWeight, block->bbNum, block->bbWeight);
needLabel = true;
}
@@ -355,7 +354,7 @@ void CodeGen::genCodeForBBlist()
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
}
- if (block == compiler->fgFirstColdBlock)
+ if (block->IsFirstColdBlock(compiler))
{
// We require the block that starts the Cold section to have a label
noway_assert(block->bbEmitCookie);
@@ -519,7 +518,7 @@ void CodeGen::genCodeForBBlist()
#endif // DEBUG
#if defined(DEBUG)
- if (block->bbNext == nullptr)
+ if (block->IsLast())
{
// Unit testing of the emitter: generate a bunch of instructions into the last block
// (it's as good as any, but better than the prologue, which can only be a single instruction
@@ -547,10 +546,14 @@ void CodeGen::genCodeForBBlist()
/* Is this the last block, and are there any open scopes left ? */
- bool isLastBlockProcessed = (block->bbNext == nullptr);
+ bool isLastBlockProcessed;
if (block->isBBCallAlwaysPair())
{
- isLastBlockProcessed = (block->bbNext->bbNext == nullptr);
+ isLastBlockProcessed = block->Next()->IsLast();
+ }
+ else
+ {
+ isLastBlockProcessed = block->IsLast();
}
if (compiler->opts.compDbgInfo && isLastBlockProcessed)
@@ -615,7 +618,7 @@ void CodeGen::genCodeForBBlist()
// Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
// if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions
// generated before the OS epilog starts, such as a GS cookie check.
- if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
+ if (block->IsLast() || !BasicBlock::sameEHRegion(block, block->Next()))
{
// We only need the NOP if we're not going to generate any more code as part of the block end.
@@ -636,7 +639,7 @@ void CodeGen::genCodeForBBlist()
break;
case BBJ_NONE:
- if (block->bbNext == nullptr)
+ if (block->IsLast())
{
// Call immediately before the end of the code; we should never get here .
instGen(INS_BREAKPOINT); // This should never get executed
@@ -679,10 +682,10 @@ void CodeGen::genCodeForBBlist()
-            // 2. If this is this is the last block of the hot section.
+            // 2. If this is the last block of the hot section.
// 3. If the subsequent block is a special throw block.
// 4. On AMD64, if the next block is in a different EH region.
- if ((block->bbNext == nullptr) || (block->bbNext->bbFlags & BBF_FUNCLET_BEG) ||
- !BasicBlock::sameEHRegion(block, block->bbNext) ||
- (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
- block->bbNext == compiler->fgFirstColdBlock)
+ if (block->IsLast() || (block->Next()->bbFlags & BBF_FUNCLET_BEG) ||
+ !BasicBlock::sameEHRegion(block, block->Next()) ||
+ (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->Next())) ||
+ block->IsLastHotBlock(compiler))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
@@ -783,10 +786,10 @@ void CodeGen::genCodeForBBlist()
{
GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest);
- if (block->bbNext != nullptr)
+ if (!block->IsLast())
{
- JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->bbNext->bbNum);
- block->bbNext->bbFlags |= BBF_HAS_LABEL;
+ JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->Next()->bbNum);
+ block->Next()->bbFlags |= BBF_HAS_LABEL;
}
}
#endif // FEATURE_LOOP_ALIGN
@@ -818,7 +821,7 @@ void CodeGen::genCodeForBBlist()
GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS)));
}
- if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign()))
+ if (!block->IsLast() && block->Next()->isLoopAlign())
{
if (compiler->opts.compJitHideAlignBehindJmp)
{
@@ -949,7 +952,7 @@ void CodeGen::genSpillVar(GenTree* tree)
{
// We only have 'GTF_SPILL' and 'GTF_SPILLED' on a def of a write-thru lclVar
// or a single-def var that is to be spilled at its definition.
- assert((varDsc->IsAlwaysAliveInMemory()) && ((tree->gtFlags & GTF_VAR_DEF) != 0));
+ assert(varDsc->IsAlwaysAliveInMemory() && ((tree->gtFlags & GTF_VAR_DEF) != 0));
}
if (needsSpill)
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index 075b1f1c847d1..ee1bc3f0ac1d0 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -1520,7 +1520,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
}
GetEmitter()->emitIns_J(INS_bl, block->bbJumpDest);
- BasicBlock* const nextBlock = block->bbNext;
+ BasicBlock* const nextBlock = block->Next();
if (block->bbFlags & BBF_RETLESS_CALL)
{
@@ -1544,7 +1544,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
BasicBlock* const jumpDest = nextBlock->bbJumpDest;
// Now go to where the finally funclet needs to return to.
- if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
+ if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
{
// Fall-through.
// TODO-LOONGARCH64-CQ: Can we get rid of this instruction, and just have the call return directly
@@ -6480,8 +6480,8 @@ void CodeGen::genCall(GenTreeCall* call)
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
- assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
- ((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
+ assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
+ (call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index 6b858312ae328..4f5fe303cf101 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -1158,7 +1158,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
}
GetEmitter()->emitIns_J(INS_jal, block->bbJumpDest);
- BasicBlock* const nextBlock = block->bbNext;
+ BasicBlock* const nextBlock = block->Next();
if (block->bbFlags & BBF_RETLESS_CALL)
{
@@ -1182,7 +1182,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
BasicBlock* const jumpDest = nextBlock->bbJumpDest;
// Now go to where the finally funclet needs to return to.
- if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
+ if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
{
// Fall-through.
// TODO-RISCV64-CQ: Can we get rid of this instruction, and just have the call return directly
@@ -6173,8 +6173,8 @@ void CodeGen::genCall(GenTreeCall* call)
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
- assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
- ((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
+ assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
+ (call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index f5eb3cbf80256..869872e5062d1 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -205,7 +205,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
- BasicBlock* const nextBlock = block->bbNext;
+ BasicBlock* const nextBlock = block->Next();
#if defined(FEATURE_EH_FUNCLETS)
// Generate a call to the finally, like this:
@@ -256,7 +256,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
BasicBlock* const jumpDest = nextBlock->bbJumpDest;
// Now go to where the finally funclet needs to return to.
- if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
+ if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
{
// Fall-through.
// TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
@@ -8248,7 +8248,7 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
// For now, we only support the "push" case; we will push a full slot for the first field of each slot
// within the struct.
- assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
+ assert(putArgStk->isPushKind() && !preAdjustedStack && m_pushStkArg);
// If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
// (Note that this mode is not currently being used.)
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index c3f63b48e4ab4..13f31ec559a26 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -5291,11 +5291,11 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
}
}
- if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign()))
+ if (!block->IsLast() && block->Next()->isLoopAlign())
{
// Loop alignment is disabled for cold blocks
assert((block->bbFlags & BBF_COLD) == 0);
- BasicBlock* const loopTop = block->bbNext;
+ BasicBlock* const loopTop = block->Next();
bool isSpecialCallFinally = block->isBBCallAlwaysPairTail();
bool unmarkedLoopAlign = false;
@@ -6379,9 +6379,9 @@ void Compiler::compCompileFinish()
// Small methods cannot meaningfully have a big number of locals
// or arguments. We always track arguments at the start of
// the prolog which requires memory
- (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc
- (getJitStressLevel() == 0) && // We need extra memory for stress
- !opts.optRepeat && // We need extra memory to repeat opts
+ (info.compLocalsCount <= 32) && !opts.MinOpts() && // We may have too many local variables, etc
+ (getJitStressLevel() == 0) && // We need extra memory for stress
+ !opts.optRepeat && // We need extra memory to repeat opts
!compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
// DirectAlloc
// Factor of 2x is because data-structures are bigger under DEBUG
@@ -9614,7 +9614,7 @@ BasicBlock* dFindBlock(unsigned bbNum)
BasicBlock* block = nullptr;
dbBlock = nullptr;
- for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (block = comp->fgFirstBB; block != nullptr; block = block->Next())
{
if (block->bbNum == bbNum)
{
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index c98b1331bb831..76081ebf8b09c 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -6482,7 +6482,7 @@ class Compiler
// Returns "true" iff this is a "top entry" loop.
bool lpIsTopEntry() const
{
- if (lpHead->bbNext == lpEntry)
+ if (lpHead->NextIs(lpEntry))
{
assert(lpHead->bbFallsThrough());
assert(lpTop == lpEntry);
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 43d8e927c65f7..080d1fb4be13d 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -633,7 +633,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
{
@@ -642,12 +642,12 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)
assert(bcall->isBBCallAlwaysPair());
- RETURN_ON_ABORT(func(bcall->bbNext));
+ RETURN_ON_ABORT(func(bcall->Next()));
}
RETURN_ON_ABORT(VisitEHSuccessors(comp, this, func));
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
{
@@ -655,7 +655,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)
}
assert(bcall->isBBCallAlwaysPair());
- RETURN_ON_ABORT(VisitSuccessorEHSuccessors(comp, this, bcall->bbNext, func));
+ RETURN_ON_ABORT(VisitSuccessorEHSuccessors(comp, this, bcall->Next(), func));
}
break;
@@ -767,7 +767,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func)
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
{
@@ -776,7 +776,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func)
assert(bcall->isBBCallAlwaysPair());
- RETURN_ON_ABORT(func(bcall->bbNext));
+ RETURN_ON_ABORT(func(bcall->Next()));
}
break;
@@ -3235,7 +3235,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block)
// Must do this after we update bbJumpKind of block.
if (isCallAlwaysPair)
{
- BasicBlock* leaveBlk = block->bbNext;
+ BasicBlock* leaveBlk = block->Next();
noway_assert(leaveBlk->KindIs(BBJ_ALWAYS));
// leaveBlk is now unreachable, so scrub the pred lists.
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 61c118eacc810..b38a00fe22ff9 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -9881,7 +9881,7 @@ void emitter::emitStackPop(BYTE* addr, bool isCall, unsigned char callInstrSize,
// recorded (when we're doing the ptr reg map for a non-fully-interruptible method).
if (emitFullGCinfo
#ifndef JIT32_GCENCODER
- || (emitComp->IsFullPtrRegMapRequired() && (!emitComp->GetInterruptible()) && isCall)
+ || (emitComp->IsFullPtrRegMapRequired() && !emitComp->GetInterruptible() && isCall)
#endif // JIT32_GCENCODER
)
{
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index d03bb82ea9cc5..7cd209b6e5e71 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -10550,7 +10550,7 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
nsep = true;
}
- if ((id->idIsDspReloc()) && (id->idIns() != INS_i_jmp))
+ if (id->idIsDspReloc() && (id->idIns() != INS_i_jmp))
{
if (nsep)
{
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 9853f3f47b26e..4165d10147125 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -212,12 +212,12 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
if (fgFirstBB)
{
- fgLastBB->setNext(block);
+ fgLastBB->SetNext(block);
}
else
{
- fgFirstBB = block;
- block->bbPrev = nullptr;
+ fgFirstBB = block;
+ block->SetPrev(nullptr);
}
fgLastBB = block;
@@ -694,7 +694,7 @@ BasicBlock* Compiler::fgLookupBB(unsigned addr)
while (dsc->bbFlags & BBF_INTERNAL)
{
- dsc = dsc->bbNext;
+ dsc = dsc->Next();
mid++;
// We skipped over too many, Set hi back to the original mid - 1
@@ -2793,7 +2793,7 @@ void Compiler::fgLinkBasicBlocks()
break;
}
- if (!curBBdesc->bbNext)
+ if (curBBdesc->IsLast())
{
BADCODE("Fall thru the end of a method");
}
@@ -2803,7 +2803,7 @@ void Compiler::fgLinkBasicBlocks()
FALLTHROUGH;
case BBJ_NONE:
- fgAddRefPred(curBBdesc->bbNext, curBBdesc, oldEdge);
+ fgAddRefPred(curBBdesc->Next(), curBBdesc, oldEdge);
break;
case BBJ_EHFILTERRET:
@@ -2839,7 +2839,7 @@ void Compiler::fgLinkBasicBlocks()
/* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */
- noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext);
+ noway_assert(curBBdesc->NextIs(*(jumpPtr - 1)));
break;
}
@@ -3664,7 +3664,7 @@ void Compiler::fgFindBasicBlocks()
hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER;
// Mark all BBs that belong to the filter with the XTnum of the corresponding handler
- for (block = filtBB; /**/; block = block->bbNext)
+ for (block = filtBB; /**/; block = block->Next())
{
if (block == nullptr)
{
@@ -3685,7 +3685,7 @@ void Compiler::fgFindBasicBlocks()
}
}
- if (!block->bbNext || block->bbNext != hndBegBB)
+ if (block->IsLast() || !block->NextIs(hndBegBB))
{
BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X",
filtBB->bbCodeOffs);
@@ -3753,10 +3753,10 @@ void Compiler::fgFindBasicBlocks()
HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags);
HBtab->ebdTryBeg = tryBegBB;
- HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->bbPrev;
+ HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->Prev();
HBtab->ebdHndBeg = hndBegBB;
- HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->bbPrev;
+ HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->Prev();
//
// Assert that all of our try/hnd blocks are setup correctly.
@@ -3798,7 +3798,7 @@ void Compiler::fgFindBasicBlocks()
BasicBlock* block;
- for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext)
+ for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->Next())
{
if (!block->hasHndIndex())
{
@@ -3821,7 +3821,7 @@ void Compiler::fgFindBasicBlocks()
/* Mark all blocks within the covered range of the try */
- for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext)
+ for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->Next())
{
/* Mark this BB as belonging to a 'try' block */
@@ -4016,7 +4016,7 @@ void Compiler::fgFixEntryFlowForOSR()
//
fgEnsureFirstBBisScratch();
assert(fgFirstBB->KindIs(BBJ_NONE));
- fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB);
+ fgRemoveRefPred(fgFirstBB->Next(), fgFirstBB);
fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
fgFirstBB->bbJumpDest = fgOSREntryBB;
FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB);
@@ -4061,7 +4061,7 @@ void Compiler::fgCheckBasicBlockControlFlow()
{
case BBJ_NONE: // block flows into the next one (no jump)
- fgControlFlowPermitted(blk, blk->bbNext);
+ fgControlFlowPermitted(blk, blk->Next());
break;
@@ -4073,7 +4073,7 @@ void Compiler::fgCheckBasicBlockControlFlow()
case BBJ_COND: // block conditionally jumps to the target
- fgControlFlowPermitted(blk, blk->bbNext);
+ fgControlFlowPermitted(blk, blk->Next());
fgControlFlowPermitted(blk, blk->bbJumpDest);
@@ -4855,7 +4855,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
assert(fgGetPredForBlock(succ, curr) != nullptr);
BasicBlock* newBlock;
- if (succ == curr->bbNext)
+ if (curr->NextIs(succ))
{
// The successor is the fall-through path of a BBJ_COND, or
// an immediately following block of a BBJ_SWITCH (which has
@@ -4928,26 +4928,14 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
void Compiler::fgUnlinkBlock(BasicBlock* block)
{
- if (block->bbPrev)
- {
- block->bbPrev->bbNext = block->bbNext;
- if (block->bbNext)
- {
- block->bbNext->bbPrev = block->bbPrev;
- }
- else
- {
- fgLastBB = block->bbPrev;
- }
- }
- else
+ if (block->IsFirst())
{
assert(block == fgFirstBB);
assert(block != fgLastBB);
assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB));
- fgFirstBB = block->bbNext;
- fgFirstBB->bbPrev = nullptr;
+ fgFirstBB = block->Next();
+ fgFirstBB->SetPrev(nullptr);
if (fgFirstBBScratch != nullptr)
{
@@ -4961,6 +4949,14 @@ void Compiler::fgUnlinkBlock(BasicBlock* block)
fgFirstBBScratch = nullptr;
}
}
+ else
+ {
+ block->Prev()->SetNext(block->Next());
+ if (block == fgLastBB)
+ {
+ fgLastBB = block->Prev();
+ }
+ }
}
/*****************************************************************************************************
@@ -4975,22 +4971,22 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd)
assert(bBeg != nullptr);
assert(bEnd != nullptr);
- BasicBlock* bPrev = bBeg->bbPrev;
+ BasicBlock* bPrev = bBeg->Prev();
assert(bPrev != nullptr); // Can't unlink a range starting with the first block
- bPrev->setNext(bEnd->bbNext);
+ bPrev->SetNext(bEnd->Next());
/* If we removed the last block in the method then update fgLastBB */
if (fgLastBB == bEnd)
{
fgLastBB = bPrev;
- noway_assert(fgLastBB->bbNext == nullptr);
+ noway_assert(fgLastBB->IsLast());
}
// If bEnd was the first Cold basic block update fgFirstColdBlock
- if (fgFirstColdBlock == bEnd)
+ if (bEnd->IsFirstColdBlock(this))
{
- fgFirstColdBlock = bPrev->bbNext;
+ fgFirstColdBlock = bPrev->Next();
}
#if defined(FEATURE_EH_FUNCLETS)
@@ -4999,7 +4995,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd)
// can't cross the non-funclet/funclet region. And you can't unlink the first block
// of the first funclet with this, either. (If that's necessary, it could be allowed
// by updating fgFirstFuncletBB to bEnd->bbNext.)
- for (BasicBlock* tempBB = bBeg; tempBB != bEnd->bbNext; tempBB = tempBB->bbNext)
+ for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next())
{
assert(tempBB != fgFirstFuncletBB);
}
@@ -5018,7 +5014,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
PREFIX_ASSUME(block != nullptr);
- BasicBlock* bPrev = block->bbPrev;
+ BasicBlock* bPrev = block->Prev();
JITDUMP("fgRemoveBlock " FMT_BB ", unreachable=%s\n", block->bbNum, dspBool(unreachable));
@@ -5029,7 +5025,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
// the SwitchDescs might be removed.
InvalidateUniqueSwitchSuccMap();
- noway_assert((block == fgFirstBB) || (bPrev && (bPrev->bbNext == block)));
+ noway_assert((block == fgFirstBB) || (bPrev && bPrev->NextIs(block)));
noway_assert(!(block->bbFlags & BBF_DONT_REMOVE));
// Should never remove a genReturnBB, as we might have special hookups there.
@@ -5050,7 +5046,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
// If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext
if (block == fgFirstFuncletBB)
{
- fgFirstFuncletBB = block->bbNext;
+ fgFirstFuncletBB = block->Next();
}
#endif // FEATURE_EH_FUNCLETS
@@ -5063,9 +5059,9 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
NO_WAY("No retless call finally blocks; need unwind target instead");
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
}
- else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->bbNext &&
- !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) &&
- (block->bbNext != fgFirstColdBlock))
+ else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->bbJumpDest) &&
+ !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) &&
+ !block->IsLastHotBlock(this))
{
// previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE.
// Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS),
@@ -5075,9 +5071,9 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
}
// If this is the first Cold basic block update fgFirstColdBlock
- if (block == fgFirstColdBlock)
+ if (block->IsFirstColdBlock(this))
{
- fgFirstColdBlock = block->bbNext;
+ fgFirstColdBlock = block->Next();
}
/* Unlink this block from the bbNext chain */
@@ -5091,7 +5087,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
*/
if (block->isBBCallAlwaysPair())
{
- BasicBlock* leaveBlk = block->bbNext;
+ BasicBlock* leaveBlk = block->Next();
noway_assert(leaveBlk->KindIs(BBJ_ALWAYS));
leaveBlk->bbFlags &= ~BBF_DONT_REMOVE;
@@ -5160,7 +5156,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
}
else
{
- succBlock = block->bbNext;
+ succBlock = block->Next();
}
bool skipUnmarkLoop = false;
@@ -5173,16 +5169,16 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
}
// If this is the first Cold basic block update fgFirstColdBlock
- if (block == fgFirstColdBlock)
+ if (block->IsFirstColdBlock(this))
{
- fgFirstColdBlock = block->bbNext;
+ fgFirstColdBlock = block->Next();
}
#if defined(FEATURE_EH_FUNCLETS)
// Update fgFirstFuncletBB if necessary
if (block == fgFirstFuncletBB)
{
- fgFirstFuncletBB = block->bbNext;
+ fgFirstFuncletBB = block->Next();
}
#endif // FEATURE_EH_FUNCLETS
@@ -5273,7 +5269,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
}
/* Check if both side of the BBJ_COND now jump to the same block */
- if (predBlock->bbNext == succBlock)
+ if (predBlock->NextIs(succBlock))
{
// Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest.
noway_assert(predBlock->bbJumpDest == block);
@@ -5327,7 +5323,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
// the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS
// pairing.
- if ((bPrev->bbJumpDest == bPrev->bbNext) &&
+ if (bPrev->NextIs(bPrev->bbJumpDest) &&
!fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold
{
if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail())
@@ -5340,7 +5336,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
case BBJ_COND:
/* Check for branch to next block */
- if (bPrev->bbJumpDest == bPrev->bbNext)
+ if (bPrev->NextIs(bPrev->bbJumpDest))
{
fgRemoveConditionalJump(bPrev);
}
@@ -5376,7 +5372,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
{
/* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */
- if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst))
+ if (bSrc->bbFallsThrough() && !bSrc->NextIs(bDst))
{
switch (bSrc->GetBBJumpKind())
{
@@ -5459,13 +5455,12 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
// If bSrc is an unconditional branch to the next block
// then change it to a BBJ_NONE block
//
- if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) &&
- (bSrc->bbJumpDest == bSrc->bbNext))
+ if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && bSrc->NextIs(bSrc->bbJumpDest))
{
bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB
" into a BBJ_NONE block\n",
- bSrc->bbNum, bSrc->bbNext->bbNum);
+ bSrc->bbNum, bSrc->Next()->bbNum);
}
}
}
@@ -5518,7 +5513,7 @@ bool Compiler::fgRenumberBlocks()
block->bbNum = num;
}
- if (block->bbNext == nullptr)
+ if (block->IsLast())
{
fgLastBB = block;
fgBBcount = num;
@@ -5594,7 +5589,7 @@ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL *
while (true)
{
- bTemp = bTemp->bbNext;
+ bTemp = bTemp->Next();
if (bTemp == nullptr)
{
@@ -5641,24 +5636,20 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc
{
printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s",
bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum,
- (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : "");
+ insertAfterBlk->IsLast() ? " at the end of method" : "");
}
#endif // DEBUG
/* relink [bStart .. bEnd] into the flow graph */
- bEnd->bbNext = insertAfterBlk->bbNext;
- if (insertAfterBlk->bbNext)
- {
- insertAfterBlk->bbNext->bbPrev = bEnd;
- }
- insertAfterBlk->setNext(bStart);
+ bEnd->SetNext(insertAfterBlk->Next());
+ insertAfterBlk->SetNext(bStart);
/* If insertAfterBlk was fgLastBB then update fgLastBB */
if (insertAfterBlk == fgLastBB)
{
fgLastBB = bEnd;
- noway_assert(fgLastBB->bbNext == nullptr);
+ noway_assert(fgLastBB->IsLast());
}
}
@@ -5731,7 +5722,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
#if !defined(FEATURE_EH_FUNCLETS)
// In the funclets case, we still need to set some information on the handler blocks
- if (bLast->bbNext == NULL)
+ if (bLast->IsLast())
{
INDEBUG(reason = "region is already at the end of the method";)
goto FAILURE;
@@ -5756,7 +5747,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
noway_assert(inTheRange == false);
inTheRange = true;
}
- else if (block == bLast->bbNext)
+ else if (bLast->NextIs(block))
{
noway_assert(inTheRange == true);
inTheRange = false;
@@ -5782,12 +5773,12 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
break;
}
- block = block->bbNext;
+ block = block->Next();
}
// Ensure that bStart .. bLast defined a valid range
noway_assert((validRange == true) && (inTheRange == false));
- bPrev = bStart->bbPrev;
+ bPrev = bStart->Prev();
noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function.
JITDUMP("Relocating %s range " FMT_BB ".." FMT_BB " (EH#%u) to end of BBlist\n",
@@ -5824,7 +5815,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
#endif // FEATURE_EH_FUNCLETS
BasicBlock* bNext;
- bNext = bLast->bbNext;
+ bNext = bLast->Next();
/* Temporarily unlink [bStart .. bLast] from the flow graph */
fgUnlinkRange(bStart, bLast);
@@ -5873,7 +5864,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
{
// If we moved a set of blocks that were at the end of
// a different try region then we may need to update ebdTryLast
- for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext)
+ for (block = HBtab->ebdTryBeg; block != nullptr; block = block->Next())
{
if (block == bPrev)
{
@@ -5882,7 +5873,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
fgSetTryEnd(HBtab, bPrev);
break;
}
- else if (block == HBtab->ebdTryLast->bbNext)
+ else if (HBtab->ebdTryLast->NextIs(block))
{
// bPrev does not come after the TryBeg, thus we are larger, and
// it is moving with us.
@@ -5894,14 +5885,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
{
// If we moved a set of blocks that were at the end of
// a different handler region then we must update ebdHndLast
- for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext)
+ for (block = HBtab->ebdHndBeg; block != nullptr; block = block->Next())
{
if (block == bPrev)
{
fgSetHndEnd(HBtab, bPrev);
break;
}
- else if (block == HBtab->ebdHndLast->bbNext)
+ else if (HBtab->ebdHndLast->NextIs(block))
{
// bPrev does not come after the HndBeg
break;
@@ -5921,7 +5912,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
else
{
assert(fgFirstFuncletBB !=
- insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region.
+ insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the funclet region.
}
// These asserts assume we aren't moving try regions (which we might need to do). Only
@@ -5955,14 +5946,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
{
// If we moved a set of blocks that were at the end of
// a different try region then we may need to update ebdTryLast
- for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext)
+ for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next())
{
if (block == bPrev)
{
fgSetTryEnd(HBtab, bPrev);
break;
}
- else if (block == HBtab->ebdTryLast->bbNext)
+ else if (HBtab->ebdTryLast->NextIs(block))
{
// bPrev does not come after the TryBeg
break;
@@ -5973,14 +5964,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r
{
// If we moved a set of blocks that were at the end of
// a different handler region then we must update ebdHndLast
- for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext)
+ for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next())
{
if (block == bPrev)
{
fgSetHndEnd(HBtab, bPrev);
break;
}
- else if (block == HBtab->ebdHndLast->bbNext)
+ else if (HBtab->ebdHndLast->NextIs(block))
{
// bPrev does not come after the HndBeg
break;
@@ -6180,16 +6171,16 @@ BasicBlock* Compiler::fgNewBBFromTreeAfter(
*/
void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk)
{
- if (insertBeforeBlk->bbPrev)
+ if (insertBeforeBlk->IsFirst())
{
- fgInsertBBafter(insertBeforeBlk->bbPrev, newBlk);
+ newBlk->SetNext(fgFirstBB);
+
+ fgFirstBB = newBlk;
+ newBlk->SetPrev(nullptr);
}
else
{
- newBlk->setNext(fgFirstBB);
-
- fgFirstBB = newBlk;
- newBlk->bbPrev = nullptr;
+ fgInsertBBafter(insertBeforeBlk->Prev(), newBlk);
}
#if defined(FEATURE_EH_FUNCLETS)
@@ -6212,20 +6203,13 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk)
*/
void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk)
{
- newBlk->bbNext = insertAfterBlk->bbNext;
-
- if (insertAfterBlk->bbNext)
- {
- insertAfterBlk->bbNext->bbPrev = newBlk;
- }
-
- insertAfterBlk->bbNext = newBlk;
- newBlk->bbPrev = insertAfterBlk;
+ newBlk->SetNext(insertAfterBlk->Next());
+ insertAfterBlk->SetNext(newBlk);
if (fgLastBB == insertAfterBlk)
{
fgLastBB = newBlk;
- assert(fgLastBB->bbNext == nullptr);
+ assert(fgLastBB->IsLast());
}
}
@@ -6256,7 +6240,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
}
// Currently bNext is the fall through for bCur
- BasicBlock* bNext = bCur->bbNext;
+ BasicBlock* bNext = bCur->Next();
noway_assert(bNext != nullptr);
// We will set result to true if bAlt is a better fall through than bCur
@@ -6382,7 +6366,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// Assert that startBlk precedes endBlk in the block list.
// We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being
// sequential at all times.
- for (BasicBlock* b = startBlk; b != endBlk; b = b->bbNext)
+ for (BasicBlock* b = startBlk; b != endBlk; b = b->Next())
{
assert(b != nullptr); // We reached the end of the block list, but never found endBlk.
}
@@ -6411,7 +6395,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
if (nearBlk != nullptr)
{
// Does the nearBlk precede the startBlk?
- for (blk = nearBlk; blk != nullptr; blk = blk->bbNext)
+ for (blk = nearBlk; blk != nullptr; blk = blk->Next())
{
if (blk == startBlk)
{
@@ -6425,7 +6409,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
}
}
- for (blk = startBlk; blk != endBlk; blk = blk->bbNext)
+ for (blk = startBlk; blk != endBlk; blk = blk->Next())
{
// The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the
// block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar
@@ -6464,7 +6448,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// and be in the correct EH region. This is must be guaranteed by the caller (as it is by
// fgNewBBinRegion(), which passes the search range as an exact EH region block range).
// Because of this assumption, we only check the EH information for blocks before the last block.
- if (blk->bbNext != endBlk)
+ if (!blk->NextIs(endBlk))
{
// We are in the middle of the search range. We can't insert the new block in
// an inner try or handler region. We can, however, set the insertion
@@ -6598,10 +6582,10 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// inserted block is marked as the entry block for the filter. Becuase this sort of split can be complex
// (especially given that it must ensure that the liveness of the exception object is properly tracked),
// we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack).
- if (insertingIntoFilter && (bestBlk == endBlk->bbPrev))
+ if (insertingIntoFilter && (bestBlk == endBlk->Prev()))
{
assert(bestBlk != startBlk);
- bestBlk = bestBlk->bbPrev;
+ bestBlk = bestBlk->Prev();
}
#endif // defined(JIT32_GCENCODER)
@@ -6756,7 +6740,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
// We will put the newBB in the try region.
EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1);
startBlk = ehDsc->ebdTryBeg;
- endBlk = ehDsc->ebdTryLast->bbNext;
+ endBlk = ehDsc->ebdTryLast->Next();
regionIndex = tryIndex;
}
else if (putInFilter)
@@ -6772,7 +6756,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
// We will put the newBB in the handler region.
EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1);
startBlk = ehDsc->ebdHndBeg;
- endBlk = ehDsc->ebdHndLast->bbNext;
+ endBlk = ehDsc->ebdHndLast->Next();
regionIndex = hndIndex;
}
@@ -6872,7 +6856,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
bool putInTryRegion)
{
/* Insert the new block */
- BasicBlock* afterBlkNext = afterBlk->bbNext;
+ BasicBlock* afterBlkNext = afterBlk->Next();
(void)afterBlkNext; // prevent "unused variable" error from GCC
BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false);
@@ -6905,7 +6889,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
// Is afterBlk at the end of a try region?
if (HBtab->ebdTryLast == afterBlk)
{
- noway_assert(afterBlkNext == newBlk->bbNext);
+ noway_assert(newBlk->NextIs(afterBlkNext));
bool extendTryRegion = false;
if (newBlk->hasTryIndex())
@@ -6944,7 +6928,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
// Is afterBlk at the end of a handler region?
if (HBtab->ebdHndLast == afterBlk)
{
- noway_assert(afterBlkNext == newBlk->bbNext);
+ noway_assert(newBlk->NextIs(afterBlkNext));
// Does newBlk extend this handler region?
bool extendHndRegion = false;
@@ -6982,7 +6966,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
}
/* If afterBlk falls through, we insert a jump around newBlk */
- fgConnectFallThrough(afterBlk, newBlk->bbNext);
+ fgConnectFallThrough(afterBlk, newBlk->Next());
// If the loop table is valid, add this block to the appropriate loop.
// Note we don't verify (via flow) that this block actually belongs
@@ -6991,8 +6975,8 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
//
if (optLoopTableValid)
{
- BasicBlock* const bbPrev = newBlk->bbPrev;
- BasicBlock* const bbNext = newBlk->bbNext;
+ BasicBlock* const bbPrev = newBlk->Prev();
+ BasicBlock* const bbNext = newBlk->Next();
if ((bbPrev != nullptr) && (bbNext != nullptr))
{
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index 318e241d35ae0..c848f1c862faa 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -82,7 +82,7 @@ void Compiler::fgDebugCheckUpdate()
BasicBlock* prev;
BasicBlock* block;
- for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->bbNext)
+ for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->Next())
{
/* no unreachable blocks */
@@ -168,7 +168,7 @@ void Compiler::fgDebugCheckUpdate()
// We are allowed to have a branch from a hot 'block' to a cold 'bbNext'
//
- if ((block->bbNext != nullptr) && fgInDifferentRegions(block, block->bbNext))
+ if (!block->IsLast() && fgInDifferentRegions(block, block->Next()))
{
doAssertOnJumpToNextBlock = false;
}
@@ -176,7 +176,7 @@ void Compiler::fgDebugCheckUpdate()
if (doAssertOnJumpToNextBlock)
{
- if (block->bbJumpDest == block->bbNext)
+ if (block->NextIs(block->bbJumpDest))
{
noway_assert(!"Unnecessary jump to the next block!");
}
@@ -199,7 +199,7 @@ void Compiler::fgDebugCheckUpdate()
/* no un-compacted blocks */
- if (fgCanCompactBlocks(block, block->bbNext))
+ if (fgCanCompactBlocks(block, block->Next()))
{
noway_assert(!"Found un-compacted blocks!");
}
@@ -889,7 +889,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
"ALWAYS", "LEAVE", "CALLFINALLY", "COND", "SWITCH"};
BasicBlock* block;
- for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->bbNext, blockOrdinal++)
+ for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->Next(), blockOrdinal++)
{
if (createDotFile)
{
@@ -1091,7 +1091,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
{
unsigned edgeNum = 1;
BasicBlock* bTarget;
- for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->bbNext)
+ for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->Next())
{
double targetWeightDivisor;
if (bTarget->bbWeight == BB_ZERO_WEIGHT)
@@ -1214,10 +1214,10 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
{
// Invisible edge for bbNext chain
//
- if (bSource->bbNext != nullptr)
+ if (!bSource->IsLast())
{
fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [style=\"invis\", weight=25];\n", bSource->bbNum,
- bSource->bbNext->bbNum);
+ bSource->Next()->bbNum);
}
}
@@ -1641,7 +1641,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
bool needIndent = true;
BasicBlock* bbCur = rgn->m_bbStart;
- BasicBlock* bbEnd = rgn->m_bbEnd->bbNext;
+ BasicBlock* bbEnd = rgn->m_bbEnd->Next();
Region* child = rgn->m_rgnChild;
BasicBlock* childCurBB = (child == nullptr) ? nullptr : child->m_bbStart;
@@ -1660,7 +1660,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
{
fprintf(file, "%*s" FMT_BB ";", needIndent ? indent : 0, "", bbCur->bbNum);
needIndent = false;
- bbCur = bbCur->bbNext;
+ bbCur = bbCur->Next();
}
if (bbCur == bbEnd)
@@ -1684,7 +1684,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
childCount++;
- bbCur = child->m_bbEnd->bbNext; // Next, output blocks after this child.
+ bbCur = child->m_bbEnd->Next(); // Next, output blocks after this child.
child = child->m_rgnNext; // Move to the next child, if any.
childCurBB = (child == nullptr) ? nullptr : child->m_bbStart;
}
@@ -1745,7 +1745,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
if (ehDsc->HasFilter())
{
sprintf_s(name, sizeof(name), "EH#%u filter", XTnum);
- rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, ehDsc->ebdHndBeg->bbPrev);
+ rgnGraph.Insert(name, RegionGraph::RegionType::EH, ehDsc->ebdFilter, ehDsc->ebdHndBeg->Prev());
}
}
}
@@ -2200,7 +2200,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 *
/* brace matching editor workaround to compensate for the following line: { */
printf("} ");
}
- if (HBtab->HasFilter() && block->bbNext == HBtab->ebdHndBeg)
+ if (HBtab->HasFilter() && block->NextIs(HBtab->ebdHndBeg))
{
cnt += 2;
/* brace matching editor workaround to compensate for the following line: { */
@@ -2256,7 +2256,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock,
int ibcColWidth = 0;
- for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = firstBlock; block != nullptr; block = block->Next())
{
if (block->hasProfileWeight())
{
@@ -2340,9 +2340,9 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock,
for (BasicBlock* block : *fgBBOrder)
{
// First, do some checking on the bbPrev links
- if (block->bbPrev)
+ if (!block->IsFirst())
{
- if (block->bbPrev->bbNext != block)
+ if (!block->Prev()->NextIs(block))
{
printf("bad prev link\n");
}
@@ -2352,7 +2352,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock,
printf("bad prev link!\n");
}
- if (inDefaultOrder && (block == fgFirstColdBlock))
+ if (inDefaultOrder && (block->IsFirstColdBlock(this)))
{
printf(
"~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~"
@@ -2450,7 +2450,7 @@ void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock)
{
// Note that typically we have already called fgDispBasicBlocks()
// so we don't need to print the preds and succs again here.
- for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = firstBlock; block != nullptr; block = block->Next())
{
fgDumpBlock(block);
@@ -2605,7 +2605,7 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb
// is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY
// block that does a local call to the finally. This BBJ_ALWAYS is within
// the try region protected by the finally (for x86, ARM), but that's ok.
- BasicBlock* prevBlock = block->bbPrev;
+ BasicBlock* prevBlock = block->Prev();
if (prevBlock->KindIs(BBJ_CALLFINALLY) && block->KindIs(BBJ_ALWAYS) && blockPred->KindIs(BBJ_EHFINALLYRET))
{
return true;
@@ -2662,11 +2662,11 @@ bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block)
switch (blockPred->GetBBJumpKind())
{
case BBJ_COND:
- assert(blockPred->bbNext == block || blockPred->bbJumpDest == block);
+ assert(blockPred->NextIs(block) || blockPred->bbJumpDest == block);
return true;
case BBJ_NONE:
- assert(blockPred->bbNext == block);
+ assert(blockPred->NextIs(block));
return true;
case BBJ_CALLFINALLY:
@@ -2731,14 +2731,14 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block)
BasicBlock* endBlk;
comp->ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk);
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg)
{
continue;
}
- if (block == bcall->bbNext)
+ if (bcall->NextIs(block))
{
return true;
}
@@ -2760,7 +2760,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block)
continue;
}
- if (block != bcall->bbNext)
+ if (!bcall->NextIs(block))
{
continue;
}
@@ -2789,7 +2789,7 @@ void Compiler::fgDebugCheckBBNumIncreasing()
{
for (BasicBlock* const block : Blocks())
{
- assert(block->bbNext == nullptr || (block->bbNum < block->bbNext->bbNum));
+ assert(block->IsLast() || (block->bbNum < block->Next()->bbNum));
}
}
@@ -2865,7 +2865,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
if (checkBBNum)
{
// Check that bbNum is sequential
- assert(block->bbNext == nullptr || (block->bbNum + 1 == block->bbNext->bbNum));
+ assert(block->IsLast() || (block->bbNum + 1 == block->Next()->bbNum));
}
// If the block is a BBJ_COND, a BBJ_SWITCH or a
@@ -3703,26 +3703,26 @@ void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees)
// ensure that bbNext and bbPrev are consistent
void Compiler::fgDebugCheckBlockLinks()
{
- assert(fgFirstBB->bbPrev == nullptr);
+ assert(fgFirstBB->IsFirst());
for (BasicBlock* const block : Blocks())
{
- if (block->bbNext)
+ if (block->IsLast())
{
- assert(block->bbNext->bbPrev == block);
+ assert(block == fgLastBB);
}
else
{
- assert(block == fgLastBB);
+ assert(block->Next()->PrevIs(block));
}
- if (block->bbPrev)
+ if (block->IsFirst())
{
- assert(block->bbPrev->bbNext == block);
+ assert(block == fgFirstBB);
}
else
{
- assert(block == fgFirstBB);
+ assert(block->Prev()->NextIs(block));
}
// If this is a switch, check that the tables are consistent.
@@ -4798,7 +4798,7 @@ void Compiler::fgDebugCheckLoopTable()
else
{
assert(h->KindIs(BBJ_NONE));
- assert(h->bbNext == e);
+ assert(h->NextIs(e));
assert(loop.lpTop == e);
assert(loop.lpIsTopEntry());
}
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp
index e5fbe43e1590f..829a5ae4d9c98 100644
--- a/src/coreclr/jit/fgehopt.cpp
+++ b/src/coreclr/jit/fgehopt.cpp
@@ -140,7 +140,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
while (currentBlock != endCallFinallyRangeBlock)
{
- BasicBlock* nextBlock = currentBlock->bbNext;
+ BasicBlock* nextBlock = currentBlock->Next();
if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock))
{
@@ -151,7 +151,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
// the finally is empty.
noway_assert(currentBlock->isBBCallAlwaysPair());
- BasicBlock* const leaveBlock = currentBlock->bbNext;
+ BasicBlock* const leaveBlock = currentBlock->Next();
BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest;
JITDUMP("Modifying callfinally " FMT_BB " leave " FMT_BB " finally " FMT_BB " continuation " FMT_BB
@@ -172,7 +172,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
// Delete the leave block, which should be marked as
// keep always and have the sole finally block as a pred.
assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0);
- nextBlock = leaveBlock->bbNext;
+ nextBlock = leaveBlock->Next();
fgRemoveRefPred(leaveBlock, firstBlock);
leaveBlock->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS;
fgRemoveBlock(leaveBlock, /* unreachable */ true);
@@ -398,7 +398,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
if (firstTryBlock != lastTryBlock)
{
JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum,
- firstTryBlock->bbNext->bbNum);
+ firstTryBlock->Next()->bbNum);
XTnum++;
continue;
}
@@ -417,9 +417,9 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
BasicBlock* const callFinally = firstTryBlock;
// Try must be a callalways pair of blocks.
- if (firstTryBlock->bbNext != lastTryBlock)
+ if (!firstTryBlock->NextIs(lastTryBlock))
{
- JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->bbNext->bbNum);
+ JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->Next()->bbNum);
XTnum++;
continue;
}
@@ -435,7 +435,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
bool verifiedSingleCallfinally = true;
ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock);
- for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext)
+ for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->Next())
{
if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock))
{
@@ -448,7 +448,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
break;
}
- block = block->bbNext;
+ block = block->Next();
}
}
@@ -466,7 +466,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
// Identify the leave block and the continuation
- BasicBlock* const leave = callFinally->bbNext;
+ BasicBlock* const leave = callFinally->Next();
BasicBlock* const continuation = leave->bbJumpDest;
// (2) Cleanup the leave so it can be deleted by subsequent opts
@@ -729,14 +729,14 @@ PhaseStatus Compiler::fgCloneFinally()
BasicBlock* const lastBlock = HBtab->ebdHndLast;
assert(firstBlock != nullptr);
assert(lastBlock != nullptr);
- BasicBlock* nextBlock = lastBlock->bbNext;
+ BasicBlock* nextBlock = lastBlock->Next();
unsigned regionBBCount = 0;
unsigned regionStmtCount = 0;
bool hasFinallyRet = false;
bool isAllRare = true;
bool hasSwitch = false;
- for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext)
+ for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->Next())
{
if (block->KindIs(BBJ_SWITCH))
{
@@ -804,7 +804,7 @@ PhaseStatus Compiler::fgCloneFinally()
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
assert(firstTryBlock->getTryIndex() == XTnum);
assert(bbInTryRegions(XTnum, lastTryBlock));
- BasicBlock* const beforeTryBlock = firstTryBlock->bbPrev;
+ BasicBlock* const beforeTryBlock = firstTryBlock->Prev();
BasicBlock* normalCallFinallyBlock = nullptr;
BasicBlock* normalCallFinallyReturn = nullptr;
@@ -813,7 +813,7 @@ PhaseStatus Compiler::fgCloneFinally()
const bool usingProfileWeights = fgIsUsingProfileWeights();
weight_t currentWeight = BB_ZERO_WEIGHT;
- for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->bbPrev)
+ for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->Prev())
{
#if FEATURE_EH_CALLFINALLY_THUNKS
// Blocks that transfer control to callfinallies are usually
@@ -823,7 +823,7 @@ PhaseStatus Compiler::fgCloneFinally()
if (block->KindIs(BBJ_NONE) && (block == lastTryBlock))
{
- jumpDest = block->bbNext;
+ jumpDest = block->Next();
}
else if (block->KindIs(BBJ_ALWAYS))
{
@@ -853,7 +853,7 @@ PhaseStatus Compiler::fgCloneFinally()
// Found a block that invokes the finally.
//
- BasicBlock* const finallyReturnBlock = jumpDest->bbNext;
+ BasicBlock* const finallyReturnBlock = jumpDest->Next();
BasicBlock* const postTryFinallyBlock = finallyReturnBlock->bbJumpDest;
bool isUpdate = false;
@@ -967,7 +967,7 @@ PhaseStatus Compiler::fgCloneFinally()
BasicBlock* firstCallFinallyBlock = nullptr;
for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock;
- block = block->bbNext)
+ block = block->Next())
{
if (block->isBBCallAlwaysPair())
{
@@ -987,7 +987,7 @@ PhaseStatus Compiler::fgCloneFinally()
// but only if it's targeted by the last block in the try range.
if (firstCallFinallyBlock != normalCallFinallyBlock)
{
- BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev;
+ BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->Prev();
if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock))
{
@@ -995,7 +995,7 @@ PhaseStatus Compiler::fgCloneFinally()
normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum);
BasicBlock* const firstToMove = normalCallFinallyBlock;
- BasicBlock* const lastToMove = normalCallFinallyBlock->bbNext;
+ BasicBlock* const lastToMove = normalCallFinallyBlock->Next();
fgUnlinkRange(firstToMove, lastToMove);
fgMoveBlocksAfter(firstToMove, lastToMove, placeToMoveAfter);
@@ -1006,7 +1006,7 @@ PhaseStatus Compiler::fgCloneFinally()
fgVerifyHandlerTab();
#endif // DEBUG
- assert(nextBlock == lastBlock->bbNext);
+ assert(lastBlock->NextIs(nextBlock));
// Update where the callfinally range begins, since we might
// have altered this with callfinally rearrangement, and/or
@@ -1043,7 +1043,7 @@ PhaseStatus Compiler::fgCloneFinally()
unsigned cloneBBCount = 0;
weight_t const originalWeight = firstBlock->hasProfileWeight() ? firstBlock->bbWeight : BB_ZERO_WEIGHT;
- for (BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext)
+ for (BasicBlock* block = firstBlock; block != nextBlock; block = block->Next())
{
BasicBlock* newBlock;
@@ -1062,9 +1062,9 @@ PhaseStatus Compiler::fgCloneFinally()
// If the clone ends up just after the finally, adjust
// the stopping point for finally traversal.
- if (newBlock->bbNext == nextBlock)
+ if (newBlock->NextIs(nextBlock))
{
- assert(newBlock->bbPrev == lastBlock);
+ assert(newBlock->PrevIs(lastBlock));
nextBlock = newBlock;
}
}
@@ -1127,7 +1127,7 @@ PhaseStatus Compiler::fgCloneFinally()
// Redirect any branches within the newly-cloned
// finally, and any finally returns to jump to the return
// point.
- for (BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext)
+ for (BasicBlock* block = firstBlock; block != nextBlock; block = block->Next())
{
BasicBlock* newBlock = blockMap[block];
@@ -1159,13 +1159,13 @@ PhaseStatus Compiler::fgCloneFinally()
while (currentBlock != endCallFinallyRangeBlock)
{
- BasicBlock* nextBlockToScan = currentBlock->bbNext;
+ BasicBlock* nextBlockToScan = currentBlock->Next();
if (currentBlock->isBBCallAlwaysPair())
{
if (currentBlock->bbJumpDest == firstBlock)
{
- BasicBlock* const leaveBlock = currentBlock->bbNext;
+ BasicBlock* const leaveBlock = currentBlock->Next();
BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest;
// Note we must retarget all callfinallies that have this
@@ -1189,7 +1189,7 @@ PhaseStatus Compiler::fgCloneFinally()
// Delete the leave block, which should be marked as
// keep always.
assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0);
- nextBlock = leaveBlock->bbNext;
+ nextBlock = leaveBlock->Next();
// All preds should be BBJ_EHFINALLYRETs from the finally.
for (BasicBlock* const leavePred : leaveBlock->PredBlocks())
@@ -1237,8 +1237,8 @@ PhaseStatus Compiler::fgCloneFinally()
// Change all BBJ_EHFINALLYRET to BBJ_EHFAULTRET in the now-fault region.
BasicBlock* const hndBegIter = HBtab->ebdHndBeg;
- BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext;
- for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext)
+ BasicBlock* const hndEndIter = HBtab->ebdHndLast->Next();
+ for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->Next())
{
if (block->KindIs(BBJ_EHFINALLYRET))
{
@@ -1469,7 +1469,7 @@ void Compiler::fgDebugCheckTryFinallyExits()
{
if (succBlock->isEmpty())
{
- BasicBlock* const succSuccBlock = succBlock->bbNext;
+ BasicBlock* const succSuccBlock = succBlock->Next();
// case (d)
if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN)
@@ -1620,7 +1620,7 @@ void Compiler::fgAddFinallyTargetFlags()
{
if (block->isBBCallAlwaysPair())
{
- BasicBlock* const leave = block->bbNext;
+ BasicBlock* const leave = block->Next();
BasicBlock* const continuation = leave->bbJumpDest;
if ((continuation->bbFlags & BBF_FINALLY_TARGET) == 0)
@@ -1789,7 +1789,7 @@ PhaseStatus Compiler::fgMergeFinallyChains()
BasicBlock* const beginHandlerBlock = HBtab->ebdHndBeg;
for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock;
- currentBlock = currentBlock->bbNext)
+ currentBlock = currentBlock->Next())
{
// Ignore "retless" callfinallys (where the finally doesn't return).
if (currentBlock->isBBCallAlwaysPair() && (currentBlock->bbJumpDest == beginHandlerBlock))
@@ -1803,7 +1803,7 @@ PhaseStatus Compiler::fgMergeFinallyChains()
callFinallyCount++;
// Locate the continuation
- BasicBlock* const leaveBlock = currentBlock->bbNext;
+ BasicBlock* const leaveBlock = currentBlock->Next();
BasicBlock* const continuationBlock = leaveBlock->bbJumpDest;
// If this is the first time we've seen this
@@ -1836,7 +1836,7 @@ PhaseStatus Compiler::fgMergeFinallyChains()
// sure they all jump to the appropriate canonical
// callfinally.
for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock;
- currentBlock = currentBlock->bbNext)
+ currentBlock = currentBlock->Next())
{
bool merged = fgRetargetBranchesToCanonicalCallFinally(currentBlock, beginHandlerBlock, continuationMap);
didMerge = didMerge || merged;
@@ -1921,7 +1921,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
// Ok, this is a callfinally that invokes the right handler.
// Get its continuation.
- BasicBlock* const leaveBlock = callFinally->bbNext;
+ BasicBlock* const leaveBlock = callFinally->Next();
BasicBlock* const continuationBlock = leaveBlock->bbJumpDest;
// Find the canonical callfinally for that continuation.
@@ -1956,7 +1956,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
canonicalCallFinally->setBBProfileWeight(newCanonicalWeight);
- BasicBlock* const canonicalLeaveBlock = canonicalCallFinally->bbNext;
+ BasicBlock* const canonicalLeaveBlock = canonicalCallFinally->Next();
weight_t const canonicalLeaveWeight =
canonicalLeaveBlock->hasProfileWeight() ? canonicalLeaveBlock->bbWeight : BB_ZERO_WEIGHT;
@@ -2099,7 +2099,7 @@ PhaseStatus Compiler::fgTailMergeThrows()
// Walk blocks from last to first so that any branches we
// introduce to the canonical blocks end up lexically forward
// and there is less jumbled flow to sort out later.
- for (BasicBlock* block = fgLastBB; block != nullptr; block = block->bbPrev)
+ for (BasicBlock* block = fgLastBB; block != nullptr; block = block->Prev())
{
// Workaround: don't consider try entry blocks as candidates
// for merging; if the canonical throw is later in the same try,
@@ -2213,7 +2213,7 @@ PhaseStatus Compiler::fgTailMergeThrows()
case BBJ_COND:
{
// Flow to non canonical block could be via fall through or jump or both.
- if (predBlock->bbNext == nonCanonicalBlock)
+ if (predBlock->NextIs(nonCanonicalBlock))
{
fgTailMergeThrowsFallThroughHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge);
}
@@ -2289,7 +2289,7 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
BasicBlock* canonicalBlock,
FlowEdge* predEdge)
{
- assert(predBlock->bbNext == nonCanonicalBlock);
+ assert(predBlock->NextIs(nonCanonicalBlock));
BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true);
@@ -2300,7 +2300,6 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
fgRemoveRefPred(nonCanonicalBlock, predBlock);
// Wire up the new flow
- predBlock->bbNext = newBlock;
fgAddRefPred(newBlock, predBlock, predEdge);
newBlock->bbJumpDest = canonicalBlock;
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index d2669ccaca382..23f122177606b 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -351,7 +351,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
assert(block->isBBCallAlwaysPair());
/* The block after the BBJ_CALLFINALLY block is not reachable */
- bNext = block->bbNext;
+ bNext = block->Next();
/* bNext is an unreachable BBJ_ALWAYS block */
noway_assert(bNext->KindIs(BBJ_ALWAYS));
@@ -370,12 +370,12 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
break;
case BBJ_NONE:
- fgRemoveRefPred(block->bbNext, block);
+ fgRemoveRefPred(block->Next(), block);
break;
case BBJ_COND:
fgRemoveRefPred(block->bbJumpDest, block);
- fgRemoveRefPred(block->bbNext, block);
+ fgRemoveRefPred(block->Next(), block);
break;
case BBJ_EHFILTERRET:
@@ -401,7 +401,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) ||
bcall->bbJumpDest != finBeg)
@@ -410,7 +410,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
}
assert(bcall->isBBCallAlwaysPair());
- fgRemoveRefPred(bcall->bbNext, block);
+ fgRemoveRefPred(bcall->Next(), block);
}
}
}
@@ -468,7 +468,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg)
{
@@ -479,7 +479,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
if (succNum == i)
{
- *bres = bcall->bbNext;
+ *bres = bcall->Next();
return;
}
succNum++;
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index a844199697a60..3f99cb6a7fb39 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -676,7 +676,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorIsIntegralConst(0))
{
block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
- m_compiler->fgRemoveRefPred(block->bbNext, block);
+ m_compiler->fgRemoveRefPred(block->Next(), block);
}
else
{
@@ -819,7 +819,7 @@ PhaseStatus Compiler::fgInline()
}
}
- block = block->bbNext;
+ block = block->Next();
} while (block);
@@ -840,7 +840,7 @@ PhaseStatus Compiler::fgInline()
fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates);
}
- block = block->bbNext;
+ block = block->Next();
} while (block);
@@ -1526,18 +1526,18 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
if (block->KindIs(BBJ_RETURN))
{
noway_assert((block->bbFlags & BBF_HAS_JMP) == 0);
- if (block->bbNext)
+ if (block->IsLast())
+ {
+ JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum);
+ block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+ }
+ else
{
JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n",
block->bbNum, bottomBlock->bbNum);
block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
block->bbJumpDest = bottomBlock;
}
- else
- {
- JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum);
- block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
- }
fgAddRefPred(bottomBlock, block);
}
@@ -1548,10 +1548,10 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
InlineeCompiler->fgFirstBB->bbRefs--;
// Insert inlinee's blocks into inliner's block list.
- topBlock->setNext(InlineeCompiler->fgFirstBB);
+ topBlock->SetNext(InlineeCompiler->fgFirstBB);
fgRemoveRefPred(bottomBlock, topBlock);
fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock);
- InlineeCompiler->fgLastBB->setNext(bottomBlock);
+ InlineeCompiler->fgLastBB->SetNext(bottomBlock);
//
// Add inlinee's block count to inliner's.
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 18637ac7b49ca..22436d28c834e 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -134,7 +134,7 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
{
noway_assert(b1->KindIs(BBJ_NONE, BBJ_ALWAYS, BBJ_COND));
- if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->bbNext, b2))
+ if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->Next(), b2))
{
return true;
}
@@ -367,7 +367,7 @@ void Compiler::fgComputeEnterBlocksSet()
assert(block->isBBCallAlwaysPair());
// Don't remove the BBJ_ALWAYS block that is only here for the unwinder.
- BlockSetOps::AddElemD(this, fgAlwaysBlks, block->bbNext->bbNum);
+ BlockSetOps::AddElemD(this, fgAlwaysBlks, block->Next()->bbNum);
}
}
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
@@ -474,8 +474,8 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
// the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW.
if (bIsBBCallAlwaysPair)
{
- noway_assert(block->bbNext->KindIs(BBJ_ALWAYS));
- fgClearFinallyTargetBit(block->bbNext->bbJumpDest);
+ noway_assert(block->Next()->KindIs(BBJ_ALWAYS));
+ fgClearFinallyTargetBit(block->Next()->bbJumpDest);
}
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
}
@@ -490,7 +490,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
if (hasUnreachableBlocks)
{
// Now remove the unreachable blocks
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next())
{
// If we marked a block with BBF_REMOVED then we need to call fgRemoveBlock() on it
@@ -506,7 +506,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
//
if (block->isBBCallAlwaysPair())
{
- block = block->bbNext;
+ block = block->Next();
}
}
}
@@ -643,7 +643,7 @@ bool Compiler::fgRemoveDeadBlocks()
assert(block->isBBCallAlwaysPair());
// Don't remove the BBJ_ALWAYS block that is only here for the unwinder.
- worklist.push_back(block->bbNext);
+ worklist.push_back(block->Next());
}
}
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
@@ -1007,7 +1007,7 @@ void Compiler::fgComputeDoms()
BasicBlock* block = nullptr;
- for (block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (block = fgFirstBB->Next(); block != nullptr; block = block->Next())
{
// If any basic block has no predecessors then we flag it as processed and temporarily
// mark its predecessor list to be flRoot. This makes the flowgraph connected,
@@ -1175,7 +1175,7 @@ DomTreeNode* Compiler::fgBuildDomTree()
// Traverse the entire block list to build the dominator tree. Skip fgFirstBB
// as it is always a root of the dominator forest.
- for (BasicBlock* const block : Blocks(fgFirstBB->bbNext))
+ for (BasicBlock* const block : Blocks(fgFirstBB->Next()))
{
BasicBlock* parent = block->bbIDom;
@@ -1470,7 +1470,7 @@ PhaseStatus Compiler::fgPostImportationCleanup()
for (cur = fgFirstBB; cur != nullptr; cur = nxt)
{
// Get hold of the next block (in case we delete 'cur')
- nxt = cur->bbNext;
+ nxt = cur->Next();
// Should this block be removed?
if (!(cur->bbFlags & BBF_IMPORTED))
@@ -1571,10 +1571,10 @@ PhaseStatus Compiler::fgPostImportationCleanup()
// Find the first unremoved block before the try entry block.
//
BasicBlock* const oldTryEntry = HBtab->ebdTryBeg;
- BasicBlock* tryEntryPrev = oldTryEntry->bbPrev;
+ BasicBlock* tryEntryPrev = oldTryEntry->Prev();
while ((tryEntryPrev != nullptr) && ((tryEntryPrev->bbFlags & BBF_REMOVED) != 0))
{
- tryEntryPrev = tryEntryPrev->bbPrev;
+ tryEntryPrev = tryEntryPrev->Prev();
}
// Because we've added an unremovable scratch block as
@@ -1585,7 +1585,7 @@ PhaseStatus Compiler::fgPostImportationCleanup()
// If there is a next block of this prev block, and that block is
// contained in the current try, we'd like to make that block
// the new start of the try, and keep the region.
- BasicBlock* newTryEntry = tryEntryPrev->bbNext;
+ BasicBlock* newTryEntry = tryEntryPrev->Next();
bool updateTryEntry = false;
if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry))
@@ -1648,13 +1648,13 @@ PhaseStatus Compiler::fgPostImportationCleanup()
// out of order handler, the next block may be a handler. So even though
// this new try entry block is unreachable, we need to give it a
// plausible flow target. Simplest is to just mark it as a throw.
- if (bbIsHandlerBeg(newTryEntry->bbNext))
+ if (bbIsHandlerBeg(newTryEntry->Next()))
{
newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this));
}
else
{
- fgAddRefPred(newTryEntry->bbNext, newTryEntry);
+ fgAddRefPred(newTryEntry->Next(), newTryEntry);
}
JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n",
@@ -1916,7 +1916,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext)
return false;
}
- noway_assert(block->bbNext == bNext);
+ noway_assert(block->NextIs(bNext));
if (!block->KindIs(BBJ_NONE))
{
@@ -2029,7 +2029,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
noway_assert((block->bbFlags & BBF_REMOVED) == 0);
noway_assert(block->KindIs(BBJ_NONE));
- noway_assert(bNext == block->bbNext);
+ noway_assert(block->NextIs(bNext));
noway_assert(bNext != nullptr);
noway_assert((bNext->bbFlags & BBF_REMOVED) == 0);
noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty());
@@ -2320,7 +2320,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
/* Unlink bNext and update all the marker pointers if necessary */
- fgUnlinkRange(block->bbNext, bNext);
+ fgUnlinkRange(block->Next(), bNext);
// If bNext was the last block of a try or handler, update the EH table.
@@ -2345,15 +2345,15 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
fgReplacePred(bNext->bbJumpDest, bNext, block);
/* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */
- if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->bbNext)
+ if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->Next())
{
- fgReplacePred(bNext->bbNext, bNext, block);
+ fgReplacePred(bNext->Next(), bNext, block);
}
break;
case BBJ_NONE:
/* Update the predecessor list for 'bNext->bbNext' */
- fgReplacePred(bNext->bbNext, bNext, block);
+ fgReplacePred(bNext->Next(), bNext, block);
break;
case BBJ_EHFILTERRET:
@@ -2373,7 +2373,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
BasicBlock* finBeg = ehDsc->ebdHndBeg;
- for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
+ for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
{
if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg)
{
@@ -2381,7 +2381,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
}
noway_assert(bcall->isBBCallAlwaysPair());
- fgReplacePred(bcall->bbNext, bNext, block);
+ fgReplacePred(bcall->Next(), bNext, block);
}
}
}
@@ -2569,7 +2569,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
}
#endif // DEBUG
- noway_assert(block->bbPrev != nullptr); // Can't use this function to remove the first block
+ noway_assert(!block->IsFirst()); // Can't use this function to remove the first block
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
assert(!block->isBBCallAlwaysPairTail()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair
@@ -2627,15 +2627,15 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
//
void Compiler::fgRemoveConditionalJump(BasicBlock* block)
{
- noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->bbNext);
+ noway_assert(block->KindIs(BBJ_COND) && block->NextIs(block->bbJumpDest));
assert(compRationalIRForm == block->IsLIR());
- FlowEdge* flow = fgGetPredForBlock(block->bbNext, block);
+ FlowEdge* flow = fgGetPredForBlock(block->Next(), block);
noway_assert(flow->getDupCount() == 2);
// Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount.
block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
- --block->bbNext->bbRefs;
+ --block->Next()->bbRefs;
flow->decrementDupCount();
#ifdef DEBUG
@@ -2644,7 +2644,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block)
{
printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition"
" is true or false)\n",
- block->bbNum, block->bbNext->bbNum);
+ block->bbNum, block->Next()->bbNum);
}
#endif
@@ -2884,7 +2884,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
assert(block->isEmpty());
bool madeChanges = false;
- BasicBlock* bPrev = block->bbPrev;
+ BasicBlock* bPrev = block->Prev();
switch (block->GetBBJumpKind())
{
@@ -2914,8 +2914,8 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
// A GOTO cannot be to the next block since that
// should have been fixed by the optimization above
// An exception is made for a jump from Hot to Cold
- noway_assert(block->bbJumpDest != block->bbNext || block->isBBCallAlwaysPairTail() ||
- fgInDifferentRegions(block, block->bbNext));
+ noway_assert(!block->NextIs(block->bbJumpDest) || block->isBBCallAlwaysPairTail() ||
+ fgInDifferentRegions(block, block->Next()));
/* Cannot remove the first BB */
if (!bPrev)
@@ -2936,7 +2936,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
}
// can't allow fall through into cold code
- if (block->bbNext == fgFirstColdBlock)
+ if (block->IsLastHotBlock(this))
{
break;
}
@@ -2986,7 +2986,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
}
else
{
- succBlock = block->bbNext;
+ succBlock = block->Next();
}
if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock))
@@ -3073,7 +3073,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
// Make sure we don't break that invariant.
if (fgIsUsingProfileWeights() && block->hasProfileWeight() && (block->bbFlags & BBF_INTERNAL) == 0)
{
- BasicBlock* bNext = block->bbNext;
+ BasicBlock* bNext = block->Next();
// Check if the next block can't maintain the invariant.
if ((bNext == nullptr) || ((bNext->bbFlags & BBF_INTERNAL) != 0) || !bNext->hasProfileWeight())
@@ -3082,7 +3082,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
BasicBlock* curBB = bPrev;
while ((curBB != nullptr) && (curBB->bbFlags & BBF_INTERNAL) != 0)
{
- curBB = curBB->bbPrev;
+ curBB = curBB->Prev();
}
if (curBB == nullptr)
{
@@ -3323,7 +3323,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
return true;
}
- else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->bbNext)
+ else if ((block->bbJumpSwt->bbsCount == 2) && block->NextIs(block->bbJumpSwt->bbsDstTab[1]))
{
/* Use a BBJ_COND(switchVal==0) for a switch with only one
significant clause besides the default clause, if the
@@ -3743,10 +3743,10 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
{
assert(target->KindIs(BBJ_COND));
- if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0)
+ if ((target->Next()->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0)
{
JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum,
- target->bbNext->bbNum);
+ target->Next()->bbNum);
return false;
}
@@ -3800,7 +3800,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
// The new block 'next' will inherit its weight from 'block'
//
next->inheritWeight(block);
- next->bbJumpDest = target->bbNext;
+ next->bbJumpDest = target->Next();
fgAddRefPred(next, block);
fgAddRefPred(next->bbJumpDest, next);
@@ -3826,8 +3826,8 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
{
assert(block->KindIs(BBJ_COND, BBJ_ALWAYS));
assert(block->bbJumpDest == bNext);
- assert(block->bbNext == bNext);
- assert(block->bbPrev == bPrev);
+ assert(block->NextIs(bNext));
+ assert(block->PrevIs(bPrev));
if (block->KindIs(BBJ_ALWAYS))
{
@@ -4026,7 +4026,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
return false;
}
- if (bDest->bbJumpDest != bJump->bbNext)
+ if (!bJump->NextIs(bDest->bbJumpDest))
{
return false;
}
@@ -4039,7 +4039,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
}
// do not jump into another try region
- BasicBlock* bDestNext = bDest->bbNext;
+ BasicBlock* bDestNext = bDest->Next();
if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext))
{
return false;
@@ -4072,10 +4072,10 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
bool allProfileWeightsAreValid = false;
weight_t weightJump = bJump->bbWeight;
weight_t weightDest = bDest->bbWeight;
- weight_t weightNext = bJump->bbNext->bbWeight;
+ weight_t weightNext = bJump->Next()->bbWeight;
bool rareJump = bJump->isRunRarely();
bool rareDest = bDest->isRunRarely();
- bool rareNext = bJump->bbNext->isRunRarely();
+ bool rareNext = bJump->Next()->isRunRarely();
// If we have profile data then we calculate the number of time
// the loop will iterate into loopIterations
@@ -4086,7 +4086,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
//
if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
(bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
- (bJump->bbNext->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)))
+ (bJump->Next()->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)))
{
allProfileWeightsAreValid = true;
@@ -4233,13 +4233,13 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE;
bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this));
- bJump->bbJumpDest = bDest->bbNext;
+ bJump->bbJumpDest = bDest->Next();
/* Update bbRefs and bbPreds */
// bJump now falls through into the next block
//
- fgAddRefPred(bJump->bbNext, bJump);
+ fgAddRefPred(bJump->Next(), bJump);
// bJump no longer jumps to bDest
//
@@ -4247,7 +4247,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
// bJump now jumps to bDest->bbNext
//
- fgAddRefPred(bDest->bbNext, bJump);
+ fgAddRefPred(bDest->Next(), bJump);
if (weightJump > 0)
{
@@ -4510,7 +4510,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
{
// If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an
// additional predecessor for the BBJ_ALWAYS block
- tmpbb = bPrev->bbPrev;
+ tmpbb = bPrev->Prev();
noway_assert(tmpbb != nullptr);
#if defined(FEATURE_EH_FUNCLETS)
noway_assert(tmpbb->isBBCallAlwaysPair());
@@ -4542,7 +4542,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
// Walk the flow graph lexically forward from pred->getBlock()
// if we find (block == bPrevPrev) then
// pred->getBlock() is an earlier predecessor.
- for (tmpbb = pred->getSourceBlock(); tmpbb != nullptr; tmpbb = tmpbb->bbNext)
+ for (tmpbb = pred->getSourceBlock(); tmpbb != nullptr; tmpbb = tmpbb->Next())
{
if (tmpbb == bPrevPrev)
{
@@ -4570,7 +4570,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
// bPrevPrev is lexically after bPrev and we do not
// want to select it as our new block
- for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->bbNext)
+ for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->Next())
{
if (tmpbb == bPrev)
{
@@ -4596,7 +4596,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
BasicBlock* block;
BasicBlock* bPrev;
- for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext)
+ for (bPrev = fgFirstBB, block = bPrev->Next(); block != nullptr; bPrev = block, block = block->Next())
{
if (bPrev->isRunRarely())
{
@@ -4678,7 +4678,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
// Now iterate over every block to see if we can prove that a block is rarely run
// (i.e. when all predecessors to the block are rarely run)
//
- for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext)
+ for (bPrev = fgFirstBB, block = bPrev->Next(); block != nullptr; bPrev = block, block = block->Next())
{
// If block is not run rarely, then check to make sure that it has
// at least one non-rarely run block.
@@ -4728,7 +4728,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
//
if (block->isBBCallAlwaysPair())
{
- BasicBlock* bNext = block->bbNext;
+ BasicBlock* bNext = block->Next();
PREFIX_ASSUME(bNext != nullptr);
bNext->bbSetRunRarely();
#ifdef DEBUG
@@ -4832,7 +4832,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
#endif // FEATURE_EH_FUNCLETS
// We can't relocate anything if we only have one block
- if (fgFirstBB->bbNext == nullptr)
+ if (fgFirstBB->IsLast())
{
return false;
}
@@ -4880,7 +4880,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
EHblkDsc* HBtab;
// Iterate over every block, remembering our previous block in bPrev
- for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext)
+ for (bPrev = fgFirstBB, block = bPrev->Next(); block != nullptr; bPrev = block, block = block->Next())
{
//
// Consider relocating the rarely run blocks such that they are at the end of the method.
@@ -5128,7 +5128,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
weight_t highestWeight = 0;
BasicBlock* candidateBlock = nullptr;
BasicBlock* lastNonFallThroughBlock = bPrev;
- BasicBlock* bTmp = bPrev->bbNext;
+ BasicBlock* bTmp = bPrev->Next();
while (bTmp != nullptr)
{
@@ -5137,7 +5137,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
if (bTmp->isBBCallAlwaysPair())
{
// Move bTmp forward
- bTmp = bTmp->bbNext;
+ bTmp = bTmp->Next();
}
//
@@ -5164,7 +5164,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// otherwise we have a new candidateBlock
//
highestWeight = bTmp->bbWeight;
- candidateBlock = lastNonFallThroughBlock->bbNext;
+ candidateBlock = lastNonFallThroughBlock->Next();
}
}
@@ -5173,7 +5173,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
lastNonFallThroughBlock = bTmp;
}
- bTmp = bTmp->bbNext;
+ bTmp = bTmp->Next();
}
// If we didn't find a suitable block then skip this
@@ -5211,7 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
/* (bPrev is known to be a normal block at this point) */
if (!isRare)
{
- if ((bDest == block->bbNext) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS))
+ if (block->NextIs(bDest) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS))
{
// This is a common case with expressions like "return Expr1 && Expr2" -- move the return
// to establish fall-through.
@@ -5277,7 +5277,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
BasicBlock* bStart = block;
BasicBlock* bEnd = bStart;
- bNext = bEnd->bbNext;
+ bNext = bEnd->Next();
bool connected_bDest = false;
if ((backwardBranch && !isRare) ||
@@ -5296,7 +5296,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
{
// Move bEnd and bNext forward
bEnd = bNext;
- bNext = bNext->bbNext;
+ bNext = bNext->Next();
}
//
@@ -5309,7 +5309,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
#if defined(FEATURE_EH_FUNCLETS)
// Check if we've reached the funclets region, at the end of the function
- if (fgFirstFuncletBB == bEnd->bbNext)
+ if (bEnd->NextIs(fgFirstFuncletBB))
{
break;
}
@@ -5356,7 +5356,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// Move bEnd and bNext forward
bEnd = bNext;
- bNext = bNext->bbNext;
+ bNext = bNext->Next();
}
// Set connected_bDest to true if moving blocks [bStart .. bEnd]
@@ -5396,12 +5396,12 @@ bool Compiler::fgReorderBlocks(bool useProfile)
bPrev2 = block;
while (bPrev2 != nullptr)
{
- if (bPrev2->bbNext == bDest)
+ if (bPrev2->NextIs(bDest))
{
break;
}
- bPrev2 = bPrev2->bbNext;
+ bPrev2 = bPrev2->Next();
}
if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest))
@@ -5414,7 +5414,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
//
bStart2 = bDest;
bEnd2 = bStart2;
- bNext = bEnd2->bbNext;
+ bNext = bEnd2->Next();
while (true)
{
@@ -5425,7 +5425,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
noway_assert(bNext->KindIs(BBJ_ALWAYS));
// Move bEnd2 and bNext forward
bEnd2 = bNext;
- bNext = bNext->bbNext;
+ bNext = bNext->Next();
}
// Check for the Loop exit conditions
@@ -5475,7 +5475,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// Move bEnd2 and bNext forward
bEnd2 = bNext;
- bNext = bNext->bbNext;
+ bNext = bNext->Next();
}
}
}
@@ -5579,7 +5579,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
noway_assert(!bEnd->KindIs(BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL));
// bStartPrev must be set to the block that precedes bStart
- noway_assert(bStartPrev->bbNext == bStart);
+ noway_assert(bStartPrev->NextIs(bStart));
// Since we will be unlinking [bStart..bEnd],
// we need to compute and remember if bStart is in each of
@@ -5630,7 +5630,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
if (ehDsc != nullptr)
{
- endBlk = lastBlk->bbNext;
+ endBlk = lastBlk->Next();
/*
Multiple (nested) try regions might start from the same BB.
@@ -5650,7 +5650,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
*/
while (!BasicBlock::sameTryRegion(startBlk, bStart) && (startBlk != endBlk))
{
- startBlk = startBlk->bbNext;
+ startBlk = startBlk->Next();
}
// startBlk cannot equal endBlk as it must come before endBlk
@@ -5666,12 +5666,12 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// or if bEnd->bbNext is in a different try region
// then we cannot move the blocks
//
- if ((bEnd->bbNext == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext))
+ if (bEnd->IsLast() || !BasicBlock::sameTryRegion(startBlk, bEnd->Next()))
{
goto CANNOT_MOVE;
}
- startBlk = bEnd->bbNext;
+ startBlk = bEnd->Next();
// Check that the new startBlk still comes before endBlk
@@ -5684,7 +5684,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
BasicBlock* tmpBlk = startBlk;
while ((tmpBlk != endBlk) && (tmpBlk != nullptr))
{
- tmpBlk = tmpBlk->bbNext;
+ tmpBlk = tmpBlk->Next();
}
// when tmpBlk is NULL that means startBlk is after endBlk
@@ -5719,7 +5719,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
fgIsForwardBranch(bEnd, bPrev))
{
// Set nearBlk to be the block in [startBlk..endBlk]
- // such that nearBlk->bbNext == bEnd->JumpDest
+ // such that nearBlk->NextIs(bEnd->JumpDest)
// if no such block exists then set nearBlk to NULL
nearBlk = startBlk;
jumpBlk = bEnd;
@@ -5731,7 +5731,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
if (nearBlk != bPrev)
{
// Check if nearBlk satisfies our requirement
- if (nearBlk->bbNext == bEnd->bbJumpDest)
+ if (nearBlk->NextIs(bEnd->bbJumpDest))
{
break;
}
@@ -5745,7 +5745,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
}
// advance nearBlk to the next block
- nearBlk = nearBlk->bbNext;
+ nearBlk = nearBlk->Next();
} while (nearBlk != nullptr);
}
@@ -5783,10 +5783,10 @@ bool Compiler::fgReorderBlocks(bool useProfile)
/* We couldn't move the blocks, so put everything back */
/* relink [bStart .. bEnd] into the flow graph */
- bPrev->setNext(bStart);
- if (bEnd->bbNext)
+ bPrev->SetNext(bStart);
+ if (!bEnd->IsLast())
{
- bEnd->bbNext->bbPrev = bEnd;
+ bEnd->Next()->SetPrev(bEnd);
}
#ifdef DEBUG
if (verbose)
@@ -5880,7 +5880,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
else
{
noway_assert(insertAfterBlk == bPrev);
- noway_assert(insertAfterBlk->bbNext == block);
+ noway_assert(insertAfterBlk->NextIs(block));
/* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */
bPrev->bbJumpDest = block;
@@ -5933,7 +5933,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
fgConnectFallThrough(bPrev, block);
}
- BasicBlock* bSkip = bEnd->bbNext;
+ BasicBlock* bSkip = bEnd->Next();
/* If bEnd falls through, we must insert a jump to bNext */
fgConnectFallThrough(bEnd, bNext);
@@ -5968,7 +5968,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// Set our iteration point 'block' to be the new bPrev->bbNext
// It will be used as the next bPrev
- block = bPrev->bbNext;
+ block = bPrev->Next();
} // end of for loop(bPrev,block)
@@ -6068,7 +6068,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
BasicBlock* bNext; // the successor of the current block
BasicBlock* bDest; // the jump target of the current block
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (block = fgFirstBB; block != nullptr; block = block->Next())
{
/* Some blocks may be already marked removed by other optimizations
* (e.g worthless loop removal), without being explicitly removed
@@ -6079,14 +6079,14 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
{
if (bPrev)
{
- bPrev->setNext(block->bbNext);
+ bPrev->SetNext(block->Next());
}
else
{
/* WEIRD first basic block is removed - should have an assert here */
noway_assert(!"First basic block marked as BBF_REMOVED???");
- fgFirstBB = block->bbNext;
+ fgFirstBB = block->Next();
}
continue;
}
@@ -6099,7 +6099,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
REPEAT:;
- bNext = block->bbNext;
+ bNext = block->Next();
bDest = nullptr;
if (block->KindIs(BBJ_ALWAYS))
@@ -6110,19 +6110,19 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
change = true;
modified = true;
bDest = block->bbJumpDest;
- bNext = block->bbNext;
+ bNext = block->Next();
}
}
if (block->KindIs(BBJ_NONE))
{
bDest = nullptr;
- if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext))
+ if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->Next()))
{
change = true;
modified = true;
bDest = block->bbJumpDest;
- bNext = block->bbNext;
+ bNext = block->Next();
}
}
@@ -6171,12 +6171,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block
bNext->isEmpty() && // and it is an empty block
(bNext != bNext->bbJumpDest) && // special case for self jumps
- (bDest != fgFirstColdBlock) &&
+ !bDest->IsFirstColdBlock(this) &&
(!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections
{
// case (a)
//
- const bool isJumpAroundEmpty = (bNext->bbNext == bDest);
+ const bool isJumpAroundEmpty = bNext->NextIs(bDest);
// case (b)
//
@@ -6241,7 +6241,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
{
// We don't expect bDest to already be right after bNext.
//
- assert(bDest != bNext->bbNext);
+ assert(!bNext->NextIs(bDest));
JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum,
bNext->bbNum);
@@ -6249,13 +6249,13 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
// If bDest can fall through we'll need to create a jump
// block after it too. Remember where to jump to.
//
- BasicBlock* const bDestNext = bDest->bbNext;
+ BasicBlock* const bDestNext = bDest->Next();
// Move bDest
//
if (ehIsBlockEHLast(bDest))
{
- ehUpdateLastBlocks(bDest, bDest->bbPrev);
+ ehUpdateLastBlocks(bDest, bDest->Prev());
}
fgUnlinkBlock(bDest);
@@ -6331,9 +6331,9 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump"));
// If this is the first Cold basic block update fgFirstColdBlock
- if (bNext == fgFirstColdBlock)
+ if (bNext->IsFirstColdBlock(this))
{
- fgFirstColdBlock = bNext->bbNext;
+ fgFirstColdBlock = bNext->Next();
}
//
@@ -6483,7 +6483,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
if (block->isEmpty())
{
- assert(bPrev == block->bbPrev);
+ assert(block->PrevIs(bPrev));
if (fgOptimizeEmptyBlock(block))
{
change = true;
@@ -7067,7 +7067,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early)
// ternaries in C#).
// The logic below could be generalized to BBJ_SWITCH, but this currently
// has almost no CQ benefit but does have a TP impact.
- if (!block->KindIs(BBJ_COND) || (block->bbNext == block->bbJumpDest))
+ if (!block->KindIs(BBJ_COND) || block->NextIs(block->bbJumpDest))
{
return false;
}
@@ -7116,7 +7116,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early)
Statement* nextFirstStmt;
Statement* destFirstStmt;
- if (!getSuccCandidate(block->bbNext, &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt))
+ if (!getSuccCandidate(block->Next(), &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt))
{
return false;
}
@@ -7144,10 +7144,10 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early)
JITDUMP("We can; moving statement\n");
- fgUnlinkStmt(block->bbNext, nextFirstStmt);
+ fgUnlinkStmt(block->Next(), nextFirstStmt);
fgInsertStmtNearEnd(block, nextFirstStmt);
fgUnlinkStmt(block->bbJumpDest, destFirstStmt);
- block->bbFlags |= block->bbNext->bbFlags & BBF_COPY_PROPAGATE;
+ block->bbFlags |= block->Next()->bbFlags & BBF_COPY_PROPAGATE;
return true;
}
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index 6444e45085db7..5b61db49a9099 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -530,7 +530,7 @@ void BlockCountInstrumentor::RelocateProbes()
// Handle case where we had a fall through critical edge
//
- if (pred->bbNext == intermediary)
+ if (pred->NextIs(intermediary))
{
m_comp->fgRemoveRefPred(pred, block);
m_comp->fgAddRefPred(intermediary, block);
@@ -963,7 +963,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor)
{
// This block should be the only pred of the continuation.
//
- BasicBlock* const target = block->bbNext;
+ BasicBlock* const target = block->Next();
assert(!BlockSetOps::IsMember(this, marked, target->bbNum));
visitor->VisitTreeEdge(block, target);
stack.Push(target);
@@ -3363,7 +3363,7 @@ void EfficientEdgeCountReconstructor::Solve()
// The ideal solver order is likely reverse postorder over the depth-first spanning tree.
// We approximate it here by running from last node to first.
//
- for (BasicBlock* block = m_comp->fgLastBB; (block != nullptr); block = block->bbPrev)
+ for (BasicBlock* block = m_comp->fgLastBB; (block != nullptr); block = block->Prev())
{
BlockInfo* const info = BlockToInfo(block);
@@ -4413,7 +4413,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight)
weight = 0;
iterations++;
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next())
{
if (!bDst->hasProfileWeight() && (bDst->bbPreds != nullptr))
{
@@ -4431,7 +4431,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight)
// Does this block flow into only one other block
if (bSrc->KindIs(BBJ_NONE))
{
- bOnlyNext = bSrc->bbNext;
+ bOnlyNext = bSrc->Next();
}
else if (bSrc->KindIs(BBJ_ALWAYS))
{
@@ -4452,7 +4452,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight)
// Does this block flow into only one other block
if (bDst->KindIs(BBJ_NONE))
{
- bOnlyNext = bDst->bbNext;
+ bOnlyNext = bDst->Next();
}
else if (bDst->KindIs(BBJ_ALWAYS))
{
@@ -4582,7 +4582,7 @@ bool Compiler::fgComputeCalledCount(weight_t returnWeight)
//
while (firstILBlock->bbFlags & BBF_INTERNAL)
{
- firstILBlock = firstILBlock->bbNext;
+ firstILBlock = firstILBlock->Next();
}
}
@@ -4655,7 +4655,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights()
JITDUMP("Initial weight assignments\n\n");
// Now we will compute the initial m_edgeWeightMin and m_edgeWeightMax values
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next())
{
weight_t bDstWeight = bDst->bbWeight;
@@ -4746,7 +4746,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights()
hasIncompleteEdgeWeights = false;
JITDUMP("\n -- step 1 --\n");
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next())
{
for (FlowEdge* const edge : bDst->PredEdges())
{
@@ -4761,13 +4761,13 @@ PhaseStatus Compiler::fgComputeEdgeWeights()
weight_t diff;
FlowEdge* otherEdge;
BasicBlock* otherDst;
- if (bSrc->bbNext == bDst)
+ if (bSrc->NextIs(bDst))
{
otherDst = bSrc->bbJumpDest;
}
else
{
- otherDst = bSrc->bbNext;
+ otherDst = bSrc->Next();
}
otherEdge = fgGetPredForBlock(otherDst, bSrc);
@@ -4842,7 +4842,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights()
JITDUMP("\n -- step 2 --\n");
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->Next())
{
weight_t bDstWeight = bDst->bbWeight;
diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp
index 90d56a835ff10..d50a03260a130 100644
--- a/src/coreclr/jit/fgprofilesynthesis.cpp
+++ b/src/coreclr/jit/fgprofilesynthesis.cpp
@@ -290,7 +290,7 @@ bool ProfileSynthesis::IsLoopExitEdge(FlowEdge* edge)
//
void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block)
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(block->bbNext, block);
+ FlowEdge* const edge = m_comp->fgGetPredForBlock(block->Next(), block);
edge->setLikelihood(1.0);
}
@@ -317,7 +317,7 @@ void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block)
void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
{
BasicBlock* const jump = block->bbJumpDest;
- BasicBlock* const next = block->bbNext;
+ BasicBlock* const next = block->Next();
// Watch for degenerate case
//
@@ -1221,7 +1221,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop)
exitBlock->bbNum, exitEdge->getLikelihood());
BasicBlock* const jump = exitBlock->bbJumpDest;
- BasicBlock* const next = exitBlock->bbNext;
+ BasicBlock* const next = exitBlock->Next();
FlowEdge* const jumpEdge = m_comp->fgGetPredForBlock(jump, exitBlock);
FlowEdge* const nextEdge = m_comp->fgGetPredForBlock(next, exitBlock);
weight_t const exitLikelihood = (missingExitWeight + currentExitWeight) / exitBlockWeight;
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 78dc4571352aa..7fd5a41b4f8ef 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -80,7 +80,7 @@ PhaseStatus Compiler::fgInsertGCPolls()
// Walk through the blocks and hunt for a block that needs a GC Poll
//
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next())
{
compCurBB = block;
@@ -256,7 +256,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
if (top->KindIs(BBJ_COND))
{
- topFallThrough = top->bbNext;
+ topFallThrough = top->Next();
lpIndexFallThrough = topFallThrough->bbNatLoopNum;
}
@@ -384,7 +384,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
switch (oldJumpKind)
{
case BBJ_NONE:
- fgReplacePred(bottom->bbNext, top, bottom);
+ fgReplacePred(bottom->Next(), top, bottom);
break;
case BBJ_RETURN:
case BBJ_THROW:
@@ -392,8 +392,8 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
break;
case BBJ_COND:
// replace predecessor in the fall through block.
- noway_assert(bottom->bbNext);
- fgReplacePred(bottom->bbNext, top, bottom);
+ noway_assert(!bottom->IsLast());
+ fgReplacePred(bottom->Next(), top, bottom);
// fall through for the jump target
FALLTHROUGH;
@@ -1562,7 +1562,7 @@ void Compiler::fgAddSyncMethodEnterExit()
// Create a block for the start of the try region, where the monitor enter call
// will go.
BasicBlock* const tryBegBB = fgSplitBlockAtEnd(fgFirstBB);
- BasicBlock* const tryNextBB = tryBegBB->bbNext;
+ BasicBlock* const tryNextBB = tryBegBB->Next();
BasicBlock* const tryLastBB = fgLastBB;
// If we have profile data the new block will inherit the next block's weight
@@ -1577,8 +1577,8 @@ void Compiler::fgAddSyncMethodEnterExit()
assert(!tryLastBB->bbFallsThrough());
BasicBlock* faultBB = fgNewBBafter(BBJ_EHFAULTRET, tryLastBB, false);
- assert(tryLastBB->bbNext == faultBB);
- assert(faultBB->bbNext == nullptr);
+ assert(tryLastBB->NextIs(faultBB));
+ assert(faultBB->IsLast());
assert(faultBB == fgLastBB);
faultBB->bbRefs = 1;
@@ -1633,7 +1633,7 @@ void Compiler::fgAddSyncMethodEnterExit()
// to point to the new try handler.
BasicBlock* tmpBB;
- for (tmpBB = tryBegBB->bbNext; tmpBB != faultBB; tmpBB = tmpBB->bbNext)
+ for (tmpBB = tryBegBB->Next(); tmpBB != faultBB; tmpBB = tmpBB->Next())
{
if (!tmpBB->hasTryIndex())
{
@@ -2154,7 +2154,7 @@ class MergedReturns
BasicBlock* newReturnBB = comp->fgNewBBinRegion(BBJ_RETURN);
comp->fgReturnCount++;
- noway_assert(newReturnBB->bbNext == nullptr);
+ noway_assert(newReturnBB->IsLast());
JITDUMP("\n newReturnBB [" FMT_BB "] created\n", newReturnBB->bbNum);
@@ -2594,7 +2594,7 @@ PhaseStatus Compiler::fgAddInternal()
// Visit the BBJ_RETURN blocks and merge as necessary.
- for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; !lastBlockBeforeGenReturns->NextIs(block); block = block->Next())
{
if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0))
{
@@ -3004,12 +3004,12 @@ BasicBlock* Compiler::fgLastBBInMainFunction()
if (fgFirstFuncletBB != nullptr)
{
- return fgFirstFuncletBB->bbPrev;
+ return fgFirstFuncletBB->Prev();
}
#endif // FEATURE_EH_FUNCLETS
- assert(fgLastBB->bbNext == nullptr);
+ assert(fgLastBB->IsLast());
return fgLastBB;
}
@@ -3062,7 +3062,7 @@ BasicBlock* Compiler::fgGetDomSpeculatively(const BasicBlock* block)
/*****************************************************************************************************
*
* Function to return the first basic block after the main part of the function. With funclets, it is
- * the block of the first funclet. Otherwise it is NULL if there are no funclets (fgLastBB->bbNext).
+ * the block of the first funclet. Otherwise it is NULL if there are no funclets (fgLastBB->Next()).
* This is equivalent to fgLastBBInMainFunction()->bbNext
* An exclusive end of the main method.
*/
@@ -3078,7 +3078,7 @@ BasicBlock* Compiler::fgEndBBAfterMainFunction()
#endif // FEATURE_EH_FUNCLETS
- assert(fgLastBB->bbNext == nullptr);
+ assert(fgLastBB->IsLast());
return nullptr;
}
@@ -3302,7 +3302,7 @@ PhaseStatus Compiler::fgCreateFunclets()
//
bool Compiler::fgFuncletsAreCold()
{
- for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->Next())
{
if (!block->isRunRarely())
{
@@ -3365,7 +3365,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
if (forceSplit)
{
- firstColdBlock = fgFirstBB->bbNext;
+ firstColdBlock = fgFirstBB->Next();
prevToFirstColdBlock = fgFirstBB;
JITDUMP("JitStressProcedureSplitting is enabled: Splitting after the first basic block\n");
}
@@ -3373,7 +3373,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
{
bool inFuncletSection = false;
- for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->bbNext)
+ for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->Next())
{
bool blockMustBeInHotSection = false;
@@ -3413,7 +3413,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
if (fgFuncletsAreCold())
{
firstColdBlock = fgFirstFuncletBB;
- prevToFirstColdBlock = fgFirstFuncletBB->bbPrev;
+ prevToFirstColdBlock = fgFirstFuncletBB->Prev();
}
break;
@@ -3486,7 +3486,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
// Cold section is 5 bytes in size.
// Ignore if stress-splitting.
//
- if (!forceSplit && firstColdBlock->bbNext == nullptr)
+ if (!forceSplit && firstColdBlock->IsLast())
{
// If the size of the cold block is 7 or less
// then we will keep it in the Hot section.
@@ -3515,7 +3515,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
//
assert(prevToFirstColdBlock->isBBCallAlwaysPair());
firstColdBlock =
- firstColdBlock->bbNext; // Note that this assignment could make firstColdBlock == nullptr
+ firstColdBlock->Next(); // Note that this assignment could make firstColdBlock == nullptr
break;
case BBJ_COND:
@@ -3526,7 +3526,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
if (firstColdBlock->isEmpty() && firstColdBlock->KindIs(BBJ_ALWAYS))
{
// We can just use this block as the transitionBlock
- firstColdBlock = firstColdBlock->bbNext;
+ firstColdBlock = firstColdBlock->Next();
// Note that this assignment could make firstColdBlock == NULL
}
else
@@ -3554,7 +3554,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock()
}
}
- for (block = firstColdBlock; block != nullptr; block = block->bbNext)
+ for (block = firstColdBlock; block != nullptr; block = block->Next())
{
block->bbFlags |= BBF_COLD;
block->unmarkLoopAlign(this DEBUG_ARG("Loop alignment disabled for cold blocks"));
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index f0ffca4a5ef08..f3ab7185b4148 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -871,7 +871,7 @@ int GenTree::GetRegisterDstCount(Compiler* compiler) const
assert(!isContained());
if (!IsMultiRegNode())
{
- return (IsValue()) ? 1 : 0;
+ return IsValue() ? 1 : 0;
}
else if (IsMultiRegCall())
{
diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp
index 529bbfdb24492..0ab8e106a3d32 100644
--- a/src/coreclr/jit/helperexpansion.cpp
+++ b/src/coreclr/jit/helperexpansion.cpp
@@ -824,7 +824,7 @@ template bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next())
{
if (skipRarelyRunBlocks && block->isRunRarely())
{
diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp
index 8489917bb7b54..9a09dd1540770 100644
--- a/src/coreclr/jit/ifconversion.cpp
+++ b/src/coreclr/jit/ifconversion.cpp
@@ -122,7 +122,7 @@ bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block)
bool OptIfConversionDsc::IfConvertCheckThenFlow()
{
m_flowFound = false;
- BasicBlock* thenBlock = m_startBlock->bbNext;
+ BasicBlock* thenBlock = m_startBlock->Next();
for (int thenLimit = 0; thenLimit < m_checkLimit; thenLimit++)
{
@@ -385,7 +385,7 @@ void OptIfConversionDsc::IfConvertDump()
{
assert(m_startBlock != nullptr);
m_comp->fgDumpBlock(m_startBlock);
- for (BasicBlock* dumpBlock = m_startBlock->bbNext; dumpBlock != m_finalBlock;
+ for (BasicBlock* dumpBlock = m_startBlock->Next(); dumpBlock != m_finalBlock;
dumpBlock = dumpBlock->GetUniqueSucc())
{
m_comp->fgDumpBlock(dumpBlock);
@@ -575,7 +575,7 @@ bool OptIfConversionDsc::optIfConvert()
}
// Check the Then and Else blocks have a single operation each.
- if (!IfConvertCheckStmts(m_startBlock->bbNext, &m_thenOperation))
+ if (!IfConvertCheckStmts(m_startBlock->Next(), &m_thenOperation))
{
return false;
}
@@ -742,7 +742,7 @@ bool OptIfConversionDsc::optIfConvert()
}
// Update the flow from the original block.
- m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock);
+ m_comp->fgRemoveAllRefPreds(m_startBlock->Next(), m_startBlock);
m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp));
#ifdef DEBUG
@@ -789,7 +789,7 @@ PhaseStatus Compiler::optIfConversion()
{
OptIfConversionDsc optIfConversionDsc(this, block);
madeChanges |= optIfConversionDsc.optIfConvert();
- block = block->bbPrev;
+ block = block->Prev();
}
#endif
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 91dbf2e5e7687..076fb70d2fc1d 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -1953,7 +1953,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H
impPushOnStack(tree, typeInfo(clsHnd));
- return hndBlk->bbNext;
+ return hndBlk->Next();
}
}
@@ -7298,14 +7298,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
BADCODE("invalid type for brtrue/brfalse");
}
- if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
+ if (opts.OptimizationEnabled() && block->NextIs(block->bbJumpDest))
{
// We may have already modified `block`'s jump kind, if this is a re-importation.
//
if (block->KindIs(BBJ_COND))
{
JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n",
- block->bbNum, block->bbNext->bbNum);
+ block->bbNum, block->Next()->bbNum);
fgRemoveRefPred(block->bbJumpDest, block);
block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
}
@@ -7371,14 +7371,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
{
if (foldedJumpKind == BBJ_NONE)
{
- JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
+ JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->Next()->bbNum);
fgRemoveRefPred(block->bbJumpDest, block);
}
else
{
JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
block->bbJumpDest->bbNum);
- fgRemoveRefPred(block->bbNext, block);
+ fgRemoveRefPred(block->Next(), block);
}
block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this));
}
@@ -7544,14 +7544,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
assertImp((genActualType(op1) == genActualType(op2)) || (varTypeIsI(op1) && varTypeIsI(op2)) ||
(varTypeIsFloating(op1) && varTypeIsFloating(op2)));
- if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
+ if (opts.OptimizationEnabled() && block->NextIs(block->bbJumpDest))
{
// We may have already modified `block`'s jump kind, if this is a re-importation.
//
if (block->KindIs(BBJ_COND))
{
JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n",
- block->bbNum, block->bbNext->bbNum);
+ block->bbNum, block->Next()->bbNum);
fgRemoveRefPred(block->bbJumpDest, block);
block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
}
@@ -7630,7 +7630,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
- if (curJump != block->bbNext)
+ if (!block->NextIs(curJump))
{
// transform the basic block into a BBJ_ALWAYS
block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
@@ -11135,7 +11135,7 @@ void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
// push catch arg the stack, spill to a temp if necessary
// Note: can update HBtab->ebdFilter!
- const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
+ const bool isSingleBlockFilter = (filterBB->NextIs(hndBegBB));
filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
impImportBlockPending(filterBB);
@@ -11289,12 +11289,12 @@ void Compiler::impImportBlock(BasicBlock* block)
/* Note if the next block has more than one ancestor */
- multRef |= block->bbNext->bbRefs;
+ multRef |= block->Next()->bbRefs;
/* Does the next block have temps assigned? */
- baseTmp = block->bbNext->bbStkTempsIn;
- tgtBlock = block->bbNext;
+ baseTmp = block->Next()->bbStkTempsIn;
+ tgtBlock = block->Next();
if (baseTmp != NO_BASE_TMP)
{
@@ -11315,9 +11315,9 @@ void Compiler::impImportBlock(BasicBlock* block)
break;
case BBJ_NONE:
- multRef |= block->bbNext->bbRefs;
- baseTmp = block->bbNext->bbStkTempsIn;
- tgtBlock = block->bbNext;
+ multRef |= block->Next()->bbRefs;
+ baseTmp = block->Next()->bbStkTempsIn;
+ tgtBlock = block->Next();
break;
case BBJ_SWITCH:
@@ -12119,7 +12119,7 @@ void Compiler::impImport()
if (entryBlock->KindIs(BBJ_NONE))
{
- entryBlock = entryBlock->bbNext;
+ entryBlock = entryBlock->Next();
}
else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS))
{
@@ -12253,7 +12253,7 @@ void Compiler::impFixPredLists()
continue;
}
- BasicBlock* const continuation = predBlock->bbNext;
+ BasicBlock* const continuation = predBlock->Next();
fgAddRefPred(continuation, finallyBlock);
if (!added)
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index 02774119c82e1..9d92da20126b3 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -1912,7 +1912,7 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
// be at most 64 arguments - 32 lengths and 32 lower bounds.
//
- if ((!numArgsArg->IsCnsIntOrI()) || (numArgsArg->AsIntCon()->IconValue() < 1) ||
+ if (!numArgsArg->IsCnsIntOrI() || (numArgsArg->AsIntCon()->IconValue() < 1) ||
(numArgsArg->AsIntCon()->IconValue() > 64))
{
return nullptr;
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index da1fb1933b239..000e99f47d486 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -1071,7 +1071,7 @@ class IndirectCallTransformer
// Find the hot/cold predecessors. (Consider: just record these when
// we did the scouting).
//
- BasicBlock* const coldBlock = checkBlock->bbPrev;
+ BasicBlock* const coldBlock = checkBlock->Prev();
if (!coldBlock->KindIs(BBJ_NONE))
{
@@ -1079,7 +1079,7 @@ class IndirectCallTransformer
return;
}
- BasicBlock* const hotBlock = coldBlock->bbPrev;
+ BasicBlock* const hotBlock = coldBlock->Prev();
if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock))
{
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index 9de15947cfdd6..97ba02897703c 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -32,7 +32,7 @@ BasicBlock* EHblkDsc::BBFilterLast()
noway_assert(ebdHndBeg != nullptr);
// The last block of the filter is the block immediately preceding the first block of the handler.
- return ebdHndBeg->bbPrev;
+ return ebdHndBeg->Prev();
}
BasicBlock* EHblkDsc::ExFlowBlock()
@@ -107,7 +107,7 @@ bool EHblkDsc::HasFinallyOrFaultHandler()
bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd)
{
- for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->bbNext)
+ for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->Next())
{
if (pWalk == pBlk)
{
@@ -119,7 +119,7 @@ bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd)
bool EHblkDsc::InTryRegionBBRange(BasicBlock* pBlk)
{
- return InBBRange(pBlk, ebdTryBeg, ebdTryLast->bbNext);
+ return InBBRange(pBlk, ebdTryBeg, ebdTryLast->Next());
}
bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk)
@@ -129,7 +129,7 @@ bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk)
bool EHblkDsc::InHndRegionBBRange(BasicBlock* pBlk)
{
- return InBBRange(pBlk, ebdHndBeg, ebdHndLast->bbNext);
+ return InBBRange(pBlk, ebdHndBeg, ebdHndLast->Next());
}
unsigned EHblkDsc::ebdGetEnclosingRegionIndex(bool* inTryRegion)
@@ -836,7 +836,7 @@ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block)
return;
}
- BasicBlock* bPrev = block->bbPrev;
+ BasicBlock* bPrev = block->Prev();
assert(bPrev != nullptr);
ehUpdateLastBlocks(block, bPrev);
@@ -865,7 +865,7 @@ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block)
if (ehIsBlockEHLast(block))
{
- BasicBlock* bPrev = block->bbPrev;
+ BasicBlock* bPrev = block->Prev();
if ((bPrev != nullptr) && ehIsBlockEHLast(bPrev))
{
return false;
@@ -941,18 +941,18 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be
if (inTryRegion)
{
*begBlk = ehDsc->ebdTryBeg;
- *endBlk = ehDsc->ebdTryLast->bbNext;
+ *endBlk = ehDsc->ebdTryLast->Next();
}
else
{
*begBlk = ehDsc->ebdHndBeg;
- *endBlk = ehDsc->ebdHndLast->bbNext;
+ *endBlk = ehDsc->ebdHndLast->Next();
}
}
#else // !FEATURE_EH_CALLFINALLY_THUNKS
EHblkDsc* ehDsc = ehGetDsc(finallyIndex);
*begBlk = ehDsc->ebdTryBeg;
- *endBlk = ehDsc->ebdTryLast->bbNext;
+ *endBlk = ehDsc->ebdTryLast->Next();
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
}
@@ -1320,10 +1320,10 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
bLast = nullptr;
// Find the first non-removed block after the 'try' region to end our iteration.
- bEnd = handlerTab->ebdTryLast->bbNext;
+ bEnd = handlerTab->ebdTryLast->Next();
while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED))
{
- bEnd = bEnd->bbNext;
+ bEnd = bEnd->Next();
}
// Update bLast to account for any removed blocks
@@ -1335,7 +1335,7 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
bLast = block;
}
- block = block->bbNext;
+ block = block->Next();
if (block == bEnd)
{
@@ -1349,10 +1349,10 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
bLast = nullptr;
// Find the first non-removed block after the handler region to end our iteration.
- bEnd = handlerTab->ebdHndLast->bbNext;
+ bEnd = handlerTab->ebdHndLast->Next();
while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED))
{
- bEnd = bEnd->bbNext;
+ bEnd = bEnd->Next();
}
// Update bLast to account for any removed blocks
@@ -1364,7 +1364,7 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
bLast = block;
}
- block = block->bbNext;
+ block = block->Next();
if (block == bEnd)
{
break;
@@ -2281,7 +2281,7 @@ bool Compiler::fgNormalizeEHCase2()
fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk);
}
- if ((predBlock->bbNext == newTryStart) && predBlock->bbFallsThrough())
+ if (predBlock->NextIs(newTryStart) && predBlock->bbFallsThrough())
{
fgRemoveRefPred(insertBeforeBlk, predBlock);
fgAddRefPred(newTryStart, predBlock);
@@ -2295,7 +2295,7 @@ bool Compiler::fgNormalizeEHCase2()
// outwards in enclosing try index order, and we'll get to them later.
// Move the insert block backwards, to the one we just inserted.
- insertBeforeBlk = insertBeforeBlk->bbPrev;
+ insertBeforeBlk = insertBeforeBlk->Prev();
assert(insertBeforeBlk == newTryStart);
modified = true;
@@ -3428,7 +3428,7 @@ void Compiler::fgVerifyHandlerTab()
{
BasicBlock* blockEnd;
- for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->bbNext; block != blockEnd; block = block->bbNext)
+ for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->Next(); block != blockEnd; block = block->Next())
{
if (blockTryIndex[block->bbNum] == 0)
{
@@ -3436,8 +3436,8 @@ void Compiler::fgVerifyHandlerTab()
}
}
- for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext;
- block != blockEnd; block = block->bbNext)
+ for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->Next();
+ block != blockEnd; block = block->Next())
{
if (blockHndIndex[block->bbNum] == 0)
{
@@ -3465,8 +3465,8 @@ void Compiler::fgVerifyHandlerTab()
BasicBlock* blockEnd;
for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg),
- blockEnd = HBtab->ebdHndLast->bbNext;
- block != blockEnd; block = block->bbNext)
+ blockEnd = HBtab->ebdHndLast->Next();
+ block != blockEnd; block = block->Next())
{
if (blockTryIndex[block->bbNum] == 0)
{
@@ -4058,7 +4058,7 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
{
if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block)
{
- BasicBlock* pPrev = predBlock->bbPrev;
+ BasicBlock* pPrev = predBlock->Prev();
if (pPrev != nullptr)
{
if (pPrev->KindIs(BBJ_CALLFINALLY))
@@ -4110,7 +4110,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block)
if (xtab->HasFinallyHandler())
{
assert((xtab->ebdHndBeg == block) || // The normal case
- ((xtab->ebdHndBeg->bbNext == block) &&
+ (xtab->ebdHndBeg->NextIs(block) &&
(xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're
// trying to decide how to split up the predecessor edges.
if (predBlock->KindIs(BBJ_CALLFINALLY))
@@ -4347,9 +4347,9 @@ bool Compiler::fgRelocateEHRegions()
*/
void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
{
- assert(block->bbPrev != nullptr);
+ assert(!block->IsFirst());
- BasicBlock* bPrev = block->bbPrev;
+ BasicBlock* bPrev = block->Prev();
bPrev->copyEHRegion(block);
@@ -4466,7 +4466,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
void Compiler::fgExtendEHRegionAfter(BasicBlock* block)
{
- BasicBlock* newBlk = block->bbNext;
+ BasicBlock* newBlk = block->Next();
assert(newBlk != nullptr);
newBlk->copyEHRegion(block);
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 8af56fa167317..c414a0cd36de2 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -4014,7 +4014,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt,
/* Is this a call to unmanaged code ? */
if (tree->IsCall() && compMethodRequiresPInvokeFrame())
{
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers())
{
/* Get the special variable descriptor */
@@ -4246,7 +4246,7 @@ PhaseStatus Compiler::lvaMarkLocalVars()
// If we have direct pinvokes, verify the frame list root local was set up properly
if (compMethodRequiresPInvokeFrame())
{
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers())
{
noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount);
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index d32854e4224c7..9447bf7a8dc54 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -288,7 +288,7 @@ void Compiler::fgPerNodeLocalVarLiveness(GenTree* tree)
if ((call->IsUnmanaged() || call->IsTailCallViaJitHelper()) && compMethodRequiresPInvokeFrame())
{
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers() && !call->IsSuppressGCTransition())
{
// Get the FrameRoot local and mark it as used.
@@ -365,7 +365,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
}
}
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (block = fgFirstBB; block; block = block->Next())
{
// Strictly speaking, the assignments for the "Def" cases aren't necessary here.
// The empty set would do as well. Use means "use-before-def", so as long as that's
@@ -407,7 +407,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
// memory that is not a GC Heap def.
byrefStatesMatchGcHeapStates = true;
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (block = fgFirstBB; block; block = block->Next())
{
VarSetOps::ClearD(this, fgCurUseSet);
VarSetOps::ClearD(this, fgCurDefSet);
@@ -493,7 +493,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
if (block->KindIs(BBJ_RETURN) && compMethodRequiresPInvokeFrame())
{
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers())
{
// 32-bit targets always pop the frame in the epilog.
@@ -889,8 +889,8 @@ void Compiler::fgExtendDbgLifetimes()
switch (block->GetBBJumpKind())
{
case BBJ_NONE:
- PREFIX_ASSUME(block->bbNext != nullptr);
- VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+ PREFIX_ASSUME(!block->IsLast());
+ VarSetOps::UnionD(this, initVars, block->Next()->bbScope);
break;
case BBJ_ALWAYS:
@@ -903,15 +903,15 @@ void Compiler::fgExtendDbgLifetimes()
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
- PREFIX_ASSUME(block->bbNext != nullptr);
- VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+ PREFIX_ASSUME(!block->IsLast());
+ VarSetOps::UnionD(this, initVars, block->Next()->bbScope);
}
VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
break;
case BBJ_COND:
- PREFIX_ASSUME(block->bbNext != nullptr);
- VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+ PREFIX_ASSUME(!block->IsLast());
+ VarSetOps::UnionD(this, initVars, block->Next()->bbScope);
VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
break;
@@ -1305,11 +1305,11 @@ class LiveVarAnalysis
m_memoryLiveIn = emptyMemoryKindSet;
m_memoryLiveOut = emptyMemoryKindSet;
- for (BasicBlock* block = m_compiler->fgLastBB; block; block = block->bbPrev)
+ for (BasicBlock* block = m_compiler->fgLastBB; block; block = block->Prev())
{
// sometimes block numbers are not monotonically increasing which
// would cause us not to identify backedges
- if (block->bbNext && block->bbNext->bbNum <= block->bbNum)
+ if (!block->IsLast() && block->Next()->bbNum <= block->bbNum)
{
m_hasPossibleBackEdge = true;
}
@@ -1391,7 +1391,7 @@ void Compiler::fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call)
// This ensure that this variable is kept alive at the tail-call
if (call->IsTailCallViaJitHelper() && compMethodRequiresPInvokeFrame())
{
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers())
{
// Get the FrameListRoot local and make it live.
@@ -1412,7 +1412,7 @@ void Compiler::fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call)
if (call->IsUnmanaged() && compMethodRequiresPInvokeFrame())
{
// Get the FrameListRoot local and make it live.
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ assert(!opts.ShouldUsePInvokeHelpers() || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers() && !call->IsSuppressGCTransition())
{
LclVarDsc* frameVarDsc = lvaGetDesc(info.compLvFrameListRoot);
diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp
index f976f1d46adf6..f29a5178b0c5d 100644
--- a/src/coreclr/jit/loopcloning.cpp
+++ b/src/coreclr/jit/loopcloning.cpp
@@ -1800,7 +1800,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd)
// that block; this is one of those cases. This could be fixed fairly easily; for example,
// we could add a dummy nop block after the (cloned) loop bottom, in the same handler scope as the
// loop. This is just a corner to cut to get this working faster.
- BasicBlock* bbAfterLoop = loop.lpBottom->bbNext;
+ BasicBlock* bbAfterLoop = loop.lpBottom->Next();
if (bbAfterLoop != nullptr && bbIsHandlerBeg(bbAfterLoop))
{
JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Next block after bottom is a handler start.\n", loopInd);
@@ -2074,7 +2074,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
{
assert(b->KindIs(BBJ_COND));
- BasicBlock* x = b->bbNext;
+ BasicBlock* x = b->Next();
if (x != nullptr)
{
JITDUMP("Create branch around cloned loop\n");
@@ -2188,7 +2188,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
switch (newblk->GetBBJumpKind())
{
case BBJ_NONE:
- fgAddRefPred(newblk->bbNext, newblk);
+ fgAddRefPred(newblk->Next(), newblk);
break;
case BBJ_ALWAYS:
@@ -2197,7 +2197,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
break;
case BBJ_COND:
- fgAddRefPred(newblk->bbNext, newblk);
+ fgAddRefPred(newblk->Next(), newblk);
fgAddRefPred(newblk->bbJumpDest, newblk);
break;
@@ -2245,14 +2245,14 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
assert(context->HasBlockConditions(loopInd));
assert(h->KindIs(BBJ_NONE));
- assert(h->bbNext == h2);
+ assert(h->NextIs(h2));
// If any condition is false, go to slowHead (which branches or falls through to e2).
BasicBlock* e2 = nullptr;
bool foundIt = blockMap->Lookup(loop.lpEntry, &e2);
assert(foundIt && e2 != nullptr);
- if (slowHead->bbNext != e2)
+ if (!slowHead->NextIs(e2))
{
// We can't just fall through to the slow path entry, so make it an unconditional branch.
assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above.
@@ -2268,8 +2268,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
// Add the fall-through path pred (either to T/E for fall-through from conditions to fast path,
// or H2 if branch to E of fast path).
assert(condLast->KindIs(BBJ_COND));
- JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum);
- fgAddRefPred(condLast->bbNext, condLast);
+ JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->Next()->bbNum);
+ fgAddRefPred(condLast->Next(), condLast);
// Don't unroll loops that we've cloned -- the unroller expects any loop it should unroll to
// initialize the loop counter immediately before entering the loop, but we've left a shared
@@ -2921,8 +2921,8 @@ bool Compiler::optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneV
// Check for (4)
//
- BasicBlock* const hotSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->bbNext;
- BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->bbNext : typeTestBlock->bbJumpDest;
+ BasicBlock* const hotSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->Next();
+ BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->Next() : typeTestBlock->bbJumpDest;
if (!hotSuccessor->hasProfileWeight() || !coldSuccessor->hasProfileWeight())
{
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 07f1f95ff4ebd..81330c9c23a40 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -799,7 +799,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
{
JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
noway_assert(comp->opts.OptimizationDisabled());
- if (originalSwitchBB->bbNext == jumpTab[0])
+ if (originalSwitchBB->NextIs(jumpTab[0]))
{
originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
originalSwitchBB->bbJumpDest = nullptr;
@@ -847,7 +847,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
var_types tempLclType = temp->TypeGet();
BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
- BasicBlock* followingBB = originalSwitchBB->bbNext;
+ BasicBlock* followingBB = originalSwitchBB->Next();
/* Is the number of cases right for a test and jump switch? */
const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
@@ -892,7 +892,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
// representing the fall-through flow from originalSwitchBB.
assert(originalSwitchBB->KindIs(BBJ_NONE));
- assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
+ assert(originalSwitchBB->NextIs(afterDefaultCondBlock));
assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH));
assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
@@ -955,7 +955,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
assert(jumpTab[i] == uniqueSucc);
(void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
}
- if (afterDefaultCondBlock->bbNext == uniqueSucc)
+ if (afterDefaultCondBlock->NextIs(uniqueSucc))
{
afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
afterDefaultCondBlock->bbJumpDest = nullptr;
@@ -1064,7 +1064,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// There is a fall-through to the following block. In the loop
// above, we deleted all the predecessor edges from the switch.
// In this case, we need to add one back.
- comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
+ comp->fgAddRefPred(currentBlock->Next(), currentBlock);
}
if (!fUsedAfterDefaultCondBlock)
@@ -1221,7 +1221,7 @@ bool Lowering::TryLowerSwitchToBitTest(
// impacts register allocation.
//
- if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
+ if (!bbSwitch->NextIs(bbCase0) && !bbSwitch->NextIs(bbCase1))
{
return false;
}
@@ -1252,7 +1252,7 @@ bool Lowering::TryLowerSwitchToBitTest(
comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
- if (bbSwitch->bbNext == bbCase0)
+ if (bbSwitch->NextIs(bbCase0))
{
// GenCondition::C generates JC so we jump to bbCase1 when the bit is set
bbSwitchCondition = GenCondition::C;
@@ -1263,7 +1263,7 @@ bool Lowering::TryLowerSwitchToBitTest(
}
else
{
- assert(bbSwitch->bbNext == bbCase1);
+ assert(bbSwitch->NextIs(bbCase1));
// GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
bbSwitchCondition = GenCondition::NC;
@@ -1288,7 +1288,7 @@ bool Lowering::TryLowerSwitchToBitTest(
//
// Fallback to AND(RSZ(bitTable, switchValue), 1)
//
- GenTree* tstCns = comp->gtNewIconNode(bbSwitch->bbNext != bbCase0 ? 0 : 1, bitTableType);
+ GenTree* tstCns = comp->gtNewIconNode(bbSwitch->NextIs(bbCase0) ? 1 : 0, bitTableType);
GenTree* shift = comp->gtNewOperNode(GT_RSZ, bitTableType, bitTableIcon, switchValue);
GenTree* one = comp->gtNewIconNode(1, bitTableType);
GenTree* andOp = comp->gtNewOperNode(GT_AND, bitTableType, shift, one);
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 88af18d880898..95120f5d28532 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -1028,7 +1028,7 @@ void LinearScan::setBlockSequence()
// For layout order, simply use bbNext
if (isTraversalLayoutOrder())
{
- nextBlock = block->bbNext;
+ nextBlock = block->Next();
continue;
}
@@ -1483,15 +1483,15 @@ void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
varDsc->SetRegNum(newRegNum);
count++;
- BasicBlock* prevReportedBlock = bb->bbPrev;
- if (bb->bbPrev != nullptr && bb->bbPrev->isBBCallAlwaysPairTail())
+ BasicBlock* prevReportedBlock = bb->Prev();
+ if (!bb->IsFirst() && bb->Prev()->isBBCallAlwaysPairTail())
{
// For callf+always pair we generate the code for the always
// block in genCallFinally and skip it, so we don't report
// anything for it (it has only trivial instructions, so that
// does not matter much). So whether we need to rehome or not
// depends on what we reported at the end of the callf block.
- prevReportedBlock = bb->bbPrev->bbPrev;
+ prevReportedBlock = bb->Prev()->Prev();
}
if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex))
@@ -2547,7 +2547,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
if (predBlock->KindIs(BBJ_COND))
{
// Special handling to improve matching on backedges.
- BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext;
+ BasicBlock* otherBlock = predBlock->NextIs(block) ? predBlock->bbJumpDest : predBlock->Next();
noway_assert(otherBlock != nullptr);
if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
{
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 31166db9d122c..e07cf9d4e0dfe 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -7482,7 +7482,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// block removal on it.
fgEnsureFirstBBisScratch();
fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
- block->bbJumpDest = fgFirstBB->bbNext;
+ block->bbJumpDest = fgFirstBB->Next();
}
// Finish hooking things up.
@@ -13154,7 +13154,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
- noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
+ noway_assert((block->Next()->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
if (condTree != cond)
{
@@ -13181,14 +13181,14 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
bTaken = block->bbJumpDest;
- bNotTaken = block->bbNext;
+ bNotTaken = block->Next();
}
else
{
/* Unmark the loop if we are removing a backwards branch */
/* dest block must also be marked as a loop head and */
/* We must be able to reach the backedge block */
- if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
+ if (block->bbJumpDest->isLoopHead() && (block->bbJumpDest->bbNum <= block->bbNum) &&
fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
@@ -13196,7 +13196,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
/* JTRUE 0 - transform the basic block into a BBJ_NONE */
block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
- bTaken = block->bbNext;
+ bTaken = block->Next();
bNotTaken = block->bbJumpDest;
}
@@ -13253,24 +13253,24 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
switch (bUpdated->GetBBJumpKind())
{
case BBJ_NONE:
- edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
+ edge = fgGetPredForBlock(bUpdated->Next(), bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
- edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
+ edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next());
break;
case BBJ_COND:
- edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
+ edge = fgGetPredForBlock(bUpdated->Next(), bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
- edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
+ edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next());
FALLTHROUGH;
case BBJ_ALWAYS:
edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
- edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
+ edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next());
break;
default:
@@ -13421,7 +13421,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
- if (curJump != block->bbNext)
+ if (!block->NextIs(curJump))
{
// transform the basic block into a BBJ_ALWAYS
block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
@@ -13925,7 +13925,7 @@ void Compiler::fgMorphBlocks()
}
}
- block = block->bbNext;
+ block = block->Next();
} while (block != nullptr);
// We are done with the global morphing phase
diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp
index 7e864a5eab25c..75e19184cfcf9 100644
--- a/src/coreclr/jit/morphblock.cpp
+++ b/src/coreclr/jit/morphblock.cpp
@@ -792,13 +792,13 @@ void MorphCopyBlockHelper::MorphStructCases()
}
#if defined(TARGET_ARM)
- if ((m_store->OperIsIndir()) && m_store->AsIndir()->IsUnaligned())
+ if (m_store->OperIsIndir() && m_store->AsIndir()->IsUnaligned())
{
JITDUMP(" store is unaligned");
requiresCopyBlock = true;
}
- if ((m_src->OperIsIndir()) && m_src->AsIndir()->IsUnaligned())
+ if (m_src->OperIsIndir() && m_src->AsIndir()->IsUnaligned())
{
JITDUMP(" src is unaligned");
requiresCopyBlock = true;
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index 82d2430b91445..b9728f7899179 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -106,7 +106,7 @@ class OptBoolsDsc
// B3: GT_RETURN (BBJ_RETURN)
// B4: GT_RETURN (BBJ_RETURN)
//
-// Case 2: if B1.bbJumpDest == B2->bbNext, it transforms
+// Case 2: if B2->NextIs(B1.bbJumpDest), it transforms
// B1 : brtrue(t1, B3)
// B2 : brtrue(t2, Bx)
// B3 :
@@ -136,7 +136,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()
m_sameTarget = true;
}
- else if (m_b1->bbJumpDest == m_b2->bbNext)
+ else if (m_b2->NextIs(m_b1->bbJumpDest))
{
// Given the following sequence of blocks :
// B1: brtrue(t1, B3)
@@ -480,13 +480,13 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock()
m_t3 = nullptr;
bool foundEndOfOrConditions = false;
- if ((m_b1->bbNext == m_b2) && (m_b1->bbJumpDest == m_b2->bbNext))
+ if (m_b1->NextIs(m_b2) && m_b2->NextIs(m_b1->bbJumpDest))
{
// Found the end of two (or more) conditions being ORed together.
// The final condition has been inverted.
foundEndOfOrConditions = true;
}
- else if ((m_b1->bbNext == m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest))
+ else if (m_b1->NextIs(m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest))
{
// Found two conditions connected together.
}
@@ -848,7 +848,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
}
else
{
- edge2 = m_comp->fgGetPredForBlock(m_b2->bbNext, m_b2);
+ edge2 = m_comp->fgGetPredForBlock(m_b2->Next(), m_b2);
m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);
@@ -882,7 +882,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
m_b1->bbJumpSwt = m_b2->bbJumpSwt;
#endif
assert(m_b2->KindIs(BBJ_RETURN));
- assert(m_b1->bbNext == m_b2);
+ assert(m_b1->NextIs(m_b2));
assert(m_b3 != nullptr);
}
else
@@ -890,8 +890,8 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
assert(m_b1->KindIs(BBJ_COND));
assert(m_b2->KindIs(BBJ_COND));
assert(m_b1->bbJumpDest == m_b2->bbJumpDest);
- assert(m_b1->bbNext == m_b2);
- assert(m_b2->bbNext != nullptr);
+ assert(m_b1->NextIs(m_b2));
+ assert(!m_b2->IsLast());
}
if (!optReturnBlock)
@@ -900,7 +900,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
//
// Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1'
// Remove pred 'm_b2' for 'm_b2->bbJumpDest'
- m_comp->fgReplacePred(m_b2->bbNext, m_b2, m_b1);
+ m_comp->fgReplacePred(m_b2->Next(), m_b2, m_b1);
m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2);
}
@@ -1463,7 +1463,7 @@ PhaseStatus Compiler::optOptimizeBools()
numPasses++;
change = false;
- for (BasicBlock* b1 = fgFirstBB; b1 != nullptr; b1 = retry ? b1 : b1->bbNext)
+ for (BasicBlock* b1 = fgFirstBB; b1 != nullptr; b1 = retry ? b1 : b1->Next())
{
retry = false;
@@ -1476,7 +1476,7 @@ PhaseStatus Compiler::optOptimizeBools()
// If there is no next block, we're done
- BasicBlock* b2 = b1->bbNext;
+ BasicBlock* b2 = b1->Next();
if (b2 == nullptr)
{
break;
@@ -1494,7 +1494,7 @@ PhaseStatus Compiler::optOptimizeBools()
if (b2->KindIs(BBJ_COND))
{
- if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext))
+ if ((b1->bbJumpDest != b2->bbJumpDest) && !b2->NextIs(b1->bbJumpDest))
{
continue;
}
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 75f4c7ed4cb83..ae16f3cc01cd4 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -490,7 +490,7 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
{
reportBefore();
/* The loop has a new head - Just update the loop table */
- loop.lpHead = block->bbPrev;
+ loop.lpHead = block->Prev();
}
reportAfter();
@@ -741,9 +741,9 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT
bool initBlockOk = (predBlock == initBlock);
if (!initBlockOk)
{
- if (predBlock->KindIs(BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) &&
+ if (predBlock->KindIs(BBJ_NONE) && predBlock->NextIs(optLoopTable[loopInd].lpEntry) &&
(predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) &&
- (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough())
+ !predBlock->IsFirst() && predBlock->Prev()->bbFallsThrough())
{
initBlockOk = true;
}
@@ -1150,10 +1150,10 @@ bool Compiler::optExtractInitTestIncr(
// If we are rebuilding the loop table, we would already have the pre-header block introduced
// the first time, which might be empty if no hoisting has yet occurred. In this case, look a
// little harder for the possible loop initialization statement.
- if (initBlock->KindIs(BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) &&
- (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough())
+ if (initBlock->KindIs(BBJ_NONE) && initBlock->NextIs(top) && (initBlock->countOfInEdges() == 1) &&
+ !initBlock->IsFirst() && initBlock->Prev()->bbFallsThrough())
{
- initBlock = initBlock->bbPrev;
+ initBlock = initBlock->Prev();
phdrStmt = initBlock->firstStmt();
}
}
@@ -1377,7 +1377,7 @@ void Compiler::optCheckPreds()
{
// make sure this pred is part of the BB list
BasicBlock* bb;
- for (bb = fgFirstBB; bb; bb = bb->bbNext)
+ for (bb = fgFirstBB; bb; bb = bb->Next())
{
if (bb == predBlock)
{
@@ -1394,7 +1394,7 @@ void Compiler::optCheckPreds()
}
FALLTHROUGH;
case BBJ_NONE:
- noway_assert(bb->bbNext == block);
+ noway_assert(bb->NextIs(block));
break;
case BBJ_EHFILTERRET:
case BBJ_ALWAYS:
@@ -1888,7 +1888,7 @@ class LoopSearch
// otherwise the loop is still valid and this may be a (flow-wise) back-edge
// of an outer loop. For the dominance test, if `predBlock` is a new block, use
// its unique predecessor since the dominator tree has info for that.
- BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock);
+ BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->Prev() : predBlock);
if (comp->fgDominate(entry, effectivePred))
{
// Outer loop back-edge
@@ -1923,14 +1923,13 @@ class LoopSearch
isFirstVisit = true;
}
- if (isFirstVisit && (predBlock->bbNext != nullptr) &&
- (PositionNum(predBlock->bbNext) == predBlock->bbNum))
+ if (isFirstVisit && !predBlock->IsLast() && (PositionNum(predBlock->Next()) == predBlock->bbNum))
{
// We've created a new block immediately after `predBlock` to
// reconnect what was fall-through. Mark it as in-loop also;
// it needs to stay with `prev` and if it exits the loop we'd
// just need to re-create it if we tried to move it out.
- loopBlocks.Insert(predBlock->bbNext->bbNum);
+ loopBlocks.Insert(predBlock->Next()->bbNum);
}
}
}
@@ -1960,9 +1959,9 @@ class LoopSearch
// This must be a block we inserted to connect fall-through after moving blocks.
// To determine if it's in the loop or not, use the number of its unique predecessor
// block.
- assert(block->bbPreds->getSourceBlock() == block->bbPrev);
+ assert(block->PrevIs(block->bbPreds->getSourceBlock()));
assert(block->bbPreds->getNextPredEdge() == nullptr);
- return block->bbPrev->bbNum;
+ return block->Prev()->bbNum;
}
return block->bbNum;
}
@@ -1982,9 +1981,9 @@ class LoopSearch
// Compaction (if it needs to happen) will require an insertion point.
BasicBlock* moveAfter = nullptr;
- for (BasicBlock* previous = top->bbPrev; previous != bottom;)
+ for (BasicBlock* previous = top->Prev(); previous != bottom;)
{
- BasicBlock* block = previous->bbNext;
+ BasicBlock* block = previous->Next();
if (loopBlocks.IsMember(block->bbNum))
{
@@ -2008,11 +2007,11 @@ class LoopSearch
// If so, give up on recognition of this loop.
//
BasicBlock* lastNonLoopBlock = block;
- BasicBlock* nextLoopBlock = block->bbNext;
+ BasicBlock* nextLoopBlock = block->Next();
while ((nextLoopBlock != nullptr) && !loopBlocks.IsMember(nextLoopBlock->bbNum))
{
lastNonLoopBlock = nextLoopBlock;
- nextLoopBlock = nextLoopBlock->bbNext;
+ nextLoopBlock = nextLoopBlock->Next();
}
if (nextLoopBlock == nullptr)
@@ -2048,7 +2047,7 @@ class LoopSearch
}
// Now physically move the blocks.
- BasicBlock* moveBefore = moveAfter->bbNext;
+ BasicBlock* moveBefore = moveAfter->Next();
comp->fgUnlinkRange(block, lastNonLoopBlock);
comp->fgMoveBlocksAfter(block, lastNonLoopBlock, moveAfter);
@@ -2135,7 +2134,7 @@ class LoopSearch
//
BasicBlock* TryAdvanceInsertionPoint(BasicBlock* oldMoveAfter)
{
- BasicBlock* newMoveAfter = oldMoveAfter->bbNext;
+ BasicBlock* newMoveAfter = oldMoveAfter->Next();
if (!BasicBlock::sameEHRegion(oldMoveAfter, newMoveAfter))
{
@@ -2324,7 +2323,7 @@ class LoopSearch
else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext))
{
// We've made `block`'s jump target its bbNext, so remove the jump.
- if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev))
+ if (!comp->fgOptimizeBranchToNext(block, newNext, block->Prev()))
{
// If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to
// prevent assertions from complaining about it.
@@ -2463,7 +2462,7 @@ class LoopSearch
break;
}
- if (block->bbFallsThrough() && !loopBlocks.IsMember(block->bbNext->bbNum))
+ if (block->bbFallsThrough() && !loopBlocks.IsMember(block->Next()->bbNum))
{
// Found a fall-through exit.
lastExit = block;
@@ -2502,9 +2501,9 @@ void Compiler::optFindNaturalLoops()
LoopSearch search(this);
- for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext)
+ for (BasicBlock* head = fgFirstBB; !head->IsLast(); head = head->Next())
{
- BasicBlock* top = head->bbNext;
+ BasicBlock* top = head->Next();
// Blocks that are rarely run have a zero bbWeight and should never be optimized here.
if (top->bbWeight == BB_ZERO_WEIGHT)
@@ -2733,7 +2732,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
if (addPreds && blk->bbFallsThrough())
{
- fgAddRefPred(blk->bbNext, blk);
+ fgAddRefPred(blk->Next(), blk);
}
BasicBlock* newJumpDest = nullptr;
@@ -3031,7 +3030,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
//
BasicBlock* const t = optLoopTable[loopInd].lpTop;
assert(siblingB->KindIs(BBJ_COND));
- assert(siblingB->bbNext == t);
+ assert(siblingB->NextIs(t));
JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling);
@@ -3205,7 +3204,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
// Because of this, introducing a block before t automatically gives us
// the right flow out of h.
//
- assert(h->bbNext == t);
+ assert(h->NextIs(t));
assert(h->bbFallsThrough());
assert(h->KindIs(BBJ_NONE, BBJ_COND));
if (h->KindIs(BBJ_COND))
@@ -3329,8 +3328,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
}
}
- assert(h->bbNext == newT);
- assert(newT->bbNext == t);
+ assert(h->NextIs(newT));
+ assert(newT->NextIs(t));
// With the Option::Current we are changing which block is loop top.
// Make suitable updates.
@@ -3360,7 +3359,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
childLoop = optLoopTable[childLoop].lpSibling)
{
if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) &&
- newT->KindIs(BBJ_NONE) && (newT->bbNext == origE))
+ newT->KindIs(BBJ_NONE) && newT->NextIs(origE))
{
optUpdateLoopHead(childLoop, h, newT);
@@ -3434,7 +3433,7 @@ BasicBlock* Compiler::optLoopEntry(BasicBlock* preHeader)
if (preHeader->KindIs(BBJ_NONE))
{
- return preHeader->bbNext;
+ return preHeader->Next();
}
else
{
@@ -4347,7 +4346,7 @@ PhaseStatus Compiler::optUnrollLoops()
BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt));
BasicBlock* insertAfter = bottom;
- BasicBlock* const tail = bottom->bbNext;
+ BasicBlock* const tail = bottom->Next();
BasicBlock::loopNumber newLoopNum = loop.lpParent;
bool anyNestedLoopsUnrolledThisLoop = false;
int lval;
@@ -4358,7 +4357,7 @@ PhaseStatus Compiler::optUnrollLoops()
// Note: we can't use the loop.LoopBlocks() iterator, as it captures loop.lpBottom->bbNext at the
// beginning of iteration, and we insert blocks before that. So we need to evaluate lpBottom->bbNext
// every iteration.
- for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext)
+ for (BasicBlock* block = loop.lpTop; !loop.lpBottom->NextIs(block); block = block->Next())
{
BasicBlock* newBlock = insertAfter =
fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true);
@@ -4370,8 +4369,7 @@ PhaseStatus Compiler::optUnrollLoops()
// to clone a block in the loop, splice out and forget all the blocks we cloned so far:
// put the loop blocks back to how they were before we started cloning blocks,
// and abort unrolling the loop.
- bottom->bbNext = tail;
- tail->bbPrev = bottom;
+ bottom->SetNext(tail);
loop.lpFlags |= LPFLG_DONT_UNROLL; // Mark it so we don't try to unroll it again.
INDEBUG(++unrollFailures);
JITDUMP("Failed to unroll loop " FMT_LP ": block cloning failed on " FMT_BB "\n", lnum,
@@ -4422,7 +4420,7 @@ PhaseStatus Compiler::optUnrollLoops()
// Now redirect any branches within the newly-cloned iteration.
// Don't include `bottom` in the iteration, since we've already changed the
// newBlock->bbJumpKind, above.
- for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->bbNext)
+ for (BasicBlock* block = loop.lpTop; block != loop.lpBottom; block = block->Next())
{
BasicBlock* newBlock = blockMap[block];
optCopyBlkDest(block, newBlock);
@@ -4434,7 +4432,7 @@ PhaseStatus Compiler::optUnrollLoops()
// After doing this, all the newly cloned blocks now have proper flow and pred lists.
//
BasicBlock* const clonedTop = blockMap[loop.lpTop];
- fgAddRefPred(clonedTop, clonedTop->bbPrev);
+ fgAddRefPred(clonedTop, clonedTop->Prev());
/* update the new value for the unrolled iterator */
@@ -4478,7 +4476,7 @@ PhaseStatus Compiler::optUnrollLoops()
//
for (BasicBlock* succ : block->Succs(this))
{
- if ((block == bottom) && (succ == bottom->bbNext))
+ if ((block == bottom) && bottom->NextIs(succ))
{
continue;
}
@@ -4735,7 +4733,7 @@ bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB)
}
}
- curBB = curBB->bbNext;
+ curBB = curBB->Next();
}
// If we didn't find any blocks that contained a gc safe point and
@@ -4860,14 +4858,14 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
//
BasicBlock* const bTop = bTest->bbJumpDest;
- if (bTop != block->bbNext)
+ if (!block->NextIs(bTop))
{
return false;
}
// Since bTest is a BBJ_COND it will have a bbNext
//
- BasicBlock* const bJoin = bTest->bbNext;
+ BasicBlock* const bJoin = bTest->Next();
noway_assert(bJoin != nullptr);
// 'block' must be in the same try region as the condition, since we're going to insert a duplicated condition
@@ -4879,7 +4877,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
return false;
}
- // The duplicated condition block will branch to bTest->bbNext, so that also better be in the
+ // The duplicated condition block will branch to bTest->Next(), so that also better be in the
// same try region (or no try region) to avoid generating illegal flow.
if (bJoin->hasTryIndex() && !BasicBlock::sameTryRegion(block, bJoin))
{
@@ -5216,15 +5214,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
weight_t const testToAfterWeight = weightTop * testToAfterLikelihood;
FlowEdge* const edgeTestToNext = fgGetPredForBlock(bTop, bTest);
- FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->bbNext, bTest);
+ FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->Next(), bTest);
JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTop->bbNum,
testToNextWeight);
JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (exit loop)\n", bTest->bbNum,
- bTest->bbNext->bbNum, testToAfterWeight);
+ bTest->Next()->bbNum, testToAfterWeight);
edgeTestToNext->setEdgeWeights(testToNextWeight, testToNextWeight, bTop);
- edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->bbNext);
+ edgeTestToAfter->setEdgeWeights(testToAfterWeight, testToAfterWeight, bTest->Next());
// Adjust edges out of block, using the same distribution.
//
@@ -5236,15 +5234,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood;
weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood;
- FlowEdge* const edgeBlockToNext = fgGetPredForBlock(bNewCond->bbNext, bNewCond);
+ FlowEdge* const edgeBlockToNext = fgGetPredForBlock(bNewCond->Next(), bNewCond);
FlowEdge* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond);
JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum,
- bNewCond->bbNext->bbNum, blockToNextWeight);
+ bNewCond->Next()->bbNum, blockToNextWeight);
JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum,
bNewCond->bbJumpDest->bbNum, blockToAfterWeight);
- edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->bbNext);
+ edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->Next());
edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest);
#ifdef DEBUG
@@ -5253,7 +5251,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
if ((activePhaseChecks & PhaseChecks::CHECK_PROFILE) == PhaseChecks::CHECK_PROFILE)
{
const ProfileChecks checks = (ProfileChecks)JitConfig.JitProfileChecks();
- const bool nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->bbNext, checks);
+ const bool nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->Next(), checks);
const bool jumpProfileOk = fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest, checks);
if (hasFlag(checks, ProfileChecks::RAISE_ASSERT))
@@ -5269,7 +5267,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
if (verbose)
{
printf("\nDuplicated loop exit block at " FMT_BB " for loop (" FMT_BB " - " FMT_BB ")\n", bNewCond->bbNum,
- bNewCond->bbNext->bbNum, bTest->bbNum);
+ bNewCond->Next()->bbNum, bTest->bbNum);
printf("Estimated code size expansion is %d\n", estDupCostSz);
fgDumpBlock(bNewCond);
@@ -6215,7 +6213,7 @@ bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip,
break;
}
- beg = beg->bbNext;
+ beg = beg->Next();
}
return false;
@@ -6278,7 +6276,7 @@ bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
return true;
}
- return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var);
+ return optIsVarAssigned(optLoopTable[lnum].lpHead->Next(), optLoopTable[lnum].lpBottom, nullptr, var);
}
}
@@ -7987,7 +7985,7 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* loopVnInv
//
void Compiler::fgSetEHRegionForNewLoopHead(BasicBlock* newHead, BasicBlock* top)
{
- assert(newHead->bbNext == top);
+ assert(newHead->NextIs(top));
assert(!fgIsFirstBlockOfFilterOrHandler(top));
if ((top->bbFlags & BBF_TRY_BEG) != 0)
@@ -8200,13 +8198,13 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
{
// Allow for either the fall-through or branch to target 'entry'.
BasicBlock* skipLoopBlock;
- if (head->bbNext == entry)
+ if (head->NextIs(entry))
{
skipLoopBlock = head->bbJumpDest;
}
else
{
- skipLoopBlock = head->bbNext;
+ skipLoopBlock = head->Next();
}
assert(skipLoopBlock != entry);
@@ -8302,7 +8300,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
case BBJ_NONE:
// This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop,
// meaning it must be fall-through to 'entry', and we must have a top-entry loop.
- noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
+ noway_assert((entry == top) && (predBlock == head) && predBlock->NextIs(preHead));
fgRemoveRefPred(entry, predBlock);
fgAddRefPred(preHead, predBlock);
break;
@@ -8311,11 +8309,11 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
if (predBlock->bbJumpDest == entry)
{
predBlock->bbJumpDest = preHead;
- noway_assert(predBlock->bbNext != preHead);
+ noway_assert(!predBlock->NextIs(preHead));
}
else
{
- noway_assert((entry == top) && (predBlock == head) && (predBlock->bbNext == preHead));
+ noway_assert((entry == top) && (predBlock == head) && predBlock->NextIs(preHead));
}
fgRemoveRefPred(entry, predBlock);
fgAddRefPred(preHead, predBlock);
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 017509086d208..7c99c264439ca 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -52,7 +52,7 @@ class PatchpointTransformer
}
int count = 0;
- for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->bbNext))
+ for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->Next()))
{
if (block->bbFlags & BBF_PATCHPOINT)
{
diff --git a/src/coreclr/jit/promotionliveness.cpp b/src/coreclr/jit/promotionliveness.cpp
index 77078bddb4c29..422dc3f7e710a 100644
--- a/src/coreclr/jit/promotionliveness.cpp
+++ b/src/coreclr/jit/promotionliveness.cpp
@@ -299,9 +299,9 @@ void PromotionLiveness::InterBlockLiveness()
{
changed = false;
- for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->bbPrev)
+ for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->Prev())
{
- m_hasPossibleBackEdge |= block->bbNext && (block->bbNext->bbNum <= block->bbNum);
+ m_hasPossibleBackEdge |= !block->IsLast() && (block->Next()->bbNum <= block->bbNum);
changed |= PerBlockLiveness(block);
}
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index faf0641451d4f..7dcae117530c0 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -871,7 +871,7 @@ void RangeCheck::MergeEdgeAssertions(ValueNum normalLclVN, ASSERT_VALARG_TP asse
}
int curCns = pRange->uLimit.cns;
- int limCns = (limit.IsBinOpArray()) ? limit.cns : 0;
+ int limCns = limit.IsBinOpArray() ? limit.cns : 0;
// Incoming limit doesn't tighten the existing upper limit.
if (limCns >= curCns)
@@ -935,7 +935,7 @@ void RangeCheck::MergeAssertion(BasicBlock* block, GenTree* op, Range* pRange DE
{
GenTreePhiArg* arg = (GenTreePhiArg*)op;
BasicBlock* pred = arg->gtPredBB;
- if (pred->bbFallsThrough() && pred->bbNext == block)
+ if (pred->bbFallsThrough() && pred->NextIs(block))
{
assertions = pred->bbAssertionOut;
JITDUMP("Merge assertions from pred " FMT_BB " edge: ", pred->bbNum);
diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp
index dfbd1863cb4b6..a8365f7b93bdf 100644
--- a/src/coreclr/jit/redundantbranchopts.cpp
+++ b/src/coreclr/jit/redundantbranchopts.cpp
@@ -48,7 +48,7 @@ PhaseStatus Compiler::optRedundantBranches()
{
bool madeChangesThisBlock = m_compiler->optRedundantRelop(block);
- BasicBlock* const bbNext = block->bbNext;
+ BasicBlock* const bbNext = block->Next();
BasicBlock* const bbJump = block->bbJumpDest;
madeChangesThisBlock |= m_compiler->optRedundantBranch(block);
@@ -568,7 +568,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block)
(rii.vnRelation == ValueNumStore::VN_RELATION_KIND::VRK_Swap);
BasicBlock* const trueSuccessor = domBlock->bbJumpDest;
- BasicBlock* const falseSuccessor = domBlock->bbNext;
+ BasicBlock* const falseSuccessor = domBlock->Next();
// If we can trace the flow from the dominating relop, we can infer its value.
//
@@ -613,7 +613,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block)
//
const bool relopIsFalse = rii.reverseSense ^ (domIsSameRelop | domIsInferredRelop);
JITDUMP("Fall through successor " FMT_BB " of " FMT_BB " reaches, relop [%06u] must be %s\n",
- domBlock->bbNext->bbNum, domBlock->bbNum, dspTreeID(tree),
+ domBlock->Next()->bbNum, domBlock->bbNum, dspTreeID(tree),
relopIsFalse ? "false" : "true");
relopValue = relopIsFalse ? 0 : 1;
break;
@@ -711,7 +711,7 @@ struct JumpThreadInfo
JumpThreadInfo(Compiler* comp, BasicBlock* block)
: m_block(block)
, m_trueTarget(block->bbJumpDest)
- , m_falseTarget(block->bbNext)
+ , m_falseTarget(block->Next())
, m_fallThroughPred(nullptr)
, m_ambiguousVNBlock(nullptr)
, m_truePreds(BlockSetOps::MakeEmpty(comp))
@@ -1072,8 +1072,8 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl
// latter should prove useful in subsequent work, where we aim to enable jump
// threading in cases where block has side effects.
//
- BasicBlock* const domTrueSuccessor = domIsSameRelop ? domBlock->bbJumpDest : domBlock->bbNext;
- BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->bbNext : domBlock->bbJumpDest;
+ BasicBlock* const domTrueSuccessor = domIsSameRelop ? domBlock->bbJumpDest : domBlock->Next();
+ BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->Next() : domBlock->bbJumpDest;
JumpThreadInfo jti(this, block);
for (BasicBlock* const predBlock : block->PredBlocks())
@@ -1143,7 +1143,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl
// Note if the true or false pred is the fall through pred.
//
- if (predBlock->bbNext == block)
+ if (predBlock->NextIs(block))
{
JITDUMP(FMT_BB " is the fall-through pred\n", predBlock->bbNum);
assert(jti.m_fallThroughPred == nullptr);
@@ -1403,7 +1403,7 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN
// Note if the true or false pred is the fall through pred.
//
- if (predBlock->bbNext == block)
+ if (predBlock->NextIs(block))
{
JITDUMP(FMT_BB " is the fall-through pred\n", predBlock->bbNum);
assert(jti.m_fallThroughPred == nullptr);
diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp
index 125c2cf2fbebe..fe1ecb8d39d74 100644
--- a/src/coreclr/jit/switchrecognition.cpp
+++ b/src/coreclr/jit/switchrecognition.cpp
@@ -26,7 +26,7 @@ PhaseStatus Compiler::optSwitchRecognition()
// a series of ccmp instruction (see ifConvert phase).
#ifdef TARGET_XARCH
bool modified = false;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next())
{
// block->KindIs(BBJ_COND) check is for better throughput.
if (block->KindIs(BBJ_COND) && !block->isRunRarely() && optSwitchDetectAndConvert(block))
@@ -95,10 +95,10 @@ bool IsConstantTestCondBlock(const BasicBlock* block,
}
*isReversed = rootNode->gtGetOp1()->OperIs(GT_NE);
- *blockIfTrue = *isReversed ? block->bbNext : block->bbJumpDest;
- *blockIfFalse = *isReversed ? block->bbJumpDest : block->bbNext;
+ *blockIfTrue = *isReversed ? block->Next() : block->bbJumpDest;
+ *blockIfFalse = *isReversed ? block->bbJumpDest : block->Next();
- if ((block->bbNext == block->bbJumpDest) || (block->bbJumpDest == block))
+ if (block->NextIs(block->bbJumpDest) || (block->bbJumpDest == block))
{
// Ignoring weird cases like a condition jumping to itself
return false;
@@ -166,7 +166,7 @@ bool Compiler::optSwitchDetectAndConvert(BasicBlock* firstBlock)
const BasicBlock* prevBlock = firstBlock;
// Now walk the next blocks and see if they are basically the same type of test
- for (const BasicBlock* currBb = firstBlock->bbNext; currBb != nullptr; currBb = currBb->bbNext)
+ for (const BasicBlock* currBb = firstBlock->Next(); currBb != nullptr; currBb = currBb->Next())
{
GenTree* currVariableNode = nullptr;
ssize_t currCns = 0;
@@ -309,7 +309,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
const BasicBlock* lastBlock = firstBlock;
for (int i = 0; i < testsCount - 1; i++)
{
- lastBlock = lastBlock->bbNext;
+ lastBlock = lastBlock->Next();
}
BasicBlock* blockIfTrue = nullptr;
@@ -338,11 +338,11 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
gtUpdateStmtSideEffects(firstBlock->lastStmt());
// Unlink and remove the whole chain of conditional blocks
- BasicBlock* blockToRemove = firstBlock->bbNext;
+ BasicBlock* blockToRemove = firstBlock->Next();
fgRemoveRefPred(blockToRemove, firstBlock);
- while (blockToRemove != lastBlock->bbNext)
+ while (!lastBlock->NextIs(blockToRemove))
{
- BasicBlock* nextBlock = blockToRemove->bbNext;
+ BasicBlock* nextBlock = blockToRemove->Next();
fgRemoveBlock(blockToRemove, true);
blockToRemove = nextBlock;
}
@@ -351,12 +351,12 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
assert((jumpCount > 0) && (jumpCount <= SWITCH_MAX_DISTANCE + 1));
const auto jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jumpCount + 1 /*default case*/];
+ fgHasSwitch = true;
firstBlock->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc;
firstBlock->bbJumpSwt->bbsCount = jumpCount + 1;
firstBlock->bbJumpSwt->bbsHasDefault = true;
firstBlock->bbJumpSwt->bbsDstTab = jmpTab;
- firstBlock->bbNext = isReversed ? blockIfTrue : blockIfFalse;
- fgHasSwitch = true;
+ firstBlock->SetNext(isReversed ? blockIfTrue : blockIfFalse);
// Splitting doesn't work well with jump-tables currently
opts.compProcedureSplitting = false;
diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp
index 27047d50a19be..e035b1188a22f 100644
--- a/src/coreclr/jit/unwind.cpp
+++ b/src/coreclr/jit/unwind.cpp
@@ -127,9 +127,9 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func,
{
assert(func->funKind == FUNC_HANDLER);
*ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg));
- *ppEndLoc = (HBtab->ebdHndLast->bbNext == nullptr)
- ? nullptr
- : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->bbNext));
+ *ppEndLoc = HBtab->ebdHndLast->IsLast() ? nullptr
+ : new (this, CMK_UnwindInfo)
+ emitLocation(ehEmitCookie(HBtab->ebdHndLast->Next()));
}
}
}