JIT: Make BasicBlock::bbPrev and bbNext private #93032

Merged · 15 commits · Oct 6, 2023
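At a glance, the PR replaces direct reads of the now-private bbNext/bbPrev fields with the new accessor methods. A minimal before/after sketch of the migration pattern (hypothetical call site mirroring the first hunk below, not taken verbatim from the diff):

// Before: direct field access (bbNext is now private)
//     if (predBlock->bbNext == block) { ... }
// After: accessor-based, as this PR rewrites call sites
if (predBlock->NextIs(block))
{
    // predBlock falls through to block
}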
2 changes: 1 addition & 1 deletion src/coreclr/jit/assertionprop.cpp
@@ -5268,7 +5268,7 @@ class AssertionPropFlowCallback
{
// Scenario where the next block and the conditional jump's target are the same block.
// In that case, intersect the assertions present on both out edges of predBlock.
assert(predBlock->bbNext == block);
assert(predBlock->NextIs(block));
BitVecOps::IntersectionD(apTraits, pAssertionOut, predBlock->bbAssertionOut);

if (VerboseDataflow())
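For context, the assertion logic above merges dataflow facts when a conditional block's fall-through and taken edges reach the same successor. A simplified, self-contained model of that intersection, with std::bitset standing in for the JIT's BitVec (illustrative only, not JIT code):

#include <bitset>
#include <iostream>

int main()
{
    // Facts proven on each out edge of the predecessor block.
    std::bitset<8> assertionsOutNext("10110010"); // fall-through edge
    std::bitset<8> assertionsOutJump("10011010"); // taken edge

    // Analogue of BitVecOps::IntersectionD: only facts that hold on BOTH
    // edges may be assumed in the shared successor block.
    std::bitset<8> merged = assertionsOutNext & assertionsOutJump;
    std::cout << merged << '\n'; // prints 10010010
    return 0;
}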
42 changes: 36 additions & 6 deletions src/coreclr/jit/block.cpp
@@ -133,7 +133,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
// these cannot cause transfer to the handler...)
// TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
// something like:
// for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->bbNext; bb = bb->bbNext)
// for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->Next(); bb = bb->Next())
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
@@ -160,7 +160,7 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
if (enclosingDsc->HasFilter())
{
for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg;
filterBlk = filterBlk->bbNext)
filterBlk = filterBlk->Next())
{
res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res);

@@ -186,6 +186,36 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
return res;
}

//------------------------------------------------------------------------
// IsLastHotBlock: see if this is the last block before the cold section
[Review thread on this line]
Member: Should we have a corresponding IsFirstColdBlock(Compiler*)?
Member (author): I can add that in for consistency.
//
// Arguments:
// compiler - current compiler instance
//
// Returns:
// true if the next block is fgFirstColdBlock
// (if fgFirstColdBlock is null, this call is equivalent to IsLast())
//
bool BasicBlock::IsLastHotBlock(Compiler* compiler) const
{
return (bbNext == compiler->fgFirstColdBlock);
}

//------------------------------------------------------------------------
// IsFirstColdBlock: see if this is the first block in the cold section
//
// Arguments:
// compiler - current compiler instance
//
// Returns:
// true if this is fgFirstColdBlock
// (fgFirstColdBlock is null if there is no cold code)
//
bool BasicBlock::IsFirstColdBlock(Compiler* compiler) const
{
return (this == compiler->fgFirstColdBlock);
}
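A hypothetical walk showing how these two helpers might be used together (assumes `comp` is a valid Compiler* whose block list starts at fgFirstBB; this loop is illustrative, not from the PR):

for (BasicBlock* block = comp->fgFirstBB; block != nullptr; block = block->Next())
{
    if (block->IsLastHotBlock(comp))
    {
        // Everything after `block` (if anything) lives in the cold section;
        // when fgFirstColdBlock is null this fires on the last block instead.
        break;
    }
}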

//------------------------------------------------------------------------
// checkPredListOrder: see if pred list is properly ordered
//
@@ -1509,10 +1539,10 @@ bool BasicBlock::isBBCallAlwaysPair() const
assert(!(this->bbFlags & BBF_RETLESS_CALL));
#endif
// Some asserts that the next block is a BBJ_ALWAYS of the proper form.
assert(this->bbNext != nullptr);
assert(this->bbNext->KindIs(BBJ_ALWAYS));
assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS);
assert(this->bbNext->isEmpty());
assert(!this->IsLast());
assert(this->Next()->KindIs(BBJ_ALWAYS));
assert(this->Next()->bbFlags & BBF_KEEP_BBJ_ALWAYS);
assert(this->Next()->isEmpty());

return true;
}
113 changes: 78 additions & 35 deletions src/coreclr/jit/block.h
@@ -508,10 +508,49 @@ struct BasicBlock : private LIR::Range
{
friend class LIR;

private:
BasicBlock* bbNext; // next BB in ascending PC offset order
BasicBlock* bbPrev;

void setNext(BasicBlock* next)
BBjumpKinds bbJumpKind; // jump (if any) at the end of this block

public:
BBjumpKinds GetBBJumpKind() const
{
return bbJumpKind;
}

void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* compiler))
{
#ifdef DEBUG
// BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
// TODO: Change assert to check if compiler is in appropriate optimization phase to use BBJ_NONE
// (right now, this assertion does the null check to avoid unused variable warnings)
assert((kind != BBJ_NONE) || (compiler != nullptr));
#endif // DEBUG
bbJumpKind = kind;
}
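The DEBUG_ARG pattern above threads an extra parameter through only in DEBUG builds. A minimal sketch of how such a macro typically works (assumed definition; the JIT's actual macro may differ in detail):

#ifdef DEBUG
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

// In DEBUG builds this declares SetKind(int kind, Compiler* compiler); in
// release builds it declares SetKind(int kind). Asserts can then consume the
// extra context at zero release-build cost.
void SetKind(int kind DEBUG_ARG(Compiler* compiler));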

BasicBlock* Prev() const
{
return bbPrev;
}

void SetPrev(BasicBlock* prev)
{
bbPrev = prev;
if (prev)
{
prev->bbNext = this;
}
}

BasicBlock* Next() const
{
return bbNext;
}

void SetNext(BasicBlock* next)
{
bbNext = next;
if (next)
@@ -520,6 +559,37 @@ struct BasicBlock : private LIR::Range
}
}

bool IsFirst() const
{
return (bbPrev == nullptr);
}

bool IsLast() const
{
return (bbNext == nullptr);
}

bool PrevIs(BasicBlock* block) const
{
return (bbPrev == block);
}

bool NextIs(BasicBlock* block) const
{
return (bbNext == block);
}

bool IsLastHotBlock(Compiler* compiler) const;

bool IsFirstColdBlock(Compiler* compiler) const;

/* The following union describes the jump target(s) of this block */
union {
unsigned bbJumpOffs; // PC offset (temporary only)
BasicBlock* bbJumpDest; // basic block
BBswtDesc* bbJumpSwt; // switch descriptor
};
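Only one union member is meaningful at a time, selected by the block's jump kind. A hedged reader sketch (illustrative; real call sites may go through richer helpers):

if (block->KindIs(BBJ_SWITCH))
{
    BBswtDesc* const desc = block->bbJumpSwt;     // table of switch targets
}
else if (block->KindIs(BBJ_COND) || block->KindIs(BBJ_ALWAYS))
{
    BasicBlock* const target = block->bbJumpDest; // single explicit target
}
// For other kinds (e.g. BBJ_NONE), neither member should be read.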

BasicBlockFlags bbFlags;

static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
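Because SetPrev and SetNext each fix up the opposite link on the neighboring block, splicing a block into the list takes just two calls. A sketch of inserting newBlock after block under these semantics (hypothetical helper, not part of the PR):

void InsertAfter(BasicBlock* block, BasicBlock* newBlock)
{
    // Wires newBlock->bbNext and, if the old successor exists, its bbPrev.
    newBlock->SetNext(block->Next());
    // Wires block->bbNext to newBlock and newBlock->bbPrev back to block.
    block->SetNext(newBlock);
}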
@@ -702,33 +772,6 @@ struct BasicBlock : private LIR::Range
// a block corresponding to an exit from the try of a try/finally.
bool isBBCallAlwaysPairTail() const;

private:
BBjumpKinds bbJumpKind; // jump (if any) at the end of this block

public:
BBjumpKinds GetBBJumpKind() const
{
return bbJumpKind;
}

void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp))
{
#ifdef DEBUG
// BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
// TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE
// (right now, this assertion does the null check to avoid unused variable warnings)
assert((kind != BBJ_NONE) || (comp != nullptr));
#endif // DEBUG
bbJumpKind = kind;
}

/* The following union describes the jump target(s) of this block */
union {
unsigned bbJumpOffs; // PC offset (temporary only)
BasicBlock* bbJumpDest; // basic block
BBswtDesc* bbJumpSwt; // switch descriptor
};

bool KindIs(BBjumpKinds kind) const
{
return bbJumpKind == kind;
@@ -1435,10 +1478,10 @@ class BasicBlockIterator
{
assert(m_block != nullptr);
// Check that we haven't been spliced out of the list.
assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block));
assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block));
assert(m_block->IsLast() || m_block->Next()->PrevIs(m_block));
assert(m_block->IsFirst() || m_block->Prev()->NextIs(m_block));

m_block = m_block->bbNext;
m_block = m_block->Next();
return *this;
}

Expand Down Expand Up @@ -1501,7 +1544,7 @@ class BasicBlockRangeList

BasicBlockIterator end() const
{
return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block
return BasicBlockIterator(m_end->Next()); // walk until we see the block *following* the `m_end` block
}
};
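These asserts make stale iteration fail fast: if the current block is spliced out of the list mid-walk, one of its neighbor links no longer points back at it. A hedged usage sketch (assuming BasicBlockRangeList can be built from the first and last blocks of the range):

for (BasicBlock* const block : BasicBlockRangeList(first, last))
{
    // Safe: reading `block`. Unsafe: unlinking `block` itself here would
    // trip the splice asserts on the next increment.
}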

@@ -1596,18 +1639,18 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
break;

case BBJ_NONE:
m_succs[0] = block->bbNext;
m_succs[0] = block->Next();
m_begin = &m_succs[0];
m_end = &m_succs[1];
break;

case BBJ_COND:
m_succs[0] = block->bbNext;
m_succs[0] = block->Next();
m_begin = &m_succs[0];

// If both fall-through and branch successors are identical, then only include
// them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
if (block->bbJumpDest == block->bbNext)
if (block->NextIs(block->bbJumpDest))
{
m_end = &m_succs[1];
}
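A simplified, standalone model of the BBJ_COND successor dedup described above, mirroring the NumSucc()/GetSucc() behavior (toy types, not JIT code):

#include <vector>

struct Block
{
    Block* next;     // fall-through successor
    Block* jumpDest; // taken-branch successor
};

std::vector<Block*> CondSuccs(Block* b)
{
    std::vector<Block*> succs{b->next};
    if (b->jumpDest != b->next)
    {
        succs.push_back(b->jumpDest); // add the taken edge only if distinct
    }
    return succs; // one entry when both edges land on the same block
}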
4 changes: 2 additions & 2 deletions src/coreclr/jit/clrjit.natvis
@@ -105,7 +105,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
<Exec>varIndex++</Exec>
<Exec>bbLiveInMap = bbLiveInMap >> 1</Exec>
</Loop>
<Exec>block = block->bbNext</Exec>
<Exec>block = block->Next()</Exec>
</Loop>
</CustomListItems>
<Item Name="outVarToRegMaps">"OutVarToRegMaps"</Item>
@@ -124,7 +124,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
<Exec>varIndex++</Exec>
<Exec>bbLiveInMap = bbLiveInMap >> 1</Exec>
</Loop>
<Exec>block = block->bbNext</Exec>
<Exec>block = block->Next()</Exec>
</Loop>
</CustomListItems>
<Item Name="AvailableRegs mask">this-&gt;m_AvailableRegs</Item>
12 changes: 6 additions & 6 deletions src/coreclr/jit/codegenarm.cpp
@@ -123,12 +123,12 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// we would have otherwise created retless calls.
assert(block->isBBCallAlwaysPair());

assert(block->bbNext != NULL);
assert(block->bbNext->KindIs(BBJ_ALWAYS));
assert(block->bbNext->bbJumpDest != NULL);
assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
assert(!block->IsLast());
assert(block->Next()->KindIs(BBJ_ALWAYS));
assert(block->Next()->bbJumpDest != NULL);
assert(block->Next()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);

bbFinallyRet = block->bbNext->bbJumpDest;
bbFinallyRet = block->Next()->bbJumpDest;

// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
@@ -143,7 +143,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// block is RETLESS.
assert(!(block->bbFlags & BBF_RETLESS_CALL));
assert(block->isBBCallAlwaysPair());
return block->bbNext;
return block->Next();
}
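The shape genCallFinally relies on, restated as a hedged sketch (names taken from the diff; the branching is illustrative):

if (block->bbFlags & BBF_RETLESS_CALL)
{
    // The called finally never returns here, so no paired BBJ_ALWAYS follows.
}
else
{
    // Paired form: an empty BBJ_ALWAYS successor carries the address the
    // finally funclet returns to.
    assert(block->isBBCallAlwaysPair());
    BasicBlock* const continuation = block->Next()->bbJumpDest;
}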

//------------------------------------------------------------------------
4 changes: 2 additions & 2 deletions src/coreclr/jit/codegenarm64.cpp
@@ -2160,7 +2160,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
}
GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);

BasicBlock* const nextBlock = block->bbNext;
BasicBlock* const nextBlock = block->Next();

if (block->bbFlags & BBF_RETLESS_CALL)
{
@@ -2184,7 +2184,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
BasicBlock* const jumpDest = nextBlock->bbJumpDest;

// Now go to where the finally funclet needs to return to.
if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
{
// Fall-through.
// TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
4 changes: 2 additions & 2 deletions src/coreclr/jit/codegenarmarch.cpp
@@ -3338,8 +3338,8 @@ void CodeGen::genCall(GenTreeCall* call)
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert((call->IsR2RRelativeIndir() && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
(call->IsVirtualStubRelativeIndir() && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);

regNumber tmpReg = call->GetSingleTempReg();