JIT: Optimize SSA (dotnet#94672)
* Stop considering "successor EH successors" to be EH successors of each
  block. Instead, add this special case where it is actually needed:
  when computing the dominator tree.
* Update PHI arg insertion to insert the handler phi args with the 'try'
  entry block as the pred instead of the pred of the 'try'
* Optimize computation of the dominator tree to avoid multiple
  enumerations of the preds
* Optimize computation of the dominator tree to utilize computed
  postorder indices for "visited" checks, instead of a block set (see
  the sketch below)

Diffs due to LSRA using all-succs as an approximation to figure out whether
to insert exposed uses.
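
For readers outside the JIT, the following is a rough standalone sketch of what the last two bullets describe: an iterative dominator computation in the Cooper-Harvey-Kennedy style, where each block's precomputed postorder number drives the intersection walk and "already has an idom" stands in for a separate visited set, so each block's predecessors are walked just once per pass. Node, postOrderNum, Intersect, and ComputeIDoms are illustrative names, not the JIT's own, and the actual bookkeeping in this change differs in detail.

#include <vector>

struct Node
{
    std::vector<Node*> preds;
    unsigned           postOrderNum = 0; // 0..N-1; the root has the highest number
    Node*              idom         = nullptr;
};

// Climb the idom chains until the two walks meet; postorder numbers tell us
// which side is "deeper" and must move up next.
static Node* Intersect(Node* a, Node* b)
{
    while (a != b)
    {
        while (a->postOrderNum < b->postOrderNum)
        {
            a = a->idom;
        }
        while (b->postOrderNum < a->postOrderNum)
        {
            b = b->idom;
        }
    }
    return a;
}

// 'rpo' holds the reachable blocks in reverse postorder; rpo[0] is the root.
static void ComputeIDoms(const std::vector<Node*>& rpo)
{
    rpo[0]->idom = rpo[0];

    bool changed = true;
    while (changed)
    {
        changed = false;
        for (size_t i = 1; i < rpo.size(); i++)
        {
            Node* block   = rpo[i];
            Node* newIDom = nullptr;
            for (Node* pred : block->preds)
            {
                if (pred->idom == nullptr)
                {
                    // Not processed yet (or unreachable); skip it. This check
                    // plays the role of a visited set.
                    continue;
                }
                newIDom = (newIDom == nullptr) ? pred : Intersect(pred, newIDom);
            }
            if (block->idom != newIDom)
            {
                block->idom = newIDom;
                changed     = true;
            }
        }
    }
}

Because every block already carries a postorder number from the traversal that produced the reverse postorder, no separate block set has to be allocated, cleared, or re-populated between passes.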
jakobbotsch authored Nov 16, 2023
1 parent fe3abf5 commit e0b1e4d
Showing 5 changed files with 160 additions and 200 deletions.
155 changes: 107 additions & 48 deletions src/coreclr/jit/block.cpp
@@ -105,6 +105,12 @@ AllSuccessorEnumerator::AllSuccessorEnumerator(Compiler* comp, BasicBlock* block
 
 FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
 {
+    unsigned tryIndex;
+    if (!bbIsExFlowBlock(blk, &tryIndex))
+    {
+        return blk->bbPreds;
+    }
+
     BlockToFlowEdgeMap* ehPreds = GetBlockToEHPreds();
     FlowEdge* res;
     if (ehPreds->Lookup(blk, &res))
@@ -113,76 +119,129 @@ FlowEdge* Compiler::BlockPredsWithEH(BasicBlock* blk)
     }
 
     res = blk->bbPreds;
-    unsigned tryIndex;
-    if (bbIsExFlowBlock(blk, &tryIndex))
-    {
-        // Find the first block of the try.
-        EHblkDsc* ehblk = ehGetDsc(tryIndex);
-        BasicBlock* tryStart = ehblk->ebdTryBeg;
-        for (BasicBlock* const tryStartPredBlock : tryStart->PredBlocks())
-        {
-            res = new (this, CMK_FlowEdge) FlowEdge(tryStartPredBlock, res);
-
-#if MEASURE_BLOCK_SIZE
-            genFlowNodeCnt += 1;
-            genFlowNodeSize += sizeof(FlowEdge);
-#endif // MEASURE_BLOCK_SIZE
-        }
-
-        // Now add all blocks handled by this handler (except for second blocks of BBJ_CALLFINALLY/BBJ_ALWAYS pairs;
-        // these cannot cause transfer to the handler...)
-        // TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
-        // something like:
-        //   for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->Next(); bb = bb->Next())
-        //   (plus adding in any filter blocks outside the try whose exceptions are handled here).
-        // That doesn't work, however: funclets have caused us to sometimes split the body of a try into
-        // more than one sequence of contiguous blocks. We need to find a better way to do this.
-        for (BasicBlock* const bb : Blocks())
-        {
-            if (bbInExnFlowRegions(tryIndex, bb) && !bb->isBBCallAlwaysPairTail())
-            {
-                res = new (this, CMK_FlowEdge) FlowEdge(bb, res);
-
-#if MEASURE_BLOCK_SIZE
-                genFlowNodeCnt += 1;
-                genFlowNodeSize += sizeof(FlowEdge);
-#endif // MEASURE_BLOCK_SIZE
-            }
-        }
-
-        if (ehblk->HasFinallyOrFaultHandler() && (ehblk->ebdHndBeg == blk))
-        {
-            // block is a finally or fault handler; all enclosing filters are predecessors
-            unsigned enclosing = ehblk->ebdEnclosingTryIndex;
-            while (enclosing != EHblkDsc::NO_ENCLOSING_INDEX)
-            {
-                EHblkDsc* enclosingDsc = ehGetDsc(enclosing);
-                if (enclosingDsc->HasFilter())
-                {
-                    for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg;
-                         filterBlk = filterBlk->Next())
-                    {
-                        res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res);
-
-                        assert(filterBlk->VisitEHSecondPassSuccs(this, [blk](BasicBlock* succ) {
-                            return succ == blk ? BasicBlockVisit::Abort : BasicBlockVisit::Continue;
-                        }) == BasicBlockVisit::Abort);
-                    }
-                }
-
-                enclosing = enclosingDsc->ebdEnclosingTryIndex;
-            }
-        }
-
-#ifdef DEBUG
-        unsigned hash = SsaStressHashHelper();
-        if (hash != 0)
-        {
-            res = ShuffleHelper(hash, res);
-        }
-#endif // DEBUG
-        ehPreds->Set(blk, res);
-    }
-
-    return res;
-}
+
+    // Add all blocks handled by this handler (except for second blocks of BBJ_CALLFINALLY/BBJ_ALWAYS pairs;
+    // these cannot cause transfer to the handler...)
+    // TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
+    // something like:
+    //   for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->Next(); bb = bb->Next())
+    //   (plus adding in any filter blocks outside the try whose exceptions are handled here).
+    // That doesn't work, however: funclets have caused us to sometimes split the body of a try into
+    // more than one sequence of contiguous blocks. We need to find a better way to do this.
+    for (BasicBlock* const bb : Blocks())
+    {
+        if (bbInExnFlowRegions(tryIndex, bb) && !bb->isBBCallAlwaysPairTail())
+        {
+            res = new (this, CMK_FlowEdge) FlowEdge(bb, res);
+
+#if MEASURE_BLOCK_SIZE
+            genFlowNodeCnt += 1;
+            genFlowNodeSize += sizeof(FlowEdge);
+#endif // MEASURE_BLOCK_SIZE
+        }
+    }
+
+    EHblkDsc* ehblk = ehGetDsc(tryIndex);
+    if (ehblk->HasFinallyOrFaultHandler() && (ehblk->ebdHndBeg == blk))
+    {
+        // block is a finally or fault handler; all enclosing filters are predecessors
+        unsigned enclosing = ehblk->ebdEnclosingTryIndex;
+        while (enclosing != EHblkDsc::NO_ENCLOSING_INDEX)
+        {
+            EHblkDsc* enclosingDsc = ehGetDsc(enclosing);
+            if (enclosingDsc->HasFilter())
+            {
+                for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg;
+                     filterBlk = filterBlk->Next())
+                {
+                    res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res);
+
+                    assert(filterBlk->VisitEHSecondPassSuccs(this, [blk](BasicBlock* succ) {
+                        return succ == blk ? BasicBlockVisit::Abort : BasicBlockVisit::Continue;
+                    }) == BasicBlockVisit::Abort);
+                }
+            }
+
+            enclosing = enclosingDsc->ebdEnclosingTryIndex;
+        }
+    }
+
+#ifdef DEBUG
+    unsigned hash = SsaStressHashHelper();
+    if (hash != 0)
+    {
+        res = ShuffleHelper(hash, res);
+    }
+#endif // DEBUG
+    ehPreds->Set(blk, res);
+    return res;
+}
+
+//------------------------------------------------------------------------
+// BlockDominancePreds:
+//   Return list of dominance predecessors. This is the set that we know for
+//   sure contains a block that was fully executed before control reached
+//   'blk'.
+//
+// Arguments:
+//   blk - Block to get dominance predecessors for.
+//
+// Returns:
+//   List of edges.
+//
+// Remarks:
+//   Differs from BlockPredsWithEH only in the treatment of handler blocks;
+//   enclosed blocks are never dominance preds, while all predecessors of
+//   blocks in the 'try' are (currently only the first try block expected).
+//
+FlowEdge* Compiler::BlockDominancePreds(BasicBlock* blk)
+{
+    unsigned tryIndex;
+    if (!bbIsExFlowBlock(blk, &tryIndex))
+    {
+        return blk->bbPreds;
+    }
+
+    EHblkDsc* ehblk = ehGetDsc(tryIndex);
+    if (!ehblk->HasFinallyOrFaultHandler() || (ehblk->ebdHndBeg != blk))
+    {
+        return ehblk->ebdTryBeg->bbPreds;
+    }
+
+    // Finally/fault handlers can be preceded by enclosing filters due to 2
+    // pass EH, so add those and keep them cached.
+    BlockToFlowEdgeMap* domPreds = GetDominancePreds();
+    FlowEdge* res;
+    if (domPreds->Lookup(blk, &res))
+    {
+        return res;
+    }
+
+    res = ehblk->ebdTryBeg->bbPreds;
+    if (ehblk->HasFinallyOrFaultHandler() && (ehblk->ebdHndBeg == blk))
+    {
+        // block is a finally or fault handler; all enclosing filters are predecessors
+        unsigned enclosing = ehblk->ebdEnclosingTryIndex;
+        while (enclosing != EHblkDsc::NO_ENCLOSING_INDEX)
+        {
+            EHblkDsc* enclosingDsc = ehGetDsc(enclosing);
+            if (enclosingDsc->HasFilter())
+            {
+                for (BasicBlock* filterBlk = enclosingDsc->ebdFilter; filterBlk != enclosingDsc->ebdHndBeg;
+                     filterBlk = filterBlk->Next())
+                {
+                    res = new (this, CMK_FlowEdge) FlowEdge(filterBlk, res);
+
+                    assert(filterBlk->VisitEHSecondPassSuccs(this, [blk](BasicBlock* succ) {
+                        return succ == blk ? BasicBlockVisit::Abort : BasicBlockVisit::Continue;
+                    }) == BasicBlockVisit::Abort);
+                }
+            }
+
+            enclosing = enclosingDsc->ebdEnclosingTryIndex;
+        }
+    }
+
+    domPreds->Set(blk, res);
+    return res;
+}

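For orientation, the two functions above share one shape: an extended predecessor list is built by prepending edges in front of the ordinary bbPreds list, which stays shared as the tail, and the extended list is computed once per handler block and memoized. Below is a rough standalone sketch of that shape under simplified assumptions; Block, Edge, PredCache, and ExtendedPreds are illustrative stand-ins rather than the JIT's types, and the JIT allocates its edges from an arena instead of with plain new.

#include <unordered_map>
#include <vector>

struct Block;

struct Edge
{
    Block* pred;
    Edge*  next;
    Edge(Block* p, Edge* n) : pred(p), next(n)
    {
    }
};

struct Block
{
    Edge* preds = nullptr; // ordinary flow-graph predecessors
};

using PredCache = std::unordered_map<Block*, Edge*>;

// Return the block's ordinary preds extended with 'extraPreds', which this
// particular analysis also treats as predecessors. The extended list is built
// once and cached; the ordinary list remains the shared tail.
Edge* ExtendedPreds(Block* blk, const std::vector<Block*>& extraPreds, PredCache& cache)
{
    auto it = cache.find(blk);
    if (it != cache.end())
    {
        return it->second; // already computed for this block
    }

    Edge* res = blk->preds;
    for (Block* p : extraPreds)
    {
        res = new Edge(p, res); // prepend; in a sketch we do not bother freeing these
    }

    cache.emplace(blk, res);
    return res;
}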
2 changes: 2 additions & 0 deletions src/coreclr/jit/compiler.cpp
@@ -1945,6 +1945,7 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
 #endif
     m_switchDescMap = nullptr;
     m_blockToEHPreds = nullptr;
+    m_dominancePreds = nullptr;
     m_fieldSeqStore = nullptr;
     m_refAnyClass = nullptr;
     for (MemoryKind memoryKind : allMemoryKinds())
@@ -5739,6 +5740,7 @@ void Compiler::ResetOptAnnotations()
     fgResetForSsa();
     vnStore = nullptr;
     m_blockToEHPreds = nullptr;
+    m_dominancePreds = nullptr;
     fgSsaPassesCompleted = 0;
     fgVNPassesCompleted = 0;
     fgSsaChecksEnabled = false;
16 changes: 11 additions & 5 deletions src/coreclr/jit/compiler.h
@@ -2372,12 +2372,8 @@ class Compiler
     } // Get the index to use as the cache key for sharing throw blocks
 #endif // !FEATURE_EH_FUNCLETS
 
-    // Returns a FlowEdge representing the "EH predecessors" of "blk". These are the normal predecessors of
-    // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the
-    // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
-    // for example, we want to consider that the immediate dominator of the catch clause start block, so it's
-    // convenient to also consider it a predecessor.)
     FlowEdge* BlockPredsWithEH(BasicBlock* blk);
+    FlowEdge* BlockDominancePreds(BasicBlock* blk);
 
     // This table is useful for memoization of the method above.
     typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, FlowEdge*> BlockToFlowEdgeMap;
@@ -2391,6 +2387,16 @@ class Compiler
         return m_blockToEHPreds;
     }
 
+    BlockToFlowEdgeMap* m_dominancePreds;
+    BlockToFlowEdgeMap* GetDominancePreds()
+    {
+        if (m_dominancePreds == nullptr)
+        {
+            m_dominancePreds = new (getAllocator()) BlockToFlowEdgeMap(getAllocator());
+        }
+        return m_dominancePreds;
+    }
+
     void* ehEmitCookie(BasicBlock* block);
     UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
 
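Taken together, the compiler.h and compiler.cpp changes give the new cache a familiar lifetime: the map is allocated lazily on first query and dropped wholesale whenever optimization annotations are reset, so stale predecessor lists cannot leak across phases. A rough standalone sketch of the same pattern under simplified assumptions follows; PredAnalysis, GetCache, and ResetAnnotations are illustrative names, not the JIT's, and the JIT's arena allocator makes an explicit delete unnecessary there.

#include <unordered_map>

struct Block;
struct Edge;

class PredAnalysis
{
    std::unordered_map<Block*, Edge*>* m_cache = nullptr;

public:
    // Allocate the cache only when someone actually asks for it.
    std::unordered_map<Block*, Edge*>* GetCache()
    {
        if (m_cache == nullptr)
        {
            m_cache = new std::unordered_map<Block*, Edge*>();
        }
        return m_cache;
    }

    // Analogous to resetting m_dominancePreds in ResetOptAnnotations(): the
    // cached lists describe the old flow graph, so forget them and let the
    // next query rebuild against the new one.
    void ResetAnnotations()
    {
        delete m_cache;
        m_cache = nullptr;
    }
};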