From 26954f5e1d7e8101b5042b17e290c9cb7de5328c Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Tue, 5 Oct 2021 18:40:45 -0700
Subject: [PATCH] JIT: better support for patchpoints in try regions (#59784)

This change adds control flow to ensure that an OSR method for a
patchpoint nested in try regions enters those try regions from the
first block of each try rather than mid-try. This lets these OSR
methods conform to the data flow expectations that the only way
control flow can enter a try is via its first block.

See #33658 for more details on the approach taken here.

Fixes #35687.
---
 src/coreclr/jit/compiler.cpp     |  18 ++-
 src/coreclr/jit/compiler.h       |   4 +-
 src/coreclr/jit/fgbasic.cpp      |  66 +++++++----
 src/coreclr/jit/fgdiagnostic.cpp |   6 -
 src/coreclr/jit/fgopt.cpp        | 185 ++++++++++++++++++++++++++++++-
 src/coreclr/jit/importer.cpp     |   5 +-
 6 files changed, 241 insertions(+), 43 deletions(-)

diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 1b3d6298279b6..7db418920e093 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -4541,8 +4541,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
         // If this is a viable inline candidate
         if (compIsForInlining() && !compDonotInline())
         {
-            // Filter out unimported BBs
-            fgRemoveEmptyBlocks();
+            // Filter out unimported BBs in the inlinee
+            //
+            fgPostImportationCleanup();
 
             // Update type of return spill temp if we have gathered
             // better info when importing the inlinee, and the return
@@ -4679,8 +4680,10 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
         }
 #endif // defined(DEBUG) && defined(TARGET_X86)
 
-        // Filter out unimported BBs
-        fgRemoveEmptyBlocks();
+        // Update flow graph after importation.
+        // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow.
+
+        fgPostImportationCleanup();
     };
     DoPhase(this, PHASE_MORPH_INIT, morphInitPhase);
@@ -6379,6 +6382,13 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
     {
         // We are jitting the root method, or inlining.
         fgFindBasicBlocks();
+
+        // If we are doing OSR, update flow to initially reach the appropriate IL offset.
+        //
+        if (opts.IsOSR())
+        {
+            fgFixEntryFlowForOSR();
+        }
     }
 
     // If we're inlining and the candidate is bad, bail out.
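
As an aside, the flow shape the OSR method ends up with (described in the commit message
above and in the new fgPostImportationCleanup comment in fgopt.cpp below) can be sketched
in ordinary C++. This is a hypothetical, standalone illustration rather than JIT code:
osrMethodShape, entryState, S1, and S2 are made-up names, and the forward branches the
JIT inserts are rendered here as structured if guards.

    #include <cstdio>

    static void S1() { std::puts("S1"); }
    static void S2() { std::puts("S2"); }

    void osrMethodShape()
    {
        int entryState = 0; // the zero-initialized state variable (lvMustInit)

        // S0 does not run on this path: the OSR method entry branches directly
        // to the first block of the outermost try (the new step block).
        try
        {
            if (entryState != 0) { S1(); }       // outer step block: skip S1 when entering via OSR
            try
            {
                if (entryState != 0) { S2(); }   // inner step block: skip S2 when entering via OSR
                entryState = 1;                  // the logical OSR entry point
                for (int i = 0; i < 3; i++)      // the loop that contained the patchpoint
                {
                }
            }
            catch (...) { }
        }
        catch (...) { }
    }

Each try region is entered at its first block, and the state variable (zero on entry to the
OSR method, set to one once flow reaches the logical OSR entry) steers execution past the
code that precedes the patchpoint.
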
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 3758c56da45f5..a4aca5450edb5 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -4901,6 +4901,7 @@ class Compiler
     BasicBlock* fgLastBB;         // End of the basic block list
     BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
     BasicBlock* fgEntryBB;        // For OSR, the original method's entry point
+    BasicBlock* fgOSREntryBB;     // For OSR, the logical entry point (~ patchpoint)
 #if defined(FEATURE_EH_FUNCLETS)
     BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
 #endif
@@ -5210,6 +5211,7 @@ class Compiler
 #endif
 
     IL_OFFSET fgFindBlockILOffset(BasicBlock* block);
+    void fgFixEntryFlowForOSR();
 
     BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr);
     BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr);
@@ -5814,7 +5816,7 @@ class Compiler
 
     unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr);
 
-    void fgRemoveEmptyBlocks();
+    void fgPostImportationCleanup();
 
     void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false));
     void fgUnlinkStmt(BasicBlock* block, Statement* stmt);
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 5dbf989ff86fa..da643cfd6d4db 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -50,6 +50,7 @@ void Compiler::fgInit()
     fgLastBB         = nullptr;
     fgFirstColdBlock = nullptr;
     fgEntryBB        = nullptr;
+    fgOSREntryBB     = nullptr;
 
 #if defined(FEATURE_EH_FUNCLETS)
     fgFirstFuncletBB = nullptr;
@@ -3134,28 +3135,6 @@ void Compiler::fgFindBasicBlocks()
         return;
     }
 
-    // If we are doing OSR, add an entry block that simply branches to the right IL offset.
-    if (opts.IsOSR())
-    {
-        // Remember the original entry block in case this method is tail recursive.
-        fgEntryBB = fgLookupBB(0);
-
-        // Find the OSR entry block.
-        assert(info.compILEntry >= 0);
-        BasicBlock* bbTarget = fgLookupBB(info.compILEntry);
-
-        fgEnsureFirstBBisScratch();
-        fgFirstBB->bbJumpKind = BBJ_ALWAYS;
-        fgFirstBB->bbJumpDest = bbTarget;
-        fgAddRefPred(bbTarget, fgFirstBB);
-
-        JITDUMP("OSR: redirecting flow at entry via " FMT_BB " to " FMT_BB " (il offset 0x%x)\n", fgFirstBB->bbNum,
-                bbTarget->bbNum, info.compILEntry);
-
-        // rebuild lookup table... should be able to avoid this by leaving room up front.
-        fgInitBBLookup();
-    }
-
     /* Mark all blocks within 'try' blocks as such */
 
     if (info.compXcptnsCount == 0)
@@ -3545,6 +3524,49 @@ void Compiler::fgFindBasicBlocks()
     fgNormalizeEH();
 }
 
+//------------------------------------------------------------------------
+// fgFixEntryFlowForOSR: add control flow path from method start to
+//   the appropriate IL offset for the OSR method
+//
+// Notes:
+//    This is simply a branch from the method entry to the OSR entry --
+//    the block where the OSR method should begin execution.
+//
+//    If the OSR entry is within a try we will eventually need to add
+//    suitable step blocks to reach the OSR entry without jumping into
+//    the middle of the try. But we defer that until after importation.
+//    See fgPostImportationCleanup.
+//
+void Compiler::fgFixEntryFlowForOSR()
+{
+    // Ensure the IL->BB lookup table is valid
+    //
+    fgInitBBLookup();
+
+    // Remember the original entry block in case this method is tail recursive.
+    //
+    fgEntryBB = fgLookupBB(0);
+
+    // Find the OSR entry block.
+    //
+    assert(info.compILEntry >= 0);
+    BasicBlock* const osrEntry = fgLookupBB(info.compILEntry);
+
+    // Remember the OSR entry block so we can find it again later.
+    //
+    fgOSREntryBB = osrEntry;
+
+    // Now branch from method start to the right spot.
+    //
+    fgEnsureFirstBBisScratch();
+    fgFirstBB->bbJumpKind = BBJ_ALWAYS;
+    fgFirstBB->bbJumpDest = osrEntry;
+    fgAddRefPred(osrEntry, fgFirstBB);
+
+    JITDUMP("OSR: redirecting flow at entry from entry " FMT_BB " to OSR entry " FMT_BB " for the importer\n",
+            fgFirstBB->bbNum, osrEntry->bbNum);
+}
+
 /*****************************************************************************
  * Check control flow constraints for well formed IL. Bail if any of the constraints
  * are violated.
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index 1287ae128fca9..939131e2b2977 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -2419,12 +2419,6 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb
         return true;
     }
 
-    // For OSR, we allow the firstBB to branch to the middle of a try.
-    if (comp->opts.IsOSR() && (blockPred == comp->fgFirstBB))
-    {
-        return true;
-    }
-
     printf("Jump into the middle of try region: " FMT_BB " branches to " FMT_BB "\n", blockPred->bbNum, block->bbNum);
     assert(!"Jump into middle of try region");
     return false;
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 71daf460ca69f..aa8a13505e748 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -1218,7 +1218,7 @@ void Compiler::fgInitBlockVarSets()
 }
 
 //------------------------------------------------------------------------
-// fgRemoveEmptyBlocks: clean up flow graph after importation
+// fgPostImportationCleanup: clean up flow graph after importation
 //
 // Notes:
 //
@@ -1229,9 +1229,49 @@ void Compiler::fgInitBlockVarSets()
 //  Update the end of try and handler regions where trailing blocks were not imported.
 //  Update the start of try regions that were partially imported (OSR)
 //
+//  For OSR, add "step blocks" and conditional logic to ensure the path from
+//  method entry to the OSR logical entry point always flows through the first
+//  block of any enclosing try.
+//
+//  In particular, given a method like
+//
+//  S0;
+//  try {
+//      S1;
+//      try {
+//          S2;
+//          for (...) {}  // OSR logical entry here
+//      }
+//  }
+//
+//  Where the Sn are arbitrary hammocks of code, the OSR logical entry point
+//  would be in the middle of a nested try. We can't branch there directly
+//  from the OSR method entry. So we transform the flow to:
+//
+//  _firstCall = 0;
+//  goto pt1;
+//  S0;
+//  pt1:
+//  try {
+//      if (_firstCall == 0) goto pt2;
+//      S1;
+//  pt2:
+//      try {
+//          if (_firstCall == 0) goto pp;
+//          S2;
+//      pp:
+//          _firstCall = 1;
+//          for (...)
+//      }
+//  }
+//
+//  where the "state variable" _firstCall guides execution appropriately
+//  from OSR method entry, and flow always enters the try blocks at the
+//  first block of the try.
+//
-void Compiler::fgRemoveEmptyBlocks()
+void Compiler::fgPostImportationCleanup()
 {
-    JITDUMP("\n*************** In fgRemoveEmptyBlocks\n");
+    JITDUMP("\n*************** In fgPostImportationCleanup\n");
 
     BasicBlock* cur;
     BasicBlock* nxt;
@@ -1272,8 +1312,10 @@ void Compiler::fgRemoveEmptyBlocks()
         }
     }
 
-    // If no blocks were removed, we're done
-    if (removedBlks == 0)
+    // If no blocks were removed, we're done.
+    // Unless we are an OSR method with a try entry.
+    //
+    if ((removedBlks == 0) && !(opts.IsOSR() && fgOSREntryBB->hasTryIndex()))
     {
         return;
     }
@@ -1462,8 +1504,139 @@ void Compiler::fgRemoveEmptyBlocks()
         fgSkipRmvdBlocks(HBtab);
     }
 
+    // If this is OSR, and the OSR entry was mid-try or in a nested try entry,
+    // add the appropriate step block logic.
+    //
+    if (opts.IsOSR())
+    {
+        BasicBlock* const osrEntry        = fgOSREntryBB;
+        BasicBlock*       entryJumpTarget = osrEntry;
+
+        if (osrEntry->hasTryIndex())
+        {
+            EHblkDsc*   enclosingTry   = ehGetBlockTryDsc(osrEntry);
+            BasicBlock* tryEntry       = enclosingTry->ebdTryBeg;
+            bool const  inNestedTry    = (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX);
+            bool const  osrEntryMidTry = (osrEntry != tryEntry);
+
+            if (inNestedTry || osrEntryMidTry)
+            {
+                JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is %s%s try region EH#%u\n", info.compILEntry,
+                        osrEntry->bbNum, osrEntryMidTry ? "within " : "at the start of ", inNestedTry ? "nested" : "",
+                        osrEntry->getTryIndex());
+
+                // We'll need a state variable to control the branching.
+                //
+                // It will be zero when the OSR method is entered and set to one
+                // once flow reaches the osrEntry.
+                //
+                unsigned const entryStateVar       = lvaGrabTemp(false DEBUGARG("OSR entry state var"));
+                lvaTable[entryStateVar].lvType     = TYP_INT;
+                lvaTable[entryStateVar].lvMustInit = true;
+
+                // Set the state variable when we reach the entry.
+                //
+                GenTree* const setEntryState = gtNewTempAssign(entryStateVar, gtNewOneConNode(TYP_INT));
+                fgNewStmtAtBeg(osrEntry, setEntryState);
+
+                // Helper method to add flow
+                //
+                auto addConditionalFlow = [this, entryStateVar, &entryJumpTarget](BasicBlock* fromBlock,
+                                                                                  BasicBlock* toBlock) {
+                    fgSplitBlockAtBeginning(fromBlock);
+                    fromBlock->bbFlags |= BBF_INTERNAL;
+
+                    GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT);
+                    GenTree* const compareEntryStateToZero =
+                        gtNewOperNode(GT_EQ, TYP_INT, entryStateLcl, gtNewZeroConNode(TYP_INT));
+                    GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero);
+                    fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero);
+
+                    fromBlock->bbJumpKind = BBJ_COND;
+                    fromBlock->bbJumpDest = toBlock;
+                    fgAddRefPred(toBlock, fromBlock);
+
+                    entryJumpTarget = fromBlock;
+                };
+
+                // If this is a mid-try entry, add a conditional branch from the start of the try to osr entry point.
+                //
+                if (osrEntryMidTry)
+                {
+                    addConditionalFlow(tryEntry, osrEntry);
+                }
+
+                // Add conditional branches for each successive enclosing try with a distinct
+                // entry block.
+                //
+                while (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
+                {
+                    EHblkDsc* const   nextTry      = ehGetDsc(enclosingTry->ebdEnclosingTryIndex);
+                    BasicBlock* const nextTryEntry = nextTry->ebdTryBeg;
+
+                    // We don't need to add flow for mutual-protect regions
+                    // (multiple tries that all share the same entry block).
+                    //
+                    if (nextTryEntry != tryEntry)
+                    {
+                        addConditionalFlow(nextTryEntry, tryEntry);
+                    }
+                    enclosingTry = nextTry;
+                    tryEntry     = nextTryEntry;
+                }
+
+                // Transform the method entry flow, if necessary.
+                //
+                // Note even if the OSR is in a nested try, if it's a mutual protect try
+                // it can be reached directly from "outside".
+                //
+                assert(fgFirstBB->bbJumpDest == osrEntry);
+                assert(fgFirstBB->bbJumpKind == BBJ_ALWAYS);
+
+                if (entryJumpTarget != osrEntry)
+                {
+                    fgFirstBB->bbJumpDest = entryJumpTarget;
+                    fgRemoveRefPred(osrEntry, fgFirstBB);
+                    fgAddRefPred(entryJumpTarget, fgFirstBB);
+
+                    JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB
+                            " via step blocks.\n",
+                            fgFirstBB->bbNum, fgOSREntryBB->bbNum);
+                }
+                else
+                {
+                    JITDUMP("OSR: leaving direct flow from method entry " FMT_BB " to OSR entry " FMT_BB
+                            ", no step blocks needed.\n",
+                            fgFirstBB->bbNum, fgOSREntryBB->bbNum);
+                }
+            }
+            else
+            {
+                // If OSR entry is the start of an un-nested try, no work needed.
+                //
+                // We won't hit this case today as we don't allow the try entry to be the target of a backedge,
+                // and currently patchpoints only appear at targets of backedges.
+                //
+                JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB
+                        ") is start of an un-nested try region, no step blocks needed.\n",
+                        info.compILEntry, osrEntry->bbNum);
+                assert(entryJumpTarget == osrEntry);
+                assert(fgOSREntryBB == osrEntry);
+            }
+        }
+        else
+        {
+            // If OSR entry is not within a try, no work needed.
+            //
+            JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is not in a try region, no step blocks needed.\n",
+                    info.compILEntry, osrEntry->bbNum);
+            assert(entryJumpTarget == osrEntry);
+            assert(fgOSREntryBB == osrEntry);
+        }
+    }
+
     // Renumber the basic blocks
-    JITDUMP("\nRenumbering the basic blocks for fgRemoveEmptyBlocks\n");
+    JITDUMP("\nRenumbering the basic blocks for fgPostImportationCleanup\n");
     fgRenumberBlocks();
 
 #ifdef DEBUG
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 6601bdcc56c10..f8dd90d66c203 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -18849,7 +18849,6 @@ void Compiler::impImport()
     // Skip leading internal blocks.
     // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
     //
-    // We expect a linear flow to the first non-internal block. But not necessarily straght-line flow.
     BasicBlock* entryBlock = fgFirstBB;
 
     while (entryBlock->bbFlags & BBF_INTERNAL)
@@ -18861,10 +18860,8 @@ void Compiler::impImport()
         {
             entryBlock = entryBlock->bbNext;
         }
-        else if (entryBlock->bbJumpKind == BBJ_ALWAYS)
+        else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS))
         {
-            // Only expected for OSR
-            assert(opts.IsOSR());
             entryBlock = entryBlock->bbJumpDest;
         }
         else