diff --git a/src/coreclr/jit/async.cpp b/src/coreclr/jit/async.cpp index 2d76d37f587c41..7049640513f642 100644 --- a/src/coreclr/jit/async.cpp +++ b/src/coreclr/jit/async.cpp @@ -481,14 +481,6 @@ bool AsyncLiveness::IsLocalCaptureUnnecessary(unsigned lclNum) return true; } -#ifdef FEATURE_EH_WINDOWS_X86 - if (lclNum == m_comp->lvaShadowSPslotsVar) - { - // Only expected to be live in handlers - return true; - } -#endif - if (lclNum == m_comp->lvaRetAddrVar) { return true; diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index a3b73955593b81..0dc138339952f0 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -1260,9 +1260,6 @@ class CodeGen final : public CodeGenInterface #endif // TARGET_ARM64 void genEHCatchRet(BasicBlock* block); -#if defined(FEATURE_EH_WINDOWS_X86) - void genEHFinallyOrFilterRet(BasicBlock* block); -#endif // FEATURE_EH_WINDOWS_X86 void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode); void genMultiRegStoreToLocal(GenTreeLclVar* lclNode); diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index b086fd1c8e75b3..66225a93461724 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -501,7 +501,6 @@ void CodeGen::genMarkLabelsForCodegen() case BBJ_CALLFINALLY: // The finally target itself will get marked by walking the EH table, below, and marking // all handler begins. - if (compiler->UsesCallFinallyThunks()) { // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, // as that's needed for identifying the range of the "duplicate finally" region in EH data. @@ -1626,7 +1625,7 @@ void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKi { bool useThrowHlpBlk = compiler->fgUseThrowHelperBlocks(); #if defined(UNIX_X86_ABI) - // TODO: Is this really UNIX_X86_ABI specific? Should we guard with compiler->UsesFunclets() instead? + // TODO: Is this really UNIX_X86_ABI specific? // Inline exception-throwing code in funclet to make it possible to unwind funclet frames. 
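 // funCurrentFunc() is FUNC_ROOT only while generating the main function body (not a filter or handler funclet).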
useThrowHlpBlk = useThrowHlpBlk && (compiler->funCurrentFunc()->funKind == FUNC_ROOT); #endif // UNIX_X86_ABI @@ -4493,7 +4492,6 @@ void CodeGen::genReserveEpilog(BasicBlock* block) void CodeGen::genReserveFuncletProlog(BasicBlock* block) { - assert(compiler->UsesFunclets()); assert(block != nullptr); /* Currently, no registers are live on entry to the prolog, except maybe @@ -4524,7 +4522,6 @@ void CodeGen::genReserveFuncletProlog(BasicBlock* block) void CodeGen::genReserveFuncletEpilog(BasicBlock* block) { - assert(compiler->UsesFunclets()); assert(block != nullptr); JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum); @@ -5379,31 +5376,6 @@ void CodeGen::genFnProlog() genZeroInitFrame(untrLclHi, untrLclLo, initReg, &initRegZeroed); -#if defined(FEATURE_EH_WINDOWS_X86) - if (!compiler->UsesFunclets()) - { - // when compInitMem is true the genZeroInitFrame will zero out the shadow SP slots - if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem) - { - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs = - compiler->lvaLclStackHomeSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE; - - // Zero out the slot for nesting level 0 - unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE; - - if (!initRegZeroed) - { - instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); - initRegZeroed = true; - } - - GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar, - firstSlotOffs); - } - } -#endif // FEATURE_EH_WINDOWS_X86 - genReportGenericContextArg(initReg, &initRegZeroed); #ifdef JIT32_GCENCODER @@ -5890,14 +5862,11 @@ void CodeGen::genGeneratePrologsAndEpilogs() // Generate all the prologs and epilogs. - if (compiler->UsesFunclets()) - { - // Capture the data we're going to use in the funclet prolog and epilog generation. This is - // information computed during codegen, or during function prolog generation, like - // frame offsets. It must run after main function prolog generation. + // Capture the data we're going to use in the funclet prolog and epilog generation. This is + // information computed during codegen, or during function prolog generation, like + // frame offsets. It must run after main function prolog generation. - genCaptureFuncletPrologEpilogInfo(); - } + genCaptureFuncletPrologEpilogInfo(); // Walk the list of prologs and epilogs and generate them. // We maintain a list of prolog and epilog basic blocks in @@ -7101,24 +7070,10 @@ void CodeGen::genReturn(GenTree* treeNode) #if defined(DEBUG) && defined(TARGET_XARCH) bool doStackPointerCheck = compiler->opts.compStackCheckOnRet; - if (compiler->UsesFunclets()) + // Don't do stack pointer check at the return from a funclet; only for the main function. + if (compiler->funCurrentFunc()->funKind != FUNC_ROOT) { - // Don't do stack pointer check at the return from a funclet; only for the main function. - if (compiler->funCurrentFunc()->funKind != FUNC_ROOT) - { - doStackPointerCheck = false; - } - } - else - { -#if defined(FEATURE_EH_WINDOWS_X86) - // Don't generate stack checks for x86 finally/filter EH returns: these are not invoked - // with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet(). 
- if (compiler->compCurBB->KindIs(BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHFILTERRET)) - { - doStackPointerCheck = false; - } -#endif // FEATURE_EH_WINDOWS_X86 + doStackPointerCheck = false; } genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck); diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index b71d8efffefa13..662623afbda076 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -658,26 +658,6 @@ void CodeGen::genCodeForBBlist() #endif // DEBUG } //------------------ END-FOR each block of the method ------------------- -#if defined(FEATURE_EH_WINDOWS_X86) - // If this is a synchronized method on x86, and we generated all the code without - // generating the "exit monitor" call, then we must have deleted the single return block - // with that call because it was dead code. We still need to report the monitor range - // to the VM in the GC info, so create a label at the very end so we have a marker for - // the monitor end range. - // - // Do this before cleaning the GC refs below; we don't want to create an IG that clears - // the `this` pointer for lvaKeepAliveAndReportThis. - - if (!compiler->UsesFunclets() && (compiler->info.compFlags & CORINFO_FLG_SYNCH) && - (compiler->syncEndEmitCookie == nullptr)) - { - JITDUMP("Synchronized method with missing exit monitor call; adding final label\n"); - compiler->syncEndEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncEndEmitCookie != nullptr); - } -#endif - // There could be variables alive at this point. For example see lvaKeepAliveAndReportThis. // This call is for cleaning the GC refs genUpdateLife(VarSetOps::MakeEmpty(compiler)); @@ -848,23 +828,13 @@ BasicBlock* CodeGen::genEmitEndBlock(BasicBlock* block) break; case BBJ_EHCATCHRET: - assert(compiler->UsesFunclets()); genEHCatchRet(block); FALLTHROUGH; case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: case BBJ_EHFILTERRET: - if (compiler->UsesFunclets()) - { - genReserveFuncletEpilog(block); - } -#if defined(FEATURE_EH_WINDOWS_X86) - else - { - genEHFinallyOrFilterRet(block); - } -#endif // FEATURE_EH_WINDOWS_X86 + genReserveFuncletEpilog(block); break; case BBJ_SWITCH: diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 320d53d7cef864..7ef3a1fb000efe 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -136,112 +136,51 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const nextBlock = block->Next(); - if (compiler->UsesFunclets()) - { - // Generate a call to the finally, like this: - // call finally-funclet - // jmp finally-return // Only for non-retless finally calls - // The jmp can be a NOP if we're going to the next block. + // Generate a call to the finally, like this: + // call finally-funclet + // jmp finally-return // Only for non-retless finally calls + // The jmp can be a NOP if we're going to the next block. - if (block->HasFlag(BBF_RETLESS_CALL)) - { - GetEmitter()->emitIns_J(INS_call, block->GetTarget()); + if (block->HasFlag(BBF_RETLESS_CALL)) + { + GetEmitter()->emitIns_J(INS_call, block->GetTarget()); - // We have a retless call, and the last instruction generated was a call. - // If the next block is in a different EH region (or is the end of the code - // block), then we need to generate a breakpoint here (since it will never - // get executed) to get proper unwind behavior. 
+ // We have a retless call, and the last instruction generated was a call. + // If the next block is in a different EH region (or is the end of the code + // block), then we need to generate a breakpoint here (since it will never + // get executed) to get proper unwind behavior. - if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) - { - instGen(INS_BREAKPOINT); // This should never get executed - } - } - else + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) { - // Because of the way the flowgraph is connected, the liveness info for this one instruction - // after the call is not (can not be) correct in cases where a variable has a last use in the - // handler. So turn off GC reporting once we execute the call and reenable after the jmp/nop - GetEmitter()->emitDisableGC(); - - GetEmitter()->emitIns_J(INS_call, block->GetTarget()); - - // Now go to where the finally funclet needs to return to. - BasicBlock* const finallyContinuation = nextBlock->GetFinallyContinuation(); - if (nextBlock->NextIs(finallyContinuation) && - !compiler->fgInDifferentRegions(nextBlock, finallyContinuation)) - { - // Fall-through. - // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly - // to the next instruction? This would depend on stack walking from within the finally - // handler working without this instruction being in this special EH region. - instGen(INS_nop); - } - else - { - inst_JMP(EJ_jmp, finallyContinuation); - } - - GetEmitter()->emitEnableGC(); + instGen(INS_BREAKPOINT); // This should never get executed } } -#if defined(FEATURE_EH_WINDOWS_X86) else { - // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot - // corresponding to the finally's nesting level. When invoked in response to an exception, the - // EE does this. - // - // We have a BBJ_CALLFINALLY possibly paired with a following BBJ_CALLFINALLYRET. - // - // We will emit : - // mov [ebp - (n + 1)], 0 - // mov [ebp - n ], 0xFC - // push &step - // jmp finallyBlock - // ... - // step: - // mov [ebp - n ], 0 - // jmp leaveTarget - // ... - // leaveTarget: - - noway_assert(isFramePointerUsed()); - - // Get the nesting level which contains the finally - unsigned finallyNesting = 0; - compiler->fgGetNestingLevel(block, &finallyNesting); - - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs; - filterEndOffsetSlotOffs = - (unsigned)(compiler->lvaLclStackHomeSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); - - unsigned curNestingSlotOffs; - curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE)); - - // Zero out the slot for the next nesting level - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, - curNestingSlotOffs - TARGET_POINTER_SIZE, 0); - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, - LCL_FINALLY_MARK); - - // Now push the address where the finally funclet should return to directly. - if (!block->HasFlag(BBF_RETLESS_CALL)) - { - assert(block->isBBCallFinallyPair()); - GetEmitter()->emitIns_J(INS_push_hide, nextBlock->GetFinallyContinuation()); + // Because of the way the flowgraph is connected, the liveness info for this one instruction + // after the call is not (can not be) correct in cases where a variable has a last use in the + // handler. 
So turn off GC reporting once we execute the call and reenable after the jmp/nop + GetEmitter()->emitDisableGC(); + + GetEmitter()->emitIns_J(INS_call, block->GetTarget()); + + // Now go to where the finally funclet needs to return to. + BasicBlock* const finallyContinuation = nextBlock->GetFinallyContinuation(); + if (nextBlock->NextIs(finallyContinuation) && !compiler->fgInDifferentRegions(nextBlock, finallyContinuation)) + { + // Fall-through. + // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly + // to the next instruction? This would depend on stack walking from within the finally + // handler working without this instruction being in this special EH region. + instGen(INS_nop); } else { - // EE expects a DWORD, so we provide 0 - inst_IV(INS_push_hide, 0); + inst_JMP(EJ_jmp, finallyContinuation); } - // Jump to the finally BB - inst_JMP(EJ_jmp, block->GetTarget()); + GetEmitter()->emitEnableGC(); } -#endif // FEATURE_EH_WINDOWS_X86 // The BBJ_CALLFINALLYRET is used because the BBJ_CALLFINALLY can't point to the // jump target using bbTargetEdge - that is already used to point @@ -264,38 +203,6 @@ void CodeGen::genEHCatchRet(BasicBlock* block) GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->GetTarget(), REG_INTRET); } -#if defined(FEATURE_EH_WINDOWS_X86) - -void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) -{ - assert(!compiler->UsesFunclets()); - // The last statement of the block must be a GT_RETFILT, which has already been generated. - assert(block->lastNode() != nullptr); - assert(block->lastNode()->OperGet() == GT_RETFILT); - - if (block->KindIs(BBJ_EHFINALLYRET, BBJ_EHFAULTRET)) - { - assert(block->lastNode()->AsOp()->gtOp1 == nullptr); // op1 == nullptr means endfinally - - // Return using a pop-jmp sequence. As the "try" block calls - // the finally with a jmp, this leaves the x86 call-ret stack - // balanced in the normal flow of path. - - noway_assert(isFramePointerRequired()); - inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL); - inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL); - } - else - { - assert(block->KindIs(BBJ_EHFILTERRET)); - - // The return value has already been computed. - instGen_Return(0); - } -} - -#endif // FEATURE_EH_WINDOWS_X86 - // Move an immediate value into an integer register void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, @@ -2216,40 +2123,6 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForAsyncContinuation(treeNode); break; -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: - { - // Find the eh table entry via the eh ID - // - unsigned const ehID = (unsigned)treeNode->AsVal()->gtVal1; - assert(ehID < compiler->compEHID); - assert(compiler->m_EHIDtoEHblkDsc != nullptr); - - EHblkDsc* HBtab = nullptr; - bool found = compiler->m_EHIDtoEHblkDsc->Lookup(ehID, &HBtab); - assert(found); - assert(HBtab != nullptr); - - // Have to clear the ShadowSP of the nesting level which encloses the finally. 
Generates: - // mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var - // - const size_t finallyNesting = HBtab->ebdHandlerNestingLevel; - noway_assert(finallyNesting < compiler->compHndBBtabCount); - - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs; - assert(compiler->lvaLclStackHomeSize(compiler->lvaShadowSPslotsVar) > TARGET_POINTER_SIZE); - filterEndOffsetSlotOffs = - (unsigned)(compiler->lvaLclStackHomeSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); - - size_t curNestingSlotOffs; - curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE); - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs, - 0); - break; - } -#endif // FEATURE_EH_WINDOWS_X86 - case GT_PINVOKE_PROLOG: noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask(compiler->info.compCallConv)) == 0); @@ -6204,41 +6077,6 @@ void CodeGen::genCall(GenTreeCall* call) compiler->lvaCallSpCheck, call->CallerPop() ? 0 : stackArgBytes, REG_ARG_0); #endif // defined(DEBUG) && defined(TARGET_X86) -#if defined(FEATURE_EH_WINDOWS_X86) - if (!compiler->UsesFunclets()) - { - //------------------------------------------------------------------------- - // Create a label for tracking of region protected by the monitor in synchronized methods. - // This needs to be here, rather than above where fPossibleSyncHelperCall is set, - // so the GC state vars have been updated before creating the label. - - if (call->IsHelperCall() && (compiler->info.compFlags & CORINFO_FLG_SYNCH)) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); - switch (helperNum) - { - case CORINFO_HELP_MON_ENTER: - noway_assert(compiler->syncStartEmitCookie == nullptr); - compiler->syncStartEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, - gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncStartEmitCookie != nullptr); - break; - case CORINFO_HELP_MON_EXIT: - noway_assert(compiler->syncEndEmitCookie == nullptr); - compiler->syncEndEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, - gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncEndEmitCookie != nullptr); - break; - default: - break; - } - } - } -#endif // FEATURE_EH_WINDOWS_X86 - unsigned stackAdjustBias = 0; #if defined(TARGET_X86) @@ -8627,7 +8465,6 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, // We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it if (compiler->ehAnyFunclets()) { - assert(compiler->UsesFunclets()); gcInfo.gcMarkFilterVarsPinned(); } diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 2951cb2df4f0e9..f6cca4b8d88c89 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -336,10 +336,6 @@ Compiler::Compiler(ArenaAllocator* arena, #endif // DEBUG -#if defined(FEATURE_EH_WINDOWS_X86) - eeIsNativeAotAbi = IsTargetAbi(CORINFO_NATIVEAOT_ABI); -#endif - if (compIsForInlining()) { m_inlineStrategy = nullptr; @@ -4871,12 +4867,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // DoPhase(this, PHASE_EMPTY_TRY_CATCH_FAULT_3, &Compiler::fgRemoveEmptyTryCatchOrTryFault); - if (UsesFunclets()) - { - // Create funclets from the EH handlers. 
- // - DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); - } + // Create funclets from the EH handlers. + // + DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); // Expand casts DoPhase(this, PHASE_EXPAND_CASTS, &Compiler::fgLateCastExpansion); @@ -5115,90 +5108,6 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // void Compiler::FinalizeEH() { -#if defined(FEATURE_EH_WINDOWS_X86) - - // Grab space for exception handling info on the frame - // - if (!UsesFunclets() && ehNeedsShadowSPslots()) - { - // Recompute the handler nesting levels, as they may have changed. - // - unsigned const oldHandlerNestingCount = ehMaxHndNestingCount; - ehMaxHndNestingCount = 0; - - if (compHndBBtabCount > 0) - { - for (int XTnum = compHndBBtabCount - 1; XTnum >= 0; XTnum--) - { - EHblkDsc* const HBtab = &compHndBBtab[XTnum]; - unsigned const enclosingHndIndex = HBtab->ebdEnclosingHndIndex; - - if (enclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) - { - EHblkDsc* const enclosingHBtab = &compHndBBtab[enclosingHndIndex]; - unsigned const newNestingLevel = enclosingHBtab->ebdHandlerNestingLevel + 1; - HBtab->ebdHandlerNestingLevel = (unsigned short)newNestingLevel; - - if (newNestingLevel > ehMaxHndNestingCount) - { - ehMaxHndNestingCount = newNestingLevel; - } - } - else - { - HBtab->ebdHandlerNestingLevel = 0; - } - } - - // When there is EH, we need to record nesting level + 1 - // - ehMaxHndNestingCount++; - } - - if (oldHandlerNestingCount != ehMaxHndNestingCount) - { - JITDUMP("Finalize EH: max handler nesting count now %u (was %u)\n", oldHandlerNestingCount, - ehMaxHndNestingCount); - } - - // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) - // ie. the offset of the end-of-last-executed-filter - unsigned slotsNeeded = 1; - - unsigned handlerNestingLevel = ehMaxHndNestingCount; - - if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) - handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; - - slotsNeeded += handlerNestingLevel; - - // For a filter (which can be active at the same time as a catch/finally handler) - slotsNeeded++; - // For zero-termination of the shadow-Stack-pointer chain - slotsNeeded++; - - lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); - lvaSetStruct(lvaShadowSPslotsVar, typGetBlkLayout(slotsNeeded * TARGET_POINTER_SIZE), false); - lvaSetVarAddrExposed(lvaShadowSPslotsVar DEBUGARG(AddressExposedReason::EXTERNALLY_VISIBLE_IMPLICITLY)); - } - - // Build up a mapping from EH IDs to EHblkDsc* - // - assert(m_EHIDtoEHblkDsc == nullptr); - - if (compHndBBtabCount > 0) - { - m_EHIDtoEHblkDsc = new (getAllocator()) EHIDtoEHblkDscMap(getAllocator()); - - for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) - { - EHblkDsc* const HBtab = &compHndBBtab[XTnum]; - m_EHIDtoEHblkDsc->Set(HBtab->ebdID, HBtab); - } - } - -#endif // FEATURE_EH_WINDOWS_X86 - // We should not make any more alterations to the EH table structure. // ehTableFinalized = true; @@ -5266,15 +5175,13 @@ bool Compiler::shouldAlignLoop(FlowGraphNaturalLoop* loop, BasicBlock* top) assert(!top->IsFirst()); - if (UsesCallFinallyThunks() && top->Prev()->KindIs(BBJ_CALLFINALLY)) + if (top->Prev()->KindIs(BBJ_CALLFINALLY)) { // It must be a retless BBJ_CALLFINALLY if we get here. 
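 // (a paired BBJ_CALLFINALLY is always followed by its BBJ_CALLFINALLYRET, whose only predecessor is that callfinally, so it can never be a loop top)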
assert(!top->Prev()->isBBCallFinallyPair()); - // If the block before the loop start is a retless BBJ_CALLFINALLY - // with UsesCallFinallyThunks, we can't add alignment - // because it will affect reported EH region range. For x86 (where - // !UsesCallFinallyThunks), we can allow this. + // If the block before the loop start is a retless BBJ_CALLFINALLY, + // we can't add alignment because it will affect reported EH region range. JITDUMP("Skipping alignment for " FMT_LP "; its top block follows a CALLFINALLY block\n", loop->GetIndex()); return false; @@ -5284,8 +5191,8 @@ bool Compiler::shouldAlignLoop(FlowGraphNaturalLoop* loop, BasicBlock* top) { // If the previous block is the BBJ_CALLFINALLYRET of a // BBJ_CALLFINALLY/BBJ_CALLFINALLYRET pair, then we can't add alignment - // because we can't add instructions in that block. In the - // UsesCallFinallyThunks case, it would affect the reported EH, as above. + // because we can't add instructions in that block. + // It would affect the reported EH, as above. JITDUMP("Skipping alignment for " FMT_LP "; its top block follows a CALLFINALLY/ALWAYS pair\n", loop->GetIndex()); return false; diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index a1220115fc5130..8ceac277725eb2 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2733,24 +2733,6 @@ class Compiler // Exception handling functions // -#if defined(FEATURE_EH_WINDOWS_X86) - - bool ehNeedsShadowSPslots() - { - return ((compHndBBtabCount > 0) || opts.compDbgEnC); - } - - // 0 for methods with no EH - // 1 for methods with non-nested EH, or where only the try blocks are nested - // 2 for a method with a catch within a catch - // etc. - unsigned ehMaxHndNestingCount = 0; - - typedef JitHashTable, EHblkDsc*> EHIDtoEHblkDscMap; - EHIDtoEHblkDscMap* m_EHIDtoEHblkDsc = nullptr; - -#endif // FEATURE_EH_WINDOWS_X86 - EHblkDsc* ehFindEHblkDscById(unsigned short ehID); bool ehTableFinalized = false; void FinalizeEH(); @@ -4048,11 +4030,6 @@ class Compiler //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync -#if defined(FEATURE_EH_WINDOWS_X86) - // This is used for the callable handlers - unsigned lvaShadowSPslotsVar = BAD_VAR_NUM; // Block-layout TYP_STRUCT variable for all the shadow SP slots -#endif // FEATURE_EH_WINDOWS_X86 - int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as // THIS pointer @@ -4666,9 +4643,6 @@ class Compiler GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); -#if defined(FEATURE_EH_WINDOWS_X86) - void impImportLeaveEHRegions(BasicBlock* block); -#endif void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); GenTree* impGetGenericTypeDefinition(GenTree* type); @@ -5417,8 +5391,6 @@ class Compiler void fgUpdateACDsBeforeEHTableEntryRemoval(unsigned XTnum); - void fgCleanupContinuation(BasicBlock* continuation); - PhaseStatus fgTailMergeThrows(); bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, @@ -8502,30 +8474,6 @@ class Compiler return false; } -#if defined(FEATURE_EH_WINDOWS_X86) - bool eeIsNativeAotAbi = false; - bool UsesFunclets() const - { - return eeIsNativeAotAbi; - } - - bool UsesCallFinallyThunks() const - { - // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by 
"cloned finally" clauses. - return UsesFunclets(); - } -#else - bool UsesFunclets() const - { - return true; - } - - bool UsesCallFinallyThunks() const - { - return true; - } -#endif - bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) @@ -8752,24 +8700,16 @@ class Compiler FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; - FuncInfoDsc compFuncInfoRoot; unsigned short compFuncCount() { - if (UsesFunclets()) - { - assert(fgFuncletsCreated); - return compFuncInfoCount; - } - else - { - return 1; - } + assert(fgFuncletsCreated); + return compFuncInfoCount; } unsigned short funCurrentFuncIdx() { - return UsesFunclets() ? compCurrFuncIdx : 0; + return compCurrFuncIdx; } FuncInfoDsc* funCurrentFunc(); @@ -10890,15 +10830,6 @@ class Compiler unsigned compHndBBtabAllocCount = 0; // element count of allocated elements in EH data array unsigned short compEHID = 0; // unique ID for EH data array entries -#if defined(FEATURE_EH_WINDOWS_X86) - - //------------------------------------------------------------------------- - // Tracking of region covered by the monitor in synchronized methods - void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER - void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT - -#endif // FEATURE_EH_WINDOWS_X86 - Phases mostRecentlyActivePhase = PHASE_PRE_IMPORT; // the most recently active phase PhaseChecks activePhaseChecks = PhaseChecks::CHECK_NONE; // the currently active phase checks PhaseDumps activePhaseDumps = PhaseDumps::DUMP_ALL; // the currently active phase dumps @@ -11780,9 +11711,6 @@ class GenTreeVisitor case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // !FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 148b87544d1f95..3465a46b8dd223 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -832,14 +832,7 @@ inline bool BasicBlock::HasPotentialEHSuccs(Compiler* comp) */ inline FuncInfoDsc* Compiler::funCurrentFunc() { - if (UsesFunclets()) - { - return funGetFunc(compCurrFuncIdx); - } - else - { - return &compFuncInfoRoot; - } + return funGetFunc(compCurrFuncIdx); } /***************************************************************************** @@ -849,17 +842,10 @@ inline FuncInfoDsc* Compiler::funCurrentFunc() */ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) { - if (UsesFunclets()) - { - assert(fgFuncletsCreated); - assert(FitsIn(funcIdx)); - noway_assert(funcIdx < compFuncInfoCount); - compCurrFuncIdx = (unsigned short)funcIdx; - } - else - { - assert(funcIdx == 0); - } + assert(fgFuncletsCreated); + assert(FitsIn(funcIdx)); + noway_assert(funcIdx < compFuncInfoCount); + compCurrFuncIdx = (unsigned short)funcIdx; } /***************************************************************************** @@ -869,17 +855,9 @@ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) */ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) { - if (UsesFunclets()) - { - assert(fgFuncletsCreated); - assert(funcIdx < compFuncInfoCount); - return &compFuncInfos[funcIdx]; - } - else - { - assert(funcIdx == 0); - return &compFuncInfoRoot; - } + assert(fgFuncletsCreated); + assert(funcIdx < compFuncInfoCount); + return &compFuncInfos[funcIdx]; } 
/***************************************************************************** @@ -892,30 +870,23 @@ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) */ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) { - if (UsesFunclets()) - { - assert(bbIsFuncletBeg(block)); + assert(bbIsFuncletBeg(block)); - EHblkDsc* eh = ehGetDsc(block->getHndIndex()); - unsigned int funcIdx = eh->ebdFuncIndex; - if (eh->ebdHndBeg != block) - { - // If this is a filter EH clause, but we want the funclet - // for the filter (not the filter handler), it is the previous one - noway_assert(eh->HasFilter()); - noway_assert(eh->ebdFilter == block); - assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); - assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); - assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); - funcIdx--; - } - - return funcIdx; - } - else + EHblkDsc* eh = ehGetDsc(block->getHndIndex()); + unsigned int funcIdx = eh->ebdFuncIndex; + if (eh->ebdHndBeg != block) { - return 0; + // If this is a filter EH clause, but we want the funclet + // for the filter (not the filter handler), it is the previous one + noway_assert(eh->HasFilter()); + noway_assert(eh->ebdFilter == block); + assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); + assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); + assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); + funcIdx--; } + + return funcIdx; } #if HAS_FIXED_REGISTER_SET @@ -4517,9 +4488,6 @@ GenTree::VisitResult GenTree::VisitOperands(TVisitor visitor) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 2482c1b22c6ce8..13f3fc7f3c2f25 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -2300,10 +2300,7 @@ void emitter::emitGeneratePrologEpilog() if (emitComp->verbose) { printf("%d prologs, %d epilogs", prologCnt, epilogCnt); - if (emitComp->UsesFunclets()) - { - printf(", %d funclet prologs, %d funclet epilogs", funcletPrologCnt, funcletEpilogCnt); - } + printf(", %d funclet prologs, %d funclet epilogs", funcletPrologCnt, funcletEpilogCnt); printf("\n"); // prolog/epilog code doesn't use this yet @@ -2514,7 +2511,6 @@ void emitter::emitEndFnEpilog() void emitter::emitBegFuncletProlog(insGroup* igPh) { - assert(emitComp->UsesFunclets()); emitBegPrologEpilog(igPh); } @@ -2525,7 +2521,6 @@ void emitter::emitBegFuncletProlog(insGroup* igPh) void emitter::emitEndFuncletProlog() { - assert(emitComp->UsesFunclets()); emitEndPrologEpilog(); } @@ -2536,7 +2531,6 @@ void emitter::emitEndFuncletProlog() void emitter::emitBegFuncletEpilog(insGroup* igPh) { - assert(emitComp->UsesFunclets()); emitBegPrologEpilog(igPh); } @@ -2547,7 +2541,6 @@ void emitter::emitBegFuncletEpilog(insGroup* igPh) void emitter::emitEndFuncletEpilog() { - assert(emitComp->UsesFunclets()); emitEndPrologEpilog(); } @@ -6762,7 +6755,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, emitFullGCinfo = fullPtrMap; #if TARGET_X86 // On x86 with funclets we emit full ptr map even for EBP frames - emitFullArgInfo = comp->UsesFunclets() ? 
fullPtrMap : !emitHasFramePtr; + emitFullArgInfo = fullPtrMap; #else emitFullArgInfo = !emitHasFramePtr; #endif @@ -7165,16 +7158,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, // printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); -#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) - // Remember the frame offset of the "this" argument for synchronized methods. - if (!emitComp->UsesFunclets() && emitComp->lvaIsOriginalThisArg(num) && - emitComp->lvaKeepAliveAndReportThis()) - { - emitSyncThisObjOffs = offs; - offs |= this_OFFSET_FLAG; - } -#endif // JIT32_GCENCODER && FEATURE_EH_WINDOWS_X86 - if (dsc->TypeIs(TYP_BYREF)) { offs |= byref_OFFSET_FLAG; @@ -8920,13 +8903,6 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp /* the lower 2 bits encode props about the stk ptr */ -#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) - if (!emitComp->UsesFunclets() && offs == emitSyncThisObjOffs) - { - desc->vpdVarNum |= this_OFFSET_FLAG; - } -#endif - if (gcType == GCT_BYREF) { desc->vpdVarNum |= byref_OFFSET_FLAG; diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index c650555c9f381f..9af008e90f3300 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -3925,10 +3925,6 @@ void Compiler::fgFindBasicBlocks() * try-finally blocks. */ -#if defined(FEATURE_EH_WINDOWS_X86) - HBtab->ebdHandlerNestingLevel = 0; -#endif // FEATURE_EH_WINDOWS_X86 - HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; @@ -3937,13 +3933,6 @@ void Compiler::fgFindBasicBlocks() for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) { -#if defined(FEATURE_EH_WINDOWS_X86) - if (!UsesFunclets() && jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) - { - xtab->ebdHandlerNestingLevel++; - } -#endif // FEATURE_EH_WINDOWS_X86 - /* If we haven't recorded an enclosing try index for xtab then see * if this EH region should be recorded. We check if the * first offset in the xtab lies within our region. If so, @@ -3975,17 +3964,6 @@ void Compiler::fgFindBasicBlocks() } // end foreach handler table entry -#if defined(FEATURE_EH_WINDOWS_X86) - if (!UsesFunclets()) - { - for (EHblkDsc* const HBtab : EHClauses(this)) - { - if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) - ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; - } - } -#endif // FEATURE_EH_WINDOWS_X86 - { // always run these checks for a debug build verCheckNestingLevel(initRoot); @@ -4225,7 +4203,7 @@ void Compiler::fgCheckBasicBlockControlFlow() } break; - case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only if UsesFunclets() == true) + case BBJ_EHCATCHRET: // block ends with a leave out of a catch case BBJ_CALLFINALLY: // block always calls the target finally default: noway_assert(!"Unexpected bbKind"); // these blocks don't get created until importing @@ -5063,16 +5041,13 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) } #ifdef DEBUG - if (UsesFunclets()) + // You can't unlink a range that includes the first funclet block. A range certainly + // can't cross the non-funclet/funclet region. And you can't unlink the first block + // of the first funclet with this, either. (If that's necessary, it could be allowed + // by updating fgFirstFuncletBB to bEnd->bbNext.) + for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) { - // You can't unlink a range that includes the first funclet block. 
A range certainly - // can't cross the non-funclet/funclet region. And you can't unlink the first block - // of the first funclet with this, either. (If that's necessary, it could be allowed - // by updating fgFirstFuncletBB to bEnd->bbNext.) - for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) - { - assert(tempBB != fgFirstFuncletBB); - } + assert(tempBB != fgFirstFuncletBB); } #endif // DEBUG } @@ -5341,7 +5316,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* bPrev = nullptr; // We don't support moving try regions... yet? - noway_assert(!UsesFunclets() || relocateType == FG_RELOCATE_HANDLER); + noway_assert(relocateType == FG_RELOCATE_HANDLER); HBtab = ehGetDsc(regionIndex); @@ -5379,24 +5354,11 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r goto FAILURE; } -#if defined(FEATURE_EH_WINDOWS_X86) - // In the funclets case, we still need to set some information on the handler blocks - if (!UsesFunclets() && bLast->IsLast()) - { - INDEBUG(reason = "region is already at the end of the method";) - goto FAILURE; - } -#endif // FEATURE_EH_WINDOWS_X86 - // Walk the block list for this purpose: // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks // in the range. -#if defined(FEATURE_EH_WINDOWS_X86) - bool isRare; - isRare = bStart->isRunRarely(); -#endif // FEATURE_EH_WINDOWS_X86 block = fgFirstBB; while (true) { @@ -5414,15 +5376,6 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r if (inTheRange) { -#if defined(FEATURE_EH_WINDOWS_X86) - // Unless all blocks are (not) run rarely we must return false. - if (!UsesFunclets() && isRare != block->isRunRarely()) - { - INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks";) - goto FAILURE; - } -#endif // FEATURE_EH_WINDOWS_X86 - validRange = true; } @@ -5449,15 +5402,6 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgDispHandlerTab(); } -#if defined(FEATURE_EH_WINDOWS_X86) - // This is really expensive, and quickly becomes O(n^n) with funclets - // so only do it once after we've created them (see fgCreateFunclets) - if (!UsesFunclets() && expensiveDebugCheckLevel >= 2) - { - fgDebugCheckBBlist(); - } -#endif - #endif // DEBUG BasicBlock* bNext; @@ -5469,7 +5413,6 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* insertAfterBlk; insertAfterBlk = fgLastBB; - if (UsesFunclets()) { // There are several cases we need to consider when moving an EH range. 
// If moving a range X, we must consider its relationship to every other EH @@ -5572,59 +5515,6 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r // Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex #endif // DEBUG } - else - { -#if defined(FEATURE_EH_WINDOWS_X86) - for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) - { - if (XTnum == regionIndex) - { - // Don't update our handler's Last info - continue; - } - - if (HBtab->ebdTryLast == bLast) - { - // If we moved a set of blocks that were at the end of - // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next()) - { - if (block == bPrev) - { - fgSetTryEnd(HBtab, bPrev); - break; - } - else if (HBtab->ebdTryLast->NextIs(block)) - { - // bPrev does not come after the TryBeg - break; - } - } - } - if (HBtab->ebdHndLast == bLast) - { - // If we moved a set of blocks that were at the end of - // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next()) - { - if (block == bPrev) - { - fgSetHndEnd(HBtab, bPrev); - break; - } - else if (HBtab->ebdHndLast->NextIs(block)) - { - // bPrev does not come after the HndBeg - break; - } - } - } - } // end exception table iteration - - // We have decided to insert the block(s) after fgLastBlock - fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); -#endif // FEATURE_EH_WINDOWS_X86 - } goto DONE; @@ -6242,7 +6132,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, // Figure out the start and end block range to search for an insertion location. Pick the beginning and // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make - // loop iteration easier). Note that, after funclets have been created (for UsesFunclets() == true), + // loop iteration easier). Note that, after funclets have been created, // this linear block range will not include blocks of handlers for try/handler clauses nested within // this EH region, as those blocks have been extracted as funclets. That is ok, though, because we don't // want to insert a block in any nested EH region. diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index c1f703095d4aa0..146866f961d696 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -3109,7 +3109,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // try { // try { // LEAVE L_OUTER; // this becomes a branch to a BBJ_CALLFINALLY in an outer try region - // // (in the UsesCallFinallyThunks case) // } catch { // } // } finally { @@ -3120,8 +3119,8 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef if (ehDsc->ebdTryBeg == succBlock) { // The BBJ_CALLFINALLY is the first block of its `try` region. Don't check the predecessor. - // Note that this case won't occur in the UsesCallFinallyThunks case, since the - // BBJ_CALLFINALLY in that case won't exist in the `try` region of the `finallyIndex`. + // Note that this case won't occur: the callfinally thunk is placed in the enclosing EH region, + // so the BBJ_CALLFINALLY won't exist in the `try` region of the `finallyIndex`. 
} else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index dfd8fa5e973570..5ead84705c03aa 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -181,9 +181,6 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() postTryFinallyBlock->increaseBBProfileWeight(currentBlock->bbWeight); } - // Cleanup the postTryFinallyBlock - fgCleanupContinuation(postTryFinallyBlock); - // Make sure iteration isn't going off the deep end. assert(leaveBlock != endCallFinallyRangeBlock); } @@ -545,62 +542,36 @@ PhaseStatus Compiler::fgRemoveEmptyTry() continue; } - if (UsesCallFinallyThunks()) + // Look for blocks that are always jumps to a call finally + // pair that targets the finally + if (!firstTryBlock->KindIs(BBJ_ALWAYS)) { - // Look for blocks that are always jumps to a call finally - // pair that targets the finally - if (!firstTryBlock->KindIs(BBJ_ALWAYS)) - { - JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, - firstTryBlock->bbNum); - XTnum++; - continue; - } - - callFinally = firstTryBlock->GetTarget(); + JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, + firstTryBlock->bbNum); + XTnum++; + continue; + } - // Look for call finally pair. Note this will also disqualify - // empty try removal in cases where the finally doesn't - // return. - if (!callFinally->isBBCallFinallyPair() || !callFinally->TargetIs(firstHandlerBlock)) - { - JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum, - firstTryBlock->bbNum); - XTnum++; - continue; - } + callFinally = firstTryBlock->GetTarget(); - // Try itself must be a single block. - if (firstTryBlock != lastTryBlock) - { - JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, - firstTryBlock->Next()->bbNum); - XTnum++; - continue; - } - } - else + // Look for call finally pair. Note this will also disqualify + // empty try removal in cases where the finally doesn't + // return. + if (!callFinally->isBBCallFinallyPair() || !callFinally->TargetIs(firstHandlerBlock)) { - // Look for call finally pair within the try itself. Note this - // will also disqualify empty try removal in cases where the - // finally doesn't return. - if (!firstTryBlock->isBBCallFinallyPair() || !firstTryBlock->TargetIs(firstHandlerBlock)) - { - JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); - XTnum++; - continue; - } - - callFinally = firstTryBlock; + JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum, + firstTryBlock->bbNum); + XTnum++; + continue; + } - // Try must be a callalways pair of blocks. - if (!firstTryBlock->NextIs(lastTryBlock)) - { - JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, - firstTryBlock->Next()->bbNum); - XTnum++; - continue; - } + // Try itself must be a single block. 
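+ // (together with the checks above, this means the try contains nothing but the jump to the callfinally)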
+ if (firstTryBlock != lastTryBlock) + { + JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, + firstTryBlock->Next()->bbNum); + XTnum++; + continue; } JITDUMP("EH#%u has empty try, removing the try region and promoting the finally.\n", XTnum); @@ -687,16 +658,11 @@ PhaseStatus Compiler::fgRemoveEmptyTry() callFinally->SetKind(BBJ_ALWAYS); callFinally->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY - // (4) Cleanup the continuation - fgCleanupContinuation(continuation); - - // (5) Update the directly contained handler blocks' handler index. + // (4) Update the directly contained handler blocks' handler index. // Handler index of any nested blocks will update when we // remove the EH table entry. Change handler exits to jump to // the continuation. Clear catch type on handler entry. // - // GT_END_LFIN no longer need updates here, now their gtVal1 fields refer to EH IDs. - // for (BasicBlock* const block : Blocks(firstHandlerBlock, lastHandlerBlock)) { if (block == firstHandlerBlock) @@ -733,17 +699,17 @@ PhaseStatus Compiler::fgRemoveEmptyTry() } } - // (6) Update any impacted ACDs. + // (5) Update any impacted ACDs. // fgUpdateACDsBeforeEHTableEntryRemoval(XTnum); - // (7) Remove the try-finally EH region. This will compact the + // (6) Remove the try-finally EH region. This will compact the // EH table so XTnum now points at the next entry and will update // the EH region indices of any nested EH in the (former) handler. // fgRemoveEHTableEntry(XTnum); - // (8) The handler entry has an artificial extra ref count. Remove it. + // (7) The handler entry has an artificial extra ref count. Remove it. // There also should be one normal ref, from the try, and the handler // may contain internal branches back to its start. So the ref count // should currently be at least 2. @@ -1264,26 +1230,19 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* jumpDest = nullptr; - if (UsesCallFinallyThunks()) - { - // Blocks that transfer control to callfinallies are usually - // BBJ_ALWAYS blocks, but the last block of a try may fall - // through to a callfinally, or could be the target of a BBJ_CALLFINALLYRET, - // indicating a chained callfinally. - - if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET)) - { - jumpDest = block->GetTarget(); - } + // Blocks that transfer control to callfinallies are usually + // BBJ_ALWAYS blocks, but the last block of a try may fall + // through to a callfinally, or could be the target of a BBJ_CALLFINALLYRET, + // indicating a chained callfinally. - if (jumpDest == nullptr) - { - continue; - } + if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET)) + { + jumpDest = block->GetTarget(); } - else + + if (jumpDest == nullptr) { - jumpDest = block; + continue; } // The jumpDest must be a callfinally that in turn invokes the @@ -1351,28 +1310,18 @@ PhaseStatus Compiler::fgCloneFinally() isUpdate = true; } - if (UsesCallFinallyThunks()) - { - // When there are callfinally thunks, we don't expect to see the - // callfinally within a handler region either. - assert(!jumpDest->hasHndIndex()); + // When there are callfinally thunks, we don't expect to see the + // callfinally within a handler region either. + assert(!jumpDest->hasHndIndex()); - // Update the clone insertion point to just after the - // call always pair. - cloneInsertAfter = finallyReturnBlock; + // Update the clone insertion point to just after the + // call always pair. 
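+ // (finallyReturnBlock is the BBJ_CALLFINALLYRET half of that callfinally pair)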
+ cloneInsertAfter = finallyReturnBlock; - JITDUMP("%s path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" - " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", - isUpdate ? "Updating" : "Choosing", block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, - postTryFinallyBlock->bbNum); - } - else - { - JITDUMP("%s path to clone: try block " FMT_BB " is a callfinally;" - " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", - isUpdate ? "Updating" : "Choosing", block->bbNum, finallyReturnBlock->bbNum, - postTryFinallyBlock->bbNum); - } + JITDUMP("%s path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" + " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", + isUpdate ? "Updating" : "Choosing", block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, + postTryFinallyBlock->bbNum); // For non-pgo just take the first one we find. // For pgo, keep searching in case we find one we like better. @@ -1621,9 +1570,6 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* firstClonedBlock = blockMap[firstBlock]; firstClonedBlock->bbCatchTyp = BBCT_NONE; - // Cleanup the continuation - fgCleanupContinuation(normalCallFinallyReturn); - // If we have profile data, compute how the weights split, // and update the weights in both the clone and the original. // @@ -1770,7 +1716,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. - if (UsesCallFinallyThunks() && block->KindIs(BBJ_CALLFINALLY)) + if (block->KindIs(BBJ_CALLFINALLY)) { continue; } @@ -1784,24 +1730,18 @@ void Compiler::fgDebugCheckTryFinallyExits() // There are various ways control can properly leave a // try-finally (or try-fault-was-finally): // - // (a1) via a jump to a callfinally (only for finallys, only for call finally thunks) - // (a2) via a callfinally (only for finallys, only for !call finally thunks) + // (a) via a jump to a callfinally (only for finallys) // (b) via a jump to a begin finally clone block // (c) via a jump to an empty block to (b) // (d) via the callfinallyret half of a callfinally pair // (e) via an always jump clonefinally exit bool isCallToFinally = false; - if (UsesCallFinallyThunks() && succBlock->KindIs(BBJ_CALLFINALLY)) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { - // case (a1) + // case (a) isCallToFinally = isFinally && succBlock->TargetIs(finallyBlock); } - else if (!UsesCallFinallyThunks() && block->KindIs(BBJ_CALLFINALLY)) - { - // case (a2) - isCallToFinally = isFinally && block->TargetIs(finallyBlock); - } bool isJumpToClonedFinally = false; @@ -1866,58 +1806,6 @@ void Compiler::fgDebugCheckTryFinallyExits() #endif // DEBUG -//------------------------------------------------------------------------ -// fgCleanupContinuation: cleanup a finally continuation after a -// finally is removed or converted to normal control flow. -// -// Notes: -// The continuation is the block targeted by the second half of -// a callfinally pair. -// -// Used by finally cloning, empty try removal, and empty -// finally removal. -// -void Compiler::fgCleanupContinuation(BasicBlock* continuation) -{ -#if defined(FEATURE_EH_WINDOWS_X86) - if (!UsesFunclets()) - { - // The continuation may be a finalStep block. - // It is now a normal block, so clear the special keep - // always flag. 
- continuation->RemoveFlags(BBF_KEEP_BBJ_ALWAYS); - - // Remove the GT_END_LFIN from the continuation, - // Note we only expect to see one such statement. - // - bool foundEndLFin = false; - bool isEmpty = true; - for (Statement* const stmt : continuation->Statements()) - { - isEmpty = false; - GenTree* expr = stmt->GetRootNode(); - if (expr->OperIs(GT_END_LFIN)) - { - assert(!foundEndLFin); - fgRemoveStmt(continuation, stmt); - foundEndLFin = true; - } - } - - // If the continuation is unreachable, morph may - // have changed the continuation to an empty BBJ_THROW. - // Tolerate. - // - if (isEmpty && continuation->KindIs(BBJ_THROW)) - { - return; - } - - assert(foundEndLFin); - } -#endif // FEATURE_EH_WINDOWS_X86 -} - //------------------------------------------------------------------------ // fgMergeFinallyChains: tail merge finally invocations // @@ -1957,35 +1845,6 @@ PhaseStatus Compiler::fgMergeFinallyChains() return PhaseStatus::MODIFIED_NOTHING; } - bool enableMergeFinallyChains = true; - -#if defined(FEATURE_EH_WINDOWS_X86) - if (!UsesFunclets()) - { - // For non-funclet models (x86) the callfinallys may contain - // statements and the continuations contain GT_END_LFINs. So no - // merging is possible until the GT_END_LFIN blocks can be merged - // and merging is not safe unless the callfinally blocks are split. - JITDUMP("EH using non-funclet model; merging not yet implemented.\n"); - enableMergeFinallyChains = false; - } -#endif // FEATURE_EH_WINDOWS_X86 - - if (!UsesCallFinallyThunks()) - { - // For non-thunk EH models (x86) the callfinallys may contain - // statements, and merging is not safe unless the callfinally - // blocks are split. - JITDUMP("EH using non-callfinally thunk model; merging not yet implemented.\n"); - enableMergeFinallyChains = false; - } - - if (!enableMergeFinallyChains) - { - JITDUMP("fgMergeFinallyChains disabled\n"); - return PhaseStatus::MODIFIED_NOTHING; - } - #ifdef DEBUG if (verbose) { @@ -2609,27 +2468,6 @@ BasicBlock* Compiler::fgCloneTryRegion(BasicBlock* tryEntry, CloneTryInfo& info, else if (block->KindIs(BBJ_CALLFINALLYRET) && block->Prev()->TargetIs(ebd->ebdHndBeg)) { addBlockToClone(block, "callfinallyret"); - -#if defined(FEATURE_EH_WINDOWS_X86) - - // For non-funclet X86 we must also clone the next block after the callfinallyret. - // (it will contain an END_LFIN). But if this block is also a CALLFINALLY we - // bail out, since we can't clone it in isolation, but we need to clone it. - // (a proper fix would be to split the block, perhaps). - // - if (!UsesFunclets()) - { - BasicBlock* const lfin = block->GetTarget(); - - if (lfin->KindIs(BBJ_CALLFINALLY)) - { - JITDUMP("Can't clone, as an END_LFIN is contained in CALLFINALLY block " FMT_BB "\n", - lfin->bbNum); - return nullptr; - } - addBlockToClone(lfin, "lfin-continuation"); - } -#endif } } } @@ -3017,22 +2855,6 @@ BasicBlock* Compiler::fgCloneTryRegion(BasicBlock* tryEntry, CloneTryInfo& info, newBlock->bbRefs++; } } - -#if defined(FEATURE_EH_WINDOWS_X86) - // Update the EH ID for any cloned GT_END_LFIN. 
- // - for (Statement* const stmt : newBlock->Statements()) - { - GenTree* const rootNode = stmt->GetRootNode(); - if (rootNode->OperIs(GT_END_LFIN)) - { - GenTreeVal* const endNode = rootNode->AsVal(); - EHblkDsc* const oldEbd = ehFindEHblkDscById((unsigned short)endNode->gtVal1); - EHblkDsc* const newEbd = oldEbd + indexShift; - endNode->gtVal1 = newEbd->ebdID; - } - } -#endif } JITDUMP("Done fixing region indices\n"); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 480cc6a88263e1..09d0c8819b68a1 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1465,7 +1465,6 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) * abort exceptions to work. Insert a NOP in the empty block * to ensure we generate code for the block, if we keep it. */ - if (UsesFunclets()) { BasicBlock* succBlock = block->GetTarget(); diff --git a/src/coreclr/jit/fgstmt.cpp b/src/coreclr/jit/fgstmt.cpp index f5ab387e262416..996f5b684e7bcf 100644 --- a/src/coreclr/jit/fgstmt.cpp +++ b/src/coreclr/jit/fgstmt.cpp @@ -540,9 +540,6 @@ inline bool OperIsControlFlow(genTreeOps oper) case GT_RETFILT: case GT_SWIFT_ERROR_RET: case GT_RETURN_SUSPEND: -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // FEATURE_EH_WINDOWS_X86 return true; default: diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 43a0305c9b48c7..469a10ca922840 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1410,8 +1410,6 @@ GenTree* Compiler::fgGetCritSectOfStaticMethod() void Compiler::fgAddSyncMethodEnterExit() { - assert(UsesFunclets()); - assert((info.compFlags & CORINFO_FLG_SYNCH) != 0); // We need to do this transformation before funclets are created. @@ -2391,7 +2389,7 @@ PhaseStatus Compiler::fgAddInternal() // BBJ_RETURN block gets placed at the top-level, not within an EH region. (Otherwise, // we'd have to be really careful when creating the synchronized method try/finally // not to include the BBJ_RETURN block.) - if (UsesFunclets() && (info.compFlags & CORINFO_FLG_SYNCH) != 0) + if ((info.compFlags & CORINFO_FLG_SYNCH) != 0) { fgAddSyncMethodEnterExit(); } @@ -2521,77 +2519,6 @@ PhaseStatus Compiler::fgAddInternal() madeChanges = true; } -#if defined(FEATURE_EH_WINDOWS_X86) - - /* Is this a 'synchronized' method? 
*/ - - if (!UsesFunclets() && (info.compFlags & CORINFO_FLG_SYNCH)) - { - GenTree* tree = nullptr; - - /* Insert the expression "enterCrit(this)" or "enterCrit(handle)" */ - - if (info.compIsStatic) - { - tree = fgGetCritSectOfStaticMethod(); - } - else - { - noway_assert(lvaTable[info.compThisArg].lvType == TYP_REF); - tree = gtNewLclvNode(info.compThisArg, TYP_REF); - } - - tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER, TYP_VOID, tree); - - fgNewStmtAtBeg(fgFirstBB, tree); - -#ifdef DEBUG - if (verbose) - { - printf("\nSynchronized method - Add enterCrit statement in first basic block %s\n", - fgFirstBB->dspToString()); - gtDispTree(tree); - printf("\n"); - } -#endif - - /* We must be generating a single exit point for this to work */ - - noway_assert(genReturnBB != nullptr); - - /* Create the expression "exitCrit(this)" or "exitCrit(handle)" */ - - if (info.compIsStatic) - { - tree = fgGetCritSectOfStaticMethod(); - } - else - { - tree = gtNewLclvNode(info.compThisArg, TYP_REF); - } - - tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT, TYP_VOID, tree); - - fgNewStmtNearEnd(genReturnBB, tree); - -#ifdef DEBUG - if (verbose) - { - printf("\nSynchronized method - Add exitCrit statement in single return block %s\n", - genReturnBB->dspToString()); - gtDispTree(tree); - printf("\n"); - } -#endif - - // Reset cookies used to track start and end of the protected region in synchronized methods - syncStartEmitCookie = nullptr; - syncEndEmitCookie = nullptr; - madeChanges = true; - } - -#endif // FEATURE_EH_WINDOWS_X86 - if (opts.IsReversePInvoke()) { fgAddReversePInvokeEnterExit(); @@ -2955,7 +2882,6 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) } #endif - assert(UsesFunclets()); assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler @@ -3025,7 +2951,6 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // void Compiler::fgCreateFuncletPrologBlocks() { - assert(UsesFunclets()); noway_assert(fgPredsComputed); assert(!fgFuncletsCreated); @@ -3090,7 +3015,6 @@ void Compiler::fgCreateFuncletPrologBlocks() // PhaseStatus Compiler::fgCreateFunclets() { - assert(UsesFunclets()); assert(!fgFuncletsCreated); fgCreateFuncletPrologBlocks(); @@ -3166,8 +3090,6 @@ PhaseStatus Compiler::fgCreateFunclets() // bool Compiler::fgFuncletsAreCold() { - assert(UsesFunclets()); - for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->Next()) { if (!block->isRunRarely()) @@ -3505,7 +3427,7 @@ void Compiler::fgAddCodeRef(BasicBlock* srcBlk, SpecialCodeKind kind) add->acdTryIndex = srcBlk->bbTryIndex; // For non-funclet EH we don't constrain ACD placement via handler regions - add->acdHndIndex = UsesFunclets() ? 
srcBlk->bbHndIndex : 0; + add->acdHndIndex = srcBlk->bbHndIndex; add->acdKeyDsg = dsg; add->acdKind = kind; @@ -3769,19 +3691,6 @@ Compiler::AddCodeDsc* Compiler::fgFindExcptnTarget(SpecialCodeKind kind, BasicBl // unsigned Compiler::bbThrowIndex(BasicBlock* blk, AcdKeyDesignator* dsg) { - if (!UsesFunclets()) - { - if (blk->hasTryIndex()) - { - *dsg = AcdKeyDesignator::KD_TRY; - } - else - { - *dsg = AcdKeyDesignator::KD_NONE; - } - return blk->bbTryIndex; - } - const unsigned tryIndex = blk->bbTryIndex; const unsigned hndIndex = blk->bbHndIndex; const bool inTry = tryIndex > 0; @@ -3907,14 +3816,7 @@ bool Compiler::AddCodeDsc::UpdateKeyDesignator(Compiler* compiler) AcdKeyDesignator newDsg = AcdKeyDesignator::KD_NONE; - if (!compiler->UsesFunclets()) - { - // Non-funclet case - // - assert(acdKeyDsg != AcdKeyDesignator::KD_FLT); - newDsg = inTry ? AcdKeyDesignator::KD_TRY : AcdKeyDesignator::KD_NONE; - } - else if (!inTry && !inHnd) + if (!inTry && !inHnd) { // Moved outside of all EH regions. // diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index 2f4ad6c3322931..c2679ab07bd728 100644 --- a/src/coreclr/jit/gcencode.cpp +++ b/src/coreclr/jit/gcencode.cpp @@ -85,7 +85,6 @@ ReturnKind GCInfo::getReturnKind() // void GCInfo::gcMarkFilterVarsPinned() { - assert(compiler->UsesFunclets()); assert(compiler->ehAnyFunclets()); for (EHblkDsc* const HBtab : EHClauses(compiler)) @@ -293,8 +292,6 @@ void GCInfo::gcMarkFilterVarsPinned() void GCInfo::gcInsertVarPtrDscSplit(varPtrDsc* desc, varPtrDsc* begin) { - assert(compiler->UsesFunclets()); - #ifndef JIT32_GCENCODER (void)begin; desc->vpdNext = gcVarPtrList; @@ -333,8 +330,6 @@ void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc) const GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF; const bool isPin = (desc->vpdVarNum & pinned_OFFSET_FLAG) != 0; - assert(compiler->UsesFunclets()); - printf("[%08X] %s%s var at [%s", dspPtr(desc), GCtypeStr(gcType), isPin ? "pinned-ptr" : "", compiler->isFramePointerUsed() ? STR_FPBASE : STR_SPBASE); @@ -1590,24 +1585,7 @@ size_t GCInfo::gcInfoBlockHdrSave( header->syncStartOffset = INVALID_SYNC_OFFSET; header->syncEndOffset = INVALID_SYNC_OFFSET; -#if defined(FEATURE_EH_WINDOWS_X86) - // JIT is responsible for synchronization on funclet-based EH model that x86/Linux uses. 
-    if (!compiler->UsesFunclets() && compiler->info.compFlags & CORINFO_FLG_SYNCH)
-    {
-        assert(compiler->syncStartEmitCookie != nullptr);
-        header->syncStartOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0);
-        assert(header->syncStartOffset != INVALID_SYNC_OFFSET);
-
-        assert(compiler->syncEndEmitCookie != nullptr);
-        header->syncEndOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncEndEmitCookie, 0);
-        assert(header->syncEndOffset != INVALID_SYNC_OFFSET);
-
-        assert(header->syncStartOffset < header->syncEndOffset);
-        // synchronized methods can't have more than 1 epilog
-        assert(header->epilogCount <= 1);
-    }
-#endif
-    if (compiler->UsesFunclets() && compiler->info.compFlags & CORINFO_FLG_SYNCH)
+    if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
     {
         // While the sync start offset and end offset are not used by the stackwalker/EH system
         // in funclets mode, we do need to know if the code is synchronized if we are generating
@@ -2385,45 +2363,16 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
 **************************************************************************
 */
 
-    bool keepThisAlive = false;
-
     if (!compiler->info.compIsStatic)
     {
         unsigned thisArgNum = compiler->info.compThisArg;
-        gcIsUntrackedLocalOrNonEnregisteredArg(thisArgNum, &keepThisAlive);
+        gcIsUntrackedLocalOrNonEnregisteredArg(thisArgNum);
     }
 
     // First we check for the most common case - no lifetimes at all.
 
     if (header.varPtrTableSize != 0)
     {
-#if defined(FEATURE_EH_WINDOWS_X86)
-        if (!compiler->UsesFunclets() && keepThisAlive)
-        {
-            // Encoding of untracked variables does not support reporting
-            // "this". So report it as a tracked variable with a liveness
-            // extending over the entire method.
-
-            assert(compiler->lvaTable[compiler->info.compThisArg].TypeIs(TYP_REF));
-
-            unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].GetStackOffset();
-
-            /* For negative stack offsets we must reset the low bits,
-             * take abs and then set them back */
-
-            varOffs = abs(static_cast<int>(varOffs));
-            varOffs |= this_OFFSET_FLAG;
-
-            size_t sz = 0;
-            sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
-            sz += encodeUDelta(mask ? (dest + sz) : NULL, 0, 0);
-            sz += encodeUDelta(mask ? (dest + sz) : NULL, codeSize, 0);
-
-            dest += (sz & mask);
-            totalSize += sz;
-        }
-#endif // FEATURE_EH_WINDOWS_X86
-
         /* We'll use a delta encoding for the lifetime offsets */
 
         lastOffset = 0;
@@ -2665,7 +2614,6 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
             assert((codeDelta & 0x7) == codeDelta);
             *dest++ = 0xB0 | (BYTE)codeDelta;
-            assert(compiler->UsesFunclets() || !compiler->isFramePointerUsed());
 
             /* Remember the new 'last' offset */
@@ -2791,7 +2739,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
 
             dest = gceByrefPrefixI(genRegPtrTemp, dest);
 
-            if (!keepThisAlive && genRegPtrTemp->rpdIsThis)
+            if (genRegPtrTemp->rpdIsThis)
             {
                 // Mark with 'this' pointer prefix
                 *dest++ = 0xBC;
@@ -3230,7 +3178,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
             unsigned origCodeDelta = codeDelta;
 #endif
 
-            if (!keepThisAlive && genRegPtrTemp->rpdIsThis)
+            if (genRegPtrTemp->rpdIsThis)
             {
                 unsigned tmpMask = genRegPtrTemp->rpdCompiler.rpdAdd;
@@ -4761,22 +4709,6 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode
     // unused by alignment
     static_assert((OFFSET_MASK + 1) <= sizeof(int));
 
-#if defined(DEBUG) && defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86)
-    if (!compiler->UsesFunclets() && mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
-    {
-        // Tracked variables can't be pinned, and the encoding takes
-        // advantage of that by using the same bit for 'pinned' and 'this'
-        // Since we don't track 'this', we should never see either flag here.
-        // Check it now before we potentially add some pinned flags.
-        for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
-        {
-            const unsigned flags = varTmp->vpdVarNum & OFFSET_MASK;
-            assert((flags & pinned_OFFSET_FLAG) == 0);
-            assert((flags & this_OFFSET_FLAG) == 0);
-        }
-    }
-#endif
-
     // Only need to do this once, and only if we have EH.
     if ((mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) && compiler->ehAnyFunclets())
     {
diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp
index b68c662c9fccb4..6f3b7a0a8d469e 100644
--- a/src/coreclr/jit/gcinfo.cpp
+++ b/src/coreclr/jit/gcinfo.cpp
@@ -442,7 +442,6 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount,
     unsigned   varNum;
     LclVarDsc* varDsc;
 
-    bool keepThisAlive = false; // did we track "this" in a synchronized method?
     unsigned int untrackedCount = 0;
 
     // Count the untracked locals and non-enregistered args.
@@ -458,7 +457,7 @@
         if (varTypeIsGC(varDsc->TypeGet()))
         {
-            if (!gcIsUntrackedLocalOrNonEnregisteredArg(varNum, &keepThisAlive))
+            if (!gcIsUntrackedLocalOrNonEnregisteredArg(varNum))
             {
                 continue;
             }
@@ -539,11 +538,6 @@
     unsigned int varPtrTableSize = 0;
 
-    if (keepThisAlive)
-    {
-        varPtrTableSize++;
-    }
-
     if (gcVarPtrList != nullptr)
     {
         // We'll use a delta encoding for the lifetime offsets.
@@ -591,13 +585,11 @@
 //
 // Arguments:
 //    varNum - the variable number to check;
-//    pKeepThisAlive - if !UsesFunclets() and the argument != nullptr remember
-//                     if `this` should be kept alive and considered tracked.
 //
 // Return value:
 //    true if it is an untracked pointer value.
// -bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pKeepThisAlive) +bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); @@ -640,26 +632,6 @@ bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pKeep } } -#if defined(FEATURE_EH_WINDOWS_X86) - if (!compiler->UsesFunclets() && compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis()) - { - // "this" is in the untracked variable area, but encoding of untracked variables does not support reporting - // "this". So report it as a tracked variable with a liveness extending over the entire method. - // - // TODO-x86-Cleanup: the semantic here is not clear, it would be useful to check different cases and - // add a description where "this" is saved and how it is tracked in each of them: - // 1) when UsesFunclets() == true (x86 Linux); - // 2) when UsesFunclets() == false, lvaKeepAliveAndReportThis == true, compJmpOpUsed == true; - // 3) when there is regPtrDsc for "this", but keepThisAlive == true; - // etc. - - if (pKeepThisAlive != nullptr) - { - *pKeepThisAlive = true; - } - return false; - } -#endif // FEATURE_EH_WINDOWS_X86 return true; } diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 05edb2adb5f170..c269f5a216e7bc 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -6751,9 +6751,6 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: @@ -9622,9 +9619,6 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) copy = new (this, oper) GenTree(oper, tree->gtType); goto DONE; -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // FEATURE_EH_WINDOWS_X86 case GT_JMP: case GT_RECORD_ASYNC_RESUME: case GT_ASYNC_RESUME_INFO: @@ -10396,9 +10390,6 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: @@ -11906,12 +11897,6 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, cons ilName = "OutArgs"; } #endif // FEATURE_FIXED_OUT_ARGS -#if defined(FEATURE_EH_WINDOWS_X86) - else if (lclNum == lvaShadowSPslotsVar) - { - ilName = "EHSlots"; - } -#endif // FEATURE_EH_WINDOWS_X86 #ifdef JIT32_GCENCODER else if (lclNum == lvaLocAllocSPvar) { @@ -12448,12 +12433,6 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) } break; -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: - printf(" ehID=%d", tree->AsVal()->gtVal1); - break; -#endif // FEATURE_EH_WINDOWS_X86 - // Vanilla leaves. No qualifying information available. So do nothing case GT_NOP: diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h index 367b91498c21ba..d30b6f10c5d612 100644 --- a/src/coreclr/jit/gtlist.h +++ b/src/coreclr/jit/gtlist.h @@ -331,9 +331,6 @@ GTNODE(START_PREEMPTGC , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHI GTNODE(PROF_HOOK , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Profiler Enter/Leave/TailCall hook. GTNODE(RETFILT , GenTreeOp ,0,1,GTK_UNOP|GTK_NOVALUE) // End filter with TYP_I_IMPL return value. 
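
gtlist.h is an X-macro table: each GTNODE(...) row is expanded under whatever definition of GTNODE the including file supplies, so deleting the single END_LFIN row below retires the oper's enum value, name string, and node metadata in every generated table at once (gtstructs.h, changed just after, is the analogous per-struct list). A minimal sketch of the technique, with a toy two-node list standing in for the JIT's real one:

    #include <cstdio>

    // Toy stand-in for gtlist.h: one row per node kind (names here are
    // illustrative, not the JIT's actual list).
    #define NODE_LIST(X) \
        X(NOP)           \
        X(RETFILT)

    // First expansion: the oper enum.
    enum Oper
    {
    #define X(name) GT_##name,
        NODE_LIST(X)
    #undef X
    };

    // Second expansion: the parallel name table, kept in sync automatically.
    static const char* const operNames[] = {
    #define X(name) #name,
        NODE_LIST(X)
    #undef X
    };

    int main()
    {
        std::printf("%s = %d\n", operNames[GT_RETFILT], (int)GT_RETFILT); // prints "RETFILT = 1"
        return 0;
    }

Dropping a row from NODE_LIST removes it from both expansions with no further edits, which is what the END_LFIN deletions here rely on.
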
-#if defined(FEATURE_EH_WINDOWS_X86) -GTNODE(END_LFIN , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // End locally-invoked finally. -#endif // FEATURE_EH_WINDOWS_X86 //----------------------------------------------------------------------------- // Swift interop-specific nodes: diff --git a/src/coreclr/jit/gtstructs.h b/src/coreclr/jit/gtstructs.h index b9cf9839f31f8d..6e7d62e496f038 100644 --- a/src/coreclr/jit/gtstructs.h +++ b/src/coreclr/jit/gtstructs.h @@ -50,11 +50,7 @@ GTSTRUCT_0(UnOp , GT_OP) GTSTRUCT_0(Op , GT_OP) -#if defined(FEATURE_EH_WINDOWS_X86) -GTSTRUCT_N(Val , GT_END_LFIN, GT_JMP, GT_RECORD_ASYNC_RESUME, GT_ASYNC_RESUME_INFO) -#else GTSTRUCT_N(Val , GT_JMP, GT_RECORD_ASYNC_RESUME, GT_ASYNC_RESUME_INFO) -#endif GTSTRUCT_2_SPECIAL(IntConCommon, GT_CNS_INT, GT_CNS_LNG) GTSTRUCT_1(IntCon , GT_CNS_INT) GTSTRUCT_1(LngCon , GT_CNS_LNG) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 63da7f19ad610d..3314e2d960bdef 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4583,305 +4583,8 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) // After this function, the BBJ_LEAVE block has been converted to a different type. // -#if defined(FEATURE_EH_WINDOWS_X86) - -void Compiler::impImportLeaveEHRegions(BasicBlock* block) -{ -#ifdef DEBUG - if (verbose) - { - printf("\nBefore import CEE_LEAVE:\n"); - fgDispBasicBlocks(); - fgDispHandlerTab(); - } -#endif // DEBUG - - unsigned const blkAddr = block->bbCodeOffs; - BasicBlock* const leaveTarget = block->GetTarget(); - unsigned const jmpAddr = leaveTarget->bbCodeOffs; - - // LEAVE clears the stack, spill side effects, and set stack to 0 - - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); - stackState.esStackDepth = 0; - - assert(block->KindIs(BBJ_LEAVE)); - assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary - - BasicBlock* step = DUMMY_INIT(NULL); - unsigned encFinallies = 0; // Number of enclosing finallies. - GenTree* endCatches = NULL; - Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally. - - unsigned XTnum; - EHblkDsc* HBtab; - - for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) - { - // Grab the handler offsets - - IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); - IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); - IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); - IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); - - // Is this a catch-handler we are CEE_LEAVE'ing out of? If so, we need to call CORINFO_HELP_ENDCATCH. - - if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) - { - // Can't CEE_LEAVE out of a finally/fault handler - if (HBtab->HasFinallyOrFaultHandler()) - { - BADCODE("leave out of fault/finally block"); - } - - // Create the call to CORINFO_HELP_ENDCATCH - GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); - - // Make a list of all the currently pending endCatches - if (endCatches) - { - endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); - } - else - { - endCatches = endCatch; - } - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " - "CORINFO_HELP_ENDCATCH\n", - block->bbNum, XTnum); - } -#endif - } - else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && - !jitIsBetween(jmpAddr, tryBeg, tryEnd)) - { - // This is a finally-protected try we are jumping out of. 
- // - // If there are any pending endCatches, and we have already jumped out of a finally-protected try, - // then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. - // Else, just append to the original block. - - BasicBlock* callBlock; - - // If we have finallies, we better have an endLFin tree, and vice-versa. - assert(!encFinallies == !endLFinStmt); - - if (encFinallies == 0) - { - assert(step == DUMMY_INIT(NULL)); - callBlock = block; - - // callBlock calls the finally handler - assert(callBlock->HasInitializedTarget()); - fgRedirectEdge(callBlock->TargetEdgeRef(), HBtab->ebdHndBeg); - callBlock->SetKind(BBJ_CALLFINALLY); - - if (endCatches) - { - impAppendTree(endCatches, CHECK_SPILL_NONE, impCurStmtDI); - } - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " - "block %s\n", - callBlock->dspToString()); - } -#endif - } - else - { - assert(step != DUMMY_INIT(NULL)); - - // Calling the finally block. - - // callBlock calls the finally handler - callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); - - { - FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock); - callBlock->SetTargetEdge(newEdge); - } - - // step's jump target shouldn't be set yet - assert(!step->HasInitializedTarget()); - - { - // the previous call to a finally returns to this call (to the next finally in the chain) - FlowEdge* const newEdge = fgAddRefPred(callBlock, step); - step->SetTargetEdge(newEdge); - } - - // The new block will inherit this block's weight. - callBlock->inheritWeight(block); - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", - callBlock->dspToString()); - } -#endif - - Statement* lastStmt; - - if (endCatches) - { - lastStmt = gtNewStmt(endCatches); - endLFinStmt->SetNextStmt(lastStmt); - lastStmt->SetPrevStmt(endLFinStmt); - } - else - { - lastStmt = endLFinStmt; - } - - // note that this sets BBF_IMPORTED on the block - impEndTreeList(callBlock, endLFinStmt, lastStmt); - } - - // callBlock should be set up at this point - assert(callBlock->TargetIs(HBtab->ebdHndBeg)); - - // Note: we don't know the jump target yet - step = fgNewBBafter(BBJ_CALLFINALLYRET, callBlock, true); - // The new block will inherit this block's weight. - step->inheritWeight(block); - step->SetFlags(BBF_IMPORTED); - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_CALLFINALLYRET) " - "block %s\n", - step->dspToString()); - } -#endif - - // We now record the EH region ID on GT_END_LFIN instead of the finally nesting depth, - // as the later can change as we optimize the code. - // - unsigned const ehID = compHndBBtab[XTnum].ebdID; - assert(ehID <= impInlineRoot()->compEHID); - - GenTree* const endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, ehID); - endLFinStmt = gtNewStmt(endLFin); - endCatches = NULL; - - encFinallies++; - } - } - - // Append any remaining endCatches, if any. 
- - assert(!encFinallies == !endLFinStmt); - - if (encFinallies == 0) - { - assert(step == DUMMY_INIT(NULL)); - block->SetKind(BBJ_ALWAYS); // convert the BBJ_LEAVE to a BBJ_ALWAYS - - if (endCatches) - { - impAppendTree(endCatches, CHECK_SPILL_NONE, impCurStmtDI); - } - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " - "block %s\n", - block->dspToString()); - } -#endif - } - else - { - // If leaveTarget is the start of another try block, we want to make sure that - // we do not insert finalStep into that try block. Hence, we find the enclosing - // try block. - unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); - - // Insert a new BB either in the try region indicated by tryIndex or - // the handler region indicated by leaveTarget->bbHndIndex, - // depending on which is the inner region. - BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); - finalStep->SetFlags(BBF_KEEP_BBJ_ALWAYS); - - // step's jump target shouldn't be set yet - assert(!step->HasInitializedTarget()); - - { - FlowEdge* const newEdge = fgAddRefPred(finalStep, step); - step->SetTargetEdge(newEdge); - } - - // The new block will inherit this block's weight. - finalStep->inheritWeight(block); - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, - finalStep->dspToString()); - } -#endif - - Statement* lastStmt; - - if (endCatches) - { - lastStmt = gtNewStmt(endCatches); - endLFinStmt->SetNextStmt(lastStmt); - lastStmt->SetPrevStmt(endLFinStmt); - } - else - { - lastStmt = endLFinStmt; - } - - impEndTreeList(finalStep, endLFinStmt, lastStmt); - - // this is the ultimate destination of the LEAVE - { - FlowEdge* const newEdge = fgAddRefPred(leaveTarget, finalStep); - finalStep->SetTargetEdge(newEdge); - } - - // Queue up the jump target for importing - - impImportBlockPending(leaveTarget); - } - -#ifdef DEBUG - fgVerifyHandlerTab(); - - if (verbose) - { - printf("\nAfter import CEE_LEAVE:\n"); - fgDispBasicBlocks(); - fgDispHandlerTab(); - } -#endif // DEBUG -} - -#endif // FEATURE_EH_WINDOWS_X86 - void Compiler::impImportLeave(BasicBlock* block) { -#if defined(FEATURE_EH_WINDOWS_X86) - if (!UsesFunclets()) - { - return impImportLeaveEHRegions(block); - } -#endif - #ifdef DEBUG if (verbose) { @@ -5007,7 +4710,7 @@ void Compiler::impImportLeave(BasicBlock* block) BasicBlock* callBlock; - if (step == nullptr && UsesCallFinallyThunks()) + if (step == nullptr) { // Put the call to the finally in the enclosing region. 
unsigned callFinallyTryIndex = @@ -5038,24 +4741,6 @@ void Compiler::impImportLeave(BasicBlock* block) " to BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } -#endif - } - else if (step == nullptr) // && !UsesCallFinallyThunks() - { - callBlock = block; - - // callBlock calls the finally handler - assert(callBlock->HasInitializedTarget()); - fgRedirectEdge(callBlock->TargetEdgeRef(), HBtab->ebdHndBeg); - callBlock->SetKind(BBJ_CALLFINALLY); - -#ifdef DEBUG - if (verbose) - { - printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB - " to BBJ_CALLFINALLY block\n", - XTnum, callBlock->bbNum); - } #endif } else @@ -5079,7 +4764,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET)); assert((step == block) || !step->HasInitializedTarget()); - if (UsesCallFinallyThunks() && step->KindIs(BBJ_EHCATCHRET)) + if (step->KindIs(BBJ_EHCATCHRET)) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -5114,20 +4799,10 @@ void Compiler::impImportLeave(BasicBlock* block) unsigned callFinallyTryIndex; unsigned callFinallyHndIndex; - if (UsesCallFinallyThunks()) - { - callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) - ? 0 - : HBtab->ebdEnclosingTryIndex + 1; - callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) - ? 0 - : HBtab->ebdEnclosingHndIndex + 1; - } - else - { - callFinallyTryIndex = XTnum + 1; - callFinallyHndIndex = 0; // don't care - } + callFinallyTryIndex = + (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; + callFinallyHndIndex = + (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; assert(step->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET)); assert((step == block) || !step->HasInitializedTarget()); @@ -5364,7 +5039,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. - if (UsesFunclets() && block->KindIs(BBJ_CALLFINALLY)) + if (block->KindIs(BBJ_CALLFINALLY)) { BasicBlock* dupBlock = BasicBlock::New(this); dupBlock->CopyFlags(block); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 09d26c7152776d..a398bb01508dcb 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -243,17 +243,6 @@ void EHblkDsc::DispEntry(unsigned XTnum) { printf(" %2u %2u ::", ebdID, XTnum); -#if defined(FEATURE_EH_WINDOWS_X86) - if (ebdHandlerNestingLevel == 0) - { - printf(" "); - } - else - { - printf(" %2u ", ebdHandlerNestingLevel); - } -#endif // FEATURE_EH_WINDOWS_X86 - if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { printf(" "); @@ -671,30 +660,14 @@ bool Compiler::bbIsHandlerBeg(const BasicBlock* block) // bool Compiler::bbIsFuncletBeg(const BasicBlock* block) { - if (UsesFunclets()) - { - assert(fgFuncletsCreated); - return bbIsHandlerBeg(block); - } - - return false; + assert(fgFuncletsCreated); + return bbIsHandlerBeg(block); } bool Compiler::ehHasCallableHandlers() { - if (UsesFunclets()) - { - // Any EH in the function? 
- return compHndBBtabCount > 0; - } - else - { -#if defined(FEATURE_EH_WINDOWS_X86) - return ehNeedsShadowSPslots(); -#else - return false; -#endif // FEATURE_EH_WINDOWS_X86 - } + // Any EH in the function? + return compHndBBtabCount > 0; } /****************************************************************************************** @@ -1021,15 +994,7 @@ unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTr assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); - if (UsesCallFinallyThunks()) - { - return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); - } - else - { - *inTryRegion = true; - return finallyIndex; - } + return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); } void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** startBlock, BasicBlock** lastBlock) @@ -1039,38 +1004,29 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** st assert(startBlock != nullptr); assert(lastBlock != nullptr); - if (UsesCallFinallyThunks()) + bool inTryRegion; + unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); + + if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) + { + *startBlock = fgFirstBB; + *lastBlock = fgLastBBInMainFunction(); + } + else { - bool inTryRegion; - unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); + EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); - if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) + if (inTryRegion) { - *startBlock = fgFirstBB; - *lastBlock = fgLastBBInMainFunction(); + *startBlock = ehDsc->ebdTryBeg; + *lastBlock = ehDsc->ebdTryLast; } else { - EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); - - if (inTryRegion) - { - *startBlock = ehDsc->ebdTryBeg; - *lastBlock = ehDsc->ebdTryLast; - } - else - { - *startBlock = ehDsc->ebdHndBeg; - *lastBlock = ehDsc->ebdHndLast; - } + *startBlock = ehDsc->ebdHndBeg; + *lastBlock = ehDsc->ebdHndLast; } } - else - { - EHblkDsc* ehDsc = ehGetDsc(finallyIndex); - *startBlock = ehDsc->ebdTryBeg; - *lastBlock = ehDsc->ebdTryLast; - } } #ifdef DEBUG @@ -1126,14 +1082,7 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsign bool Compiler::ehAnyFunclets() { - if (UsesFunclets()) - { - return compHndBBtabCount > 0; // if there is any EH, there will be funclets - } - else - { - return false; - } + return compHndBBtabCount > 0; // if there is any EH, there will be funclets } /***************************************************************************** @@ -1145,24 +1094,17 @@ bool Compiler::ehAnyFunclets() unsigned Compiler::ehFuncletCount() { - if (UsesFunclets()) - { - unsigned funcletCnt = 0; + unsigned funcletCnt = 0; - for (EHblkDsc* const HBtab : EHClauses(this)) + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if (HBtab->HasFilter()) { - if (HBtab->HasFilter()) - { - ++funcletCnt; - } ++funcletCnt; } - return funcletCnt; - } - else - { - return 0; + ++funcletCnt; } + return funcletCnt; } /***************************************************************************** @@ -1404,7 +1346,7 @@ void Compiler::fgFindTryRegionEnds() for (EHblkDsc* const HBtab : EHClauses(this)) { // Ignore try regions inside funclet regions. 
- if (!UsesFunclets() || !HBtab->ebdTryLast->hasHndIndex()) + if (!HBtab->ebdTryLast->hasHndIndex()) { HBtab->ebdTryLast = nullptr; unsetTryEnds++; @@ -1525,26 +1467,19 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) */ void Compiler::fgAllocEHTable() { - if (UsesFunclets()) - { - // We need to allocate space for EH clauses that will be used by funclets - // as well as one for each EH clause from the IL. Nested EH clauses pulled - // out as funclets create one EH clause for each enclosing region. Thus, - // the maximum number of clauses we will need might be very large. We allocate - // twice the number of EH clauses in the IL, which should be good in practice. - // In extreme cases, we might need to abandon this and reallocate. See - // fgTryAddEHTableEntries() for more details. + // We need to allocate space for EH clauses that will be used by funclets + // as well as one for each EH clause from the IL. Nested EH clauses pulled + // out as funclets create one EH clause for each enclosing region. Thus, + // the maximum number of clauses we will need might be very large. We allocate + // twice the number of EH clauses in the IL, which should be good in practice. + // In extreme cases, we might need to abandon this and reallocate. See + // fgTryAddEHTableEntries() for more details. #ifdef DEBUG - compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG -#else // DEBUG - compHndBBtabAllocCount = info.compXcptnsCount * 2; -#endif // DEBUG - } - else - { - compHndBBtabAllocCount = info.compXcptnsCount; - } + compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG +#else // DEBUG + compHndBBtabAllocCount = info.compXcptnsCount * 2; +#endif // DEBUG compHndBBtab = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; @@ -3830,12 +3765,6 @@ void Compiler::fgDispHandlerTab() } printf("\n id, index "); -#if defined(FEATURE_EH_WINDOWS_X86) - if (!UsesFunclets()) - { - printf("nest, "); - } -#endif // FEATURE_EH_WINDOWS_X86 printf("eTry, eHnd\n"); unsigned XTnum; @@ -4333,14 +4262,13 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root) bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) { // Some simple preconditions (as stated above) - assert(UsesFunclets()); assert(!fgFuncletsCreated); assert(fgGetPredForBlock(block, predBlock) != nullptr); assert(block->hasHndIndex()); EHblkDsc* xtab = ehGetDsc(block->getHndIndex()); - if (UsesCallFinallyThunks() && xtab->HasFinallyHandler()) + if (xtab->HasFinallyHandler()) { assert((xtab->ebdHndBeg == block) || // The normal case (xtab->ebdHndBeg->NextIs(block) && @@ -4436,7 +4364,6 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) { - assert(UsesFunclets()); assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler diff --git a/src/coreclr/jit/jiteh.h b/src/coreclr/jit/jiteh.h index 34cae18ec950cb..9f994e034d87e3 100644 --- a/src/coreclr/jit/jiteh.h +++ b/src/coreclr/jit/jiteh.h @@ -93,12 +93,6 @@ struct EHblkDsc EHHandlerType ebdHandlerType; -#if defined(FEATURE_EH_WINDOWS_X86) - // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler, - // etc. 
- unsigned short ebdHandlerNestingLevel; -#endif // FEATURE_EH_WINDOWS_X86 - static const unsigned short NO_ENCLOSING_INDEX = USHRT_MAX; // The index of the enclosing outer try region, NO_ENCLOSING_INDEX if none. diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h index 5cb226c02db3b8..4f8e608695cfff 100644 --- a/src/coreclr/jit/jitgcinfo.h +++ b/src/coreclr/jit/jitgcinfo.h @@ -294,7 +294,7 @@ class GCInfo UNALIGNED unsigned int* pVarPtrTableSize, UNALIGNED unsigned int* pNoGCRegionCount); - bool gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pThisKeptAliveIsInUntracked = nullptr); + bool gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum); size_t gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); #else diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 78f3431dc912e1..eeaa06532a58af 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -5244,28 +5244,6 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } #endif -#if defined(FEATURE_EH_WINDOWS_X86) - /* If we need space for slots for shadow SP, reserve it now */ - if (!UsesFunclets() && ehNeedsShadowSPslots()) - { - noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect - if (!lvaReportParamTypeArg()) - { -#ifndef JIT32_GCENCODER - if (!lvaKeepAliveAndReportThis()) -#endif - { - // In order to keep the gc info encoding smaller, the VM assumes that all methods with EH - // have also saved space for a ParamTypeArg, so we need to do that here - lvaIncrementFrameSize(TARGET_POINTER_SIZE); - stkOffs -= TARGET_POINTER_SIZE; - } - } - stkOffs = - lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclStackHomeSize(lvaShadowSPslotsVar), stkOffs); - } -#endif // FEATURE_EH_WINDOWS_X86 - if (compGSReorderStackLayout) { assert(getNeedsGSSecurityCookie()); @@ -5467,9 +5445,6 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset if ( -#if defined(FEATURE_EH_WINDOWS_X86) - lclNum == lvaShadowSPslotsVar || -#endif // FEATURE_EH_WINDOWS_X86 #ifdef JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index f3f1f3f68eac0d..da034e4b98c9d5 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -1494,9 +1494,6 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: -#endif // FEATURE_EH_WINDOWS_X86 case GT_SWITCH_TABLE: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index 03ed72a5552eec..00c3dfe47c314d 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -629,13 +629,6 @@ int LinearScan::BuildNode(GenTree* tree) BuildDef(tree, RBM_ASYNC_CONTINUATION_RET.GetIntRegSet()); break; -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: - srcCount = 0; - assert(dstCount == 0); - break; -#endif - case GT_INDEX_ADDR: { assert(dstCount == 1); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index c9d29a447aeca5..06834ae75faa8b 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -14158,12 +14158,9 @@ void 
Compiler::fgSetOptions() codeGen->setFramePointerRequiredEH(true); #ifdef TARGET_X86 - if (UsesFunclets()) - { - assert(!codeGen->isGCTypeFixed()); - // Enforce fully interruptible codegen for funclet unwinding - SetInterruptible(true); - } + assert(!codeGen->isGCTypeFixed()); + // Enforce fully interruptible codegen for funclet unwinding + SetInterruptible(true); #endif // TARGET_X86 } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index d538ca44b2413d..736e3163d27045 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -2223,12 +2223,6 @@ PhaseStatus Compiler::optOptimizePreLayout() // Run a late pass of unconditional-to-conditional branch optimization, skipping handler blocks. for (BasicBlock* block = fgFirstBB; block != fgFirstFuncletBB; block = block->Next()) { - if (!UsesFunclets() && block->hasHndIndex()) - { - block = ehGetDsc(block->getHndIndex())->ebdHndLast; - continue; - } - modified |= fgOptimizeBranch(block); } @@ -2786,7 +2780,7 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) JITDUMP("Canonicalize exit " FMT_BB " for " FMT_LP " to have only loop predecessors\n", exit->bbNum, loop->GetIndex()); - if (UsesCallFinallyThunks() && exit->KindIs(BBJ_CALLFINALLY)) + if (exit->KindIs(BBJ_CALLFINALLY)) { // Branches to a BBJ_CALLFINALLY _must_ come from inside its associated // try region, and when we have callfinally thunks the BBJ_CALLFINALLY diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index 6ded2a0d587d9d..8751934f348827 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -1562,41 +1562,30 @@ void CodeGen::siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned in // Check if there are any scopes on the current block's start boundary. VarScopeDsc* varScope = nullptr; - if (compiler->UsesFunclets()) + // If we find a spot where the code offset isn't what we expect, because + // there is a gap, it might be because we've moved the funclets out of + // line. Catch up with the enter and exit scopes of the current block. + // Ignore the enter/exit scope changes of the missing scopes, which for + // funclets must be matched. + if (lastBlockILEndOffset != beginOffs) { - // If we find a spot where the code offset isn't what we expect, because - // there is a gap, it might be because we've moved the funclets out of - // line. Catch up with the enter and exit scopes of the current block. - // Ignore the enter/exit scope changes of the missing scopes, which for - // funclets must be matched. - if (lastBlockILEndOffset != beginOffs) - { - assert(beginOffs > 0); - assert(lastBlockILEndOffset < beginOffs); - - JITDUMP("Scope info: found offset hole. lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); + assert(beginOffs > 0); + assert(lastBlockILEndOffset < beginOffs); - // Skip enter scopes - while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); - } + JITDUMP("Scope info: found offset hole. 
lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); - // Skip exit scopes - while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); - } + // Skip enter scopes + while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); } - } - else - { - if (lastBlockILEndOffset != beginOffs) + + // Skip exit scopes + while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) { - assert(lastBlockILEndOffset < beginOffs); - return; + /* do nothing */ + JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); } } diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index fac8165008b0bf..2662479fd664b6 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -51,8 +51,6 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc) { - assert(UsesFunclets()); - if (func->funKind == FUNC_ROOT) { // Since all funclets are pulled out of line, the main code size is everything @@ -196,22 +194,19 @@ void Compiler::unwindBegPrologCFI() { assert(compGeneratingProlog); - if (UsesFunclets()) - { - FuncInfoDsc* func = funCurrentFunc(); + FuncInfoDsc* func = funCurrentFunc(); - // There is only one prolog for a function/funclet, and it comes first. So now is - // a good time to initialize all the unwind data structures. + // There is only one prolog for a function/funclet, and it comes first. So now is + // a good time to initialize all the unwind data structures. - unwindGetFuncLocations(func, true, &func->startLoc, &func->endLoc); + unwindGetFuncLocations(func, true, &func->startLoc, &func->endLoc); - if (fgFirstColdBlock != nullptr) - { - unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc); - } - - func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator()); + if (fgFirstColdBlock != nullptr) + { + unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc); } + + func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator()); } void Compiler::unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat) diff --git a/src/coreclr/jit/unwindx86.cpp b/src/coreclr/jit/unwindx86.cpp index 3c434b6f1cb09d..0659f4333e1f33 100644 --- a/src/coreclr/jit/unwindx86.cpp +++ b/src/coreclr/jit/unwindx86.cpp @@ -70,16 +70,13 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset) // void Compiler::unwindReserve() { - if (UsesFunclets()) - { - assert(!compGeneratingProlog); - assert(!compGeneratingEpilog); + assert(!compGeneratingProlog); + assert(!compGeneratingEpilog); - assert(compFuncInfoCount > 0); - for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) - { - unwindReserveFunc(funGetFunc(funcIdx)); - } + assert(compFuncInfoCount > 0); + for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + { + unwindReserveFunc(funGetFunc(funcIdx)); } } @@ -92,16 +89,13 @@ void Compiler::unwindReserve() // void Compiler::unwindEmit(void* pHotCode, void* pColdCode) { - if (UsesFunclets()) - { - assert(!compGeneratingProlog); - assert(!compGeneratingEpilog); + assert(!compGeneratingProlog); + assert(!compGeneratingEpilog); - assert(compFuncInfoCount > 0); - for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) - { - 
unwindEmitFunc(funGetFunc(funcIdx), pHotCode, pColdCode); - } + assert(compFuncInfoCount > 0); + for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + { + unwindEmitFunc(funGetFunc(funcIdx), pHotCode, pColdCode); } } @@ -114,7 +108,6 @@ void Compiler::unwindEmit(void* pHotCode, void* pColdCode) // void Compiler::unwindReserveFunc(FuncInfoDsc* func) { - assert(UsesFunclets()); unwindReserveFuncHelper(func, true); if (fgFirstColdBlock != nullptr) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index da7f28a59a8a67..402360ba94240c 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -12649,9 +12649,6 @@ void Compiler::fgValueNumberTree(GenTree* tree) case GT_GCPOLL: case GT_JMP: // Control flow case GT_LABEL: // Control flow -#if defined(FEATURE_EH_WINDOWS_X86) - case GT_END_LFIN: // Control flow -#endif tree->gtVNPair = vnStore->VNPForVoid(); break;
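
With the non-funclet path gone, the jiteh.cpp counting logic above reduces to a single rule: each EH clause yields one funclet for its handler, and a filter clause yields one more for the filter itself, since a filter and its handler run as separate funclets. A self-contained sketch of that rule, where EhClause and hasFilter are illustrative stand-ins for the JIT's EHblkDsc and HasFilter():

    #include <cstddef>
    #include <vector>

    struct EhClause
    {
        bool hasFilter;
    };

    // Mirrors the shape of the simplified ehFuncletCount: one handler funclet
    // per clause, plus a filter funclet when the clause has a filter.
    static std::size_t funcletCount(const std::vector<EhClause>& clauses)
    {
        std::size_t count = 0;
        for (const EhClause& clause : clauses)
        {
            if (clause.hasFilter)
            {
                ++count; // the filter funclet
            }
            ++count; // the handler funclet
        }
        return count;
    }

    int main()
    {
        // A try/catch plus a try/filter: 1 + 2 = 3 funclets expected.
        std::vector<EhClause> clauses = {{false}, {true}};
        return funcletCount(clauses) == 3 ? 0 : 1;
    }

The same simplification is why ehAnyFunclets collapses to compHndBBtabCount > 0: once every handler is a funclet, any EH at all implies at least one funclet.
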