diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 61c98f2b557731..defd254309b1ed 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5001,6 +5001,8 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl stackLevelSetter.Run(); m_pLowering->FinalizeOutgoingArgSpace(); + FinalizeEH(); + // We can not add any new tracked variables after this point. lvaTrackedFixed = true; @@ -5156,6 +5158,85 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl #endif // FUNC_INFO_LOGGING } +//---------------------------------------------------------------------------------------------- +// FinalizeEH: Finalize EH information +// +void Compiler::FinalizeEH() +{ +#if defined(FEATURE_EH_WINDOWS_X86) + + // Grab space for exception handling info on the frame + // + if (!UsesFunclets() && ehNeedsShadowSPslots()) + { + // Recompute the handler nesting levels, as they may have changed. + // + unsigned const oldHandlerNestingCount = ehMaxHndNestingCount; + ehMaxHndNestingCount = 0; + + if (compHndBBtabCount > 0) + { + for (int XTnum = compHndBBtabCount - 1; XTnum >= 0; XTnum--) + { + EHblkDsc* const HBtab = &compHndBBtab[XTnum]; + unsigned const enclosingHndIndex = HBtab->ebdEnclosingHndIndex; + + if (enclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) + { + EHblkDsc* const enclosingHBtab = &compHndBBtab[enclosingHndIndex]; + unsigned const newNestingLevel = enclosingHBtab->ebdHandlerNestingLevel + 1; + HBtab->ebdHandlerNestingLevel = (unsigned short)newNestingLevel; + + if (newNestingLevel > ehMaxHndNestingCount) + { + ehMaxHndNestingCount = newNestingLevel; + } + } + else + { + HBtab->ebdHandlerNestingLevel = 0; + } + } + + // When there is EH, we need to record nesting level + 1 + // + ehMaxHndNestingCount++; + } + + if (oldHandlerNestingCount != ehMaxHndNestingCount) + { + JITDUMP("Finalize EH: max handler nesting count was %u (now %u)\n", oldHandlerNestingCount, 
ehMaxHndNestingCount); + } + + // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) + // ie. the offset of the end-of-last-executed-filter + unsigned slotsNeeded = 1; + + unsigned handlerNestingLevel = ehMaxHndNestingCount; + + if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) + handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; + + slotsNeeded += handlerNestingLevel; + + // For a filter (which can be active at the same time as a catch/finally handler) + slotsNeeded++; + // For zero-termination of the shadow-Stack-pointer chain + slotsNeeded++; + + lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); + lvaSetStruct(lvaShadowSPslotsVar, typGetBlkLayout(slotsNeeded * TARGET_POINTER_SIZE), false); + lvaSetVarAddrExposed(lvaShadowSPslotsVar DEBUGARG(AddressExposedReason::EXTERNALLY_VISIBLE_IMPLICITLY)); + } + +#endif // FEATURE_EH_WINDOWS_X86 + + // We should not make any more alterations to the EH table structure. 
+ // + ehTableFinalized = true; +} + #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 6763f1f2d5231f..098d64da7d11a9 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2704,7 +2704,7 @@ class Compiler bool ehNeedsShadowSPslots() { - return (info.compXcptnsCount || opts.compDbgEnC); + return ((compHndBBtabCount > 0) || opts.compDbgEnC); } // 0 for methods with no EH @@ -2715,6 +2715,9 @@ class Compiler #endif // FEATURE_EH_WINDOWS_X86 + bool ehTableFinalized = false; + void FinalizeEH(); + static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index cca9e2e6755dbe..a1e706fde6083e 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -2482,21 +2482,17 @@ inline void LclVarDsc::incRefCnts(weight_t weight, Compiler* comp, RefCountState #endif } -/***************************************************************************** - Is this a synchronized instance method? If so, we will need to report "this" - in the GC information, so that the EE can release the object lock - in case of an exception - - We also need to report "this" and keep it alive for all shared generic - code that gets the actual generic context from the "this" pointer and - has exception handlers. - - For example, if List::m() is shared between T = object and T = string, - then inside m() an exception handler "catch E" needs to be able to fetch - the 'this' pointer to find out what 'T' is in order to tell if we - should catch the exception or not. 
- */ - +//------------------------------------------------------------------------ +// lvaKeepAliveAndReportThis: check if there are implicit references to "this" during method execution +// +// Returns: +// true if this must remain alive throughout the method, even if unreferenced +// +// Notes: +// In a synchronized instance method we need to report "this" +// in the GC information, so that the EE can release the object lock +// in case of an exception +// inline bool Compiler::lvaKeepAliveAndReportThis() { if (info.compIsStatic || (lvaTable[0].TypeGet() != TYP_REF)) @@ -2507,17 +2503,11 @@ inline bool Compiler::lvaKeepAliveAndReportThis() const bool genericsContextIsThis = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0; #ifdef JIT32_GCENCODER - if (info.compFlags & CORINFO_FLG_SYNCH) return true; if (genericsContextIsThis) { - // TODO: Check if any of the exception clauses are - // typed using a generic type. Else, we do not need to report this. - if (info.compXcptnsCount > 0) - return true; - if (opts.compDbgCode) return true; diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 43b022b99e67d4..413932546ae51e 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -7635,6 +7635,15 @@ void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, return; } + // The inliner gets confused when the unmanaged convention reverses arg order (like x86). + // Just suppress for all targets for now. + // + if (call->GetUnmanagedCallConv() != CorInfoCallConvExtension::Managed) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_UNMANAGED_CALLCONV); + return; + } + /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ diff --git a/src/coreclr/jit/inline.def b/src/coreclr/jit/inline.def index efacbd4deb27b6..2b045ad5d20009 100644 --- a/src/coreclr/jit/inline.def +++ b/src/coreclr/jit/inline.def @@ -28,6 +28,7 @@ INLINE_OBSERVATION(UNUSED_INITIAL, bool, "unused initial observatio INLINE_OBSERVATION(BAD_ARGUMENT_NUMBER, bool, "invalid argument number", FATAL, CALLEE) INLINE_OBSERVATION(BAD_LOCAL_NUMBER, bool, "invalid local number", FATAL, CALLEE) INLINE_OBSERVATION(COMPILATION_ERROR, bool, "compilation error", FATAL, CALLEE) +INLINE_OBSERVATION(EXPLICIT_TAIL_PREFIX, bool, "explicit tail prefix in callee", FATAL, CALLEE) INLINE_OBSERVATION(HAS_EH, bool, "has exception handling", FATAL, CALLEE) INLINE_OBSERVATION(HAS_ENDFILTER, bool, "has endfilter", FATAL, CALLEE) INLINE_OBSERVATION(HAS_ENDFINALLY, bool, "has endfinally", FATAL, CALLEE) @@ -36,6 +37,7 @@ INLINE_OBSERVATION(HAS_MANAGED_VARARGS, bool, "managed varargs", INLINE_OBSERVATION(HAS_NATIVE_VARARGS, bool, "native varargs", FATAL, CALLEE) INLINE_OBSERVATION(HAS_NO_BODY, bool, "has no body", FATAL, CALLEE) INLINE_OBSERVATION(HAS_NULL_FOR_LDELEM, bool, "has null pointer for ldelem", FATAL, CALLEE) +INLINE_OBSERVATION(HAS_UNMANAGED_CALLCONV, bool, "has unmanaged calling convention", FATAL, CALLEE) INLINE_OBSERVATION(IS_ARRAY_METHOD, bool, "is array method", FATAL, CALLEE) INLINE_OBSERVATION(IS_GENERIC_VIRTUAL, bool, "generic virtual", FATAL, CALLEE) INLINE_OBSERVATION(IS_JIT_NOINLINE, bool, "noinline per JitNoinline", FATAL, CALLEE) @@ -55,7 +57,6 @@ INLINE_OBSERVATION(STACK_CRAWL_MARK, bool, "uses stack crawl mark", INLINE_OBSERVATION(STFLD_NEEDS_HELPER, bool, "stfld needs helper", FATAL, CALLEE) INLINE_OBSERVATION(TOO_MANY_ARGUMENTS, bool, "too many arguments", FATAL, CALLEE) INLINE_OBSERVATION(TOO_MANY_LOCALS, bool, "too many locals", FATAL, CALLEE) -INLINE_OBSERVATION(EXPLICIT_TAIL_PREFIX, bool, "explicit tail prefix in callee", FATAL, CALLEE) // ------ Callee Performance ------- diff --git a/src/coreclr/jit/jiteh.cpp 
b/src/coreclr/jit/jiteh.cpp index c833f2164fa0bc..946ac89df435e7 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1513,6 +1513,7 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) { assert(compHndBBtabCount > 0); assert(XTnum < compHndBBtabCount); + assert(!ehTableFinalized); EHblkDsc* HBtab; @@ -1727,6 +1728,8 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) // EHblkDsc* Compiler::fgTryAddEHTableEntries(unsigned XTnum, unsigned count, bool deferAdding) { + assert(!ehTableFinalized); + bool reallocate = false; bool const insert = (XTnum != compHndBBtabCount); unsigned const newCount = compHndBBtabCount + count; diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 1bee35953e59fd..38e251139928b4 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -3643,35 +3643,6 @@ PhaseStatus Compiler::lvaMarkLocalVars() unsigned const lvaCountOrig = lvaCount; -#if defined(FEATURE_EH_WINDOWS_X86) - - // Grab space for exception handling - - if (!UsesFunclets() && ehNeedsShadowSPslots()) - { - // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) - // ie. 
the offset of the end-of-last-executed-filter - unsigned slotsNeeded = 1; - - unsigned handlerNestingLevel = ehMaxHndNestingCount; - - if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) - handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; - - slotsNeeded += handlerNestingLevel; - - // For a filter (which can be active at the same time as a catch/finally handler) - slotsNeeded++; - // For zero-termination of the shadow-Stack-pointer chain - slotsNeeded++; - - lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); - lvaSetStruct(lvaShadowSPslotsVar, typGetBlkLayout(slotsNeeded * TARGET_POINTER_SIZE), false); - lvaSetVarAddrExposed(lvaShadowSPslotsVar DEBUGARG(AddressExposedReason::EXTERNALLY_VISIBLE_IMPLICITLY)); - } - -#endif // FEATURE_EH_WINDOWS_X86 - #ifdef JIT32_GCENCODER // LocAllocSPvar is only required by the implicit frame layout expected by the VM on x86. Whether // a function contains a Localloc is conveyed in the GC information, in the InfoHdrSmall.localloc