From 2bb4bb7ef9435c58adaf079dc9c07b2b77e79843 Mon Sep 17 00:00:00 2001
From: Aman Khalid
Date: Mon, 4 Nov 2024 17:12:15 +0000
Subject: [PATCH] JIT: Delete IL verification failure helpers in importer (#108996)

---
 src/coreclr/jit/block.cpp | 1 -
 src/coreclr/jit/block.h | 71 +++---
 src/coreclr/jit/compiler.h | 39 +--
 src/coreclr/jit/hwintrinsicarm64.cpp | 28 +--
 src/coreclr/jit/hwintrinsicxarch.cpp | 41 ++-
 src/coreclr/jit/importer.cpp | 362 ++++++++++-----------------
 src/coreclr/jit/importercalls.cpp | 39 ++-
 7 files changed, 227 insertions(+), 354 deletions(-)

diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 5fe74c76fa1c8..26e731252a39f 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -462,7 +462,6 @@ void BasicBlock::dspFlags() const
     {BBF_REMOVED, "del"},
     {BBF_DONT_REMOVE, "keep"},
     {BBF_INTERNAL, "internal"},
-    {BBF_FAILED_VERIFICATION, "failV"},
     {BBF_HAS_SUPPRESSGC_CALL, "sup-gc"},
     {BBF_LOOP_HEAD, "loophead"},
     {BBF_HAS_LABEL, "label"},
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index 569307a63eaae..18f7d3232bf24 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -424,44 +424,43 @@ enum BasicBlockFlags : uint64_t
     BBF_DONT_REMOVE = MAKE_BBFLAG( 3), // BB should not be removed during flow graph optimizations
     BBF_IMPORTED = MAKE_BBFLAG( 4), // BB byte-code has been imported
     BBF_INTERNAL = MAKE_BBFLAG( 5), // BB has been added by the compiler
-    BBF_FAILED_VERIFICATION = MAKE_BBFLAG( 6), // BB has verification exception
-    BBF_NEEDS_GCPOLL = MAKE_BBFLAG( 7), // BB may need a GC poll because it uses the slow tail call helper
-    BBF_FUNCLET_BEG = MAKE_BBFLAG( 8), // BB is the beginning of a funclet
-    BBF_CLONED_FINALLY_BEGIN = MAKE_BBFLAG( 9), // First block of a cloned finally region
-    BBF_CLONED_FINALLY_END = MAKE_BBFLAG(10), // Last block of a cloned finally region
-    BBF_HAS_NULLCHECK = MAKE_BBFLAG(11), // BB contains a null check
-    BBF_HAS_SUPPRESSGC_CALL = MAKE_BBFLAG(12), // BB contains a call to a method with SuppressGCTransitionAttribute
-    BBF_RUN_RARELY = MAKE_BBFLAG(13), // BB is rarely run (catch clauses, blocks with throws etc)
-    BBF_LOOP_HEAD = MAKE_BBFLAG(14), // BB is the head of a loop (can reach a predecessor)
-    BBF_HAS_LABEL = MAKE_BBFLAG(15), // BB needs a label
-    BBF_LOOP_ALIGN = MAKE_BBFLAG(16), // Block is lexically the first block in a loop we intend to align.
-    BBF_HAS_ALIGN = MAKE_BBFLAG(17), // BB ends with 'align' instruction
-    BBF_HAS_JMP = MAKE_BBFLAG(18), // BB executes a JMP instruction (instead of return)
-    BBF_GC_SAFE_POINT = MAKE_BBFLAG(19), // BB has a GC safe point (e.g. a call)
-    BBF_HAS_IDX_LEN = MAKE_BBFLAG(20), // BB contains simple index or length expressions on an SD array local var.
-    BBF_HAS_MD_IDX_LEN = MAKE_BBFLAG(21), // BB contains simple index, length, or lower bound expressions on an MD array local var.
-    BBF_HAS_MDARRAYREF = MAKE_BBFLAG(22), // Block has a multi-dimensional array reference
-    BBF_HAS_NEWOBJ = MAKE_BBFLAG(23), // BB contains 'new' of an object type.
-
-    BBF_RETLESS_CALL = MAKE_BBFLAG(24), // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
+    BBF_NEEDS_GCPOLL = MAKE_BBFLAG( 6), // BB may need a GC poll because it uses the slow tail call helper
+    BBF_FUNCLET_BEG = MAKE_BBFLAG( 7), // BB is the beginning of a funclet
+    BBF_CLONED_FINALLY_BEGIN = MAKE_BBFLAG( 8), // First block of a cloned finally region
+    BBF_CLONED_FINALLY_END = MAKE_BBFLAG( 9), // Last block of a cloned finally region
+    BBF_HAS_NULLCHECK = MAKE_BBFLAG(10), // BB contains a null check
+    BBF_HAS_SUPPRESSGC_CALL = MAKE_BBFLAG(11), // BB contains a call to a method with SuppressGCTransitionAttribute
+    BBF_RUN_RARELY = MAKE_BBFLAG(12), // BB is rarely run (catch clauses, blocks with throws etc)
+    BBF_LOOP_HEAD = MAKE_BBFLAG(13), // BB is the head of a loop (can reach a predecessor)
+    BBF_HAS_LABEL = MAKE_BBFLAG(14), // BB needs a label
+    BBF_LOOP_ALIGN = MAKE_BBFLAG(15), // Block is lexically the first block in a loop we intend to align.
+    BBF_HAS_ALIGN = MAKE_BBFLAG(16), // BB ends with 'align' instruction
+    BBF_HAS_JMP = MAKE_BBFLAG(17), // BB executes a JMP instruction (instead of return)
+    BBF_GC_SAFE_POINT = MAKE_BBFLAG(18), // BB has a GC safe point (e.g. a call)
+    BBF_HAS_IDX_LEN = MAKE_BBFLAG(19), // BB contains simple index or length expressions on an SD array local var.
+    BBF_HAS_MD_IDX_LEN = MAKE_BBFLAG(20), // BB contains simple index, length, or lower bound expressions on an MD array local var.
+    BBF_HAS_MDARRAYREF = MAKE_BBFLAG(21), // Block has a multi-dimensional array reference
+    BBF_HAS_NEWOBJ = MAKE_BBFLAG(22), // BB contains 'new' of an object type.
+
+    BBF_RETLESS_CALL = MAKE_BBFLAG(23), // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
                                         // BBJ_CALLFINALLYRET); see isBBCallFinallyPair().
-    BBF_COLD = MAKE_BBFLAG(25), // BB is cold
-    BBF_PROF_WEIGHT = MAKE_BBFLAG(26), // BB weight is computed from profile data
-    BBF_KEEP_BBJ_ALWAYS = MAKE_BBFLAG(27), // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
+    BBF_COLD = MAKE_BBFLAG(24), // BB is cold
+    BBF_PROF_WEIGHT = MAKE_BBFLAG(25), // BB weight is computed from profile data
+    BBF_KEEP_BBJ_ALWAYS = MAKE_BBFLAG(26), // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
                                            // as BBJ_ALWAYS. Used on x86 for the final step block out of a finally.
-    BBF_HAS_CALL = MAKE_BBFLAG(28), // BB contains a call
-    BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY = MAKE_BBFLAG(29), // Block is dominated by exceptional entry.
- BBF_BACKWARD_JUMP = MAKE_BBFLAG(30), // BB is surrounded by a backward jump/switch arc - BBF_BACKWARD_JUMP_SOURCE = MAKE_BBFLAG(31), // Block is a source of a backward jump - BBF_BACKWARD_JUMP_TARGET = MAKE_BBFLAG(32), // Block is a target of a backward jump - BBF_PATCHPOINT = MAKE_BBFLAG(33), // Block is a patchpoint - BBF_PARTIAL_COMPILATION_PATCHPOINT = MAKE_BBFLAG(34), // Block is a partial compilation patchpoint - BBF_HAS_HISTOGRAM_PROFILE = MAKE_BBFLAG(35), // BB contains a call needing a histogram profile - BBF_TAILCALL_SUCCESSOR = MAKE_BBFLAG(36), // BB has pred that has potential tail call - BBF_RECURSIVE_TAILCALL = MAKE_BBFLAG(37), // Block has recursive tailcall that may turn into a loop - BBF_NO_CSE_IN = MAKE_BBFLAG(38), // Block should kill off any incoming CSE - BBF_CAN_ADD_PRED = MAKE_BBFLAG(39), // Ok to add pred edge to this block, even when "safe" edge creation disabled - BBF_HAS_VALUE_PROFILE = MAKE_BBFLAG(40), // Block has a node that needs a value probing + BBF_HAS_CALL = MAKE_BBFLAG(27), // BB contains a call + BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY = MAKE_BBFLAG(28), // Block is dominated by exceptional entry. + BBF_BACKWARD_JUMP = MAKE_BBFLAG(29), // BB is surrounded by a backward jump/switch arc + BBF_BACKWARD_JUMP_SOURCE = MAKE_BBFLAG(30), // Block is a source of a backward jump + BBF_BACKWARD_JUMP_TARGET = MAKE_BBFLAG(31), // Block is a target of a backward jump + BBF_PATCHPOINT = MAKE_BBFLAG(32), // Block is a patchpoint + BBF_PARTIAL_COMPILATION_PATCHPOINT = MAKE_BBFLAG(33), // Block is a partial compilation patchpoint + BBF_HAS_HISTOGRAM_PROFILE = MAKE_BBFLAG(34), // BB contains a call needing a histogram profile + BBF_TAILCALL_SUCCESSOR = MAKE_BBFLAG(35), // BB has pred that has potential tail call + BBF_RECURSIVE_TAILCALL = MAKE_BBFLAG(36), // Block has recursive tailcall that may turn into a loop + BBF_NO_CSE_IN = MAKE_BBFLAG(37), // Block should kill off any incoming CSE + BBF_CAN_ADD_PRED = MAKE_BBFLAG(38), // Ok to add pred edge to this block, even when "safe" edge creation disabled + BBF_HAS_VALUE_PROFILE = MAKE_BBFLAG(39), // Block has a node that needs a value probing // The following are sets of flags. diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index cf912b002877f..ce3bd8611f122 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4912,7 +4912,7 @@ class Compiler void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being - // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState + // reimported for some reason. It specifically does *not* look at stackState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. 
void impReimportBlockPending(BasicBlock* block); @@ -11230,37 +11230,22 @@ class Compiler } #endif // DEBUG - /* - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - XX XX - XX IL verification stuff XX - XX XX - XX XX - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - */ - public: - EntryState verCurrentState; - - void verInitBBEntryState(BasicBlock* block, EntryState* currentState); + EntryState stackState; - void verInitCurrentState(); - void verResetCurrentState(BasicBlock* block, EntryState* currentState); + void initBBEntryState(BasicBlock* block, EntryState* currentState); - void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); - void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); - typeInfo verMakeTypeInfoForLocal(unsigned lclNum); - typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo - typeInfo verMakeTypeInfo(CorInfoType ciType, - CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo + void initCurrentState(); + void resetCurrentState(BasicBlock* block, EntryState* currentState); - typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); + typeInfo makeTypeInfoForLocal(unsigned lclNum); + typeInfo makeTypeInfo(CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo + typeInfo makeTypeInfo(CorInfoType ciType, + CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo - bool verCheckTailCallConstraint(OPCODE opcode, - CORINFO_RESOLVED_TOKEN* pResolvedToken, - CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken); + bool checkTailCallConstraint(OPCODE opcode, + CORINFO_RESOLVED_TOKEN* pResolvedToken, + CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken); #ifdef DEBUG diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index 75d32b86e37fd..fac3e6c6216fe 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -1263,7 +1263,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, break; } - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for vector CreateSequence")); op2 = impPopStack().val; @@ -1554,11 +1554,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 3); assert(varTypeIsFloating(simdBaseType)); - impSpillSideEffect(true, verCurrentState.esStackDepth - - 3 DEBUGARG("Spilling op1 side effects for FusedMultiplyAdd")); + impSpillSideEffect(true, + stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for FusedMultiplyAdd")); - impSpillSideEffect(true, verCurrentState.esStackDepth - - 2 DEBUGARG("Spilling op2 side effects for FusedMultiplyAdd")); + impSpillSideEffect(true, + stackState.esStackDepth - 2 DEBUGARG("Spilling op2 side effects for FusedMultiplyAdd")); op3 = impSIMDPopStack(); op2 = impSIMDPopStack(); @@ -2011,10 +2011,10 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for 
MultiplyAddEstimate")); - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op2 side effects for MultiplyAddEstimate")); } @@ -2273,8 +2273,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (sig->numArgs == 3) { - impSpillSideEffect(true, verCurrentState.esStackDepth - - 3 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, + stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); op3 = impPopStack().val; } @@ -2282,8 +2282,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 2); - impSpillSideEffect(true, verCurrentState.esStackDepth - - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, + stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); } op2 = impPopStack().val; @@ -2323,8 +2323,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, var_types simdType = getSIMDTypeForSize(simdSize); - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); op2 = impPopStack().val; @@ -2356,8 +2355,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, var_types simdType = getSIMDTypeForSize(simdSize); - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); op2 = impPopStack().val; diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index 95d96fb63180f..685b8450abc6f 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -1271,8 +1271,7 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT { // These intrinsics have overloads that take op2 in a simd register and just read the lowest 8-bits - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); GenTree* op2 = impPopStack().val; GenTree* op1 = impSIMDPopStack(); @@ -1300,8 +1299,7 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT static_assert_no_msg(NI_AVX10v1_RotateLeftVariable == (NI_AVX10v1_RotateLeft + 1)); static_assert_no_msg(NI_AVX10v1_RotateRightVariable == (NI_AVX10v1_RotateRight + 1)); - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); GenTree* op2 = impPopStack().val; GenTree* op1 = impSIMDPopStack(); @@ -2360,7 +2358,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } } - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for vector CreateSequence")); op2 = impPopStack().val; @@ -3548,8 +3546,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (sig->numArgs == 3) { - impSpillSideEffect(true, verCurrentState.esStackDepth - - 3 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + 
impSpillSideEffect(true, + stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); op3 = impPopStack().val; } @@ -3557,8 +3555,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 2); - impSpillSideEffect(true, verCurrentState.esStackDepth - - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, + stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); } op2 = impPopStack().val; @@ -3591,8 +3589,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, var_types simdType = getSIMDTypeForSize(simdSize); - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); op2 = impPopStack().val; @@ -3617,8 +3614,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, var_types simdType = getSIMDTypeForSize(simdSize); - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); op2 = impPopStack().val; @@ -3981,8 +3977,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (!supportsAvx) { - impSpillSideEffect(true, verCurrentState.esStackDepth - - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, + stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); } op2 = impSIMDPopStack(); @@ -4073,8 +4069,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (!supportsAvx) { - impSpillSideEffect(true, verCurrentState.esStackDepth - - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, + stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); } op2 = impSIMDPopStack(); @@ -4180,8 +4176,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { simdBaseJitType = getBaseJitTypeOfSIMDType(sig->retTypeSigClass); - impSpillSideEffect(true, - verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); // swap the two operands GenTree* idxVector = impSIMDPopStack(); @@ -4436,13 +4431,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (spillOp1) { - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for HWIntrinsic")); } if (spillOp2) { - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op2 side effects for HWIntrinsic")); } @@ -5055,7 +5050,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 2); - impSpillSideEffect(true, verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 for ZeroHighBits")); + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 for ZeroHighBits")); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; @@ -5074,7 +5069,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } assert(sig->numArgs == 2); - impSpillSideEffect(true, verCurrentState.esStackDepth - 2 DEBUGARG("Spilling op1 for BitFieldExtract")); + 
impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op1 for BitFieldExtract")); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index f340bdedb636e..c274ae230cab3 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -28,14 +28,14 @@ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ - if ((verCurrentState.esStackDepth >= info.compMaxStack) && - (verCurrentState.esStackDepth >= impStkSize || !compCurBB->HasFlag(BBF_IMPORTED))) + if ((stackState.esStackDepth >= info.compMaxStack) && + (stackState.esStackDepth >= impStkSize || !compCurBB->HasFlag(BBF_IMPORTED))) { BADCODE("stack overflow"); } - verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; - verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; + stackState.esStack[stackState.esStackDepth].seTypeInfo = ti; + stackState.esStack[stackState.esStackDepth++].val = tree; if (tree->gtType == TYP_LONG) { @@ -120,12 +120,12 @@ void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolv // StackEntry Compiler::impPopStack() { - if (verCurrentState.esStackDepth == 0) + if (stackState.esStackDepth == 0) { BADCODE("stack underflow"); } - return verCurrentState.esStack[--verCurrentState.esStackDepth]; + return stackState.esStack[--stackState.esStackDepth]; } //------------------------------------------------------------------------ @@ -136,12 +136,12 @@ StackEntry Compiler::impPopStack() // void Compiler::impPopStack(unsigned n) { - if (verCurrentState.esStackDepth < n) + if (stackState.esStackDepth < n) { BADCODE("stack underflow"); } - verCurrentState.esStackDepth -= n; + stackState.esStackDepth -= n; } /***************************************************************************** @@ -151,17 +151,17 @@ void Compiler::impPopStack(unsigned n) StackEntry& Compiler::impStackTop(unsigned n) { - if (verCurrentState.esStackDepth <= n) + if (stackState.esStackDepth <= n) { BADCODE("stack underflow"); } - return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; + return stackState.esStack[stackState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { - return verCurrentState.esStackDepth; + return stackState.esStackDepth; } /***************************************************************************** @@ -196,12 +196,12 @@ static bool impValidSpilledStackEntry(GenTree* tree) void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { - savePtr->ssDepth = verCurrentState.esStackDepth; + savePtr->ssDepth = stackState.esStackDepth; - if (verCurrentState.esStackDepth) + if (stackState.esStackDepth) { - savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; - size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); + savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[stackState.esStackDepth]; + size_t saveSize = stackState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { @@ -209,10 +209,10 @@ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) /* Make a fresh copy of all the stack entries */ - for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) + for (unsigned level = 0; level < stackState.esStackDepth; level++, table++) { - table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; - GenTree* tree = verCurrentState.esStack[level].val; + table->seTypeInfo = 
stackState.esStack[level].seTypeInfo; + GenTree* tree = stackState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); @@ -236,19 +236,18 @@ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) } else { - memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); + memcpy(savePtr->ssTrees, stackState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { - verCurrentState.esStackDepth = savePtr->ssDepth; + stackState.esStackDepth = savePtr->ssDepth; - if (verCurrentState.esStackDepth) + if (stackState.esStackDepth) { - memcpy(verCurrentState.esStack, savePtr->ssTrees, - verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); + memcpy(stackState.esStack, savePtr->ssTrees, stackState.esStackDepth * sizeof(*stackState.esStack)); } } @@ -322,10 +321,10 @@ void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) if (chkLevel == CHECK_SPILL_ALL) { - chkLevel = verCurrentState.esStackDepth; + chkLevel = stackState.esStackDepth; } - if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == CHECK_SPILL_NONE) + if (stackState.esStackDepth == 0 || chkLevel == 0 || chkLevel == CHECK_SPILL_NONE) { return; } @@ -338,7 +337,7 @@ void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { for (unsigned level = 0; level < chkLevel; level++) { - assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); + assert((stackState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } @@ -351,7 +350,7 @@ void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { - GenTree* stkTree = verCurrentState.esStack[level].val; + GenTree* stkTree = stackState.esStack[level].val; assert(!gtHasRef(stkTree, lclNum) || impIsInvariant(stkTree)); assert(!lvaTable[lclNum].IsAddressExposed() || ((stkTree->gtFlags & GTF_SIDE_EFFECT) == 0)); } @@ -361,7 +360,7 @@ void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { for (unsigned level = 0; level < chkLevel; level++) { - assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); + assert((stackState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } @@ -387,12 +386,12 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu { if (chkLevel == CHECK_SPILL_ALL) { - chkLevel = verCurrentState.esStackDepth; + chkLevel = stackState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != CHECK_SPILL_NONE)) { - assert(chkLevel <= verCurrentState.esStackDepth); + assert(chkLevel <= stackState.esStackDepth); // If the statement being appended has any side-effects, check the stack to see if anything // needs to be spilled to preserve correct ordering. @@ -1723,7 +1722,7 @@ bool Compiler::impSpillStackEntry(unsigned level, guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif - GenTree* tree = verCurrentState.esStack[level].val; + GenTree* tree = stackState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ @@ -1752,7 +1751,7 @@ bool Compiler::impSpillStackEntry(unsigned level, // If temp is newly introduced and a ref type, grab what type info we can. 
if (lvaTable[tnum].lvType == TYP_REF) { - CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandleForObjRef(); + CORINFO_CLASS_HANDLE stkHnd = stackState.esStack[level].seTypeInfo.GetClassHandleForObjRef(); lvaSetClass(tnum, tree, stkHnd); } @@ -1777,9 +1776,9 @@ bool Compiler::impSpillStackEntry(unsigned level, } // The tree type may be modified by impStoreToTemp, so use the type of the lclVar. - var_types type = genActualType(lvaTable[tnum].TypeGet()); - GenTree* temp = gtNewLclvNode(tnum, type); - verCurrentState.esStack[level].val = temp; + var_types type = genActualType(lvaTable[tnum].TypeGet()); + GenTree* temp = gtNewLclvNode(tnum, type); + stackState.esStack[level].val = temp; return true; } @@ -1793,9 +1792,9 @@ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); - for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) + for (unsigned level = 0; level < stackState.esStackDepth; level++) { - GenTree* tree = verCurrentState.esStack[level].val; + GenTree* tree = stackState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { @@ -1826,7 +1825,7 @@ void Compiler::impSpillStackEnsure(bool spillLeaves) void Compiler::impEvalSideEffects() { impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); - verCurrentState.esStackDepth = 0; + stackState.esStackDepth = 0; } /***************************************************************************** @@ -1838,10 +1837,10 @@ void Compiler::impEvalSideEffects() void Compiler::impSpillSideEffect(bool spillGlobEffects, unsigned i DEBUGARG(const char* reason)) { - assert(i <= verCurrentState.esStackDepth); + assert(i <= stackState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; - GenTree* tree = verCurrentState.esStack[i].val; + GenTree* tree = stackState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true @@ -1871,10 +1870,10 @@ void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBU if (chkLevel == CHECK_SPILL_ALL) { - chkLevel = verCurrentState.esStackDepth; + chkLevel = stackState.esStackDepth; } - assert(chkLevel <= verCurrentState.esStackDepth); + assert(chkLevel <= stackState.esStackDepth); for (unsigned i = 0; i < chkLevel; i++) { @@ -1897,9 +1896,9 @@ void Compiler::impSpillSpecialSideEff() return; } - for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) + for (unsigned level = 0; level < stackState.esStackDepth; level++) { - GenTree* tree = verCurrentState.esStack[level].val; + GenTree* tree = stackState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. 
if (gtHasCatchArg(tree)) { @@ -1923,14 +1922,14 @@ void Compiler::impSpillLclRefs(unsigned lclNum, unsigned chkLevel) if (chkLevel == CHECK_SPILL_ALL) { - chkLevel = verCurrentState.esStackDepth; + chkLevel = stackState.esStackDepth; } - assert(chkLevel <= verCurrentState.esStackDepth); + assert(chkLevel <= stackState.esStackDepth); for (unsigned level = 0; level < chkLevel; level++) { - GenTree* tree = verCurrentState.esStack[level].val; + GenTree* tree = stackState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill stores to the local if the local is on entry @@ -2111,7 +2110,7 @@ DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); - bool isStackEmpty = verCurrentState.esStackDepth <= 0; + bool isStackEmpty = stackState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } @@ -2225,7 +2224,7 @@ unsigned Compiler::impInitBlockLineInfo() IL_OFFSET blockOffs = compCurBB->bbCodeOffs; - if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) + if ((stackState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } @@ -2516,71 +2515,7 @@ GenTree* Compiler::impGetGenericTypeDefinition(GenTree* type) return nullptr; } -/***************************************************************************** - * 'logMsg' is true if a log message needs to be logged. false if the caller has - * already logged it (presumably in a more detailed fashion than done here) - */ - -void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) -{ - block->SetKindAndTargetEdge(BBJ_THROW); - block->SetFlags(BBF_FAILED_VERIFICATION); - block->RemoveFlags(BBF_IMPORTED); - - impCurStmtOffsSet(block->bbCodeOffs); - - // Clear the statement list as it exists so far; we're only going to have a verification exception. - impStmtList = impLastStmt = nullptr; - -#ifdef DEBUG - if (logMsg) - { - JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName, - block->bbCodeOffs, block->bbCodeOffsEnd)); - if (verbose) - { - printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs); - } - } - - if (JitConfig.DebugBreakOnVerificationFailure()) - { - DebugBreak(); - } -#endif - - impBeginTreeList(); - - // if the stack is non-empty evaluate all the side-effects - if (verCurrentState.esStackDepth > 0) - { - impEvalSideEffects(); - } - assert(verCurrentState.esStackDepth == 0); - - GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewIconNode(block->bbCodeOffs)); - // verCurrentState.esStackDepth = 0; - impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); - - // The inliner is not able to handle methods that require throw block, so - // make sure this methods never gets inlined. 
- info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); -} - -/***************************************************************************** - * - */ -void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) -{ - verResetCurrentState(block, &verCurrentState); - verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); - -#ifdef DEBUG - impNoteLastILoffs(); // Remember at which BC offset the tree was finished -#endif // DEBUG -} - -typeInfo Compiler::verMakeTypeInfoForLocal(unsigned lclNum) +typeInfo Compiler::makeTypeInfoForLocal(unsigned lclNum) { LclVarDsc* varDsc = lvaGetDesc(lclNum); @@ -2592,7 +2527,7 @@ typeInfo Compiler::verMakeTypeInfoForLocal(unsigned lclNum) return typeInfo(varDsc->TypeGet()); } -typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) +typeInfo Compiler::makeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { if (ciType == CORINFO_TYPE_CLASS) { @@ -2602,31 +2537,10 @@ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsH return typeInfo(JITtype2varType(ciType)); } -typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd) +typeInfo Compiler::makeTypeInfo(CORINFO_CLASS_HANDLE clsHnd) { assert(clsHnd != NO_CLASS_HANDLE); - return verMakeTypeInfo(info.compCompHnd->asCorInfoType(clsHnd), clsHnd); -} - -typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) -{ - CORINFO_CLASS_HANDLE classHandle; - CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); - - var_types type = JITtype2varType(ciType); - if (varTypeIsGC(type)) - { - // For efficiency, getArgType only returns something in classHandle for - // value types. For other types that have addition type info, you - // have to call back explicitly - classHandle = info.compCompHnd->getArgClass(sig, args); - if (!classHandle) - { - NO_WAY("Could not figure out Class specified in argument or local signature"); - } - } - - return verMakeTypeInfo(ciType, classHandle); + return makeTypeInfo(info.compCompHnd->asCorInfoType(clsHnd), clsHnd); } /***************************************************************************** @@ -2634,9 +2548,9 @@ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_L * Check if a TailCall is legal. */ -bool Compiler::verCheckTailCallConstraint(OPCODE opcode, - CORINFO_RESOLVED_TOKEN* pResolvedToken, - CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken) +bool Compiler::checkTailCallConstraint(OPCODE opcode, + CORINFO_RESOLVED_TOKEN* pResolvedToken, + CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken) { DWORD mflags; CORINFO_SIG_INFO sig; @@ -2775,7 +2689,7 @@ bool Compiler::verCheckTailCallConstraint(OPCODE opcode, } // For tailcall, stack must be empty. 
- if (verCurrentState.esStackDepth != popCount) + if (stackState.esStackDepth != popCount) { return false; } @@ -4592,7 +4506,7 @@ void Compiler::impImportLeaveEHRegions(BasicBlock* block) // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); - verCurrentState.esStackDepth = 0; + stackState.esStackDepth = 0; assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary @@ -4885,7 +4799,7 @@ void Compiler::impImportLeave(BasicBlock* block) // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); - verCurrentState.esStackDepth = 0; + stackState.esStackDepth = 0; assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary @@ -5955,7 +5869,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1, _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ - op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ + op2 ? varTypeName(op2->TypeGet()) : "NULL", stackState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) @@ -6069,7 +5983,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Patchpoints at backedge sources, if possible, otherwise targets. // addPatchpoint = block->HasFlag(BBF_BACKWARD_JUMP_SOURCE); - mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); + mustUseTargetPatchpoint = (stackState.esStackDepth != 0) || block->hasHndIndex(); break; } @@ -6082,7 +5996,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // We should not have allowed OSR if there were backedges in handlers. // assert(!block->hasHndIndex()); - addPatchpoint = block->HasFlag(BBF_BACKWARD_JUMP_TARGET) && (verCurrentState.esStackDepth == 0); + addPatchpoint = block->HasFlag(BBF_BACKWARD_JUMP_TARGET) && (stackState.esStackDepth == 0); break; } @@ -6098,13 +6012,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We don't know backedge count, so just use ref count. // - addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0); + addPatchpoint = (block->bbRefs > 1) && (stackState.esStackDepth == 0); } if (!addPatchpoint && block->HasFlag(BBF_BACKWARD_JUMP_SOURCE)) { addPatchpoint = true; - mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); + mustUseTargetPatchpoint = (stackState.esStackDepth != 0) || block->hasHndIndex(); // Also force target patchpoint if target block has multiple (backedge) preds. // @@ -6194,7 +6108,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; - if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && + if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (stackState.esStackDepth == 0) && !block->hasHndIndex() && !block->HasFlag(BBF_PATCHPOINT)) { // Block start can have a patchpoint. See if we should add one. @@ -6249,7 +6163,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Note unlike OSR, it's ok to forgo these. 
// if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && - compCanHavePatchpoints() && !compTailPrefixSeen && (verCurrentState.esStackDepth == 0) && + compCanHavePatchpoints() && !compTailPrefixSeen && (stackState.esStackDepth == 0) && !block->HasFlag(BBF_PATCHPOINT) && !block->hasHndIndex()) { // Is this block a good place for partial compilation? @@ -6361,7 +6275,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ - if (verCurrentState.esStackDepth) + if (stackState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ @@ -6395,7 +6309,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); - if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) + if (stackState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form @@ -6451,7 +6365,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && - (verCurrentState.esStackDepth == 0)) + (stackState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update @@ -6508,7 +6422,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); - JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); + JITDUMP("\n [%2u] %3u (0x%03x) ", stackState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: @@ -6967,7 +6881,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) return; } - if (verCurrentState.esStackDepth > 0) + if (stackState.esStackDepth > 0) { impEvalSideEffects(); } @@ -6977,7 +6891,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) BADCODE("endfinally outside finally"); } - assert(verCurrentState.esStackDepth == 0); + assert(stackState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; @@ -7017,7 +6931,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); - if (verCurrentState.esStackDepth != 0) + if (stackState.esStackDepth != 0) { BADCODE("stack must be 1 on end of filter"); } @@ -7051,7 +6965,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) BADCODE("Jmp not allowed in reverse P/Invoke"); } - if (verCurrentState.esStackDepth != 0) + if (stackState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } @@ -7142,7 +7056,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) ldelemClsHnd = resolvedToken.hClass; lclTyp = TypeHandleToVarType(ldelemClsHnd); - tiRetVal = verMakeTypeInfo(ldelemClsHnd); + tiRetVal = makeTypeInfo(ldelemClsHnd); goto ARR_LD; case CEE_LDELEM_I1: @@ -9076,8 +8990,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // Do a more detailed evaluation of legality const bool passedConstraintCheck = - verCheckTailCallConstraint(opcode, &resolvedToken, - constraintCall ? &constrainedResolvedToken : nullptr); + checkTailCallConstraint(opcode, &resolvedToken, + constraintCall ? 
&constrainedResolvedToken : nullptr); // Avoid setting compHasBackwardsJump = true via tail call stress if the method cannot have // patchpoints. @@ -9270,7 +9184,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - tiRetVal = verMakeTypeInfo(fieldInfo.fieldType, clsHnd); + tiRetVal = makeTypeInfo(fieldInfo.fieldType, clsHnd); } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); @@ -9718,7 +9632,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - tiRetVal = verMakeTypeInfo(resolvedToken.hClass); + tiRetVal = makeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); @@ -9838,7 +9752,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); - if (verCurrentState.esStackDepth != 0) + if (stackState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } @@ -10119,7 +10033,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) #endif op1->AsCall()->gtRetClsHnd = tokenType; - tiRetVal = verMakeTypeInfo(tokenType); + tiRetVal = makeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; @@ -10340,7 +10254,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // If non register passable struct we have it materialized in the RetBuf. assert(op1->TypeIs(TYP_STRUCT)); - tiRetVal = verMakeTypeInfo(resolvedToken.hClass); + tiRetVal = makeTypeInfo(resolvedToken.hClass); } impPushOnStack(op1, tiRetVal); @@ -10366,7 +10280,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); - verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; + stackState.esStack[stackState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } @@ -10518,12 +10432,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Fall through to clear out the eval stack. 
EVAL_APPEND: - if (verCurrentState.esStackDepth > 0) + if (stackState.esStackDepth > 0) { impEvalSideEffects(); } - assert(verCurrentState.esStackDepth == 0); + assert(stackState.esStackDepth == 0); goto APPEND; @@ -10744,7 +10658,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) impAppendTree(storeData, CHECK_SPILL_ALL, impCurStmtDI); impAppendTree(storeType, CHECK_SPILL_ALL, impCurStmtDI); - impPushOnStack(gtNewLclVarNode(refAnyLcl, TYP_STRUCT), verMakeTypeInfo(impGetRefAnyClass())); + impPushOnStack(gtNewLclVarNode(refAnyLcl, TYP_STRUCT), makeTypeInfo(impGetRefAnyClass())); break; } @@ -10759,7 +10673,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) OBJ: ClassLayout* layout; lclTyp = TypeHandleToVarType(resolvedToken.hClass, &layout); - tiRetVal = verMakeTypeInfo(resolvedToken.hClass); + tiRetVal = makeTypeInfo(resolvedToken.hClass); op1 = impPopStack().val; assertImp((genActualType(op1) == TYP_I_IMPL) || op1->TypeIs(TYP_BYREF)); @@ -10864,7 +10778,7 @@ GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET o // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset) { - impPushOnStack(impCreateLocalNode(lclNum DEBUGARG(offset)), verMakeTypeInfoForLocal(lclNum)); + impPushOnStack(impCreateLocalNode(lclNum DEBUGARG(offset)), makeTypeInfoForLocal(lclNum)); } // Load an argument on the operand stack @@ -11049,7 +10963,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } else { - if (verCurrentState.esStackDepth != 0) + if (stackState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); @@ -11301,7 +11215,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) // We must have imported a tailcall and jumped to RET if (isTailCall) { - assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); + assert(stackState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES @@ -11346,7 +11260,7 @@ void Compiler::impPoisonImplicitByrefsBeforeReturn() if (!spilled) { - for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) + for (unsigned level = 0; level < stackState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(true) DEBUGARG("Stress poisoning byrefs before return")); } @@ -11460,7 +11374,7 @@ void Compiler::impVerifyEHBlock(BasicBlock* block) // either empty or one that contains just // the Exception Object that we are dealing with // - verCurrentState.esStackDepth = 0; + stackState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { @@ -11500,7 +11414,7 @@ void Compiler::impVerifyEHBlock(BasicBlock* block) if (!filterBB->HasFlag(BBF_IMPORTED) && (impGetPendingBlockMember(filterBB) == 0)) { - verCurrentState.esStackDepth = 0; + stackState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! 
@@ -11570,7 +11484,7 @@ void Compiler::impImportBlock(BasicBlock* block) #endif /* Set the current stack state to the merged result */ - verResetCurrentState(block, &verCurrentState); + resetCurrentState(block, &stackState); if (block->hasTryIndex()) { @@ -11597,7 +11511,7 @@ void Compiler::impImportBlock(BasicBlock* block) /* If the stack is non-empty, we might have to spill its contents */ - if (verCurrentState.esStackDepth != 0) + if (stackState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply @@ -11694,9 +11608,9 @@ void Compiler::impImportBlock(BasicBlock* block) // Spill all stack entries into temps JITDUMP("\nSpilling stack entries into temps\n"); - for (unsigned level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) + for (unsigned level = 0, tempNum = baseTmp; level < stackState.esStackDepth; level++, tempNum++) { - GenTree* tree = verCurrentState.esStack[level].val; + GenTree* tree = stackState.esStack[level].val; // VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from // the other. This should merge to a byref in unverifiable code. @@ -11723,7 +11637,7 @@ void Compiler::impImportBlock(BasicBlock* block) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. - verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); + stackState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves @@ -11745,7 +11659,7 @@ void Compiler::impImportBlock(BasicBlock* block) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. - verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); + stackState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT @@ -11761,7 +11675,7 @@ void Compiler::impImportBlock(BasicBlock* block) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. - verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); + stackState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we @@ -11819,9 +11733,6 @@ void Compiler::impImportBlock(BasicBlock* block) BADCODE("bad stack state"); } - // Oops. Something went wrong when spilling. Bad code. 
- verHandleVerificationFailure(block DEBUGARG(true)); - goto SPILLSTACK; } } @@ -11911,19 +11822,18 @@ void Compiler::impImportBlockPending(BasicBlock* block) // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation - if ((block->bbEntryState == nullptr) && !block->HasAnyFlag(BBF_IMPORTED | BBF_FAILED_VERIFICATION) && - (impGetPendingBlockMember(block) == 0)) + if ((block->bbEntryState == nullptr) && !block->HasFlag(BBF_IMPORTED) && (impGetPendingBlockMember(block) == 0)) { - verInitBBEntryState(block, &verCurrentState); + initBBEntryState(block, &stackState); assert(block->bbStkDepth == 0); - block->bbStkDepth = static_cast(verCurrentState.esStackDepth); + block->bbStkDepth = static_cast(stackState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. - if (block->bbStkDepth != verCurrentState.esStackDepth) + if (block->bbStkDepth != stackState.esStackDepth) { #ifdef DEBUG char buffer[400]; @@ -11931,7 +11841,7 @@ void Compiler::impImportBlockPending(BasicBlock* block) "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, - verCurrentState.esStackDepth); + stackState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else @@ -11975,11 +11885,11 @@ void Compiler::impImportBlockPending(BasicBlock* block) } dsc->pdBB = block; - dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; + dsc->pdSavedStack.ssDepth = stackState.esStackDepth; // Save the stack trees for later - if (verCurrentState.esStackDepth) + if (stackState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } @@ -12169,16 +12079,13 @@ void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* if (!blk->HasFlag(BBF_IMPORTED) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { - // If we haven't imported this block and we're not going to (because it isn't on - // the pending list) then just ignore it for now. - - // This block has either never been imported (EntryState == NULL) or it failed - // verification. Neither state requires us to force it to be imported now. - assert((blk->bbEntryState == nullptr) || blk->HasFlag(BBF_FAILED_VERIFICATION)); + // If we haven't imported this block (EntryState == NULL) and we're not going to + // (because it isn't on the pending list) then just ignore it for now. + assert(blk->bbEntryState == nullptr); return; } - // For successors we have a valid verCurrentState, so just mark them for reimport + // For successors we have a valid stackState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. 
@@ -12189,7 +12096,7 @@ void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState - m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); + m_pComp->resetCurrentState(blk, &m_pComp->stackState); m_pComp->impImportBlockPending(blk); } @@ -12243,7 +12150,7 @@ unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. - unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); + unsigned baseTmp = lvaGrabTemps(stackState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor @@ -12277,7 +12184,7 @@ void Compiler::impReimportSpillClique(BasicBlock* block) // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. -void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) +void Compiler::initBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0) { @@ -12308,7 +12215,7 @@ void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) /* * Resets the current state to the state at the start of the basic block */ -void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) +void Compiler::resetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { @@ -12326,14 +12233,14 @@ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) } } -void Compiler::verInitCurrentState() +void Compiler::initCurrentState() { // initialize stack info - verCurrentState.esStackDepth = 0; - assert(verCurrentState.esStack != nullptr); + stackState.esStackDepth = 0; + assert(stackState.esStack != nullptr); // copy current state to entry state of first BB - verInitBBEntryState(fgFirstBB, &verCurrentState); + initBBEntryState(fgFirstBB, &stackState); } Compiler* Compiler::impInlineRoot() @@ -12401,7 +12308,7 @@ void Compiler::impImport() if (this == inlineRoot) { // Allocate the stack contents - verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; + stackState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { @@ -12409,15 +12316,15 @@ void Compiler::impImport() // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { - inlineRoot->impStkSize = impStkSize; - inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; + inlineRoot->impStkSize = impStkSize; + inlineRoot->stackState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } - verCurrentState.esStack = inlineRoot->verCurrentState.esStack; + stackState.esStack = inlineRoot->stackState.esStack; } // initialize the entry state at start of method - verInitCurrentState(); + initCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. 
@@ -12490,8 +12397,8 @@ void Compiler::impImport() /* Restore the stack state */ - verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; - if (verCurrentState.esStackDepth) + stackState.esStackDepth = dsc->pdSavedStack.ssDepth; + if (stackState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } @@ -12502,20 +12409,11 @@ void Compiler::impImport() impPendingFree = dsc; /* Now import the block */ + impImportBlock(dsc->pdBB); - if (dsc->pdBB->HasFlag(BBF_FAILED_VERIFICATION)) - { - verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); - impEndTreeList(dsc->pdBB); - } - else + if (compDonotInline()) { - impImportBlock(dsc->pdBB); - - if (compDonotInline()) - { - return; - } + return; } } @@ -13900,9 +13798,9 @@ bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* ad } } - for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) + for (unsigned level = 0; level < stackState.esStackDepth; level++) { - GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; + GenTreeFlags stackTreeFlags = stackState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 7ba613e61053c..e25d63aa22074 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1043,7 +1043,7 @@ var_types Compiler::impImportCall(OPCODE opcode, assert(newobjThis->IsLclVarAddr()); unsigned lclNum = newobjThis->AsLclVarCommon()->GetLclNum(); - impPushOnStack(gtNewLclvNode(lclNum, lvaGetRealType(lclNum)), verMakeTypeInfo(clsHnd)); + impPushOnStack(gtNewLclvNode(lclNum, lvaGetRealType(lclNum)), makeTypeInfo(clsHnd)); } else { @@ -1093,7 +1093,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // // For implicit tail calls, we perform this check after return types are // known to be compatible. - if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) + if (isExplicitTailCall && (stackState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } @@ -1112,7 +1112,7 @@ var_types Compiler::impImportCall(OPCODE opcode, } // Stack empty check for implicit tail calls. - if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) + if (canTailCall && isImplicitTailCall && (stackState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } @@ -1256,7 +1256,7 @@ var_types Compiler::impImportCall(OPCODE opcode, { if (gtIsRecursiveCall(methHnd)) { - assert(verCurrentState.esStackDepth == 0); + assert(stackState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { @@ -1316,8 +1316,8 @@ var_types Compiler::impImportCall(OPCODE opcode, if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
- assert(verCurrentState.esStackDepth > 0); - impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); + assert(stackState.esStackDepth > 0); + impAppendTree(call, stackState.esStackDepth - 1, impCurStmtDI); } else if (JitConfig.JitProfileValues() && call->IsCall() && call->AsCall()->IsSpecialIntrinsic(this, NI_System_SpanHelpers_Memmove)) @@ -1537,7 +1537,7 @@ var_types Compiler::impImportCall(OPCODE opcode, } } - typeInfo tiRetVal = verMakeTypeInfo(sig->retType, retTypeClass); + typeInfo tiRetVal = makeTypeInfo(sig->retType, retTypeClass); impPushOnStack(call, tiRetVal); } @@ -1935,8 +1935,7 @@ GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugI if ((sig->callConv != CORINFO_CALLCONV_DEFAULT || sig->totalILArgs() > 0) && !impStackTop().val->OperIs(GT_LCL_VAR, GT_FTN_ADDR, GT_CNS_INT)) { - impSpillStackEntry(verCurrentState.esStackDepth - 1, - BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); + impSpillStackEntry(stackState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ @@ -2018,16 +2017,16 @@ void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* s argsToReverse = 0; #endif - for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) + for (unsigned level = stackState.esStackDepth - argsToReverse; level < stackState.esStackDepth; level++) { - if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) + if (stackState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } - else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) + else if (stackState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { @@ -2279,7 +2278,7 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, // TODO-CQ: If we enable FEATURE_IMPLICIT_BYREFS on all platforms // where we support Swift we can probably let normal implicit byref // handling handle the unlowered case. 
- impSpillStackEntry(verCurrentState.esStackDepth - sig->numArgs + argIndex, + impSpillStackEntry(stackState.esStackDepth - sig->numArgs + argIndex, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("Swift struct arg with lowering")); } } @@ -4248,10 +4247,10 @@ GenTree* Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd, // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for FusedMultiplyAdd")); - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op2 side effects for FusedMultiplyAdd")); GenTree* op3 = impImplicitR4orR8Cast(impPopStack().val, callType); @@ -5134,8 +5133,8 @@ GenTree* Compiler::impSRCSUnsafeIntrinsic(NamedIntrinsic intrinsic, // sub // ret - impSpillSideEffect(true, verCurrentState.esStackDepth - - 2 DEBUGARG("Spilling op1 side effects for Unsafe.ByteOffset")); + impSpillSideEffect(true, + stackState.esStackDepth - 2 DEBUGARG("Spilling op1 side effects for Unsafe.ByteOffset")); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; @@ -6296,7 +6295,7 @@ GenTree* Compiler::impTransformThis(GenTree* thisPtr, // This pushes on the dereferenced byref // This is then used immediately to box. - impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass)); + impPushOnStack(obj, makeTypeInfo(pConstrainedResolvedToken->hClass)); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. @@ -9290,10 +9289,10 @@ GenTree* Compiler::impEstimateIntrinsic(CORINFO_METHOD_HANDLE method, // AdvSimd.FusedMultiplyAdd expects (addend, left, right), while the APIs take (left, right, addend) - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 3 DEBUGARG("Spilling op1 side effects for MultiplyAddEstimate")); - impSpillSideEffect(true, verCurrentState.esStackDepth - + impSpillSideEffect(true, stackState.esStackDepth - 2 DEBUGARG("Spilling op2 side effects for MultiplyAddEstimate")); swapOp1AndOp3 = true;