From de6253928fd42e758892a53e9a02579ad3161dfb Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Tue, 6 Jan 2026 14:59:59 -0800
Subject: [PATCH] JIT: fix CEA to handle a few more cases

Conditional escape analysis (CEA) was not happening when an object's
`GetEnumerator` returns the result of another `GetEnumerator` call.
Such calls must be specially flagged for CEA, and the JIT was not
handling this case.
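For illustration, the shape that was being missed looks roughly like
the sketch below (`WrapperCollection` is a hypothetical stand-in, not
the actual ReadOnlyArray code):

    using System.Collections;
    using System.Collections.Generic;

    // Hypothetical wrapper type: its GetEnumerator is expressed via
    // another GetEnumerator call rather than directly allocating an
    // enumerator instance.
    public readonly struct WrapperCollection<T> : IEnumerable<T>
    {
        private readonly T[] _items;

        public WrapperCollection(T[] items) => _items = items;

        // After the outer call is inlined, the value returned to the
        // caller is itself a call; that inner call must inherit the
        // enumerator-GDV flag for cloning to remain possible.
        public IEnumerator<T> GetEnumerator()
            => ((IEnumerable<T>)_items).GetEnumerator();

        IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
    }

When a `foreach` over such a collection is devirtualized via GDV, the
inner `GetEnumerator` call is what ultimately produces the enumerator,
so the cloning flag placed on the outer call must be transferred to it.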
"def" : "use", a->m_block->bbNum, allocBlock->bbNum); + JITDUMP("Alloc temp V%02u %s in " FMT_BB " not dominated by %s " FMT_BB "\n", a->m_lclNum, + a->m_isDef ? "def" : "use", a->m_block->bbNum, domCheckBlockName, domCheckBlock->bbNum); return false; } }