Skip to content

Commit

Permalink
Some more precise debug info improvements (dotnet#61419)
Browse files Browse the repository at this point in the history
* We were not recording precise debug info in inlinees except at IL
  offset 0, because most of the logic that determines when to attach
  debug info did not run for inlinees. There are no changes in what the
  EE sees, since we were normalizing debug info back to the root anyway.

* Propagate debug info beyond rationalization, to
  make it simpler to dump the precise debug info. This means we create
  some more GT_IL_OFFSET nodes, in particular when the inlinee debug
  info is valid but the root info is invalid. This is currently
  happening for newobj IL instructions when the constructor is inlined.
  We generate two statements:
  GT_ASG(GT_LCL_VAR(X), ALLOCOBJ(CLS));
  GT_CALL(CTOR, GT_LCL_VAR(X))
  and the first statement ends up "consuming" the debug info, meaning we
  end up with no debug info for the GT_CALL, which eventually propagates
  into the inline tree. I have held off on fixing this for now since it
  causes debug info diffs in the data reported back to the EE.

  The additional nodes in LIR result in 0.15% more memory use and 0.015%
  more instructions retired for SPMI over libraries.

There is also a small fix in gtlist.h for GT_BFIZ when
MEASURE_NODE_SIZES is defined.

No SPMI diffs.
  • Loading branch information
jakobbotsch authored Nov 11, 2021
1 parent 6f5de0b commit a761b9f
Show file tree
Hide file tree
Showing 7 changed files with 98 additions and 105 deletions.
12 changes: 8 additions & 4 deletions src/coreclr/jit/codegenlinear.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -432,10 +432,14 @@ void CodeGen::genCodeForBBlist()
if (node->OperGet() == GT_IL_OFFSET)
{
GenTreeILOffset* ilOffset = node->AsILOffset();
genEnsureCodeEmitted(currentDI);
currentDI = ilOffset->gtStmtDI;
genIPmappingAdd(IPmappingDscKind::Normal, currentDI, firstMapping);
firstMapping = false;
DebugInfo rootDI = ilOffset->gtStmtDI.GetRoot();
if (rootDI.IsValid())
{
genEnsureCodeEmitted(currentDI);
currentDI = rootDI;
genIPmappingAdd(IPmappingDscKind::Normal, currentDI, firstMapping);
firstMapping = false;
}
#ifdef DEBUG
assert(ilOffset->gtStmtLastILoffs <= compiler->info.compILCodeSize ||
ilOffset->gtStmtLastILoffs == BAD_IL_OFFSET);
Expand Down
1 change: 1 addition & 0 deletions src/coreclr/jit/debuginfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class ILLocation
return m_isStackEmpty;
}

// Is this a call instruction? Used for managed return values.
bool IsCall() const
{
return m_isCall;
Expand Down
6 changes: 3 additions & 3 deletions src/coreclr/jit/fgbasic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4240,10 +4240,10 @@ BasicBlock* Compiler::fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node)
if ((*riter)->gtOper == GT_IL_OFFSET)
{
GenTreeILOffset* ilOffset = (*riter)->AsILOffset();
if (ilOffset->gtStmtDI.IsValid())
DebugInfo rootDI = ilOffset->gtStmtDI.GetRoot();
if (rootDI.IsValid())
{
assert(ilOffset->gtStmtDI.GetInlineContext()->IsRoot());
splitPointILOffset = ilOffset->gtStmtDI.GetLocation().GetOffset();
splitPointILOffset = rootDI.GetLocation().GetOffset();
break;
}
}
Expand Down
11 changes: 2 additions & 9 deletions src/coreclr/jit/gentree.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11478,15 +11478,8 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
break;

case GT_IL_OFFSET:
printf(" IL offset: ");
if (!tree->AsILOffset()->gtStmtDI.IsValid())
{
printf("???");
}
else
{
printf("0x%x", tree->AsILOffset()->gtStmtDI.GetLocation().GetOffset());
}
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;

case GT_JCC:
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/gtlist.h
Original file line number Diff line number Diff line change
Expand Up @@ -293,7 +293,7 @@ GTNODE(MADD , GenTreeOp ,0, GTK_BINOP) // Ge
GTNODE(JMPTABLE , GenTree ,0, (GTK_LEAF|GTK_NOCONTAIN)) // Generates the jump table for switches
GTNODE(SWITCH_TABLE , GenTreeOp ,0, (GTK_BINOP|GTK_NOVALUE)) // Jump Table based switch construct
#ifdef TARGET_ARM64
GTNODE(BFIZ, GenTreeBfiz ,0, GTK_BINOP) // Bitfield Insert in Zero
GTNODE(BFIZ , GenTreeOp ,0, GTK_BINOP) // Bitfield Insert in Zero
#endif

//-----------------------------------------------------------------------------
Expand Down
156 changes: 74 additions & 82 deletions src/coreclr/jit/importer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -618,7 +618,7 @@ inline void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel)
// Once we set the current offset as debug info in an appended tree, we are
// ready to report the following offsets. Note that we need to compare
// offsets here instead of debug info, since we do not set the "is call"
// debug in impCurStmtDI.
// bit in impCurStmtDI.

if (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())
{
Expand Down Expand Up @@ -3021,11 +3021,6 @@ unsigned Compiler::impInitBlockLineInfo()
impCurStmtOffsSet(blockOffs);
}

if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
{
impCurStmtOffsSet(blockOffs);
}

/* Always report IL offset 0 or some tests get confused.
Probably a good idea anyways */

Expand Down Expand Up @@ -11721,111 +11716,108 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if (opts.compDbgInfo)
#endif
{
if (!compIsForInlining())
{
nxtStmtOffs =
(nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
nxtStmtOffs =
(nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;

/* Have we reached the next stmt boundary ? */

/* Have we reached the next stmt boundary ? */
if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
{
assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);

if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
{
assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
/* We need to provide accurate IP-mapping at this point.
So spill anything on the stack so that it will form
gtStmts with the correct stmt offset noted */

if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
{
/* We need to provide accurate IP-mapping at this point.
So spill anything on the stack so that it will form
gtStmts with the correct stmt offset noted */
impSpillStackEnsure(true);
}

impSpillStackEnsure(true);
}
// Have we reported debug info for any tree?

// Have we reported debug info for any tree?
if (impCurStmtDI.IsValid() && opts.compDbgCode)
{
GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

if (impCurStmtDI.IsValid() && opts.compDbgCode)
{
GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
assert(!impCurStmtDI.IsValid());
}

assert(!impCurStmtDI.IsValid());
}
if (!impCurStmtDI.IsValid())
{
/* Make sure that nxtStmtIndex is in sync with opcodeOffs.
If opcodeOffs has gone past nxtStmtIndex, catch up */

if (!impCurStmtDI.IsValid())
while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
{
/* Make sure that nxtStmtIndex is in sync with opcodeOffs.
If opcodeOffs has gone past nxtStmtIndex, catch up */

while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
{
nxtStmtIndex++;
}
nxtStmtIndex++;
}

/* Go to the new stmt */
/* Go to the new stmt */

impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);

/* Update the stmt boundary index */
/* Update the stmt boundary index */

nxtStmtIndex++;
assert(nxtStmtIndex <= info.compStmtOffsetsCount);
nxtStmtIndex++;
assert(nxtStmtIndex <= info.compStmtOffsetsCount);

/* Are there any more line# entries after this one? */
/* Are there any more line# entries after this one? */

if (nxtStmtIndex < info.compStmtOffsetsCount)
{
/* Remember where the next line# starts */
if (nxtStmtIndex < info.compStmtOffsetsCount)
{
/* Remember where the next line# starts */

nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
}
else
{
/* No more line# entries */
nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
}
else
{
/* No more line# entries */

nxtStmtOffs = BAD_IL_OFFSET;
}
nxtStmtOffs = BAD_IL_OFFSET;
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
(verCurrentState.esStackDepth == 0))
{
/* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
impCurStmtDI
*/
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
(verCurrentState.esStackDepth == 0))
{
/* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
impCurStmtDI
*/

impCurStmtOffsSet(opcodeOffs);
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
impOpcodeIsCallSiteBoundary(prevOpcode))
{
/* Make sure we have a type cached */
assert(callTyp != TYP_COUNT);

if (callTyp == TYP_VOID)
{
impCurStmtOffsSet(opcodeOffs);
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
impOpcodeIsCallSiteBoundary(prevOpcode))
else if (opts.compDbgCode)
{
/* Make sure we have a type cached */
assert(callTyp != TYP_COUNT);

if (callTyp == TYP_VOID)
{
impCurStmtOffsSet(opcodeOffs);
}
else if (opts.compDbgCode)
{
impSpillStackEnsure(true);
impCurStmtOffsSet(opcodeOffs);
}
impSpillStackEnsure(true);
impCurStmtOffsSet(opcodeOffs);
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
{
if (opts.compDbgCode)
{
if (opts.compDbgCode)
{
impSpillStackEnsure(true);
}

impCurStmtOffsSet(opcodeOffs);
impSpillStackEnsure(true);
}

assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) ||
(impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs));
impCurStmtOffsSet(opcodeOffs);
}

assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) ||
(impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs));
}

CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
Expand Down
15 changes: 9 additions & 6 deletions src/coreclr/jit/rationalize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -958,12 +958,15 @@ PhaseStatus Rationalizer::DoPhase()
BlockRange().InsertAtEnd(LIR::Range(statement->GetTreeList(), statement->GetRootNode()));

// If this statement has correct debug information, change it
// into a debug info node and insert it into the LIR. Currently
// we do not support describing IL offsets in inlinees in the
// emitter, so we normalize all debug info to be in the inline
// root here.
DebugInfo di = statement->GetDebugInfo().GetRoot();
if (di.IsValid())
// into a debug info node and insert it into the LIR. Note that
// we are currently reporting root info only back to the EE, so
// if the leaf debug info is invalid we still attach it.
// Note that we would like to have the invariant di.IsValid()
// => parent.IsValid() but it is currently not the case for
// NEWOBJ IL instructions where the debug info ends up attached
// to the allocation instead of the constructor call.
DebugInfo di = statement->GetDebugInfo();
if (di.IsValid() || di.GetRoot().IsValid())
{
GenTreeILOffset* ilOffset =
new (comp, GT_IL_OFFSET) GenTreeILOffset(di DEBUGARG(statement->GetLastILOffset()));
Expand Down

0 comments on commit a761b9f

Please sign in to comment.