Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Introduce fgDebugCheckTypes #94621

Merged
merged 12 commits into from
Nov 25, 2023
76 changes: 74 additions & 2 deletions src/coreclr/jit/assertionprop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -232,24 +232,96 @@ bool IntegralRange::Contains(int64_t value) const
switch (node->AsHWIntrinsic()->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_op_Equality:
case NI_Vector128_op_Inequality:
case NI_Vector256_op_Equality:
case NI_Vector256_op_Inequality:
case NI_Vector512_op_Equality:
case NI_Vector512_op_Inequality:
case NI_SSE_CompareScalarOrderedEqual:
case NI_SSE_CompareScalarOrderedNotEqual:
case NI_SSE_CompareScalarOrderedLessThan:
case NI_SSE_CompareScalarOrderedLessThanOrEqual:
case NI_SSE_CompareScalarOrderedGreaterThan:
case NI_SSE_CompareScalarOrderedGreaterThanOrEqual:
case NI_SSE_CompareScalarUnorderedEqual:
case NI_SSE_CompareScalarUnorderedNotEqual:
case NI_SSE_CompareScalarUnorderedLessThanOrEqual:
case NI_SSE_CompareScalarUnorderedLessThan:
case NI_SSE_CompareScalarUnorderedGreaterThanOrEqual:
case NI_SSE_CompareScalarUnorderedGreaterThan:
case NI_SSE2_CompareScalarOrderedEqual:
case NI_SSE2_CompareScalarOrderedNotEqual:
case NI_SSE2_CompareScalarOrderedLessThan:
case NI_SSE2_CompareScalarOrderedLessThanOrEqual:
case NI_SSE2_CompareScalarOrderedGreaterThan:
case NI_SSE2_CompareScalarOrderedGreaterThanOrEqual:
case NI_SSE2_CompareScalarUnorderedEqual:
case NI_SSE2_CompareScalarUnorderedNotEqual:
case NI_SSE2_CompareScalarUnorderedLessThanOrEqual:
case NI_SSE2_CompareScalarUnorderedLessThan:
case NI_SSE2_CompareScalarUnorderedGreaterThanOrEqual:
case NI_SSE2_CompareScalarUnorderedGreaterThan:
case NI_SSE41_TestC:
case NI_SSE41_TestZ:
case NI_SSE41_TestNotZAndNotC:
case NI_AVX_TestC:
case NI_AVX_TestZ:
case NI_AVX_TestNotZAndNotC:
return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::One};

case NI_SSE2_Extract:
case NI_SSE41_Extract:
case NI_SSE41_X64_Extract:
case NI_Vector128_ToScalar:
case NI_Vector256_ToScalar:
case NI_Vector512_ToScalar:
case NI_Vector128_GetElement:
case NI_Vector256_GetElement:
case NI_Vector512_GetElement:
if (varTypeIsSmall(node->AsHWIntrinsic()->GetSimdBaseType()))
{
return ForType(node->AsHWIntrinsic()->GetSimdBaseType());
}
break;

case NI_BMI1_TrailingZeroCount:
case NI_BMI1_X64_TrailingZeroCount:
case NI_LZCNT_LeadingZeroCount:
case NI_LZCNT_X64_LeadingZeroCount:
case NI_POPCNT_PopCount:
case NI_POPCNT_X64_PopCount:
// TODO-Casts: specify more precise ranges once "IntegralRange" supports them.
return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::ByteMax};
#elif defined(TARGET_ARM64)
case NI_Vector64_op_Equality:
case NI_Vector64_op_Inequality:
case NI_Vector128_op_Equality:
case NI_Vector128_op_Inequality:
return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::One};

case NI_AdvSimd_Extract:
case NI_Vector64_ToScalar:
case NI_Vector128_ToScalar:
case NI_Vector64_GetElement:
case NI_Vector128_GetElement:
if (varTypeIsSmall(node->AsHWIntrinsic()->GetSimdBaseType()))
{
return ForType(node->AsHWIntrinsic()->GetSimdBaseType());
}
break;

case NI_AdvSimd_PopCount:
case NI_AdvSimd_LeadingZeroCount:
case NI_AdvSimd_LeadingSignCount:
case NI_ArmBase_LeadingZeroCount:
case NI_ArmBase_Arm64_LeadingZeroCount:
case NI_ArmBase_Arm64_LeadingSignCount:
// TODO-Casts: specify more precise ranges once "IntegralRange" supports them.
return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::ByteMax};
#else
#error Unsupported platform
#endif
// TODO-Casts: specify more precise ranges once "IntegralRange" supports them.
return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::ByteMax};
default:
break;
}
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/codegenarm64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4289,7 +4289,7 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree)

// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
if (varTypeIsGC(type1) != varTypeIsGC(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/codegencommon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3664,7 +3664,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
* have to "swap" the registers in the GC reg pointer mask
*/

if (varTypeGCtype(varDscSrc->TypeGet()) != varTypeGCtype(varDscDest->TypeGet()))
if (varTypeIsGC(varDscSrc) != varTypeIsGC(varDscDest))
{
size = EA_GCREF;
}
Expand Down
4 changes: 2 additions & 2 deletions src/coreclr/jit/codegenxarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5654,7 +5654,7 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree)

// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
if (varTypeIsGC(type1) != varTypeIsGC(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
Expand Down Expand Up @@ -6884,7 +6884,7 @@ void CodeGen::genCompareInt(GenTree* treeNode)
// The common type cannot be smaller than any of the operand types, we're probably mixing int/long
assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
// Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
assert(!(varTypeIsSmall(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
// If op1 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
// If op2 is smaller then it cannot be in memory, we're probably missing a cast
Expand Down
1 change: 1 addition & 0 deletions src/coreclr/jit/compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -5668,6 +5668,7 @@ class Compiler
void fgDebugCheckLoopTable();
void fgDebugCheckSsa();

void fgDebugCheckTypes(GenTree* tree);
void fgDebugCheckFlags(GenTree* tree, BasicBlock* block);
void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags);
void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags);
Expand Down
66 changes: 66 additions & 0 deletions src/coreclr/jit/fgdiagnostic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3239,6 +3239,71 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
}
}

//------------------------------------------------------------------------
// fgDebugCheckTypes: Validate node types used in the given tree
//
// Arguments:
//    tree - the tree to (recursively) check types for
//
void Compiler::fgDebugCheckTypes(GenTree* tree)
{
    // Post-order visitor that rejects illegal node types in the IR.
    struct TypeChecker : GenTreeVisitor<TypeChecker>
    {
        enum
        {
            DoPostOrder = true,
        };

        TypeChecker(Compiler* comp) : GenTreeVisitor(comp)
        {
        }

        fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) const
        {
            GenTree* const node = *use;

            // The IR type invariants enforced here:
            //
            //  * TYP_ULONG and TYP_UINT are not legal.
            //  * Small types are only legal for the following nodes:
            //     * All kinds of indirections including GT_NULLCHECK
            //     * All kinds of locals
            //     * GT_COMMA wrapped around any of the above.
            //
            if (node->TypeIs(TYP_ULONG, TYP_UINT))
            {
                // Dump the offending node before asserting so the failure is diagnosable.
                m_compiler->gtDispTree(node);
                assert(!"TYP_ULONG and TYP_UINT are not legal in IR");
            }

            if (!varTypeIsSmall(node))
            {
                return WALK_CONTINUE;
            }

            if (node->OperIs(GT_COMMA))
            {
                // TODO: it's only allowed if its underlying effective node is also a small type.
                return WALK_CONTINUE;
            }

            const bool smallTypeAllowed =
                node->OperIsIndir() || node->OperIs(GT_NULLCHECK) || node->IsPhiNode() || node->IsAnyLocal();
            if (!smallTypeAllowed)
            {
                m_compiler->gtDispTree(node);
                assert(!"Unexpected small type in IR");
            }

            // TODO: validate types in GT_CAST nodes.
            // Validate mismatched types in binopt's arguments, etc.
            //
            return WALK_CONTINUE;
        }
    };

    TypeChecker checker(this);
    checker.WalkTree(&tree, nullptr);
}

//------------------------------------------------------------------------
// fgDebugCheckFlags: Validate various invariants related to the propagation
// and setting of tree, block, and method flags
Expand Down Expand Up @@ -3846,6 +3911,7 @@ void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees)
}

fgDebugCheckFlags(stmt->GetRootNode(), block);
fgDebugCheckTypes(stmt->GetRootNode());

// Not only will this stress fgMorphBlockStmt(), but we also get all the checks
// done by fgMorphTree()
Expand Down
6 changes: 3 additions & 3 deletions src/coreclr/jit/gentree.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21736,7 +21736,7 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode(
genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_UBYTE);
assert(type == TYP_INT);

var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
Expand Down Expand Up @@ -21875,7 +21875,7 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode(
genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_UBYTE);
assert(type == TYP_INT);

var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
Expand Down Expand Up @@ -23861,7 +23861,7 @@ GenTree* Compiler::gtNewSimdShuffleNode(
#if defined(TARGET_XARCH)
uint8_t control = 0;
bool crossLane = false;
bool needsZero = varTypeIsSmallInt(simdBaseType) && (simdSize <= 16);
bool needsZero = varTypeIsSmall(simdBaseType) && (simdSize <= 16);
uint64_t value = 0;
simd_t vecCns = {};
simd_t mskCns = {};
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/hwintrinsic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1060,7 +1060,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic);
int numArgs = sig->numArgs;
var_types retType = JITtype2varType(sig->retType);
var_types retType = genActualType(JITtype2varType(sig->retType));
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
GenTree* retNode = nullptr;

Expand Down
9 changes: 6 additions & 3 deletions src/coreclr/jit/hwintrinsicarm64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1073,15 +1073,17 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,

op1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8);
op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
op1 = gtNewSimdHWIntrinsicNode(genActualType(simdBaseType), op1, NI_Vector64_ToScalar, simdBaseJitType,
8);
op1 = gtNewCastNode(TYP_INT, op1, /* isUnsigned */ true, TYP_INT);

GenTree* zero = gtNewZeroConNode(TYP_SIMD16);
ssize_t index = 8 / genTypeSize(simdBaseType);

op2 = gtNewSimdGetUpperNode(TYP_SIMD8, op2, simdBaseJitType, simdSize);
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8);
op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8);
op2 = gtNewSimdHWIntrinsicNode(genActualType(simdBaseType), op2, NI_Vector64_ToScalar, simdBaseJitType,
8);
op2 = gtNewCastNode(TYP_INT, op2, /* isUnsigned */ true, TYP_INT);

op2 = gtNewOperNode(GT_LSH, TYP_INT, op2, gtNewIconNode(8));
Expand Down Expand Up @@ -1110,7 +1112,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
simdSize);
}

retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
retNode = gtNewSimdHWIntrinsicNode(genActualType(simdBaseType), op1, NI_Vector64_ToScalar,
simdBaseJitType, 8);

if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT))
{
Expand Down
8 changes: 1 addition & 7 deletions src/coreclr/jit/hwintrinsicxarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1817,8 +1817,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
if ((simdSize != 32) || varTypeIsFloating(simdBaseType) ||
compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
var_types simdType = getSIMDTypeForSize(simdSize);

op2 = impSIMDPopStack();
op1 = impSIMDPopStack();

Expand All @@ -1835,8 +1833,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,

if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
var_types simdType = getSIMDTypeForSize(simdSize);

op2 = impSIMDPopStack();
op1 = impSIMDPopStack();

Expand All @@ -1854,8 +1850,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,

if (IsBaselineVector512IsaSupportedOpportunistically())
{
var_types simdType = getSIMDTypeForSize(simdSize);

op1 = impSIMDPopStack();
op1 =
gtNewSimdHWIntrinsicNode(TYP_MASK, op1, NI_AVX512F_ConvertVectorToMask, simdBaseJitType, simdSize);
Expand Down Expand Up @@ -2728,7 +2722,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
{
assert(simdSize == 16);

if (varTypeIsSmallInt(simdBaseType) && !compOpportunisticallyDependsOn(InstructionSet_SSSE3))
if (varTypeIsSmall(simdBaseType) && !compOpportunisticallyDependsOn(InstructionSet_SSSE3))
{
// TYP_BYTE, TYP_UBYTE, TYP_SHORT, and TYP_USHORT need SSSE3 to be able to shuffle any operation
break;
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/importervectorization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSIMD(
// Optimization: use a single load when byteLen equals simdSize.
// For code simplicity we always create nodes for two vectors case.
const bool useSingleVector = simdSize == byteLen;
return gtNewSimdCmpOpAllNode(GT_EQ, TYP_UBYTE, useSingleVector ? xor1 : orr, gtNewZeroConNode(simdType), baseType,
return gtNewSimdCmpOpAllNode(GT_EQ, TYP_INT, useSingleVector ? xor1 : orr, gtNewZeroConNode(simdType), baseType,
simdSize);

// Codegen example for byteLen=40 and OrdinalIgnoreCase mode with AVX:
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/lower.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2042,7 +2042,7 @@ bool Lowering::LowerCallMemcmp(GenTreeCall* call, GenTree** next)
if (GenTree::OperIsCmpCompare(oper))
{
assert(type == TYP_INT);
return comp->gtNewSimdCmpOpAllNode(oper, TYP_UBYTE, op1, op2, CORINFO_TYPE_NATIVEUINT,
return comp->gtNewSimdCmpOpAllNode(oper, TYP_INT, op1, op2, CORINFO_TYPE_NATIVEUINT,
genTypeSize(op1));
}
return comp->gtNewSimdBinOpNode(oper, op1->TypeGet(), op1, op2, CORINFO_TYPE_NATIVEUINT,
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/lowerarmarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1273,7 +1273,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(node->gtType == TYP_UBYTE);
assert(node->TypeIs(TYP_INT));
assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));

// We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality):
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/lowerxarch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1790,7 +1790,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(node->gtType == TYP_UBYTE);
assert(node->TypeIs(TYP_INT));
assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));

// We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality):
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/lsra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2852,7 +2852,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo
{
ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue();
ssize_t v2 = otherTreeNode->AsIntCon()->IconValue();
if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0))
if ((v1 == v2) && (varTypeIsGC(refPosition->treeNode) == varTypeIsGC(otherTreeNode) || v1 == 0))
{
#ifdef TARGET_64BIT
// If the constant is negative, only reuse registers of the same type.
Expand Down
4 changes: 2 additions & 2 deletions src/coreclr/jit/optimizebools.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()

genTreeOps foldOp;
genTreeOps cmpOp;
var_types foldType = m_c1->TypeGet();
var_types foldType = genActualType(m_c1);
if (varTypeIsGC(foldType))
{
foldType = TYP_I_IMPL;
Expand Down Expand Up @@ -1412,7 +1412,7 @@ bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3)
// Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and
// the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE/GT_GE/GT_LT)

var_types foldType = m_c1->TypeGet();
var_types foldType = genActualType(m_c1->TypeGet());
if (varTypeIsGC(foldType))
{
foldType = TYP_I_IMPL;
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/jit/rangecheck.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1481,7 +1481,7 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTree* expr, bool monIncreas
JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
}
}
else if (varTypeIsSmallInt(expr->TypeGet()))
else if (varTypeIsSmall(expr))
{
range = GetRangeFromType(expr->TypeGet());
JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
Expand Down
Loading
Loading