diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 4d1a2fd36f43fd..915ad3b2671e99 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -4289,7 +4289,7 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree) // Do the xchg emitAttr size = EA_PTRSIZE; - if (varTypeGCtype(type1) != varTypeGCtype(type2)) + if (varTypeIsGC(type1) != varTypeIsGC(type2)) { // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers. // Otherwise it will leave them alone, which is correct if they have the same GC-ness. diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 225700208b4a2d..dedf8735fc65dc 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -3701,7 +3701,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere * have to "swap" the registers in the GC reg pointer mask */ - if (varTypeGCtype(varDscSrc->TypeGet()) != varTypeGCtype(varDscDest->TypeGet())) + if (varTypeIsGC(varDscSrc) != varTypeIsGC(varDscDest)) { size = EA_GCREF; } diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 3fa633913fd4bd..8438fc07424bf0 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -5654,7 +5654,7 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree) // Do the xchg emitAttr size = EA_PTRSIZE; - if (varTypeGCtype(type1) != varTypeGCtype(type2)) + if (varTypeIsGC(type1) != varTypeIsGC(type2)) { // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers. // Otherwise it will leave them alone, which is correct if they have the same GC-ness. 
@@ -6884,7 +6884,7 @@ void CodeGen::genCompareInt(GenTree* treeNode) // The common type cannot be smaller than any of the operand types, we're probably mixing int/long assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type))); // Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons - assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0)); + assert(!(varTypeIsSmall(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0)); // If op1 is smaller then it cannot be in memory, we're probably missing a cast assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory()); // If op2 is smaller then it cannot be in memory, we're probably missing a cast diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 25c82920978863..0c54ee364a8528 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -5661,6 +5661,7 @@ class Compiler void fgDebugCheckLoopTable(); void fgDebugCheckSsa(); + void fgDebugCheckTypes(GenTree* tree); void fgDebugCheckFlags(GenTree* tree, BasicBlock* block); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 8e8331490043fb..407b0623bdab8a 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -3150,6 +3150,56 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef } } +//------------------------------------------------------------------------ +// fgDebugCheckTypes: Validate node types used in the given tree +// +// Arguments: +// tree - the tree to (recursively) check types for +// +void Compiler::fgDebugCheckTypes(GenTree* tree) +{ + fgWalkTreePost(&tree, [](GenTree** pNode, fgWalkData* data) -> 
fgWalkResult { + GenTree* node = *pNode; + + // Validate types of nodes in the IR: + // + // * TYP_ULONG and TYP_UINT are not legal. + // * Small types are only legal for the following nodes: + // * All kinds of indirections including GT_NULLCHECK + // * All kinds of locals + // * GT_COMMA wrapped around any of the above. + // + + if (node->TypeIs(TYP_ULONG, TYP_UINT)) + { + data->compiler->gtDispTree(node); + assert(!"TYP_ULONG and TYP_UINT are not legal in IR"); + } + + if (varTypeIsSmall(node)) + { + if (node->OperIs(GT_COMMA)) + { + // TODO: it's only allowed if its underlying effective node is also a small type. + return WALK_CONTINUE; + } + + if (node->OperIsIndir() || node->OperIs(GT_NULLCHECK) || node->IsPhiNode() || node->IsAnyLocal()) + { + return WALK_CONTINUE; + } + + data->compiler->gtDispTree(node); + assert(!"Unexpected small type in IR"); + } + + // TODO: validate types in GT_CAST nodes. + // Validate mismatched types in binop's arguments, etc. + // + return WALK_CONTINUE; + }); +} + //------------------------------------------------------------------------ // fgDebugCheckFlags: Validate various invariants related to the propagation // and setting of tree, block, and method flags @@ -3757,6 +3807,7 @@ void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees) } fgDebugCheckFlags(stmt->GetRootNode(), block); + fgDebugCheckTypes(stmt->GetRootNode()); // Not only will this stress fgMorphBlockStmt(), but we also get all the checks // done by fgMorphTree() diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 180149a5cf83a2..47d059f1298b2d 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -21736,7 +21736,7 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode( genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); - assert(type == TYP_UBYTE); + assert(type == TYP_INT); var_types simdType = 
getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); @@ -21875,7 +21875,7 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode( genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); - assert(type == TYP_UBYTE); + assert(type == TYP_INT); var_types simdType = getSIMDTypeForSize(simdSize); assert(varTypeIsSIMD(simdType)); @@ -23861,7 +23861,7 @@ GenTree* Compiler::gtNewSimdShuffleNode( #if defined(TARGET_XARCH) uint8_t control = 0; bool crossLane = false; - bool needsZero = varTypeIsSmallInt(simdBaseType) && (simdSize <= 16); + bool needsZero = varTypeIsSmall(simdBaseType) && (simdSize <= 16); uint64_t value = 0; simd_t vecCns = {}; simd_t mskCns = {}; diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp index 287b214d0f40b8..027f0b1815da2f 100644 --- a/src/coreclr/jit/hwintrinsic.cpp +++ b/src/coreclr/jit/hwintrinsic.cpp @@ -1060,7 +1060,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic); CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic); int numArgs = sig->numArgs; - var_types retType = JITtype2varType(sig->retType); + var_types retType = genActualType(JITtype2varType(sig->retType)); CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; GenTree* retNode = nullptr; diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index e2cb1ceaead47c..f598370773066f 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -2728,7 +2728,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(simdSize == 16); - if (varTypeIsSmallInt(simdBaseType) && !compOpportunisticallyDependsOn(InstructionSet_SSSE3)) + if (varTypeIsSmall(simdBaseType) && !compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { // TYP_BYTE, TYP_UBYTE, TYP_SHORT, and TYP_USHORT need SSSE3 to 
be able to shuffle any operation break; diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index 89d67112521007..01ee4916d4eef2 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -200,7 +200,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSIMD( // Optimization: use a single load when byteLen equals simdSize. // For code simplicity we always create nodes for two vectors case. const bool useSingleVector = simdSize == byteLen; - return gtNewSimdCmpOpAllNode(GT_EQ, TYP_UBYTE, useSingleVector ? xor1 : orr, gtNewZeroConNode(simdType), baseType, + return gtNewSimdCmpOpAllNode(GT_EQ, TYP_INT, useSingleVector ? xor1 : orr, gtNewZeroConNode(simdType), baseType, simdSize); // Codegen example for byteLen=40 and OrdinalIgnoreCase mode with AVX: diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 670ee35e96f468..0fb4822070bdeb 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -2042,7 +2042,7 @@ bool Lowering::LowerCallMemcmp(GenTreeCall* call, GenTree** next) if (GenTree::OperIsCmpCompare(oper)) { assert(type == TYP_INT); - return comp->gtNewSimdCmpOpAllNode(oper, TYP_UBYTE, op1, op2, CORINFO_TYPE_NATIVEUINT, + return comp->gtNewSimdCmpOpAllNode(oper, TYP_INT, op1, op2, CORINFO_TYPE_NATIVEUINT, genTypeSize(op1)); } return comp->gtNewSimdBinOpNode(oper, op1->TypeGet(), op1, op2, CORINFO_TYPE_NATIVEUINT, diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp index 0ef5399dff06d1..045810b12781b9 100644 --- a/src/coreclr/jit/lowerarmarch.cpp +++ b/src/coreclr/jit/lowerarmarch.cpp @@ -1273,7 +1273,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); - assert(node->gtType == TYP_UBYTE); + assert(node->TypeIs(TYP_INT)); assert((cmpOp == GT_EQ) || (cmpOp == GT_NE)); // We have the 
following (with the appropriate simd size and where the intrinsic could be op_Inequality): diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index 3daa96268d7213..c95a60087a31d7 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -1768,7 +1768,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); - assert(node->gtType == TYP_UBYTE); + assert(node->TypeIs(TYP_INT)); assert((cmpOp == GT_EQ) || (cmpOp == GT_NE)); // We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality): diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 306971171052b2..b6208d53ecbbf1 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -2852,7 +2852,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo { ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue(); ssize_t v2 = otherTreeNode->AsIntCon()->IconValue(); - if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0)) + if ((v1 == v2) && (varTypeIsGC(refPosition->treeNode) == varTypeIsGC(otherTreeNode) || v1 == 0)) { #ifdef TARGET_64BIT // If the constant is negative, only reuse registers of the same type. 
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index b587fd8fb98cd5..6ef082032b6ed3 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -185,7 +185,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock() genTreeOps foldOp; genTreeOps cmpOp; - var_types foldType = m_c1->TypeGet(); + var_types foldType = genActualType(m_c1); if (varTypeIsGC(foldType)) { foldType = TYP_I_IMPL; @@ -1412,7 +1412,7 @@ bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3) // Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and // the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE/GT_GE/GT_LT) - var_types foldType = m_c1->TypeGet(); + var_types foldType = genActualType(m_c1->TypeGet()); if (varTypeIsGC(foldType)) { foldType = TYP_I_IMPL; diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index 89720f6c7748b6..6a6397d57aaaf8 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -1481,7 +1481,7 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTree* expr, bool monIncreas JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly())); } } - else if (varTypeIsSmallInt(expr->TypeGet())) + else if (varTypeIsSmall(expr)) { range = GetRangeFromType(expr->TypeGet()); JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly())); diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index f7c69b3a1a3856..faeaada9d4b989 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -274,7 +274,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, } CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; - var_types retType = JITtype2varType(sig->retType); + var_types retType = genActualType(JITtype2varType(sig->retType)); CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types simdType = TYP_UNKNOWN; unsigned simdSize = 0; diff --git 
a/src/coreclr/jit/vartype.h b/src/coreclr/jit/vartype.h index 65a4c50de78865..725aab9017376c 100644 --- a/src/coreclr/jit/vartype.h +++ b/src/coreclr/jit/vartype.h @@ -176,16 +176,10 @@ inline bool varTypeIsArithmetic(T vt) return ((varTypeClassification[TypeGet(vt)] & (VTF_INT | VTF_FLT)) != 0); } -template -inline unsigned varTypeGCtype(T vt) -{ - return (unsigned)(varTypeClassification[TypeGet(vt)] & (VTF_GCR | VTF_BYR)); -} - template inline bool varTypeIsGC(T vt) { - return (varTypeGCtype(vt) != 0); + return (TypeGet(vt) == TYP_REF) || (TypeGet(vt) == TYP_BYREF); } template @@ -218,12 +212,6 @@ inline bool varTypeIsSmall(T vt) return (TypeGet(vt) >= TYP_BYTE) && (TypeGet(vt) <= TYP_USHORT); } -template -inline bool varTypeIsSmallInt(T vt) -{ - return (TypeGet(vt) >= TYP_BYTE) && (TypeGet(vt) <= TYP_USHORT); -} - template inline bool varTypeIsIntOrI(T vt) {