diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index fc3908b1d43f43..db5b6a6c97006f 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -232,24 +232,96 @@ bool IntegralRange::Contains(int64_t value) const
             switch (node->AsHWIntrinsic()->GetHWIntrinsicId())
             {
 #if defined(TARGET_XARCH)
+                case NI_Vector128_op_Equality:
+                case NI_Vector128_op_Inequality:
+                case NI_Vector256_op_Equality:
+                case NI_Vector256_op_Inequality:
+                case NI_Vector512_op_Equality:
+                case NI_Vector512_op_Inequality:
+                case NI_SSE_CompareScalarOrderedEqual:
+                case NI_SSE_CompareScalarOrderedNotEqual:
+                case NI_SSE_CompareScalarOrderedLessThan:
+                case NI_SSE_CompareScalarOrderedLessThanOrEqual:
+                case NI_SSE_CompareScalarOrderedGreaterThan:
+                case NI_SSE_CompareScalarOrderedGreaterThanOrEqual:
+                case NI_SSE_CompareScalarUnorderedEqual:
+                case NI_SSE_CompareScalarUnorderedNotEqual:
+                case NI_SSE_CompareScalarUnorderedLessThanOrEqual:
+                case NI_SSE_CompareScalarUnorderedLessThan:
+                case NI_SSE_CompareScalarUnorderedGreaterThanOrEqual:
+                case NI_SSE_CompareScalarUnorderedGreaterThan:
+                case NI_SSE2_CompareScalarOrderedEqual:
+                case NI_SSE2_CompareScalarOrderedNotEqual:
+                case NI_SSE2_CompareScalarOrderedLessThan:
+                case NI_SSE2_CompareScalarOrderedLessThanOrEqual:
+                case NI_SSE2_CompareScalarOrderedGreaterThan:
+                case NI_SSE2_CompareScalarOrderedGreaterThanOrEqual:
+                case NI_SSE2_CompareScalarUnorderedEqual:
+                case NI_SSE2_CompareScalarUnorderedNotEqual:
+                case NI_SSE2_CompareScalarUnorderedLessThanOrEqual:
+                case NI_SSE2_CompareScalarUnorderedLessThan:
+                case NI_SSE2_CompareScalarUnorderedGreaterThanOrEqual:
+                case NI_SSE2_CompareScalarUnorderedGreaterThan:
+                case NI_SSE41_TestC:
+                case NI_SSE41_TestZ:
+                case NI_SSE41_TestNotZAndNotC:
+                case NI_AVX_TestC:
+                case NI_AVX_TestZ:
+                case NI_AVX_TestNotZAndNotC:
+                    return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::One};
+
+                case NI_SSE2_Extract:
+                case NI_SSE41_Extract:
+                case NI_SSE41_X64_Extract:
+                case NI_Vector128_ToScalar:
+                case NI_Vector256_ToScalar:
+                case NI_Vector512_ToScalar:
+                case NI_Vector128_GetElement:
+                case NI_Vector256_GetElement:
+                case NI_Vector512_GetElement:
+                    if (varTypeIsSmall(node->AsHWIntrinsic()->GetSimdBaseType()))
+                    {
+                        return ForType(node->AsHWIntrinsic()->GetSimdBaseType());
+                    }
+                    break;
+
                 case NI_BMI1_TrailingZeroCount:
                 case NI_BMI1_X64_TrailingZeroCount:
                 case NI_LZCNT_LeadingZeroCount:
                 case NI_LZCNT_X64_LeadingZeroCount:
                 case NI_POPCNT_PopCount:
                 case NI_POPCNT_X64_PopCount:
+                    // TODO-Casts: specify more precise ranges once "IntegralRange" supports them.
+                    return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::ByteMax};
 #elif defined(TARGET_ARM64)
+                case NI_Vector64_op_Equality:
+                case NI_Vector64_op_Inequality:
+                case NI_Vector128_op_Equality:
+                case NI_Vector128_op_Inequality:
+                    return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::One};
+
+                case NI_AdvSimd_Extract:
+                case NI_Vector64_ToScalar:
+                case NI_Vector128_ToScalar:
+                case NI_Vector64_GetElement:
+                case NI_Vector128_GetElement:
+                    if (varTypeIsSmall(node->AsHWIntrinsic()->GetSimdBaseType()))
+                    {
+                        return ForType(node->AsHWIntrinsic()->GetSimdBaseType());
+                    }
+                    break;
+
                 case NI_AdvSimd_PopCount:
                 case NI_AdvSimd_LeadingZeroCount:
                 case NI_AdvSimd_LeadingSignCount:
                 case NI_ArmBase_LeadingZeroCount:
                 case NI_ArmBase_Arm64_LeadingZeroCount:
                 case NI_ArmBase_Arm64_LeadingSignCount:
+                    // TODO-Casts: specify more precise ranges once "IntegralRange" supports them.
+                    return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::ByteMax};
 #else
 #error Unsupported platform
 #endif
-                    // TODO-Casts: specify more precise ranges once "IntegralRange" supports them.
-                    return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::ByteMax};
                 default:
                     break;
             }
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 4d1a2fd36f43fd..915ad3b2671e99 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -4289,7 +4289,7 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree)
     // Do the xchg
     emitAttr size = EA_PTRSIZE;
 
-    if (varTypeGCtype(type1) != varTypeGCtype(type2))
+    if (varTypeIsGC(type1) != varTypeIsGC(type2))
     {
         // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
         // Otherwise it will leave them alone, which is correct if they have the same GC-ness.
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index a8c0c4d73ef1dd..80606216d76bdb 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -3664,7 +3664,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
          *  have to "swap" the registers in the GC reg pointer mask
          */
 
-        if (varTypeGCtype(varDscSrc->TypeGet()) != varTypeGCtype(varDscDest->TypeGet()))
+        if (varTypeIsGC(varDscSrc) != varTypeIsGC(varDscDest))
        {
             size = EA_GCREF;
         }
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 3fa633913fd4bd..8438fc07424bf0 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -5654,7 +5654,7 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree)
     // Do the xchg
     emitAttr size = EA_PTRSIZE;
 
-    if (varTypeGCtype(type1) != varTypeGCtype(type2))
+    if (varTypeIsGC(type1) != varTypeIsGC(type2))
     {
         // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
         // Otherwise it will leave them alone, which is correct if they have the same GC-ness.
@@ -6884,7 +6884,7 @@ void CodeGen::genCompareInt(GenTree* treeNode)
     // The common type cannot be smaller than any of the operand types, we're probably mixing int/long
     assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
     // Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
-    assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
+    assert(!(varTypeIsSmall(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
     // If op1 is smaller then it cannot be in memory, we're probably missing a cast
     assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
     // If op2 is smaller then it cannot be in memory, we're probably missing a cast
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 0f2d6ec1c1a00e..e4908c41c94e23 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -5668,6 +5668,7 @@ class Compiler
     void fgDebugCheckLoopTable();
     void fgDebugCheckSsa();
 
+    void fgDebugCheckTypes(GenTree* tree);
     void fgDebugCheckFlags(GenTree* tree, BasicBlock* block);
     void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags);
     void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags);
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index b5e51a247bb4c6..be280349b7cb0b 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -3239,6 +3239,71 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
     }
 }
 
+//------------------------------------------------------------------------
+// fgDebugCheckTypes: Validate node types used in the given tree
+//
+// Arguments:
+//    tree - the tree to (recursively) check types for
+//
+void Compiler::fgDebugCheckTypes(GenTree* tree)
+{
+    struct NodeTypeValidator : GenTreeVisitor<NodeTypeValidator>
+    {
+        enum
+        {
+            DoPostOrder = true,
+        };
+
+        NodeTypeValidator(Compiler* comp) : GenTreeVisitor(comp)
+        {
+        }
+
+        fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) const
+        {
+            GenTree* node = *use;
+
+            // Validate types of nodes in the IR:
+            //
+            // * TYP_ULONG and TYP_UINT are not legal.
+            // * Small types are only legal for the following nodes:
+            //    * All kinds of indirections including GT_NULLCHECK
+            //    * All kinds of locals
+            //    * GT_COMMA wrapped around any of the above.
+            //
+            if (node->TypeIs(TYP_ULONG, TYP_UINT))
+            {
+                m_compiler->gtDispTree(node);
+                assert(!"TYP_ULONG and TYP_UINT are not legal in IR");
+            }
+
+            if (varTypeIsSmall(node))
+            {
+                if (node->OperIs(GT_COMMA))
+                {
+                    // TODO: a small-typed GT_COMMA is only allowed if its underlying effective node is also small.
+                    return WALK_CONTINUE;
+                }
+
+                if (node->OperIsIndir() || node->OperIs(GT_NULLCHECK) || node->IsPhiNode() || node->IsAnyLocal())
+                {
+                    return WALK_CONTINUE;
+                }
+
+                m_compiler->gtDispTree(node);
+                assert(!"Unexpected small type in IR");
+            }
+
+            // TODO: validate types in GT_CAST nodes.
+            // Validate mismatched types in binop's arguments, etc.
+            //
+            return WALK_CONTINUE;
+        }
+    };
+
+    NodeTypeValidator walker(this);
+    walker.WalkTree(&tree, nullptr);
+}
+
 //------------------------------------------------------------------------
 // fgDebugCheckFlags: Validate various invariants related to the propagation
 //    and setting of tree, block, and method flags
@@ -3846,6 +3911,7 @@ void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees)
         }
 
         fgDebugCheckFlags(stmt->GetRootNode(), block);
+        fgDebugCheckTypes(stmt->GetRootNode());
 
         // Not only will this stress fgMorphBlockStmt(), but we also get all the checks
         // done by fgMorphTree()
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index ff0091719fa279..7eecaa9b7e2a08 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -21736,7 +21736,7 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode(
     genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
 {
     assert(IsBaselineSimdIsaSupportedDebugOnly());
-    assert(type == TYP_UBYTE);
+    assert(type == TYP_INT);
 
     var_types simdType = getSIMDTypeForSize(simdSize);
     assert(varTypeIsSIMD(simdType));
@@ -21875,7 +21875,7 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode(
     genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
 {
     assert(IsBaselineSimdIsaSupportedDebugOnly());
-    assert(type == TYP_UBYTE);
+    assert(type == TYP_INT);
 
     var_types simdType = getSIMDTypeForSize(simdSize);
     assert(varTypeIsSIMD(simdType));
@@ -23861,7 +23861,7 @@ GenTree* Compiler::gtNewSimdShuffleNode(
 #if defined(TARGET_XARCH)
     uint8_t control = 0;
     bool crossLane = false;
-    bool needsZero = varTypeIsSmallInt(simdBaseType) && (simdSize <= 16);
+    bool needsZero = varTypeIsSmall(simdBaseType) && (simdSize <= 16);
     uint64_t value = 0;
     simd_t vecCns = {};
     simd_t mskCns = {};
diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp
index 06803b3a76894c..872cdd4dec9854 100644
--- a/src/coreclr/jit/hwintrinsic.cpp
+++ b/src/coreclr/jit/hwintrinsic.cpp
@@ -1060,7 +1060,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
     HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
     CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic);
 
     int numArgs = sig->numArgs;
-    var_types retType = JITtype2varType(sig->retType);
+    var_types retType = genActualType(JITtype2varType(sig->retType));
     CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
     GenTree* retNode = nullptr;
diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp
index beda87e8034459..97a23f8ef2c35c 100644
--- a/src/coreclr/jit/hwintrinsicarm64.cpp
+++ b/src/coreclr/jit/hwintrinsicarm64.cpp
@@ -1073,7 +1073,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
 
             op1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize);
             op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8);
-            op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
+            op1 = gtNewSimdHWIntrinsicNode(genActualType(simdBaseType), op1, NI_Vector64_ToScalar, simdBaseJitType,
+                                           8);
             op1 = gtNewCastNode(TYP_INT, op1, /* isUnsigned */ true, TYP_INT);
 
             GenTree* zero = gtNewZeroConNode(TYP_SIMD16);
@@ -1081,7 +1082,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
 
             op2 = gtNewSimdGetUpperNode(TYP_SIMD8, op2, simdBaseJitType, simdSize);
             op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8);
-            op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8);
+            op2 = gtNewSimdHWIntrinsicNode(genActualType(simdBaseType), op2, NI_Vector64_ToScalar, simdBaseJitType,
+                                           8);
             op2 = gtNewCastNode(TYP_INT, op2, /* isUnsigned */ true, TYP_INT);
             op2 = gtNewOperNode(GT_LSH, TYP_INT, op2, gtNewIconNode(8));
 
@@ -1110,7 +1112,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
                                            simdSize);
             }
 
-            retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
+            retNode = gtNewSimdHWIntrinsicNode(genActualType(simdBaseType), op1, NI_Vector64_ToScalar,
+                                               simdBaseJitType, 8);
 
             if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT))
             {
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index e2cb1ceaead47c..c175496f344c64 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -1817,8 +1817,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
             if ((simdSize != 32) || varTypeIsFloating(simdBaseType) ||
                 compOpportunisticallyDependsOn(InstructionSet_AVX2))
             {
-                var_types simdType = getSIMDTypeForSize(simdSize);
-
                 op2 = impSIMDPopStack();
                 op1 = impSIMDPopStack();
 
@@ -1835,8 +1833,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
 
             if ((simdSize != 32) || compOpportunisticallyDependsOn(InstructionSet_AVX2))
             {
-                var_types simdType = getSIMDTypeForSize(simdSize);
-
                 op2 = impSIMDPopStack();
                 op1 = impSIMDPopStack();
 
@@ -1854,8 +1850,6 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
 
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                var_types simdType = getSIMDTypeForSize(simdSize);
-
                 op1 = impSIMDPopStack();
                 op1 = gtNewSimdHWIntrinsicNode(TYP_MASK, op1, NI_AVX512F_ConvertVectorToMask, simdBaseJitType,
                                                simdSize);
@@ -2728,7 +2722,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
         {
             assert(simdSize == 16);
 
-            if (varTypeIsSmallInt(simdBaseType) && !compOpportunisticallyDependsOn(InstructionSet_SSSE3))
+            if (varTypeIsSmall(simdBaseType) && !compOpportunisticallyDependsOn(InstructionSet_SSSE3))
             {
                 // TYP_BYTE, TYP_UBYTE, TYP_SHORT, and TYP_USHORT need SSSE3 to be able to shuffle any operation
                 break;
diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp
index 89d67112521007..01ee4916d4eef2 100644
--- a/src/coreclr/jit/importervectorization.cpp
+++ b/src/coreclr/jit/importervectorization.cpp
@@ -200,7 +200,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSIMD(
     // Optimization: use a single load when byteLen equals simdSize.
     // For code simplicity we always create nodes for two vectors case.
     const bool useSingleVector = simdSize == byteLen;
-    return gtNewSimdCmpOpAllNode(GT_EQ, TYP_UBYTE, useSingleVector ? xor1 : orr, gtNewZeroConNode(simdType), baseType,
+    return gtNewSimdCmpOpAllNode(GT_EQ, TYP_INT, useSingleVector ? xor1 : orr, gtNewZeroConNode(simdType), baseType,
                                  simdSize);
 
     // Codegen example for byteLen=40 and OrdinalIgnoreCase mode with AVX:
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 39e2d09aeb767e..ff5f351fd46e45 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -2042,7 +2042,7 @@ bool Lowering::LowerCallMemcmp(GenTreeCall* call, GenTree** next)
                 if (GenTree::OperIsCmpCompare(oper))
                 {
                     assert(type == TYP_INT);
-                    return comp->gtNewSimdCmpOpAllNode(oper, TYP_UBYTE, op1, op2, CORINFO_TYPE_NATIVEUINT,
+                    return comp->gtNewSimdCmpOpAllNode(oper, TYP_INT, op1, op2, CORINFO_TYPE_NATIVEUINT,
                                                        genTypeSize(op1));
                 }
                 return comp->gtNewSimdBinOpNode(oper, op1->TypeGet(), op1, op2, CORINFO_TYPE_NATIVEUINT,
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 4facf33a15d9a8..719c1422b0910e 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -1273,7 +1273,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm
     assert(varTypeIsSIMD(simdType));
     assert(varTypeIsArithmetic(simdBaseType));
     assert(simdSize != 0);
-    assert(node->gtType == TYP_UBYTE);
+    assert(node->TypeIs(TYP_INT));
     assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));
 
     // We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality):
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index 954f795c1f7553..002fc2e6ac8538 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -1790,7 +1790,7 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm
     assert(varTypeIsSIMD(simdType));
     assert(varTypeIsArithmetic(simdBaseType));
     assert(simdSize != 0);
-    assert(node->gtType == TYP_UBYTE);
+    assert(node->TypeIs(TYP_INT));
     assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));
 
     // We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality):
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 306971171052b2..b6208d53ecbbf1 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -2852,7 +2852,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo
         {
             ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue();
             ssize_t v2 = otherTreeNode->AsIntCon()->IconValue();
-            if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0))
+            if ((v1 == v2) && (varTypeIsGC(refPosition->treeNode) == varTypeIsGC(otherTreeNode) || v1 == 0))
             {
 #ifdef TARGET_64BIT
                 // If the constant is negative, only reuse registers of the same type.
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index 090ea30d5379c9..76f01ae535277b 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -185,7 +185,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()
 
     genTreeOps foldOp;
     genTreeOps cmpOp;
-    var_types foldType = m_c1->TypeGet();
+    var_types foldType = genActualType(m_c1);
     if (varTypeIsGC(foldType))
     {
         foldType = TYP_I_IMPL;
@@ -1412,7 +1412,7 @@ bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3)
 
     // Get the fold operator (m_foldOp, e.g., GT_OR/GT_AND) and
     // the comparison operator (m_cmpOp, e.g., GT_EQ/GT_NE/GT_GE/GT_LT)
-    var_types foldType = m_c1->TypeGet();
+    var_types foldType = genActualType(m_c1->TypeGet());
     if (varTypeIsGC(foldType))
     {
         foldType = TYP_I_IMPL;
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index 89720f6c7748b6..6a6397d57aaaf8 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -1481,7 +1481,7 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTree* expr, bool monIncreas
             JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
         }
     }
-    else if (varTypeIsSmallInt(expr->TypeGet()))
+    else if (varTypeIsSmall(expr))
     {
         range = GetRangeFromType(expr->TypeGet());
         JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp
index f7c69b3a1a3856..faeaada9d4b989 100644
--- a/src/coreclr/jit/simdashwintrinsic.cpp
+++ b/src/coreclr/jit/simdashwintrinsic.cpp
@@ -274,7 +274,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
     }
 
     CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE;
-    var_types retType = JITtype2varType(sig->retType);
+    var_types retType = genActualType(JITtype2varType(sig->retType));
     CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
     var_types simdType = TYP_UNKNOWN;
     unsigned simdSize = 0;
diff --git a/src/coreclr/jit/vartype.h b/src/coreclr/jit/vartype.h
index 65a4c50de78865..27dd5b3329574f 100644
--- a/src/coreclr/jit/vartype.h
+++ b/src/coreclr/jit/vartype.h
@@ -176,16 +176,10 @@ inline bool varTypeIsArithmetic(T vt)
     return ((varTypeClassification[TypeGet(vt)] & (VTF_INT | VTF_FLT)) != 0);
 }
 
-template <class T>
-inline unsigned varTypeGCtype(T vt)
-{
-    return (unsigned)(varTypeClassification[TypeGet(vt)] & (VTF_GCR | VTF_BYR));
-}
-
 template <class T>
 inline bool varTypeIsGC(T vt)
 {
-    return (varTypeGCtype(vt) != 0);
+    return ((varTypeClassification[TypeGet(vt)] & (VTF_GCR | VTF_BYR)) != 0);
 }
 
 template <class T>
@@ -218,12 +212,6 @@ inline bool varTypeIsSmall(T vt)
     return (TypeGet(vt) >= TYP_BYTE) && (TypeGet(vt) <= TYP_USHORT);
 }
 
-template <class T>
-inline bool varTypeIsSmallInt(T vt)
-{
-    return (TypeGet(vt) >= TYP_BYTE) && (TypeGet(vt) <= TYP_USHORT);
-}
-
 template <class T>
 inline bool varTypeIsIntOrI(T vt)
 {
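
Note: the type rule this patch enforces in fgDebugCheckTypes can be illustrated in isolation. The sketch below is a minimal, self-contained toy model, not the real compiler API: the enum, node struct, and helpers mirror JIT names (var_types, genActualType, GenTree) but are stand-ins invented for this example.

// Toy model of the fgDebugCheckTypes rule (assumed names; not the real JIT API).
#include <cassert>
#include <cstdio>

enum var_types
{
    TYP_BYTE,
    TYP_UBYTE,
    TYP_SHORT,
    TYP_USHORT,
    TYP_INT,
    TYP_UINT, // never legal as a node type
    TYP_LONG,
    TYP_ULONG // never legal as a node type
};

enum gen_oper
{
    OPER_IND,     // indirection (load); GT_NULLCHECK would also qualify
    OPER_LCL_VAR, // local variable
    OPER_EQ,      // comparison
    OPER_ADD      // arithmetic
};

static bool varTypeIsSmall(var_types t)
{
    return (t >= TYP_BYTE) && (t <= TYP_USHORT);
}

// Mirrors genActualType(): small and unsigned-int types widen to TYP_INT/TYP_LONG.
static var_types genActualType(var_types t)
{
    if (varTypeIsSmall(t) || (t == TYP_UINT))
        return TYP_INT;
    if (t == TYP_ULONG)
        return TYP_LONG;
    return t;
}

struct Node // stand-in for GenTree
{
    gen_oper  oper;
    var_types type;
};

// The post-order check, restated: no TYP_UINT/TYP_ULONG anywhere, and small
// types only on indirections and locals (the patch also permits GT_COMMA and phis).
static void checkNodeType(const Node& node)
{
    assert((node.type != TYP_ULONG) && (node.type != TYP_UINT));
    if (varTypeIsSmall(node.type))
    {
        assert((node.oper == OPER_IND) || (node.oper == OPER_LCL_VAR));
    }
}

int main()
{
    Node load = {OPER_IND, TYP_UBYTE};               // legal: small-typed load
    Node cmp  = {OPER_EQ, genActualType(TYP_UBYTE)}; // comparisons carry TYP_INT, never TYP_UBYTE
    checkNodeType(load);
    checkNodeType(cmp);
    printf("cmp type is TYP_INT: %s\n", (cmp.type == TYP_INT) ? "yes" : "no");
    return 0;
}

This is also why gtNewSimdCmpOpAllNode/gtNewSimdCmpOpAnyNode now assert TYP_INT instead of TYP_UBYTE, and why importers call genActualType on signature return types: comparison and call results are value nodes, which carry the widened "actual" type, leaving small types to memory accesses and locals.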