From 2367f406723a50238df83eaf35a0d4cb532bcbf8 Mon Sep 17 00:00:00 2001
From: Khushal Modi
Date: Thu, 7 Mar 2024 22:02:00 -0800
Subject: [PATCH] Prevent new codepaths for X86

---
 src/coreclr/jit/compiler.h            |  4 +--
 src/coreclr/jit/gentree.cpp           | 37 +++++++++++++------------
 src/coreclr/jit/hwintrinsicxarch.cpp  | 34 ++++++++++++++---------
 src/coreclr/jit/simdashwintrinsic.cpp | 40 +++++++++++++++++----------
 4 files changed, 69 insertions(+), 46 deletions(-)

diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index aebb563ca3c8d8..066a3f15769071 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -3172,7 +3172,7 @@ class Compiler
                               GenTree*    op3,
                               CorInfoType simdBaseJitType,
                               unsigned    simdSize);
-
+#if defined(TARGET_AMD64)
     GenTree* gtNewSimdCvtNode(var_types      type,
                               GenTree*       op1,
                               NamedIntrinsic hwIntrinsicID,
@@ -3184,7 +3184,7 @@ class Compiler
                                 var_types   sourceType,
                                 var_types   targetType,
                                 unsigned    simdSize);
-
+#endif //TARGET_AMD64
     GenTree* gtNewSimdCreateBroadcastNode(
         var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
 
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 0a3a8e89a8c4af..ef46aae3ca161b 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -21251,9 +21251,8 @@ GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType s
     return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
 }
 
-#if defined(TARGET_XARCH)
-GenTreeVecCon* Compiler::gtCvtCtrlTbl
-    (var_types type, var_types sourceType, var_types targetType, unsigned simdSize)
+#if defined(TARGET_AMD64)
+GenTreeVecCon* Compiler::gtCvtCtrlTbl(var_types type, var_types sourceType, var_types targetType, unsigned simdSize)
 {
     assert(IsBaselineSimdIsaSupportedDebugOnly());
     assert(IsBaselineVector512IsaSupportedDebugOnly());
@@ -21262,7 +21261,6 @@ GenTreeVecCon* Compiler::gtCvtCtrlTbl
     assert(varTypeIsSIMD(type));
     assert(getSIMDTypeForSize(simdSize) == type);
 
-
     GenTreeVecCon* tbl = gtNewVconNode(type);
 
     switch (sourceType)
@@ -21277,7 +21275,7 @@ GenTreeVecCon* Compiler::gtCvtCtrlTbl
                         tbl->gtSimdVal.i64[i] = 0x08080088;
                     }
                     break;
-
+
                 case TYP_INT:
                 case TYP_LONG:
                     for (int i = 0; i < 8; i++)
                     {
@@ -21285,12 +21283,12 @@ GenTreeVecCon* Compiler::gtCvtCtrlTbl
                        tbl->gtSimdVal.i64[i] = 0x00000088;
                     }
                     break;
-
+
                 default:
                     unreached();
             }
             break;
-
+
         case TYP_FLOAT:
             switch (targetType)
             {
@@ -21301,7 +21299,7 @@ GenTreeVecCon* Compiler::gtCvtCtrlTbl
                        tbl->gtSimdVal.i32[i] = 0x08080088;
                     }
                     break;
-
+
                 case TYP_INT:
                 case TYP_LONG:
                     for (int i = 0; i < 16; i++)
                     {
@@ -21309,20 +21307,24 @@ GenTreeVecCon* Compiler::gtCvtCtrlTbl
                        tbl->gtSimdVal.i32[i] = 0x00000088;
                     }
                     break;
-
+
                 default:
                     unreached();
             }
             break;
-
+
         default:
             unreached();
     }
     return tbl;
 }
 
-GenTree* Compiler::gtNewSimdCvtNode
-    (var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdTargetBaseJitType, CorInfoType simdSourceBaseJitType, unsigned simdSize)
+GenTree* Compiler::gtNewSimdCvtNode(var_types      type,
+                                    GenTree*       op1,
+                                    NamedIntrinsic hwIntrinsicID,
+                                    CorInfoType    simdTargetBaseJitType,
+                                    CorInfoType    simdSourceBaseJitType,
+                                    unsigned       simdSize)
 {
     assert(IsBaselineSimdIsaSupportedDebugOnly());
     assert(IsBaselineVector512IsaSupportedDebugOnly());
@@ -21343,8 +21345,8 @@ GenTree* Compiler::gtNewSimdCvtNode
     GenTree* op1Clone = fgMakeMultiUse(&op1);
 
     // run vfixupimmsd base on table and no flags reporting
-    GenTree* fixupVal = gtNewSimdHWIntrinsicNode(type, op1, op1Clone, tbl, gtNewIconNode(0),
-                                                 NI_AVX512F_Fixup, simdSourceBaseJitType, simdSize);
+    GenTree* fixupVal = gtNewSimdHWIntrinsicNode(type, op1, op1Clone, tbl, gtNewIconNode(0), NI_AVX512F_Fixup,
+                                                 simdSourceBaseJitType, simdSize);
 
     if (varTypeIsSigned(simdTargetBaseType))
     {
@@ -21352,8 +21354,9 @@ GenTree* Compiler::gtNewSimdCvtNode
 
         GenTree* maxVal = gtNewDconNode(static_cast<double>(actualMaxVal), simdSourceBaseType);
 
-        maxVal = gtNewSimdCreateBroadcastNode(type, maxVal, simdSourceBaseJitType, simdSize);
-        GenTree* maxValDup = gtNewSimdCreateBroadcastNode(type, gtNewIconNode(actualMaxVal, simdTargetBaseType), simdTargetBaseJitType, simdSize);
+        maxVal             = gtNewSimdCreateBroadcastNode(type, maxVal, simdSourceBaseJitType, simdSize);
+        GenTree* maxValDup = gtNewSimdCreateBroadcastNode(type, gtNewIconNode(actualMaxVal, simdTargetBaseType),
+                                                          simdTargetBaseJitType, simdSize);
 
         // we will be using the input value twice
         GenTree* fixupValDup = fgMakeMultiUse(&fixupVal);
@@ -21372,7 +21375,7 @@ GenTree* Compiler::gtNewSimdCvtNode
         return gtNewSimdHWIntrinsicNode(type, fixupVal, hwIntrinsicID, simdSourceBaseJitType, simdSize);
     }
 }
-#endif //TARGET_XARCH
+#endif // TARGET_AMD64
 
 GenTree* Compiler::gtNewSimdCmpOpNode(
     genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index 3b9236ec8906eb..7c782aa5e3131a 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -1446,16 +1446,18 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_DOUBLE);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
                 op1       = impSIMDPopStack();
                 intrinsic = (simdSize == 16) ? NI_AVX512DQ_VL_ConvertToVector128Int64WithTruncation
-                            : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256Int64WithTruncation
-                            : NI_AVX512DQ_ConvertToVector512Int64WithTruncation;
+                            : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256Int64WithTruncation
+                                               : NI_AVX512DQ_ConvertToVector512Int64WithTruncation;
 
                 retNode = gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_LONG, simdBaseJitType, simdSize);
             }
+#endif
 
             break;
         }
@@ -1465,15 +1467,17 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_FLOAT);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                op1 = impSIMDPopStack();
+                op1       = impSIMDPopStack();
                 intrinsic = (simdSize == 16) ? NI_AVX512F_VL_ConvertToVector128UInt32WithTruncation
-                            : (simdSize == 32) ? NI_AVX512F_VL_ConvertToVector256UInt32WithTruncation
-                            : NI_AVX512F_ConvertToVector512UInt32WithTruncation;
+                            : (simdSize == 32) ? NI_AVX512F_VL_ConvertToVector256UInt32WithTruncation
+                                               : NI_AVX512F_ConvertToVector512UInt32WithTruncation;
                 retNode = gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_UINT, simdBaseJitType, simdSize);
             }
+#endif
 
             break;
         }
@@ -1483,15 +1487,17 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_DOUBLE);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                op1 = impSIMDPopStack();
+                op1       = impSIMDPopStack();
                 intrinsic = (simdSize == 16) ? NI_AVX512DQ_VL_ConvertToVector128UInt64WithTruncation
-                            : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256UInt64WithTruncation
-                            : NI_AVX512DQ_ConvertToVector512UInt64WithTruncation;
+                            : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256UInt64WithTruncation
+                                               : NI_AVX512DQ_ConvertToVector512UInt64WithTruncation;
                 retNode = gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_ULONG, simdBaseJitType, simdSize);
             }
+#endif
 
             break;
         }
@@ -1501,15 +1507,17 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_FLOAT);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                op1 = impSIMDPopStack();
+                op1       = impSIMDPopStack();
                 intrinsic = (simdSize == 16) ? NI_SSE2_ConvertToVector128Int32WithTruncation
-                            : (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation
-                            : NI_AVX512F_ConvertToVector512Int32WithTruncation;
+                            : (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation
+                                               : NI_AVX512F_ConvertToVector512Int32WithTruncation;
                 retNode = gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_INT, simdBaseJitType, simdSize);
             }
+#endif
 
             break;
         }
@@ -1538,8 +1546,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
             else if (simdBaseType == TYP_UINT && IsBaselineVector512IsaSupportedOpportunistically())
             {
                 intrinsic = (simdSize == 16) ? NI_AVX512F_VL_ConvertToVector128Single
-                            : (simdSize == 32) ? NI_AVX512F_VL_ConvertToVector256Single
-                            : NI_AVX512F_ConvertToVector512Single;
+                            : (simdSize == 32) ? NI_AVX512F_VL_ConvertToVector256Single
+                                               : NI_AVX512F_ConvertToVector512Single;
             }
             if (intrinsic != NI_Illegal)
             {
diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp
index 7cdad951692623..8c07d6aab0b06f 100644
--- a/src/coreclr/jit/simdashwintrinsic.cpp
+++ b/src/coreclr/jit/simdashwintrinsic.cpp
@@ -532,12 +532,12 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
         case NI_VectorT_ConvertToUInt32:
         case NI_VectorT_ConvertToUInt64:
         {
-#ifdef TARGET_XARCH
+#ifdef TARGET_AMD64
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
                 break;
             }
-#endif // TARGET_XARCH
+#endif // TARGET_AMD64
             return nullptr;
         }
 
@@ -1181,14 +1181,17 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_DOUBLE);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                NamedIntrinsic intrinsic = (simdSize == 16) ? NI_AVX512DQ_VL_ConvertToVector128Int64WithTruncation
-                                           : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256Int64WithTruncation
-                                           : NI_AVX512DQ_ConvertToVector512Int64WithTruncation;
+                NamedIntrinsic intrinsic =
+                    (simdSize == 16)   ? NI_AVX512DQ_VL_ConvertToVector128Int64WithTruncation
+                    : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256Int64WithTruncation
+                                       : NI_AVX512DQ_ConvertToVector512Int64WithTruncation;
                 return gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_LONG, simdBaseJitType, simdSize);
             }
+#endif
 
             return nullptr;
         }
@@ -1196,14 +1199,17 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_FLOAT);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                NamedIntrinsic intrinsic = (simdSize == 16) ? NI_AVX512F_VL_ConvertToVector128UInt32WithTruncation
-                                           : (simdSize == 32) ? NI_AVX512F_VL_ConvertToVector256UInt32WithTruncation
-                                           : NI_AVX512F_ConvertToVector512UInt32WithTruncation;
+                NamedIntrinsic intrinsic =
+                    (simdSize == 16)   ? NI_AVX512F_VL_ConvertToVector128UInt32WithTruncation
+                    : (simdSize == 32) ? NI_AVX512F_VL_ConvertToVector256UInt32WithTruncation
+                                       : NI_AVX512F_ConvertToVector512UInt32WithTruncation;
                 return gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_UINT, simdBaseJitType, simdSize);
             }
+#endif
 
             return nullptr;
         }
@@ -1211,28 +1217,34 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
         {
             assert(sig->numArgs == 1);
             assert(simdBaseType == TYP_DOUBLE);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
             {
-                NamedIntrinsic intrinsic = (simdSize == 16) ? NI_AVX512DQ_VL_ConvertToVector128UInt64WithTruncation
-                                           : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256UInt64WithTruncation
-                                           : NI_AVX512DQ_ConvertToVector512UInt64WithTruncation;
+                NamedIntrinsic intrinsic =
+                    (simdSize == 16)   ? NI_AVX512DQ_VL_ConvertToVector128UInt64WithTruncation
+                    : (simdSize == 32) ? NI_AVX512DQ_VL_ConvertToVector256UInt64WithTruncation
+                                       : NI_AVX512DQ_ConvertToVector512UInt64WithTruncation;
                 return gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_ULONG, simdBaseJitType, simdSize);
             }
+#endif
 
             return nullptr;
         }
 
         case NI_VectorT_ConvertToInt32:
         {
             assert(simdBaseType == TYP_FLOAT);
 
+#if defined(TARGET_AMD64)
             if (IsBaselineVector512IsaSupportedOpportunistically())
            {
-                NamedIntrinsic intrinsic = (simdSize == 16) ? NI_SSE2_ConvertToVector128Int32WithTruncation
-                                           : (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation
-                                           : NI_AVX512F_ConvertToVector512Int32WithTruncation;
+                NamedIntrinsic intrinsic =
+                    (simdSize == 16)   ? NI_SSE2_ConvertToVector128Int32WithTruncation
+                    : (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation
+                                       : NI_AVX512F_ConvertToVector512Int32WithTruncation;
                 return gtNewSimdCvtNode(retType, op1, intrinsic, CORINFO_TYPE_INT, simdBaseJitType, simdSize);
            }
+#endif
            return nullptr;
        }
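
Reviewer note on the guarded path: going by the comments in gtNewSimdCvtNode, the emitted sequence is a VFIXUPIMM using the table from gtCvtCtrlTbl (the 0x...88 entries send quiet and signaling NaN to +0.0), and for signed targets it additionally broadcasts the target's maximum value and, as far as I can tell from the surrounding code, selects that maximum wherever the fixed-up input reaches it, so positive overflow saturates instead of producing the x86 "integer indefinite" pattern. The scalar sketch below is only my reading of the intended double-to-int64 behaviour; ConvertToInt64Saturating and the std::numeric_limits plumbing are illustrative stand-ins, not helpers from this change.

#include <cmath>
#include <cstdint>
#include <limits>

// Illustrative scalar model (assumption, not part of the patch) of the
// saturating conversion the fixup + max-compare + blend sequence targets.
static int64_t ConvertToInt64Saturating(double value)
{
    // The fixup table maps NaN inputs to +0.0 before the convert runs.
    if (std::isnan(value))
    {
        return 0;
    }

    // The signed path clamps values at or above the target maximum.
    if (value >= static_cast<double>(std::numeric_limits<int64_t>::max()))
    {
        return std::numeric_limits<int64_t>::max();
    }

    // Values at or below the minimum saturate to the minimum, which the
    // truncating AVX-512 convert already produces for that range.
    if (value <= static_cast<double>(std::numeric_limits<int64_t>::min()))
    {
        return std::numeric_limits<int64_t>::min();
    }

    // Everything in range truncates toward zero.
    return static_cast<int64_t>(value);
}

For unsigned targets the else branch returns the converted fixup value directly; as I read it, the extra table entries zero out negative inputs and the unsigned truncating convert already yields the saturated all-ones result on positive overflow, so no compare/blend is needed there.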