diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index f61bd9251f6ffc..2c1bd87df5171c 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -1618,11 +1618,11 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, { noway_assert(op2->gtOper == GT_CNS_DBL); /* If we have an NaN value then don't record it */ - if (_isnan(op2->AsDblCon()->gtDconVal)) + if (_isnan(op2->AsDblCon()->DconValue())) { goto DONE_ASSERTION; // Don't make an assertion } - assertion.op2.dconVal = op2->AsDblCon()->gtDconVal; + assertion.op2.dconVal = op2->AsDblCon()->DconValue(); } // diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 5a1fb18ce59fab..6ab5c260243931 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -258,7 +258,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre case GT_CNS_DBL: { GenTreeDblCon* dblConst = tree->AsDblCon(); - double constValue = dblConst->AsDblCon()->gtDconVal; + double constValue = dblConst->AsDblCon()->DconValue(); // TODO-ARM-CQ: Do we have a faster/smaller way to generate 0.0 in thumb2 ISA ? 
if (targetType == TYP_FLOAT) { diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 40d6e7cbbadbdf..63d8e0a2910667 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -2334,7 +2334,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre { emitter* emit = GetEmitter(); emitAttr size = emitActualTypeSize(tree); - double constValue = tree->AsDblCon()->gtDconVal; + double constValue = tree->AsDblCon()->DconValue(); // Make sure we use "movi reg, 0x00" only for positive zero (0.0) and not for negative zero (-0.0) if (*(__int64*)&constValue == 0) diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 21a3755f8a2ac1..0a67cddb0eeab7 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1800,7 +1800,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre { emitter* emit = GetEmitter(); emitAttr size = emitActualTypeSize(tree); - double constValue = tree->AsDblCon()->gtDconVal; + double constValue = tree->AsDblCon()->DconValue(); // Make sure we use "daddiu reg, zero, 0x00" only for positive zero (0.0) // and not for negative zero (-0.0) diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 1f7b777f852887..b16d2d221a76ea 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -505,7 +505,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre } else { - double cns = tree->AsDblCon()->gtDconVal; + double cns = tree->AsDblCon()->DconValue(); CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(cns, size); emit->emitIns_R_C(ins_Load(targetType), size, targetReg, hnd, 0); @@ -7507,7 +7507,7 @@ void CodeGen::genSSE41RoundOp(GenTreeOp* treeNode) case GT_CNS_DBL: { GenTreeDblCon* dblConst = srcNode->AsDblCon(); - CORINFO_FIELD_HANDLE hnd = 
emit->emitFltOrDblConst(dblConst->gtDconVal, emitTypeSize(dblConst)); + CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(dblConst->DconValue(), emitTypeSize(dblConst)); emit->emitIns_R_C_I(ins, size, dstReg, hnd, 0, ival); return; diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index d8ba65821e7b1e..d50a82ec64b8d2 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -1617,7 +1617,7 @@ void GenTree::BashToConst(T value, var_types type /* = TYP_UNDEF */) case GT_CNS_DBL: assert(varTypeIsFloating(type)); - AsDblCon()->gtDconVal = static_cast<T>(value); + AsDblCon()->SetDconValue(static_cast<T>(value)); break; default: diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index cbba666f80eb7b..20a700e6913408 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -3874,7 +3874,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G assert(src->IsCnsFltOrDbl()); GenTreeDblCon* dblCns = src->AsDblCon(); - CORINFO_FIELD_HANDLE hnd = emitFltOrDblConst(dblCns->gtDconVal, emitTypeSize(dblCns)); + CORINFO_FIELD_HANDLE hnd = emitFltOrDblConst(dblCns->DconValue(), emitTypeSize(dblCns)); emitIns_R_C(ins, attr, dst->GetRegNum(), hnd, 0); } } diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 73445b88d223da..a036932bccb8ac 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -389,7 +389,7 @@ void Compiler::fgDumpTree(FILE* fgxFile, GenTree* const tree) } else if (tree->IsCnsFltOrDbl()) { - fprintf(fgxFile, "%g", tree->AsDblCon()->gtDconVal); + fprintf(fgxFile, "%g", tree->AsDblCon()->DconValue()); } else if (tree->IsLocal()) { diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 6e5b0db78d7d53..0becde0a8002e5 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -2401,7 +2401,7 @@ bool GenTree::Compare(GenTree* op1, GenTree* op2, bool
swapOK) break; case GT_CNS_DBL: - if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal) + if (op1->AsDblCon()->DconValue() == op2->AsDblCon()->DconValue()) return true; break; #endif @@ -2841,13 +2841,16 @@ unsigned Compiler::gtHashValue(GenTree* tree) #endif break; case GT_CNS_DBL: - bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal); + { + double dcon = tree->AsDblCon()->DconValue(); + memcpy(&bits, &dcon, sizeof(dcon)); #ifdef HOST_64BIT add = bits; #else // 32-bit host add = genTreeHashAdd(uhi32(bits), ulo32(bits)); #endif break; + } case GT_CNS_STR: add = tree->AsStrCon()->gtSconCPX; break; @@ -4616,7 +4619,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costSz = 2 + 8; } #elif defined(TARGET_ARM64) - if (tree->IsFloatPositiveZero() || emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal)) + if (tree->IsFloatPositiveZero() || emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->DconValue())) { // Zero and certain other immediates can be specially created with a single instruction // These can be cheaply reconstituted but still take up 4-bytes of native codegen @@ -8149,7 +8152,7 @@ GenTree* Compiler::gtClone(GenTree* tree, bool complexOK) case GT_CNS_DBL: { - copy = gtNewDconNode(tree->AsDblCon()->gtDconVal, tree->TypeGet()); + copy = gtNewDconNode(tree->AsDblCon()->DconValue(), tree->TypeGet()); break; } @@ -8325,7 +8328,7 @@ GenTree* Compiler::gtCloneExpr( case GT_CNS_DBL: { - copy = gtNewDconNode(tree->AsDblCon()->gtDconVal, tree->TypeGet()); + copy = gtNewDconNode(tree->AsDblCon()->DconValue(), tree->TypeGet()); goto DONE; } @@ -11227,15 +11230,25 @@ void Compiler::gtDispConst(GenTree* tree) break; case GT_CNS_DBL: - if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000)) + { + double dcon = tree->AsDblCon()->DconValue(); + if (FloatingPointUtils::isNegativeZero(dcon)) { printf(" -0.00000"); } + else if (FloatingPointUtils::isNaN(dcon)) + { + uint64_t bits; + static_assert_no_msg(sizeof(bits) == 
sizeof(dcon)); + memcpy(&bits, &dcon, sizeof(dcon)); + printf(" %#.17g(0x%llx)\n", dcon, bits); + } else { - printf(" %#.17g", tree->AsDblCon()->gtDconVal); + printf(" %#.17g", dcon); } break; + } case GT_CNS_STR: printf("<string constant>"); @@ -14388,7 +14401,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) // Fold constant DOUBLE unary operator. - d1 = op1->AsDblCon()->gtDconVal; + d1 = op1->AsDblCon()->DconValue(); switch (tree->OperGet()) { @@ -15024,11 +15037,11 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) } assert(op1->OperIs(GT_CNS_DBL)); - d1 = op1->AsDblCon()->gtDconVal; + d1 = op1->AsDblCon()->DconValue(); assert(varTypeIsFloating(op2->TypeGet())); assert(op2->OperIs(GT_CNS_DBL)); - d2 = op2->AsDblCon()->gtDconVal; + d2 = op2->AsDblCon()->DconValue(); // Special case - check if we have NaN operands. // For comparisons if not an unordered operation always return 0. @@ -17199,7 +17212,7 @@ bool GenTreeVecCon::HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, simd { if (arg->IsCnsFltOrDbl()) { - simd32Val.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal); + simd32Val.f32[argIdx] = static_cast<float>(arg->AsDblCon()->DconValue()); return true; } else @@ -17215,7 +17228,7 @@ bool GenTreeVecCon::HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, simd { if (arg->IsCnsFltOrDbl()) { - simd32Val.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal); + simd32Val.f64[argIdx] = static_cast<double>(arg->AsDblCon()->DconValue()); return true; } else diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 25aaf4541c7aaf..205644138c2168 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -3306,8 +3306,20 @@ inline void GenTreeIntConCommon::SetValueTruncating(T value) struct GenTreeDblCon : public GenTree { +private: double gtDconVal; +public: + double DconValue() const + { + return gtDconVal; + } + + void SetDconValue(double value) + { + gtDconVal = FloatingPointUtils::normalize(value); + } + + bool isBitwiseEqual(GenTreeDblCon* other) {
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal); @@ -3315,9 +3327,10 @@ struct GenTreeDblCon : public GenTree return (bits == otherBits); } - GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val) + GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type) { assert(varTypeIsFloating(type)); + SetDconValue(val); } #if DEBUGGABLE_GENTREE GenTreeDblCon() : GenTree() @@ -8446,7 +8459,7 @@ inline bool GenTree::IsFloatAllBitsSet() const { if (IsCnsFltOrDbl()) { - double constValue = AsDblCon()->gtDconVal; + double constValue = AsDblCon()->DconValue(); if (TypeIs(TYP_FLOAT)) { @@ -8473,7 +8486,7 @@ inline bool GenTree::IsFloatNaN() const { if (IsCnsFltOrDbl()) { - double constValue = AsDblCon()->gtDconVal; + double constValue = AsDblCon()->DconValue(); return FloatingPointUtils::isNaN(constValue); } @@ -8491,7 +8504,7 @@ inline bool GenTree::IsFloatNegativeZero() const { if (IsCnsFltOrDbl()) { - double constValue = AsDblCon()->gtDconVal; + double constValue = AsDblCon()->DconValue(); return FloatingPointUtils::isNegativeZero(constValue); } @@ -8512,7 +8525,7 @@ inline bool GenTree::IsFloatPositiveZero() const // This implementation is almost identical to IsCnsNonZeroFltOrDbl // but it is easier to parse out // rather than using !IsCnsNonZeroFltOrDbl.
- double constValue = AsDblCon()->gtDconVal; + double constValue = AsDblCon()->DconValue(); return FloatingPointUtils::isPositiveZero(constValue); } @@ -9300,7 +9313,7 @@ inline bool GenTree::IsCnsNonZeroFltOrDbl() const { if (IsCnsFltOrDbl()) { - double constValue = AsDblCon()->gtDconVal; + double constValue = AsDblCon()->DconValue(); return *(__int64*)&constValue != 0; } diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index aea858b4a2f47c..38d32e1e00f841 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -689,7 +689,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, for (uint32_t index = 0; index < sig->numArgs; index++) { - cnsVal = static_cast<float>(impPopStack().val->AsDblCon()->gtDconVal); + cnsVal = static_cast<float>(impPopStack().val->AsDblCon()->DconValue()); vecCon->gtSimd16Val.f32[simdLength - 1 - index] = cnsVal; } @@ -709,7 +709,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, for (uint32_t index = 0; index < sig->numArgs; index++) { - cnsVal = static_cast<double>(impPopStack().val->AsDblCon()->gtDconVal); + cnsVal = static_cast<double>(impPopStack().val->AsDblCon()->DconValue()); vecCon->gtSimd16Val.f64[simdLength - 1 - index] = cnsVal; } diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp index add3a61d122236..b52bdf0d94df44 100644 --- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp @@ -613,7 +613,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) assert(intrin.op2->isContainedIntOrIImmed()); assert(intrin.op2->AsIntCon()->gtIconVal == 0); - const double dataValue = intrin.op3->AsDblCon()->gtDconVal; + const double dataValue = intrin.op3->AsDblCon()->DconValue(); GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt); } else @@ -736,7 +736,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) if
(intrin.op1->isContainedFltOrDblImmed()) { // fmov reg, #imm8 - const double dataValue = intrin.op1->AsDblCon()->gtDconVal; + const double dataValue = intrin.op1->AsDblCon()->DconValue(); GetEmitter()->emitIns_R_F(ins, emitTypeSize(intrin.baseType), targetReg, dataValue, INS_OPTS_NONE); } else if (varTypeIsFloating(intrin.baseType)) @@ -799,7 +799,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) { if (intrin.op1->isContainedFltOrDblImmed()) { - const double dataValue = intrin.op1->AsDblCon()->gtDconVal; + const double dataValue = intrin.op1->AsDblCon()->DconValue(); GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt); } else if (intrin.id == NI_AdvSimd_Arm64_DuplicateToVector64) diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index f8e8a38695ba04..ec4dd2ff037ecc 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -1040,7 +1040,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, for (uint32_t index = 0; index < sig->numArgs; index++) { - cnsVal = static_cast<float>(impPopStack().val->AsDblCon()->gtDconVal); + cnsVal = static_cast<float>(impPopStack().val->AsDblCon()->DconValue()); vecCon->gtSimd32Val.f32[simdLength - 1 - index] = cnsVal; } @@ -1060,7 +1060,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, for (uint32_t index = 0; index < sig->numArgs; index++) { - cnsVal = static_cast<double>(impPopStack().val->AsDblCon()->gtDconVal); + cnsVal = static_cast<double>(impPopStack().val->AsDblCon()->DconValue()); vecCon->gtSimd32Val.f64[simdLength - 1 - index] = cnsVal; } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 78336fed4a3e9d..3989a753d3438c 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4432,14 +4432,14 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, if (ni == NI_System_Math_Max) { - cnsNode->gtDconVal = - FloatingPointUtils::maximum(cnsNode->gtDconVal,
otherNode->AsDblCon()->gtDconVal); + cnsNode->SetDconValue( + FloatingPointUtils::maximum(cnsNode->DconValue(), otherNode->AsDblCon()->DconValue())); } else { assert(ni == NI_System_Math_Min); - cnsNode->gtDconVal = - FloatingPointUtils::minimum(cnsNode->gtDconVal, otherNode->AsDblCon()->gtDconVal); + cnsNode->SetDconValue( + FloatingPointUtils::minimum(cnsNode->DconValue(), otherNode->AsDblCon()->DconValue())); } retNode = cnsNode; @@ -4530,11 +4530,11 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, if (callJitType == CORINFO_TYPE_FLOAT) { - vecCon->gtSimd16Val.f32[0] = (float)op1->AsDblCon()->gtDconVal; + vecCon->gtSimd16Val.f32[0] = (float)op1->AsDblCon()->DconValue(); } else { - vecCon->gtSimd16Val.f64[0] = op1->AsDblCon()->gtDconVal; + vecCon->gtSimd16Val.f64[0] = op1->AsDblCon()->DconValue(); } op1 = vecCon; @@ -4848,7 +4848,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, { impPopStack(); - double f64Cns = op1->AsDblCon()->gtDconVal; + double f64Cns = op1->AsDblCon()->DconValue(); retNode = gtNewLconNode(*reinterpret_cast<int64_t*>(&f64Cns)); } #if TARGET_64BIT @@ -4915,7 +4915,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, if (op1->IsCnsFltOrDbl()) { - float f32Cns = (float)op1->AsDblCon()->gtDconVal; + float f32Cns = (float)op1->AsDblCon()->DconValue(); retNode = gtNewIconNode(*reinterpret_cast<int32_t*>(&f32Cns)); } else diff --git a/src/coreclr/jit/instr.cpp b/src/coreclr/jit/instr.cpp index 572a556c0858d6..d8bb92d89fdf7c 100644 --- a/src/coreclr/jit/instr.cpp +++ b/src/coreclr/jit/instr.cpp @@ -745,7 +745,7 @@ CodeGen::OperandDesc CodeGen::genOperandDesc(GenTree* op) break; case GT_CNS_DBL: - return OperandDesc(emit->emitFltOrDblConst(op->AsDblCon()->gtDconVal, emitTypeSize(op))); + return OperandDesc(emit->emitFltOrDblConst(op->AsDblCon()->DconValue(), emitTypeSize(op))); case GT_CNS_INT: assert(op->isContainedIntOrIImmed()); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 3af37e2c002c38..2939899c32fce4 100644 ---
a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -7111,7 +7111,7 @@ void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind) { // Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller. GenTree* data = ind->Data(); - double dblCns = data->AsDblCon()->gtDconVal; + double dblCns = data->AsDblCon()->DconValue(); ssize_t intCns = 0; var_types type = TYP_UNKNOWN; // XARCH: we can always contain the immediates. @@ -7413,7 +7413,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode) if (arg->IsCnsFltOrDbl()) { noway_assert(constArgCount < ArrLen(constArgValues)); - constArgValues[constArgCount] = static_cast<float>(arg->AsDblCon()->gtDconVal); + constArgValues[constArgCount] = static_cast<float>(arg->AsDblCon()->DconValue()); constArgCount++; } } diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp index 76cba89ae81eaa..28d3ee90595e90 100644 --- a/src/coreclr/jit/lowerarmarch.cpp +++ b/src/coreclr/jit/lowerarmarch.cpp @@ -1095,7 +1095,7 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node) assert(varTypeIsFloating(node->GetSimdBaseType())); assert(castOp == nullptr); - const double dataValue = op1->AsDblCon()->gtDconVal; + const double dataValue = op1->AsDblCon()->DconValue(); return comp->GetEmitter()->emitIns_valid_imm_for_fmov(dataValue); } @@ -2451,7 +2451,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) { assert(varTypeIsFloating(intrin.baseType)); - const double dataValue = intrin.op3->AsDblCon()->gtDconVal; + const double dataValue = intrin.op3->AsDblCon()->DconValue(); if (comp->GetEmitter()->emitIns_valid_imm_for_fmov(dataValue)) { diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp index b2c815bb1e396e..1d6ad58b67ce2c 100644 --- a/src/coreclr/jit/lsraarm64.cpp +++ b/src/coreclr/jit/lsraarm64.cpp @@ -144,7 +144,7 @@ int LinearScan::BuildNode(GenTree* tree) case GT_CNS_DBL: { GenTreeDblCon* dblConst = tree->AsDblCon(); - double constValue =
dblConst->AsDblCon()->gtDconVal; + double constValue = dblConst->AsDblCon()->DconValue(); if (emitter::emitIns_valid_imm_for_fmov(constValue)) { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 6da47320d59a46..1d007ab9be7f35 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -9966,13 +9966,13 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA // so multiplication by the reciprocal is safe in this scenario if (fgGlobalMorph && op2->IsCnsFltOrDbl()) { - double divisor = op2->AsDblCon()->gtDconVal; + double divisor = op2->AsDblCon()->DconValue(); if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) || ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor)))) { oper = GT_MUL; tree->ChangeOper(oper); - op2->AsDblCon()->gtDconVal = 1.0 / divisor; + op2->AsDblCon()->SetDconValue(1.0 / divisor); } } @@ -12612,7 +12612,7 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl()) { - double multiplierValue = op2->AsDblCon()->gtDconVal; + double multiplierValue = op2->AsDblCon()->DconValue(); if (multiplierValue == 1.0) { diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index 3898d338d138ae..e1e3e2595311e7 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -2446,7 +2446,6 @@ double FloatingPointUtils::minimum(double val1, double val2) // Return Value: // Either val1 or val2 // - float FloatingPointUtils::minimum(float val1, float val2) { if (val1 != val2 && !isNaN(val1)) @@ -2456,6 +2455,42 @@ float FloatingPointUtils::minimum(float val1, float val2) return isNegative(val1) ? val1 : val2; } +//------------------------------------------------------------------------ +// normalize: Normalize a floating point value. +// +// Arguments: +// value - the value +// +// Return Value: +// Normalized value. 
+// +// Remarks: +// This is a no-op on all host platforms but x86. On x86 floats are returned on +// the x87 stack. Since `fld` will automatically quiet signalling NaNs this +// means that it is very easy for a float to nondeterministically change bit +// representation if it is a snan, depending on whether a function that +// returns the value is inlined or not by the C++ compiler. To get around the +// nondeterminism we quiet the NaNs ahead of time as a best-effort fix. +// +double FloatingPointUtils::normalize(double value) +{ +#ifdef HOST_X86 + if (!isNaN(value)) + { + return value; + } + + uint64_t bits; + static_assert_no_msg(sizeof(bits) == sizeof(value)); + memcpy(&bits, &value, sizeof(value)); + bits |= 1ull << 51; + memcpy(&value, &bits, sizeof(bits)); + return value; +#else + return value; +#endif +} + namespace MagicDivide { template diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index 8ae84d0d8858c2..9dbce41679cc6b 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -726,6 +726,8 @@ class FloatingPointUtils static double minimum(double val1, double val2); static float minimum(float val1, float val2); + + static double normalize(double x); }; // The CLR requires that critical section locks be initialized via its ClrCreateCriticalSection API...but diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 84da05d0dede36..9b6238101fab22 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -8145,13 +8145,13 @@ void Compiler::fgValueNumberTreeConst(GenTree* tree) case TYP_FLOAT: { - tree->gtVNPair.SetBoth(vnStore->VNForFloatCon((float)tree->AsDblCon()->gtDconVal)); + tree->gtVNPair.SetBoth(vnStore->VNForFloatCon((float)tree->AsDblCon()->DconValue())); break; } case TYP_DOUBLE: { - tree->gtVNPair.SetBoth(vnStore->VNForDoubleCon(tree->AsDblCon()->gtDconVal)); + tree->gtVNPair.SetBoth(vnStore->VNForDoubleCon(tree->AsDblCon()->DconValue())); break; }