From 90cb15fdbea2fa70c2bd68d8b286409acb6d0c7f Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 29 Apr 2022 19:54:05 +0300 Subject: [PATCH 01/17] Work around FIELD turning into IND(struct) We must know precise load sizes for numbering purposes. --- src/coreclr/jit/morph.cpp | 7 ++++--- src/coreclr/jit/morphblock.cpp | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 65b50de28f1108..9e8647c08d4ab0 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -9872,10 +9872,8 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne } if (needsIndirection) { - if (indirTree != nullptr) + if ((indirTree != nullptr) && (indirTree->OperIsBlk() || !isBlkReqd)) { - // If we have an indirection and a block is required, it should already be a block. - assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else @@ -9894,11 +9892,14 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } + + gtUpdateNodeSideEffects(newTree); } else { newTree = gtNewIndir(asgType, addr); } + effectiveVal = newTree; } } diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index bfbc35a8fdcd07..ad1b6bd149efb4 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -926,12 +926,13 @@ void MorphCopyBlockHelper::MorphStructCases() m_dst = m_comp->fgMorphBlockOperand(m_dst, asgType, m_blockSize, isBlkReqd); m_dst->gtFlags |= GTF_DONT_CSE; m_asg->gtOp1 = m_dst; - m_asg->gtFlags |= (m_dst->gtFlags & GTF_ALL_EFFECT); - // Eliminate the "OBJ or BLK" node on the src. 
- m_src = m_comp->fgMorphBlockOperand(m_src, asgType, m_blockSize, false /*!isBlkReqd*/); + m_src = m_comp->fgMorphBlockOperand(m_src, asgType, m_blockSize, isBlkReqd); m_asg->gtOp2 = m_src; + m_asg->SetAllEffectsFlags(m_dst, m_src); + m_asg->gtFlags |= GTF_ASG; + m_result = m_asg; m_transformationDecision = BlockTransformation::StructBlock; } From 2f7141acebc8ad0fc802b4d2fe43d4feac92b838 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Wed, 20 Apr 2022 14:25:48 +0300 Subject: [PATCH 02/17] Delete "ROH" It was breaking the "precise maps only have memory types" invariant. --- src/coreclr/jit/valuenum.cpp | 26 ++++++++------------------ src/coreclr/jit/valuenum.h | 16 +++------------- src/coreclr/jit/valuenumfuncs.h | 5 +++-- 3 files changed, 14 insertions(+), 33 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 5152acdaa9b128..a9cf8c8a30b663 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -6531,9 +6531,8 @@ static const char* s_reservedNameArr[] = { "$VN.Recursive", // -2 RecursiveVN "$VN.No", // -1 NoVN "$VN.Null", // 0 VNForNull() - "$VN.ReadOnlyHeap", // 1 VNForROH() - "$VN.Void", // 2 VNForVoid() - "$VN.EmptyExcSet" // 3 VNForEmptyExcSet() + "$VN.Void", // 1 VNForVoid() + "$VN.EmptyExcSet" // 2 VNForEmptyExcSet() }; // Returns the string name of "vn" when it is a reserved value number, nullptr otherwise @@ -8597,7 +8596,6 @@ void Compiler::fgValueNumberTree(GenTree* tree) vnStore->VNPUnpackExc(addr->gtVNPair, &addrNvnp, &addrXvnp); // Is the dereference immutable? If so, model it as referencing the read-only heap. - // TODO-VNTypes: this code needs to encode the types of the indirections. 
if (tree->gtFlags & GTF_IND_INVARIANT) { assert(!isVolatile); // We don't expect both volatile and invariant @@ -8633,21 +8631,13 @@ void Compiler::fgValueNumberTree(GenTree* tree) vnStore->VNForFunc(tree->TypeGet(), VNF_PtrToStatic, boxAddrVN, fieldSeqVN); tree->gtVNPair = ValueNumPair(staticAddrVN, staticAddrVN); } - // Is this invariant indirect expected to always return a non-null value? - // TODO-VNTypes: non-null indirects should only be used for TYP_REFs. - else if ((tree->gtFlags & GTF_IND_NONNULL) != 0) + else // TODO-VNTypes: this code needs to encode the types of the indirections. { - assert(tree->gtFlags & GTF_IND_NONFAULTING); - tree->gtVNPair = vnStore->VNPairForFunc(tree->TypeGet(), VNF_NonNullIndirect, addrNvnp); - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); - } - else - { - tree->gtVNPair = - ValueNumPair(vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, ValueNumStore::VNForROH(), - addrNvnp.GetLiberal()), - vnStore->VNForMapSelect(VNK_Conservative, TYP_REF, ValueNumStore::VNForROH(), - addrNvnp.GetConservative())); + // Is this invariant indirect expected to always return a non-null value? + VNFunc loadFunc = + ((tree->gtFlags & GTF_IND_NONNULL) != 0) ? VNF_InvariantNonNullLoad : VNF_InvariantLoad; + + tree->gtVNPair = vnStore->VNPairForFunc(tree->TypeGet(), loadFunc, addrNvnp); tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } } diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 71a1556d74c38c..864c40bb81d86d 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -421,25 +421,16 @@ class ValueNumStore // And the single constant for an object reference type. static ValueNum VNForNull() { - // We reserve Chunk 0 for "special" VNs. SRC_Null (== 0) is the VN of "null". return ValueNum(SRC_Null); } - // The ROH map is the map for the "read-only heap". We assume that this is never mutated, and always - // has the same value number. 
- static ValueNum VNForROH() - { - // We reserve Chunk 0 for "special" VNs. Let SRC_ReadOnlyHeap (== 3) be the read-only heap. - return ValueNum(SRC_ReadOnlyHeap); - } - // A special value number for "void" -- sometimes a type-void thing is an argument, // and we want the args to be non-NoVN. static ValueNum VNForVoid() { - // We reserve Chunk 0 for "special" VNs. Let SRC_Void (== 4) be the value for "void". return ValueNum(SRC_Void); } + static ValueNumPair VNPForVoid() { return ValueNumPair(VNForVoid(), VNForVoid()); @@ -448,10 +439,9 @@ class ValueNumStore // A special value number for the empty set of exceptions. static ValueNum VNForEmptyExcSet() { - // We reserve Chunk 0 for "special" VNs. Let SRC_EmptyExcSet (== 5) be the value for the empty set of - // exceptions. return ValueNum(SRC_EmptyExcSet); } + static ValueNumPair VNPForEmptyExcSet() { return ValueNumPair(VNForEmptyExcSet(), VNForEmptyExcSet()); @@ -1405,10 +1395,10 @@ class ValueNumStore return m_VNFunc4Map; } + // We reserve Chunk 0 for "special" VNs. enum SpecialRefConsts { SRC_Null, - SRC_ReadOnlyHeap, SRC_Void, SRC_EmptyExcSet, diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 32f17c685e137f..082dd09810d982 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -139,8 +139,9 @@ ValueNumFuncDef(JitReadyToRunNewArr, 3, false, true, false) ValueNumFuncDef(Box, 3, false, false, false) ValueNumFuncDef(BoxNullable, 3, false, false, false) -ValueNumFuncDef(LazyStrCns, 2, false, true, false) // lazy-initialized string literal (helper) -ValueNumFuncDef(NonNullIndirect, 1, false, true, false) // this indirect is expected to always return a non-null value +ValueNumFuncDef(LazyStrCns, 2, false, true, false) // Lazy-initialized string literal (helper) +ValueNumFuncDef(InvariantLoad, 1, false, false, false) // Args: 0: (VN of) the address. +ValueNumFuncDef(InvariantNonNullLoad, 1, false, true, false) // Args: 0: (VN of) the address. 
ValueNumFuncDef(Unbox, 2, false, true, false) ValueNumFuncDef(LT_UN, 2, false, false, false) // unsigned or unordered comparisons From 4d45eccd0c82cf616582b0c7f727229248e25494 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Mon, 18 Apr 2022 19:23:21 +0300 Subject: [PATCH 03/17] Physical VN -- locals --- src/coreclr/jit/compiler.h | 7 + src/coreclr/jit/gentree.cpp | 68 ++- src/coreclr/jit/gentree.h | 13 +- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/valuenum.cpp | 935 +++++++++++++++++++------------- src/coreclr/jit/valuenum.h | 45 ++ src/coreclr/jit/valuenumfuncs.h | 7 +- 7 files changed, 681 insertions(+), 396 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 26801adca120df..708102589bd316 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4730,6 +4730,13 @@ class Compiler // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); + void fgValueNumberLocalStore(GenTree* storeNode, + GenTreeLclVarCommon* lclDefNode, + unsigned storeSize, + ssize_t storeOffset, + ValueNumPair value, + bool normalize = true); + unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. 
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index d4d76c70734ae7..8d658ff0773362 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -15997,7 +15997,7 @@ bool GenTree::IsPartialLclFld(Compiler* comp) (comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType))); } -bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire) +bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire, ssize_t* pOffset) { GenTreeBlk* blkNode = nullptr; if (OperIs(GT_ASG)) @@ -16017,12 +16017,16 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo *pIsEntire = true; } } + if (pOffset != nullptr) + { + *pOffset = AsOp()->gtOp1->AsLclVarCommon()->GetLclOffs(); + } return true; } else if (AsOp()->gtOp1->OperGet() == GT_IND) { GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1; - return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire); + return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire, pOffset); } else if (AsOp()->gtOp1->OperIsBlk()) { @@ -16038,7 +16042,7 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo } unsigned size = comp->typGetObjLayout(AsCall()->gtRetClsHnd)->GetSize(); - return retBufArg->DefinesLocalAddr(comp, size, pLclVarTree, pIsEntire); + return retBufArg->DefinesLocalAddr(comp, size, pLclVarTree, pIsEntire, pOffset); } else if (OperIsBlk()) { @@ -16064,14 +16068,17 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo } } - return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire); + return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset); } // Otherwise... return false; } -// Returns true if this GenTree defines a result which is based on the address of a local. 
-bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire) +bool GenTree::DefinesLocalAddr(Compiler* comp, + unsigned width, + GenTreeLclVarCommon** pLclVarTree, + bool* pIsEntire, + ssize_t* pOffset) { if (OperIs(GT_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR)) { @@ -16085,10 +16092,10 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm { GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon(); *pLclVarTree = addrArgLcl; + unsigned lclOffset = addrArgLcl->GetLclOffs(); + if (pIsEntire != nullptr) { - unsigned lclOffset = addrArgLcl->GetLclOffs(); - if (lclOffset != 0) { // We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false @@ -16107,29 +16114,45 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm *pIsEntire = (varWidth == width); } } + + if (pOffset != nullptr) + { + *pOffset += lclOffset; + } + return true; } else if (addrArg->OperGet() == GT_IND) { // A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND - return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire); + return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset); } } else if (OperGet() == GT_ADD) { if (AsOp()->gtOp1->IsCnsIntOrI()) { + if (pOffset != nullptr) + { + *pOffset += AsOp()->gtOp1->AsIntCon()->IconValue(); + } + // If we just adding a zero then we allow an IsEntire match against width // otherwise we change width to zero to disallow an IsEntire Match return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? 
width : 0, pLclVarTree, - pIsEntire); + pIsEntire, pOffset); } else if (AsOp()->gtOp2->IsCnsIntOrI()) { + if (pOffset != nullptr) + { + *pOffset += AsOp()->gtOp2->AsIntCon()->IconValue(); + } + // If we just adding a zero then we allow an IsEntire match against width // otherwise we change width to zero to disallow an IsEntire Match return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree, - pIsEntire); + pIsEntire, pOffset); } } // Post rationalization we could have GT_IND(GT_LEA(..)) trees. @@ -16145,20 +16168,20 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm GenTree* index = AsOp()->gtOp2; if (index != nullptr) { - assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire)); + assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset)); } #endif // DEBUG // base - GenTree* base = AsOp()->gtOp1; + GenTree* base = AsAddrMode()->Base(); if (base != nullptr) { - // Lea could have an Indir as its base. - if (base->OperGet() == GT_IND) + if (pOffset != nullptr) { - base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true); + *pOffset += AsAddrMode()->Offset(); } - return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire); + + return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset); } } // Otherwise... @@ -16604,6 +16627,12 @@ ssize_t GenTreeIndir::Offset() } } +unsigned GenTreeIndir::Size() const +{ + assert(isIndir() || OperIsBlk()); + return OperIsBlk() ? AsBlk()->Size() : genTypeSize(TypeGet()); +} + //------------------------------------------------------------------------ // GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value needs recording a relocation with the VM? 
// @@ -22816,6 +22845,11 @@ unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, Ge } #endif // TARGET_XARCH && FEATURE_HW_INTRINSICS +unsigned GenTreeLclFld::GetSize() const +{ + return genTypeSize(TypeGet()); +} + #ifdef TARGET_ARM //------------------------------------------------------------------------ // IsOffsetMisaligned: check if the field needs a special handling on arm. diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 7f11f4f26680ac..6724fef7644edc 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -1900,7 +1900,8 @@ struct GenTree // tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to // true or false, depending on whether the assignment writes to the entirety of the local // variable, or just a portion of it. - bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr); + bool DefinesLocal( + Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr, ssize_t* pOffset = nullptr); bool IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, @@ -1939,7 +1940,11 @@ struct GenTree // operation. Returns "true" if "this" is an address of (or within) // a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null, // sets "*pIsEntire" to true if this assignment writes the full width of the local. - bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire); + bool DefinesLocalAddr(Compiler* comp, + unsigned width, + GenTreeLclVarCommon** pLclVarTree, + bool* pIsEntire, + ssize_t* pOffset = nullptr); // These are only used for dumping. 
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily @@ -3520,6 +3525,8 @@ struct GenTreeLclFld : public GenTreeLclVarCommon m_fieldSeq = fieldSeq; } + unsigned GetSize() const; + #ifdef TARGET_ARM bool IsOffsetMisaligned() const; #endif // TARGET_ARM @@ -6522,6 +6529,8 @@ struct GenTreeIndir : public GenTreeOp unsigned Scale(); ssize_t Offset(); + unsigned Size() const; + GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data) { } diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 1e5926e1f6fd0a..24fac09a013b92 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -3309,7 +3309,7 @@ void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HAND // // Returns: // Number of bytes needed on the frame for such a local. - +// unsigned Compiler::lvaLclSize(unsigned varNum) { assert(varNum < lvaCount); diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index a9cf8c8a30b663..29723b1f24e4df 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -2299,6 +2299,18 @@ ValueNum ValueNumStore::VNForMapStore(ValueNum map, ValueNum index, ValueNum val return result; } +ValueNum ValueNumStore::VNForMapPhysicalStore(ValueNum map, unsigned offset, unsigned size, ValueNum value) +{ + ValueNum selector = EncodePhysicalSelector(offset, size); + ValueNum result = VNForFunc(TypeOfVN(map), VNF_MapPhysicalStore, map, selector, value); + + JITDUMP(" VNForMapPhysicalStore:%s returns ", VNMapTypeName(TypeOfVN(result))); + JITDUMPEXEC(m_pComp->vnPrint(result, 1)); + JITDUMP("\n"); + + return result; +} + //------------------------------------------------------------------------------ // VNForMapSelect : Evaluate VNF_MapSelect with the given arguments. 
// @@ -2316,7 +2328,7 @@ ValueNum ValueNumStore::VNForMapStore(ValueNum map, ValueNum index, ValueNum val // This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number // (liberal/conservative) to read from the SSA def referenced in the phi argument. - +// ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNum map, ValueNum index) { int budget = m_mapSelectBudget; @@ -2326,14 +2338,29 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNu // The remaining budget should always be between [0..m_mapSelectBudget] assert((budget >= 0) && (budget <= m_mapSelectBudget)); -#ifdef DEBUG - if (m_pComp->verbose) - { - printf(" VNForMapSelect(" FMT_VN ", " FMT_VN "):%s returns ", map, index, VNMapTypeName(type)); - m_pComp->vnPrint(result, 1); - printf("\n"); - } -#endif + JITDUMP(" VNForMapSelect(" FMT_VN ", " FMT_VN "):%s returns ", map, index, VNMapTypeName(type)); + JITDUMPEXEC(m_pComp->vnPrint(result, 1)); + JITDUMP("\n"); + + return result; +} + +ValueNum ValueNumStore::VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size) +{ + ValueNum selector = EncodePhysicalSelector(offset, size); + int budget = m_mapSelectBudget; + bool usedRecursiveVN = false; + ValueNum result = VNForMapSelectWork(vnk, type, map, selector, &budget, &usedRecursiveVN); + + // The remaining budget should always be between [0..m_mapSelectBudget] + assert((budget >= 0) && (budget <= m_mapSelectBudget)); + + JITDUMP(" VNForMapPhysicalSelect(" FMT_VN ", ", map); + JITDUMPEXEC(vnDumpPhysicalSelector(selector)); + JITDUMP("):%s returns ", VNMapTypeName(type)); + JITDUMPEXEC(m_pComp->vnPrint(result, 1)); + JITDUMP("\n"); + return result; } @@ -2357,14 +2384,19 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNu // 
This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number // (liberal/conservative) to read from the SSA def referenced in the phi argument. - +// ValueNum ValueNumStore::VNForMapSelectWork( ValueNumKind vnk, var_types type, ValueNum map, ValueNum index, int* pBudget, bool* pUsedRecursiveVN) { +#ifdef DEBUG + bool mapIsPrecise = false; + bool mapIsPhysical = false; +#endif // DEBUG + TailCall: // This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here. assert(map != NoVN && index != NoVN); - assert(map == VNNormalValue(map)); // Arguments carry no exceptions. + assert(map == VNNormalValue(map)); // Arguments carry no exceptions. assert(index == VNNormalValue(index)); // Arguments carry no exceptions. *pUsedRecursiveVN = false; @@ -2415,6 +2447,9 @@ ValueNum ValueNumStore::VNForMapSelectWork( { if (funcApp.m_func == VNF_MapStore) { + assert(!mapIsPhysical); + INDEBUG(mapIsPrecise = true); + // select(store(m, i, v), i) == v if (funcApp.m_args[1] == index) { @@ -2445,6 +2480,74 @@ ValueNum ValueNumStore::VNForMapSelectWork( goto TailCall; } } + else if (funcApp.m_func == VNF_MapPhysicalStore) + { + assert(!mapIsPrecise); + INDEBUG(mapIsPhysical = true); + +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" select("); + JITDUMPEXEC(m_pComp->vnPrint(map, 1)); + JITDUMP(", "); + JITDUMPEXEC(vnDumpPhysicalSelector(index)); + JITDUMP(")"); +#endif + ValueNum storeSelector = funcApp.m_args[1]; + + if (index == storeSelector) + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> " FMT_VN "\n", funcApp.m_args[2]); +#endif + return funcApp.m_args[2]; + } + + unsigned selectSize; + unsigned selectOffset = DecodePhysicalSelector(index, &selectSize); + + unsigned storeSize; + unsigned storeOffset = DecodePhysicalSelector(storeSelector, &storeSize); + + unsigned 
selectEndOffset = selectOffset + selectSize; // Exclusive. + unsigned storeEndOffset = storeOffset + storeSize; // Exclusive. + + if ((storeOffset <= selectOffset) && (selectEndOffset <= storeEndOffset)) + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> enclosing, selecting inner, remaining budget is %d\n", *pBudget); +#endif + map = funcApp.m_args[2]; + index = EncodePhysicalSelector(selectOffset - storeOffset, selectSize); + goto TailCall; + } + + // If it was disjoint with the location being selected, continue the linear search. + if ((storeEndOffset <= selectOffset) || (selectEndOffset <= storeOffset)) + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> disjoint, remaining budget is %d\n", *pBudget); +#endif + map = funcApp.m_args[0]; + goto TailCall; + } + else + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> aliasing!\n"); +#endif + } + } + else if (funcApp.m_func == VNF_ZeroObj) + { + // ZeroObj maps are always physical. + assert(!mapIsPrecise); + + // TODO-CQ: support selection of TYP_STRUCT here. + if (type != TYP_STRUCT) + { + return VNZeroForType(type); + } + } else if (funcApp.m_func == VNF_PhiDef || funcApp.m_func == VNF_PhiMemoryDef) { unsigned lclNum = BAD_VAR_NUM; @@ -2529,6 +2632,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( bool usedRecursiveVN = false; ValueNum curResult = VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, &usedRecursiveVN); + *pUsedRecursiveVN |= usedRecursiveVN; if (sameSelResult == ValueNumStore::RecursiveVN) { @@ -2564,22 +2668,6 @@ ValueNum ValueNumStore::VNForMapSelectWork( m_fixedPointMapSels.Pop(); } } - else if (funcApp.m_func == VNF_ZeroObj) - { - // For structs, we need to extract the handle from the selector. - if (type == TYP_STRUCT) - { - // We only expect field selectors here. 
- assert(GetHandleFlags(index) == GTF_ICON_FIELD_HDL); - CORINFO_FIELD_HANDLE fieldHnd = CORINFO_FIELD_HANDLE(ConstantValue(index)); - CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; - m_pComp->eeGetFieldType(fieldHnd, &structHnd); - - return VNForZeroObj(structHnd); - } - - return VNZeroForType(type); - } } // We may have run out of budget and already assigned a result @@ -2600,6 +2688,23 @@ ValueNum ValueNumStore::VNForMapSelectWork( } } +ValueNum ValueNumStore::EncodePhysicalSelector(unsigned offset, unsigned size) +{ + assert(size != 0); + + return VNForLongCon(static_cast(offset) | (static_cast(size) << 32)); +} + +unsigned ValueNumStore::DecodePhysicalSelector(ValueNum selector, unsigned* pSize) +{ + uint64_t value = ConstantValue(selector); + unsigned offset = static_cast(value); + unsigned size = static_cast(value >> 32); + + *pSize = size; + return offset; +} + //------------------------------------------------------------------------ // VNForFieldSelector: A specialized version (with logging) of VNForHandle // that is used for field handle selectors. 
@@ -4206,6 +4311,142 @@ ValueNumPair ValueNumStore::VNPairApplySelectors(ValueNumPair map, FieldSeqNode* return ValueNumPair(liberalVN, conservVN); } +ValueNum ValueNumStore::VNForLoad(ValueNumKind vnk, + ValueNum locationValue, + unsigned locationSize, + var_types loadType, + ssize_t offset, + unsigned loadSize) +{ + assert((loadSize > 0)); + + unsigned loadOffset = static_cast(offset); + + if ((offset < 0) || (locationSize < (loadOffset + loadSize))) + { + JITDUMP(" *** VNForLoad: out-of-bounds load!\n"); + return VNForExpr(m_pComp->compCurBB, loadType); + } + + JITDUMP(" VNForLoad:\n"); + ValueNum value = VNForMapPhysicalSelect(vnk, loadType, locationValue, loadOffset, loadSize); + value = VNForLoadStoreBitcast(vnk, value, loadType, loadSize); + + return value; +} + +ValueNumPair ValueNumStore::VNPairForLoad( + ValueNumPair locationValue, unsigned locationSize, var_types loadType, ssize_t offset, unsigned loadSize) +{ + ValueNum liberalVN = VNForLoad(VNK_Liberal, locationValue.GetLiberal(), locationSize, loadType, offset, loadSize); + ValueNum conservVN = VNForLoad(VNK_Conservative, locationValue.GetConservative(), locationSize, loadType, offset, + loadSize); + + return ValueNumPair(liberalVN, conservVN); +} + +ValueNum ValueNumStore::VNForStore(ValueNum locationValue, + unsigned locationSize, + ssize_t offset, + unsigned storeSize, + ValueNum value) +{ + assert(storeSize > 0); + + unsigned storeOffset = static_cast(offset); + + if ((offset < 0) || (locationSize < (storeOffset + storeSize))) + { + JITDUMP(" *** VNForStore: out-of-bounds store!\n"); + // Some callers will need to invalidate parenting maps, so force explicit + // handling of this case instead of returning a "new, unique" VN. + return NoVN; + } + + // Note how we handle identity (whole/entire) stores via selection here. + // This is to allow the caller to apply its own policy in case of type + // mismatches between the underlying location and value being stored. 
+ // Currently, this policy is to normalize for locals and leave all other + // locations (possibly) denormalized. + // + JITDUMP(" VNForStore:\n"); + return VNForMapPhysicalStore(locationValue, storeOffset, storeSize, value); +} + +ValueNumPair ValueNumStore::VNPairForStore(ValueNumPair locationValue, + unsigned locationSize, + ssize_t offset, + unsigned storeSize, + ValueNumPair value) +{ + ValueNum liberalVN = VNForStore(locationValue.GetLiberal(), locationSize, offset, storeSize, value.GetLiberal()); + ValueNum conservVN; + if (locationValue.BothEqual() && value.BothEqual()) + { + conservVN = liberalVN; + } + else + { + conservVN = VNForStore(locationValue.GetConservative(), locationSize, offset, storeSize, + value.GetConservative()); + } + + return ValueNumPair(liberalVN, conservVN); +} + +ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNumKind vnk, ValueNum value, var_types indType, unsigned indSize) +{ + var_types typeOfValue = TypeOfVN(value); + + if (typeOfValue != indType) + { + if (indType == TYP_STRUCT) + { + assert(typeOfValue != TYP_STRUCT); + // Represent this as an identity store: "$VN.Struct[[0:size - 1] := primitive]". + JITDUMP(" VNForLoadStoreBitcast:\n"); + value = VNForMapPhysicalStore(VNForEmptyMap(TYP_STRUCT), 0, indSize, value); + } + else if (typeOfValue == TYP_STRUCT) + { + assert(indType != TYP_STRUCT); + // Unwrap the (possibly present) identity store with an identity selection. 
+ JITDUMP(" VNForLoadStoreBitcast:\n"); + value = VNForMapPhysicalSelect(vnk, indType, value, 0, indSize); + } + else + { + assert(genTypeSize(indType) == indSize); + value = VNApplySelectorsTypeCheck(value, indType, indSize); + } + } + + assert(genActualType(TypeOfVN(value)) == genActualType(indType)); + + return value; +} + +ValueNumPair ValueNumStore::VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize) +{ + ValueNum liberalVN = VNForLoadStoreBitcast(VNK_Liberal, value.GetLiberal(), indType, indSize); + ValueNum conservVN = VNForLoadStoreBitcast(VNK_Conservative, value.GetConservative(), indType, indSize); + + return ValueNumPair(liberalVN, conservVN); +} + +ValueNum ValueNumStore::VNForEmptyMap(var_types type) +{ + // TODO-PhysicalVN: use one single VN here instead of allocation new ones. + return VNForExpr(nullptr, type); +} + +ValueNumPair ValueNumStore::VNPairForEmptyMap(var_types type) +{ + ValueNum map = VNForEmptyMap(type); + + return ValueNumPair(map, map); +} + //------------------------------------------------------------------------ // IsVNNotAField: Is the value number a "NotAField" field sequence? // @@ -4590,6 +4831,59 @@ ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointe } } +void Compiler::fgValueNumberLocalStore(GenTree* storeNode, + GenTreeLclVarCommon* lclDefNode, + unsigned storeSize, + ssize_t offset, + ValueNumPair value, + bool normalize) +{ + // Should not have been recorded as updating the GC heap. + assert(!GetMemorySsaMap(GcHeap)->Lookup(storeNode)); + + LclVarDsc* varDsc = lvaGetDesc(lclDefNode); + unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclDefNode); + + if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) + { + unsigned lclSize = lvaLclExactSize(lclDefNode->GetLclNum()); + + ValueNumPair newLclValue; + if ((storeSize == lclSize) && (offset == 0)) + { + newLclValue = normalize ? 
vnStore->VNPairForLoadStoreBitcast(value, varDsc->TypeGet(), lclSize) : value; + } + else + { + assert((lclDefNode->gtFlags & GTF_VAR_USEASG) != 0); + // The "lclDefNode" node will be labeled with the SSA number of its "use" identity + // (we looked in a side table above for its "def" identity). Look up that value. + ValueNumPair oldLclValue = varDsc->GetPerSsaData(lclDefNode->GetSsaNum())->m_vnPair; + newLclValue = vnStore->VNPairForStore(oldLclValue, lclSize, offset, storeSize, value); + } + + // We maintain this invariant only for local locations. + assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc)) || !normalize); + + varDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLclValue; + + JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(storeNode), lclDefNode->GetLclNum(), + lclDefSsaNum); + JITDUMPEXEC(vnpPrint(newLclValue, 1)); + JITDUMP("\n"); + } + else if (varDsc->IsAddressExposed()) + { + ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); + recordAddressExposedLocalStore(storeNode, heapVN DEBUGARG("local assign")); + } + else + { + JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not tracked\n", + dspTreeID(storeNode), lclDefNode->GetLclNum()); + } +} + var_types ValueNumStore::TypeOfVN(ValueNum vn) { if (vn == NoVN) @@ -6166,6 +6460,9 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr) case VNF_MapStore: vnDumpMapStore(comp, &funcApp); break; + case VNF_MapPhysicalStore: + vnDumpMapPhysicalStore(comp, &funcApp); + break; case VNF_ValWithExc: vnDumpValWithExc(comp, &funcApp); break; @@ -6297,6 +6594,38 @@ void ValueNumStore::vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore) } } +void ValueNumStore::vnDumpPhysicalSelector(ValueNum selector) +{ + unsigned size; + unsigned offset = DecodePhysicalSelector(selector, &size); + + if (size == 1) + { + printf("[%u]", offset); + } + else + { + printf("[%u:%u]", offset, offset + size - 
1); + } +} + +void ValueNumStore::vnDumpMapPhysicalStore(Compiler* comp, VNFuncApp* mapPhysicalStore) +{ + ValueNum mapVN = mapPhysicalStore->m_args[0]; + ValueNum selector = mapPhysicalStore->m_args[1]; + ValueNum valueVN = mapPhysicalStore->m_args[2]; + + unsigned size; + unsigned offset = DecodePhysicalSelector(selector, &size); + unsigned endOffset = offset + size; + + comp->vnPrint(mapVN, 0); + vnDumpPhysicalSelector(selector); + printf(" := "); + comp->vnPrint(valueVN, 0); + printf("]"); +} + void ValueNumStore::vnDumpMemOpaque(Compiler* comp, VNFuncApp* memOpaque) { assert(memOpaque->m_func == VNF_MemOpaque); // Precondition. @@ -7650,6 +7979,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) ValueNumPair rhsExcSet; ValueNumPair rhsVNPair; vnStore->VNPUnpackExc(rhs->gtVNPair, &rhsVNPair, &rhsExcSet); + assert(rhsVNPair.BothDefined()); // Is the type being stored different from the type computed by the rhs? if (rhs->TypeGet() != lhs->TypeGet()) @@ -7679,106 +8009,29 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) { case GT_LCL_VAR: { - GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon(); - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lcl); - - // Should not have been recorded as updating the GC heap. - assert(!GetMemorySsaMap(GcHeap)->Lookup(tree)); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) - { - // Should not have been recorded as updating ByrefExposed mem. - assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree)); - assert(rhsVNPair.BothDefined()); - - lvaTable[lcl->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = rhsVNPair; - - JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(tree), lcl->GetLclNum(), - lclDefSsaNum); - JITDUMPEXEC(vnpPrint(rhsVNPair, 1)); - JITDUMP("\n"); - } - else if (lvaVarAddrExposed(lcl->GetLclNum())) - { - // We could use MapStore here and MapSelect on reads of address-exposed locals - // (using the local nums as selectors) to get e.g. 
propagation of values - // through address-taken locals in regions of code with no calls or byref - // writes. - // For now, just use a new opaque VN. - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local assign")); - } - else - { - JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not" - "tracked.\n", - dspTreeID(tree), lcl->GetLclNum()); - } + GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon(); + fgValueNumberLocalStore(tree, lcl, lvaLclExactSize(lcl->GetLclNum()), 0, rhsVNPair, /* normalize */ false); } break; + case GT_LCL_FLD: { - GenTreeLclFld* lclFld = lhs->AsLclFld(); - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclFld); + GenTreeLclFld* lclFld = lhs->AsLclFld(); + unsigned storeOffset = lclFld->GetLclOffs(); + unsigned storeSize = lclFld->GetSize(); - // Should not have been recorded as updating the GC heap. - assert(!GetMemorySsaMap(GcHeap)->Lookup(tree)); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + if ((lclFld->GetFieldSeq() == FieldSeqStore::NotAField()) && ((lclFld->gtFlags & GTF_VAR_USEASG) != 0)) { - ValueNumPair newLhsVNPair; - // Is this a full definition? - if ((lclFld->gtFlags & GTF_VAR_USEASG) == 0) - { - assert(!lclFld->IsPartialLclFld(this)); - assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN); - newLhsVNPair = rhsVNPair; - } - else - { - // We should never have a null field sequence here. - assert(lclFld->GetFieldSeq() != nullptr); - if (lclFld->GetFieldSeq() == FieldSeqStore::NotAField()) - { - // We don't know what field this represents. Assign a new VN to the whole variable - // (since we may be writing to an unknown portion of it.) - newLhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lvaGetActualType(lclFld->GetLclNum()))); - } - else - { - // We do know the field sequence. 
- // The "lclFld" node will be labeled with the SSA number of its "use" identity - // (we looked in a side table above for its "def" identity). Look up that value. - ValueNumPair oldLhsVNPair = - lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclFld->GetSsaNum())->m_vnPair; - newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lclFld->GetFieldSeq(), - rhsVNPair, // Pre-value. - lclFld->TypeGet()); - } - } - - lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair; - - JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(tree), lclFld->GetLclNum(), - lclDefSsaNum); - JITDUMPEXEC(vnpPrint(newLhsVNPair, 1)); - JITDUMP("\n"); - } - else if (lvaVarAddrExposed(lclFld->GetLclNum())) - { - // This side-effects ByrefExposed. Just use a new opaque VN. - // As with GT_LCL_VAR, we could probably use MapStore here and MapSelect at corresponding - // loads, but to do so would have to identify the subset of address-exposed locals - // whose fields can be disambiguated. - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local field assign")); - } - else - { - JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not" - "tracked.\n", - dspTreeID(tree), lclFld->GetLclNum()); + // Invalidate the whole local. + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lvaGetDesc(lclFld)->TypeGet())); + storeOffset = 0; + storeSize = lvaLclExactSize(lclFld->GetLclNum()); } + + // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. 
+ bool normalize = ((lclFld->gtFlags & GTF_VAR_USEASG) != 0); + fgValueNumberLocalStore(tree, lclFld, storeSize, storeOffset, rhsVNPair, normalize); } break; @@ -7798,249 +8051,153 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet())); } - GenTree* arg = lhs->AsOp()->gtOp1; - - // Indicates whether the argument of the IND is the address of a local. - bool wasLocal = false; + GenTree* arg = lhs->AsOp()->gtOp1; + unsigned lhsSize = lhs->AsIndir()->Size(); VNFuncApp funcApp; - ValueNum argVN = arg->gtVNPair.GetLiberal(); + ValueNum argVN = arg->gtVNPair.GetLiberal(); + bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormalValue(argVN), &funcApp); - bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormalValue(argVN), &funcApp); + GenTreeLclVarCommon* lclVarTree = nullptr; + bool isEntire = false; + ssize_t offset = 0; + GenTree* baseAddr = nullptr; + FieldSeqNode* fldSeq = nullptr; - // Is this an assignment to a (field of, perhaps) a local? - // If it is a PtrToLoc, lib and cons VNs will be the same. - if (argIsVNFunc) + if (argIsVNFunc && (funcApp.m_func == VNF_PtrToStatic)) { - if (funcApp.m_func == VNF_PtrToLoc) - { - assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ. - assert(vnStore->IsVNConstant(funcApp.m_args[0])); - unsigned lclNum = vnStore->ConstantValue(funcApp.m_args[0]); - - wasLocal = true; - - bool wasInSsa = false; - if (lvaInSsa(lclNum)) - { - FieldSeqNode* fieldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); - - // Either "arg" is the address of (part of) a local itself, or else we have - // a "rogue" PtrToLoc, one that should have made the local in question - // address-exposed. Assert on that. - GenTreeLclVarCommon* lclVarTree = nullptr; - bool isEntire = false; - - if (arg->DefinesLocalAddr(this, genTypeSize(lhs->TypeGet()), &lclVarTree, &isEntire) && - lclVarTree->HasSsaName()) - { - // The local #'s should agree. 
- assert(lclNum == lclVarTree->GetLclNum()); - - if (fieldSeq == FieldSeqStore::NotAField()) - { - assert(!isEntire && "did not expect an entire NotAField write."); - // We don't know where we're storing, so give the local a new, unique VN. - // Do this by considering it an "entire" assignment, with an unknown RHS. - isEntire = true; - rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); - } - else if ((fieldSeq == nullptr) && !isEntire) - { - // It is a partial store of a LCL_VAR without using LCL_FLD. - // Generate a unique VN. - isEntire = true; - rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); - } - - ValueNumPair newLhsVNPair; - if (isEntire) - { - newLhsVNPair = rhsVNPair; - } - else - { - // Don't use the lclVarTree's VN: if it's a local field, it will - // already be dereferenced by it's field sequence. - ValueNumPair oldLhsVNPair = - lvaTable[lclVarTree->GetLclNum()].GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; - newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq, rhsVNPair, - lhs->TypeGet()); - } - - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) - { - lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair; - wasInSsa = true; -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigned VN to local var V%02u/%d: VN ", lclNum, lclDefSsaNum); - vnpPrint(newLhsVNPair, 1); - printf("\n"); - } -#endif // DEBUG - } - } - else - { - unreached(); // "Rogue" PtrToLoc, as discussed above. - } - } + FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); + assert(fldSeq != nullptr); // We should never see an empty sequence here. - if (!wasInSsa && lvaVarAddrExposed(lclNum)) - { - // Need to record the effect on ByrefExposed. - // We could use MapStore here and MapSelect on reads of address-exposed locals - // (using the local nums as selectors) to get e.g. 
propagation of values - // through address-taken locals in regions of code with no calls or byref - // writes. - // For now, just use a new opaque VN. - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, heapVN DEBUGARG("PtrToLoc indir")); - } + if (fldSeq != FieldSeqStore::NotAField()) + { + ValueNum newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, + rhsVNPair.GetLiberal(), lhs->TypeGet()); + recordGcHeapStore(tree, newHeapVN DEBUGARG("static field store")); + } + else + { + fgMutateGcHeap(tree DEBUGARG("indirect store at NotAField PtrToStatic address")); } } - - // Was the argument of the GT_IND the address of a local, handled above? - if (!wasLocal) + // Is the LHS an array index expression? + else if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem) { - GenTree* baseAddr = nullptr; - FieldSeqNode* fldSeq = nullptr; - - if (argIsVNFunc && funcApp.m_func == VNF_PtrToStatic) + CORINFO_CLASS_HANDLE elemTypeEq = + CORINFO_CLASS_HANDLE(vnStore->ConstantValue(funcApp.m_args[0])); + ValueNum arrVN = funcApp.m_args[1]; + ValueNum inxVN = funcApp.m_args[2]; + FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]); +#ifdef DEBUG + if (verbose) { - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); - assert(fldSeq != nullptr); // We should never see an empty sequence here. - - if (fldSeq != FieldSeqStore::NotAField()) - { - ValueNum newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, - rhsVNPair.GetLiberal(), lhs->TypeGet()); - recordGcHeapStore(tree, newHeapVN DEBUGARG("static field store")); - } - else - { - fgMutateGcHeap(tree DEBUGARG("indirect store at NotAField PtrToStatic address")); - } + printf("Tree "); + Compiler::printTreeID(tree); + printf(" assigns to an array element:\n"); } - // Is the LHS an array index expression? 
- else if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem) - { - CORINFO_CLASS_HANDLE elemTypeEq = - CORINFO_CLASS_HANDLE(vnStore->ConstantValue(funcApp.m_args[0])); - ValueNum arrVN = funcApp.m_args[1]; - ValueNum inxVN = funcApp.m_args[2]; - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]); -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigns to an array element:\n"); - } #endif // DEBUG - ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, - rhsVNPair.GetLiberal(), lhs->TypeGet()); - recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign")); - } - else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) - { - assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); + ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, + rhsVNPair.GetLiberal(), lhs->TypeGet()); + recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign")); + } + else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) + { + assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); - // The value number from the rhs of the assignment - ValueNum storeVal = rhsVNPair.GetLiberal(); - ValueNum newHeapVN = ValueNumStore::NoVN; + // The value number from the rhs of the assignment + ValueNum storeVal = rhsVNPair.GetLiberal(); + ValueNum newHeapVN = ValueNumStore::NoVN; - // We will check that the final field in the sequence matches 'indType'. - var_types indType = lhs->TypeGet(); + // We will check that the final field in the sequence matches 'indType'. + var_types indType = lhs->TypeGet(); - if (baseAddr != nullptr) - { - // Instance field / "complex" static: heap[field][baseAddr][struct fields...] = storeVal. - - var_types firstFieldType; - ValueNum firstFieldSelectorVN = - vnStore->VNForFieldSelector(fldSeq->GetFieldHandle(), &firstFieldType); - - // Construct the "field map" VN. 
It represents memory state of the first field - // of all objects on the heap. This is our primary map. - ValueNum fldMapVN = - vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], firstFieldSelectorVN); + if (baseAddr != nullptr) + { + // Instance field / "complex" static: heap[field][baseAddr][struct fields...] = storeVal. - ValueNum firstFieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); + var_types firstFieldType; + ValueNum firstFieldSelectorVN = + vnStore->VNForFieldSelector(fldSeq->GetFieldHandle(), &firstFieldType); - ValueNum newFirstFieldValueVN = ValueNumStore::NoVN; - // Optimization: avoid traversting the maps for the value of the first field if - // we do not need it, which is the case if the rest of the field sequence is empty. - if (fldSeq->GetNext() == nullptr) - { - newFirstFieldValueVN = vnStore->VNApplySelectorsAssignTypeCoerce(storeVal, indType); - } - else - { - // Construct the ValueNumber for fldMap[baseAddr]. This (struct) - // map represents the specific field we're looking to store to. - ValueNum firstFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, - firstFieldValueSelectorVN); - - // Construct the maps updating the struct fields in the sequence. - newFirstFieldValueVN = - vnStore->VNApplySelectorsAssign(VNK_Liberal, firstFieldValueVN, fldSeq->GetNext(), - storeVal, indType); - } + // Construct the "field map" VN. It represents memory state of the first field + // of all objects on the heap. This is our primary map. + ValueNum fldMapVN = + vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], firstFieldSelectorVN); - // Finally, construct the new field map... - ValueNum newFldMapVN = - vnStore->VNForMapStore(fldMapVN, firstFieldValueSelectorVN, newFirstFieldValueVN); + ValueNum firstFieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); - // ...and a new value for the heap. 
- newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], firstFieldSelectorVN, newFldMapVN); + ValueNum newFirstFieldValueVN = ValueNumStore::NoVN; + // Optimization: avoid traversting the maps for the value of the first field if + // we do not need it, which is the case if the rest of the field sequence is empty. + if (fldSeq->GetNext() == nullptr) + { + newFirstFieldValueVN = vnStore->VNApplySelectorsAssignTypeCoerce(storeVal, indType); } else { - // "Simple" static: heap[field][struct fields...] = storeVal. - newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, - storeVal, indType); + // Construct the ValueNumber for fldMap[baseAddr]. This (struct) + // map represents the specific field we're looking to store to. + ValueNum firstFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, + firstFieldValueSelectorVN); + + // Construct the maps updating the struct fields in the sequence. + newFirstFieldValueVN = + vnStore->VNApplySelectorsAssign(VNK_Liberal, firstFieldValueVN, fldSeq->GetNext(), + storeVal, indType); } - // Update the GcHeap value. - recordGcHeapStore(tree, newHeapVN DEBUGARG("StoreField")); + // Finally, construct the new field map... + ValueNum newFldMapVN = + vnStore->VNForMapStore(fldMapVN, firstFieldValueSelectorVN, newFirstFieldValueVN); + + // ...and a new value for the heap. + newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], firstFieldSelectorVN, newFldMapVN); } else { - GenTreeLclVarCommon* lclVarTree = nullptr; - bool isLocal = tree->DefinesLocal(this, &lclVarTree); + // "Simple" static: heap[field][struct fields...] = storeVal. + newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, + storeVal, indType); + } - if (isLocal && lvaVarAddrExposed(lclVarTree->GetLclNum())) - { - // Store to address-exposed local; need to record the effect on ByrefExposed. 
- // We could use MapStore here and MapSelect on reads of address-exposed locals - // (using the local nums as selectors) to get e.g. propagation of values - // through address-taken locals in regions of code with no calls or byref - // writes. - // For now, just use a new opaque VN. - ValueNum memoryVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, memoryVN DEBUGARG("PtrToLoc indir")); - } - else if (!isLocal) - { - // If it doesn't define a local, then it might update GcHeap/ByrefExposed. - // For the new ByrefExposed VN, we could use an operator here like - // VNF_ByrefExposedStore that carries the VNs of the pointer and RHS, then - // at byref loads if the current ByrefExposed VN happens to be - // VNF_ByrefExposedStore with the same pointer VN, we could propagate the - // VN from the RHS to the VN for the load. This would e.g. allow tracking - // values through assignments to out params. For now, just model this - // as an opaque GcHeap/ByrefExposed mutation. - fgMutateGcHeap(tree DEBUGARG("assign-of-IND")); - } + // Update the GcHeap value. + recordGcHeapStore(tree, newHeapVN DEBUGARG("StoreField")); + } + else if (arg->DefinesLocalAddr(this, lhsSize, &lclVarTree, &isEntire, &offset)) + { + unsigned lclNum = lclVarTree->GetLclNum(); + fldSeq = FieldSeqStore::NotAField(); + + if (argIsVNFunc && (funcApp.m_func == VNF_PtrToLoc)) + { + assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ. + assert(lclNum == vnStore->ConstantValue(funcApp.m_args[0])); + fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); } + + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. 
+ if ((fldSeq == FieldSeqStore::NotAField()) || ((fldSeq == nullptr) && !isEntire)) + { + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); + offset = 0; + lhsSize = lvaLclExactSize(lclVarTree->GetLclNum()); + } + + fgValueNumberLocalStore(tree, lclVarTree, lhsSize, offset, rhsVNPair); + } + else + { + assert(!tree->DefinesLocal(this, &lclVarTree)); + // If it doesn't define a local, then it might update GcHeap/ByrefExposed. + // For the new ByrefExposed VN, we could use an operator here like + // VNF_ByrefExposedStore that carries the VNs of the pointer and RHS, then + // at byref loads if the current ByrefExposed VN happens to be + // VNF_ByrefExposedStore with the same pointer VN, we could propagate the + // VN from the RHS to the VN for the load. This would e.g. allow tracking + // values through assignments to out params. For now, just model this + // as an opaque GcHeap/ByrefExposed mutation. + fgMutateGcHeap(tree DEBUGARG("assign-of-IND")); } // We don't actually evaluate an IND on the LHS, so give it the Void value. @@ -8073,9 +8230,10 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) GenTree* lhs = tree->gtGetOp1(); GenTree* rhs = tree->gtGetOp2(); - GenTreeLclVarCommon* lclVarTree; - bool isEntire; - if (tree->DefinesLocal(this, &lclVarTree, &isEntire)) + GenTreeLclVarCommon* lclVarTree = nullptr; + bool isEntire = false; + ssize_t offset = 0; + if (tree->DefinesLocal(this, &lclVarTree, &isEntire, &offset)) { assert(lclVarTree->gtFlags & GTF_VAR_DEF); // Should not have been recorded as updating the GC heap. @@ -8089,31 +8247,30 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) // SSA names in which to store VN's on defs. We'll yield unique VN's when we read from them. 
if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) { - FieldSeqNode* lhsFldSeq = nullptr; + unsigned lhsLclSize = lvaLclExactSize(lhsLclNum); - if (lhs->IsLocalExpr(this, &lclVarTree, &lhsFldSeq)) + unsigned storeSize; + FieldSeqNode* lhsFldSeq; + if (lhs->OperIs(GT_LCL_VAR)) + { + storeSize = lhsLclSize; + lhsFldSeq = nullptr; + } + else if (lhs->OperIs(GT_LCL_FLD)) { - noway_assert(lclVarTree->GetLclNum() == lhsLclNum); + storeSize = lhs->AsLclFld()->GetSize(); + lhsFldSeq = lhs->AsLclFld()->GetFieldSeq(); } else { - GenTree* lhsAddr = lhs->AsIndir()->Addr(); - - // For addr-of-local expressions, lib/cons shouldn't matter. - assert(lhsAddr->gtVNPair.BothEqual()); - ValueNum lhsAddrVN = lhsAddr->GetVN(VNK_Liberal); - - // Unpack the PtrToLoc value number of the address. - assert(vnStore->IsVNFunc(lhsAddrVN)); + storeSize = lhs->AsIndir()->Size(); - VNFuncApp lhsAddrFuncApp; - vnStore->GetVNFunc(lhsAddrVN, &lhsAddrFuncApp); + ValueNumPair lhsAddrVNP = lhs->AsIndir()->Addr()->gtVNPair; + VNFuncApp lhsAddrFunc; + bool lhsAddrIsVNFunc = vnStore->GetVNFunc(lhsAddrVNP.GetLiberal(), &lhsAddrFunc); + assert(lhsAddrIsVNFunc && (lhsAddrFunc.m_func == VNF_PtrToLoc) && lhsAddrVNP.BothEqual()); - assert(lhsAddrFuncApp.m_func == VNF_PtrToLoc); - assert(vnStore->IsVNConstant(lhsAddrFuncApp.m_args[0]) && - vnStore->ConstantValue(lhsAddrFuncApp.m_args[0]) == lhsLclNum); - - lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFuncApp.m_args[1]); + lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFunc.m_args[1]); } bool isNewUniq = false; @@ -8141,12 +8298,28 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) { assert(tree->OperIsCopyBlkOp()); + // TODO-PhysicalVN: with the physical VN scheme, we no longer need this check. 
if (fgValueNumberBlockAssignmentTypeCheck(lhsVarDsc, lhsFldSeq, rhs)) { - ValueNumPair rhsVNPair = vnStore->VNPNormalPair(rhs->gtVNPair); - ValueNumPair oldLhsLclVNPair = lhsVarDsc->GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; - newLhsLclVNPair = - vnStore->VNPairApplySelectorsAssign(oldLhsLclVNPair, lhsFldSeq, rhsVNPair, lhs->TypeGet()); + ValueNumPair rhsVNPair = vnStore->VNPNormalPair(rhs->gtVNPair); + + // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. + if (lhs->TypeIs(TYP_STRUCT) && (vnStore->TypeOfVN(rhsVNPair.GetLiberal()) != TYP_STRUCT)) + { + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_STRUCT)); + } + + if (isEntire) + { + newLhsLclVNPair = vnStore->VNPairForLoadStoreBitcast(rhsVNPair, lhsVarDsc->TypeGet(), + lhsLclSize); + } + else + { + ValueNumPair oldLhsLclVNPair = lhsVarDsc->GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; + newLhsLclVNPair = vnStore->VNPairForStore(oldLhsLclVNPair, lhsLclSize, offset, + storeSize, rhsVNPair); + } } else { @@ -8414,26 +8587,26 @@ void Compiler::fgValueNumberTree(GenTree* tree) case GT_LCL_FLD: { GenTreeLclFld* lclFld = tree->AsLclFld(); - assert(!lvaInSsa(lclFld->GetLclNum()) || (lclFld->GetFieldSeq() != nullptr)); // If this is a (full or partial) def we skip; it will be handled as part of the assignment. if ((lclFld->gtFlags & GTF_VAR_DEF) == 0) { - unsigned ssaNum = lclFld->GetSsaNum(); - LclVarDsc* varDsc = lvaGetDesc(lclFld); + unsigned lclNum = lclFld->GetLclNum(); - var_types indType = tree->TypeGet(); + // TODO-ADDR: delete the "GetSize" check below once TYP_STRUCT LCL_FLD is supported. + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. 
if ((lclFld->GetFieldSeq() == FieldSeqStore::NotAField()) || !lvaInSsa(lclFld->GetLclNum()) || - !lclFld->HasSsaName()) + !lclFld->HasSsaName() || (lclFld->GetSize() == 0)) { - // This doesn't represent a proper field access or it's a struct - // with overlapping fields that is hard to reason about; return a new unique VN. - tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); + lclFld->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclFld->TypeGet())); } else { - ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; - tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, lclFld->GetFieldSeq(), indType); + LclVarDsc* varDsc = lvaGetDesc(lclNum); + ValueNumPair lclVarValue = varDsc->GetPerSsaData(lclFld->GetSsaNum())->m_vnPair; + lclFld->gtVNPair = vnStore->VNPairForLoad(lclVarValue, lvaLclExactSize(lclNum), + lclFld->TypeGet(), lclFld->GetLclOffs(), + lclFld->GetSize()); // If we have byref field, we may have a zero-offset sequence to add. FieldSeqNode* zeroOffsetFldSeq = nullptr; @@ -8665,25 +8838,41 @@ void Compiler::fgValueNumberTree(GenTree* tree) // We will "evaluate" this as part of the assignment. else if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0) { + unsigned loadSize = tree->AsIndir()->Size(); + ssize_t offset = 0; + FieldSeqNode* localFldSeq = nullptr; VNFuncApp funcApp; // Is it a local or a heap address? - if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq) && lvaInSsa(lclVarTree->GetLclNum()) && - lclVarTree->HasSsaName()) + if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq, &offset) && + lvaInSsa(lclVarTree->GetLclNum()) && lclVarTree->HasSsaName()) { unsigned ssaNum = lclVarTree->GetSsaNum(); LclVarDsc* varDsc = lvaGetDesc(lclVarTree); + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + // TODO-PhysicalVN: use "DefinesLocalAddr" here for consistency with the store code. 
if ((localFldSeq == FieldSeqStore::NotAField()) || (localFldSeq == nullptr)) { tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); } else { - var_types indType = tree->TypeGet(); - ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; - tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, localFldSeq, indType); + ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; + unsigned lclSize = lvaLclExactSize(lclVarTree->GetLclNum()); + offset = offset + lclVarTree->GetLclOffs(); + + // TODO-1stClassStructs-CQ: delete layout-less struct nodes and the "loadSize == 0" condition. + if (loadSize != 0) + { + tree->gtVNPair = vnStore->VNPairForLoad(lclVNPair, lclSize, tree->TypeGet(), offset, + loadSize); + } + else + { + tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); + } } tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 864c40bb81d86d..79f38d3b84a24c 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -587,6 +587,8 @@ class ValueNumStore ValueNum VNForMapSelect(ValueNumKind vnk, var_types type, ValueNum map, ValueNum index); + ValueNum VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size); + // A method that does the work for VNForMapSelect and may call itself recursively. 
ValueNum VNForMapSelectWork( ValueNumKind vnk, var_types type, ValueNum map, ValueNum index, int* pBudget, bool* pUsedRecursiveVN); @@ -594,6 +596,12 @@ class ValueNumStore // A specialized version of VNForFunc that is used for VNF_MapStore and provides some logging when verbose is set ValueNum VNForMapStore(ValueNum map, ValueNum index, ValueNum value); + ValueNum VNForMapPhysicalStore(ValueNum map, unsigned offset, unsigned size, ValueNum value); + + ValueNum EncodePhysicalSelector(unsigned offset, unsigned size); + + unsigned DecodePhysicalSelector(ValueNum selector, unsigned* pSize); + ValueNum VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, size_t* pStructSize = nullptr); // These functions parallel the ones above, except that they take liberal/conservative VN pairs @@ -650,6 +658,39 @@ class ValueNumStore ValueNumPair VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType); + ValueNum VNForLoad(ValueNumKind vnk, + ValueNum locationValue, + unsigned locationSize, + var_types loadType, + ssize_t offset, + unsigned loadSize); + + ValueNumPair VNPairForLoad(ValueNumPair locationValue, + unsigned locationSize, + var_types loadType, + ssize_t offset, + unsigned loadSize); + + ValueNum VNForStore(ValueNum locationValue, + unsigned locationSize, + ssize_t offset, + unsigned storeSize, + ValueNum value); + + ValueNumPair VNPairForStore(ValueNumPair locationValue, + unsigned locationSize, + ssize_t offset, + unsigned storeSize, + ValueNumPair value); + + ValueNum VNForLoadStoreBitcast(ValueNumKind vnk, ValueNum value, var_types indType, unsigned indSize); + + ValueNumPair VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize); + + ValueNum VNForEmptyMap(var_types type); + + ValueNumPair VNPairForEmptyMap(var_types type); + ValueNumPair VNPairApplySelectorsAssign(ValueNumPair map, FieldSeqNode* fieldSeq, ValueNumPair value, @@ -1015,6 +1056,10 @@ class ValueNumStore // Prints a representation 
of a MapStore operation on standard out. void vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore); + void vnDumpPhysicalSelector(ValueNum selector); + + void vnDumpMapPhysicalStore(Compiler* comp, VNFuncApp* mapPhysicalStore); + // Requires "memOpaque" to be a mem opaque VNFuncApp // Prints a representation of a MemOpaque state on standard out. void vnDumpMemOpaque(Compiler* comp, VNFuncApp* memOpaque); diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 082dd09810d982..80fa93c2f63741 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -6,9 +6,10 @@ // ) // clang-format off -ValueNumFuncDef(MemOpaque, 1, false, false, false) // Args: 0: loop num -ValueNumFuncDef(MapStore, 4, false, false, false) // Args: 0: map, 1: index (e. g. field handle), 2: value being stored, 3: loop num. -ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. +ValueNumFuncDef(MemOpaque, 1, false, false, false) // Args: 0: loop num +ValueNumFuncDef(MapStore, 4, false, false, false) // Args: 0: map, 1: index (e. g. field handle), 2: value being stored, 3: loop num. +ValueNumFuncDef(MapPhysicalStore, 3, false, false, false) // Args: 0: map, 1: "physical selector": offset and size, 2: value being stored +ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. ValueNumFuncDef(PtrToLoc, 2, false, true, false) // Pointer (byref) to a local variable. Args: VN's of: 0: var num, 1: FieldSeq. ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: FieldSeq. 
From 9a59a6f42d2355257daa5b4a41d18271a34517c0 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Tue, 19 Apr 2022 22:21:22 +0300 Subject: [PATCH 04/17] Physical VN -- fields --- src/coreclr/jit/compiler.h | 9 + src/coreclr/jit/gentree.cpp | 36 ++- src/coreclr/jit/gentree.h | 21 +- src/coreclr/jit/importer.cpp | 45 ++- src/coreclr/jit/lclmorph.cpp | 3 +- src/coreclr/jit/morph.cpp | 45 +-- src/coreclr/jit/morphblock.cpp | 13 +- src/coreclr/jit/optimizer.cpp | 3 +- src/coreclr/jit/valuenum.cpp | 509 ++++++++++++++++++-------------- src/coreclr/jit/valuenum.h | 16 +- src/coreclr/jit/valuenumfuncs.h | 9 +- 11 files changed, 426 insertions(+), 283 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 708102589bd316..9bb221671fb1f9 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4737,6 +4737,15 @@ class Compiler ValueNumPair value, bool normalize = true); + void fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset); + + void fgValueNumberFieldStore(GenTree* storeNode, + GenTree* baseAddr, + FieldSeqNode* fieldSeq, + ssize_t offset, + unsigned storeSize, + ValueNum value); + unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. 
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 8d658ff0773362..92b88b2252ada4 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -16760,6 +16760,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp) // comp - the Compiler object // pBaseAddr - [out] parameter for "the base address" // pFldSeq - [out] parameter for the field sequence +// pOffset - [out] parameter for the offset of the component struct fields // // Return Value: // If "this" matches patterns denoted above, and the FldSeq found is "full", @@ -16771,7 +16772,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp) // reference, for statics - the address to which the field offset with the // field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField". // -bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq) +bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq, ssize_t* pOffset) { assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF)); @@ -16780,6 +16781,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = FieldSeqStore::NotAField(); + ssize_t offset = 0; if (OperIs(GT_ADD)) { @@ -16788,14 +16790,16 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle()) { assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle()); - fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq; baseAddr = AsOp()->gtOp2; + fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq; + offset = AsOp()->gtOp1->AsIntCon()->IconValue(); } else if (AsOp()->gtOp2->IsCnsIntOrI()) { assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle()); - fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq; baseAddr = AsOp()->gtOp1; + fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq; + offset = 
AsOp()->gtOp2->AsIntCon()->IconValue(); } else { @@ -16807,12 +16811,15 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL)) { assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr)); - fldSeq = AsIntCon()->gtFieldSeq; - baseAddr = this; + baseAddr = this; + fldSeq = AsIntCon()->gtFieldSeq; + offset = AsIntCon()->IconValue(); } else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq)) { + assert((fldSeq != FieldSeqStore::NotAField()) || (fldSeq->GetOffset() == 0)); baseAddr = this; + offset = 0; } else { @@ -16826,6 +16833,9 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF return false; } + // Subtract from the offset such that the portion remaining is relative to the field itself. + offset -= fldSeq->GetOffset(); + // The above screens out obviously invalid cases, but we have more checks to perform. The // sequence returned from this method *must* start with either a class (NOT struct) field // or a static field. To avoid the expense of calling "getFieldClass" here, we will instead @@ -16840,6 +16850,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF } *pFldSeq = fldSeq; + *pOffset = offset; return true; } @@ -16849,6 +16860,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF *pBaseAddr = baseAddr; *pFldSeq = fldSeq; + *pOffset = offset; return true; } @@ -18117,16 +18129,17 @@ bool GenTree::IsArrayAddr(GenTreeArrAddr** pArrAddr) // Note that the value of the below field doesn't matter; it exists only to provide a distinguished address. // // static -FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, FieldSeqNode::FieldKind::Instance); +FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, 0, FieldSeqNode::FieldKind::Instance); // FieldSeqStore methods. 
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc)) { } -FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode::FieldKind fieldKind) +FieldSeqNode* FieldSeqStore::CreateSingleton( + CORINFO_FIELD_HANDLE fieldHnd, size_t offset, FieldSeqNode::FieldKind fieldKind) { - FieldSeqNode fsn(fieldHnd, nullptr, fieldKind); + FieldSeqNode fsn(fieldHnd, nullptr, offset, fieldKind); FieldSeqNode* res = nullptr; if (m_canonMap->Lookup(fsn, &res)) { @@ -18165,7 +18178,7 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b) assert(a != b); FieldSeqNode* tmp = Append(a->GetNext(), b); - FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetKind()); + FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetOffset(), a->GetKind()); FieldSeqNode* res = nullptr; if (m_canonMap->Lookup(fsn, &res)) { @@ -18181,7 +18194,9 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b) } } -FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind) : m_next(next) +FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind) + : m_next(next) + , m_offset(offset) { uintptr_t handleValue = reinterpret_cast<uintptr_t>(fieldHnd); @@ -18191,6 +18206,7 @@ FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, Fi if (fieldHnd != NO_FIELD_HANDLE) { assert(JitTls::GetCompiler()->eeIsFieldStatic(fieldHnd) == IsStaticField()); + // TODO-PhysicalVN: assert that "offset" is correct. 
} else { diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 6724fef7644edc..db4bd09e6acb67 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -243,9 +243,10 @@ class FieldSeqNode uintptr_t m_fieldHandleAndKind; FieldSeqNode* m_next; + size_t m_offset; public: - FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind); + FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind); FieldKind GetKind() const { @@ -268,6 +269,18 @@ class FieldSeqNode return m_next; } + //------------------------------------------------------------------------ + // GetOffset: Retrieve "the offset" for the field this node represents. + // + // For statics with a known (constant) address it will be the value of that address. + // For boxed statics, it will be TARGET_POINTER_SIZE (the method table pointer size). + // For other fields, it will be equal to the value "getFieldOffset" would return. + // + size_t GetOffset() const + { + return m_offset; + } + bool IsStaticField() const { return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic); @@ -290,7 +303,8 @@ class FieldSeqNode // Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable. // Note that there is a one-to-one relationship between the field handle and the field kind, so - // we do not need to mask away the latter for comparison purposes. + // we do not need to mask away the latter for comparison purposes. Likewise, we do not need to + // include the offset. static int GetHashCode(FieldSeqNode fsn) { return static_cast(fsn.m_fieldHandleAndKind) ^ static_cast(reinterpret_cast(fsn.m_next)); @@ -317,6 +331,7 @@ class FieldSeqStore // Returns the (canonical in the store) singleton field sequence for the given handle. 
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, + size_t offset, FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance); // This is a special distinguished FieldSeqNode indicating that a constant does *not* @@ -1931,7 +1946,7 @@ struct GenTree // Determine whether this tree is a basic block profile count update. bool IsBlockProfileUpdate(); - bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq); + bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq, ssize_t* pOffset); bool IsArrayAddr(GenTreeArrAddr** pArrAddr); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index bf8e6114f19e71..bf58f30b62fecf 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1410,11 +1410,14 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); - fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); + fgAddFieldSeqForZeroOffset(destAddr, + GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(), + OFFSETOF__CORINFO_TypedReference__dataPtr)); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); - typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); + typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), + OFFSETOF__CORINFO_TypedReference__type); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); @@ -3635,11 +3638,11 @@ GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 
0); - pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); + pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd, 0)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); - lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); + lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd, TARGET_POINTER_SIZE)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span @@ -8164,11 +8167,35 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_READYTORUN_HELPER); FieldSeqNode::FieldKind fieldKind = isSharedStatic ? FieldSeqNode::FieldKind::SharedStatic : FieldSeqNode::FieldKind::SimpleStatic; - FieldSeqNode* innerFldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind) - : FieldSeqStore::NotAField(); - GenTree* op1; + FieldSeqNode* innerFldSeq = nullptr; + FieldSeqNode* outerFldSeq = nullptr; + if (isBoxedStatic) + { + innerFldSeq = FieldSeqStore::NotAField(); + outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, TARGET_POINTER_SIZE, fieldKind); + } + else + { + bool hasConstAddr = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDRESS) || + (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_RVA_ADDRESS); + size_t offset; + if (hasConstAddr) + { + offset = reinterpret_cast<size_t>(info.compCompHnd->getFieldAddress(pResolvedToken->hField)); + assert(offset != 0); + } + else + { + offset = pFieldInfo->offset; + } + + innerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, offset, fieldKind); + outerFldSeq = FieldSeqStore::NotAField(); + } + + GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: @@ 
-8294,8 +8321,6 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT if (isBoxedStatic) { - FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind); - op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); @@ -8319,8 +8344,6 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT if (isBoxedStatic) { - FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind); - op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 3860682ac36a6d..acd65c27bb9663 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -308,7 +308,8 @@ class LocalAddressVisitor final : public GenTreeVisitor if (haveCorrectFieldForVN) { FieldSeqStore* fieldSeqStore = compiler->GetFieldSeqStore(); - m_fieldSeq = fieldSeqStore->Append(val.m_fieldSeq, fieldSeqStore->CreateSingleton(field->gtFldHnd)); + FieldSeqNode* fieldSeqNode = fieldSeqStore->CreateSingleton(field->gtFldHnd, field->gtFldOffset); + m_fieldSeq = fieldSeqStore->Append(val.m_fieldSeq, fieldSeqNode); } else { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 9e8647c08d4ab0..0ce01cc31aadbc 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -3465,9 +3465,11 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); - destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); + 
destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(), + OFFSETOF__CORINFO_TypedReference__dataPtr)); destPtrSlot->gtFlags |= GTF_VAR_DEF; - destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); + destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), + OFFSETOF__CORINFO_TypedReference__type)); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); @@ -5290,24 +5292,12 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { assert(tree->gtOper == GT_FIELD); - CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; - unsigned fldOffset = tree->AsField()->gtFldOffset; - GenTree* objRef = tree->AsField()->GetFldObj(); - bool objIsLocal = false; - - FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); - if (!tree->AsField()->gtFldMayOverlap) - { - if (objRef != nullptr) - { - fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::Instance); - } - else - { - // Only simple statics get imported as GT_FIELDs. - fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::SimpleStatic); - } - } + CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; + unsigned fldOffset = tree->AsField()->gtFldOffset; + GenTree* objRef = tree->AsField()->GetFldObj(); + bool objIsLocal = false; + bool fldMayOverlap = tree->AsField()->gtFldMayOverlap; + FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; @@ -5573,6 +5563,12 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif + + if (!fldMayOverlap) + { + fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, fldOffset, FieldSeqNode::FieldKind::Instance); + } + if (fldOffset != 0) { // Generate the "addr" node. 
@@ -5687,6 +5683,9 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */ tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); + assert(!fldMayOverlap); + fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, fldOffset, FieldSeqNode::FieldKind::SimpleStatic); + if (fldOffset != 0) { GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq); @@ -5721,9 +5720,11 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) // For boxed statics, this direct address will be for the box. We have already added // the indirection for the field itself and attached the sequence, in importation. bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd); - if (isBoxedStatic) + if (!isBoxedStatic) { - fieldSeq = FieldSeqStore::NotAField(); + // Only simple statics get imported as GT_FIELDs. + fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, reinterpret_cast<size_t>(fldAddr), + FieldSeqNode::FieldKind::SimpleStatic); } // TODO-CQ: enable this optimization for 32 bit targets. 
diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index ad1b6bd149efb4..3b740ab368635e 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -1218,10 +1218,10 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() CORINFO_CLASS_HANDLE classHnd = srcVarDsc->GetStructHnd(); CORINFO_FIELD_HANDLE fieldHnd = m_comp->info.compCompHnd->getFieldInClass(classHnd, srcFieldVarDsc->lvFldOrdinal); - FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd); - unsigned srcFieldOffset = m_comp->lvaGetDesc(srcFieldLclNum)->lvFldOffset; - var_types srcType = srcFieldVarDsc->TypeGet(); + unsigned srcFieldOffset = m_comp->lvaGetDesc(srcFieldLclNum)->lvFldOffset; + var_types srcType = srcFieldVarDsc->TypeGet(); + FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd, srcFieldOffset); if (!m_dstUseLclFld) { @@ -1317,11 +1317,13 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() CORINFO_FIELD_HANDLE fieldHnd = m_comp->info.compCompHnd->getFieldInClass(classHnd, m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOrdinal); - FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd); + + unsigned fldOffset = m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset; var_types destType = m_comp->lvaGetDesc(dstFieldLclNum)->lvType; + FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd, fldOffset); bool done = false; - if (m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset == 0) + if (fldOffset == 0) { // If this is a full-width use of the src via a different type, we need to create a // GT_LCL_FLD. 
@@ -1348,7 +1350,6 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } if (!done) { - unsigned fldOffset = m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset; if (!m_srcUseLclFld) { assert(srcAddrClone != nullptr); diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 9be1521030b51d..491b326c8cfd2b 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -7854,6 +7854,7 @@ bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk) GenTreeArrAddr* arrAddr = nullptr; GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = nullptr; + ssize_t offset = 0; if (arg->IsArrayAddr(&arrAddr)) { @@ -7865,7 +7866,7 @@ bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk) // Conservatively assume byrefs may alias this array element memoryHavoc |= memoryKindSet(ByrefExposed); } - else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) + else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 29723b1f24e4df..5ef4039874941b 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -1778,6 +1778,15 @@ ValueNum ValueNumStore::VNForIntCon(INT32 cnsVal) } } +ValueNum ValueNumStore::VNForIntPtrCon(ssize_t cnsVal) +{ +#ifdef HOST_64BIT + return VNForLongCon(cnsVal); +#else // !HOST_64BIT + return VNForIntCon(cnsVal); +#endif // !HOST_64BIT +} + ValueNum ValueNumStore::VNForLongCon(INT64 cnsVal) { return VnForConst(cnsVal, GetLongCnsMap(), TYP_LONG); @@ -2388,17 +2397,19 @@ ValueNum ValueNumStore::VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, ValueNum ValueNumStore::VNForMapSelectWork( ValueNumKind vnk, var_types type, ValueNum map, ValueNum index, int* pBudget, bool* pUsedRecursiveVN) { -#ifdef DEBUG - bool mapIsPrecise = false; - bool mapIsPhysical = false; -#endif // DEBUG - TailCall: // This label allows us to directly implement a tail 
call by setting up the arguments, and doing a goto to here. + +#ifdef DEBUG assert(map != NoVN && index != NoVN); assert(map == VNNormalValue(map)); // Arguments carry no exceptions. assert(index == VNNormalValue(index)); // Arguments carry no exceptions. + // Currently, all precise maps represent memory, and all physical - typed values. + const bool mapIsPrecise = (TypeOfVN(map) == TYP_HEAP) || (TypeOfVN(map) == TYP_MEM); + const bool mapIsPhysical = !mapIsPrecise; +#endif // DEBUG + *pUsedRecursiveVN = false; #ifdef DEBUG @@ -2447,8 +2458,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( { if (funcApp.m_func == VNF_MapStore) { - assert(!mapIsPhysical); - INDEBUG(mapIsPrecise = true); + assert(mapIsPrecise); // select(store(m, i, v), i) == v if (funcApp.m_args[1] == index) @@ -2482,8 +2492,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( } else if (funcApp.m_func == VNF_MapPhysicalStore) { - assert(!mapIsPrecise); - INDEBUG(mapIsPhysical = true); + assert(mapIsPhysical); #if FEATURE_VN_TRACE_APPLY_SELECTORS JITDUMP(" select("); @@ -2537,10 +2546,20 @@ ValueNum ValueNumStore::VNForMapSelectWork( #endif } } + else if (funcApp.m_func == VNF_BitCast) + { + assert(mapIsPhysical); +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" select(bitcast<%s>(" FMT_VN ")) ==> select(" FMT_VN ")\n", + varTypeName(TypeOfVN(funcApp.m_args[0])), funcApp.m_args[0], funcApp.m_args[0]); +#endif // FEATURE_VN_TRACE_APPLY_SELECTORS + + map = funcApp.m_args[0]; + goto TailCall; + } else if (funcApp.m_func == VNF_ZeroObj) { - // ZeroObj maps are always physical. - assert(!mapIsPrecise); + assert(mapIsPhysical); // TODO-CQ: support selection of TYP_STRUCT here. if (type != TYP_STRUCT) @@ -2710,32 +2729,34 @@ unsigned ValueNumStore::DecodePhysicalSelector(ValueNum selector, unsigned* pSiz // that is used for field handle selectors. 
// // Arguments: -// fieldHnd - handle of the field in question -// pFieldType - [out] parameter for the field's type -// pStructSize - optional [out] parameter for the size of the struct, -// populated if the field in question is of a struct type, -// otherwise set to zero +// fieldHnd - handle of the field in question +// pFieldType - [out] parameter for the field's type +// pSize - optional [out] parameter for the size of the field // // Return Value: // Value number corresponding to the given field handle. // -ValueNum ValueNumStore::VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, size_t* pStructSize) +ValueNum ValueNumStore::VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, unsigned* pSize) { - CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; - ValueNum fldHndVN = VNForHandle(ssize_t(fieldHnd), GTF_ICON_FIELD_HDL); - var_types fieldType = m_pComp->eeGetFieldType(fieldHnd, &structHnd); - size_t structSize = 0; + CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; + ValueNum fldHndVN = VNForHandle(ssize_t(fieldHnd), GTF_ICON_FIELD_HDL); + var_types fieldType = m_pComp->eeGetFieldType(fieldHnd, &structHnd); + unsigned size = 0; if (fieldType == TYP_STRUCT) { - structSize = m_pComp->info.compCompHnd->getClassSize(structHnd); + size = m_pComp->info.compCompHnd->getClassSize(structHnd); // We have to normalize here since there is no CorInfoType for vectors... 
- if (m_pComp->structSizeMightRepresentSIMDType(structSize)) + if (m_pComp->structSizeMightRepresentSIMDType(size)) { fieldType = m_pComp->impNormStructType(structHnd); } } + else + { + size = genTypeSize(fieldType); + } #ifdef DEBUG if (m_pComp->verbose) @@ -2743,19 +2764,16 @@ ValueNum ValueNumStore::VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_ty const char* modName; const char* fldName = m_pComp->eeGetFieldName(fieldHnd, &modName); printf(" VNForHandle(%s) is " FMT_VN ", fieldType is %s", fldName, fldHndVN, varTypeName(fieldType)); - if (structSize != 0) + if (size != 0) { - printf(", size = %u", structSize); + printf(", size = %u", size); } printf("\n"); } #endif - if (pStructSize != nullptr) - { - *pStructSize = structSize; - } *pFieldType = fieldType; + *pSize = size; return fldHndVN; } @@ -4119,12 +4137,12 @@ ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, JITDUMP(" VNApplySelectors:\n"); var_types fieldType; - size_t structSize; - ValueNum fldHndVN = VNForFieldSelector(field->GetFieldHandle(), &fieldType, &structSize); + unsigned fieldSize; + ValueNum fldHndVN = VNForFieldSelector(field->GetFieldHandle(), &fieldType, &fieldSize); if (wbFinalStructSize != nullptr) { - *wbFinalStructSize = structSize; + *wbFinalStructSize = fieldSize; } map = VNForMapSelect(vnk, fieldType, map, fldHndVN); @@ -4328,11 +4346,22 @@ ValueNum ValueNumStore::VNForLoad(ValueNumKind vnk, return VNForExpr(m_pComp->compCurBB, loadType); } - JITDUMP(" VNForLoad:\n"); - ValueNum value = VNForMapPhysicalSelect(vnk, loadType, locationValue, loadOffset, loadSize); - value = VNForLoadStoreBitcast(vnk, value, loadType, loadSize); + ValueNum loadValue; + if (LoadStoreIsEntire(locationSize, loadOffset, loadSize)) + { + loadValue = locationValue; + } + else + { + JITDUMP(" VNForLoad:\n"); + loadValue = VNForMapPhysicalSelect(vnk, loadType, locationValue, loadOffset, loadSize); + } + + // Unlike with stores, loads we always normalize (to have the property that the tree's type 
+ // is the same as its VN's). + loadValue = VNForLoadStoreBitcast(loadValue, loadType, loadSize); - return value; + return loadValue; } ValueNumPair ValueNumStore::VNPairForLoad( @@ -4357,7 +4386,8 @@ ValueNum ValueNumStore::VNForStore(ValueNum locationValue, if ((offset < 0) || (locationSize < (storeOffset + storeSize))) { - JITDUMP(" *** VNForStore: out-of-bounds store!\n"); + JITDUMP(" *** VNForStore: out-of-bounds store -- location size is %u, offset is %zd, store size is %u\n", + locationSize, offset, storeSize); // Some callers will need to invalidate parenting maps, so force explicit // handling of this case instead of returning a "new, unique" VN. return NoVN; @@ -4394,25 +4424,18 @@ ValueNumPair ValueNumStore::VNPairForStore(ValueNumPair locationValue, return ValueNumPair(liberalVN, conservVN); } -ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNumKind vnk, ValueNum value, var_types indType, unsigned indSize) +ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNum value, var_types indType, unsigned indSize) { var_types typeOfValue = TypeOfVN(value); if (typeOfValue != indType) { - if (indType == TYP_STRUCT) + if ((typeOfValue == TYP_STRUCT) || (indType == TYP_STRUCT)) { - assert(typeOfValue != TYP_STRUCT); - // Represent this as an identity store: "$VN.Struct[[0:size - 1] := primitive]". - JITDUMP(" VNForLoadStoreBitcast:\n"); - value = VNForMapPhysicalStore(VNForEmptyMap(TYP_STRUCT), 0, indSize, value); - } - else if (typeOfValue == TYP_STRUCT) - { - assert(indType != TYP_STRUCT); - // Unwrap the (possibly present) identity store with an identity selection. 
- JITDUMP(" VNForLoadStoreBitcast:\n"); - value = VNForMapPhysicalSelect(vnk, indType, value, 0, indSize); + value = VNForBitCast(value, indType); + JITDUMP(" VNForLoadStoreBitcast returns "); + JITDUMPEXEC(m_pComp->vnPrint(value, 1)); + JITDUMP("\n"); } else { @@ -4428,8 +4451,16 @@ ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNumKind vnk, ValueNum value, ValueNumPair ValueNumStore::VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize) { - ValueNum liberalVN = VNForLoadStoreBitcast(VNK_Liberal, value.GetLiberal(), indType, indSize); - ValueNum conservVN = VNForLoadStoreBitcast(VNK_Conservative, value.GetConservative(), indType, indSize); + ValueNum liberalVN = VNForLoadStoreBitcast(value.GetLiberal(), indType, indSize); + ValueNum conservVN; + if (value.BothEqual()) + { + conservVN = liberalVN; + } + else + { + conservVN = VNForLoadStoreBitcast(value.GetConservative(), indType, indSize); + } return ValueNumPair(liberalVN, conservVN); } @@ -4561,13 +4592,13 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, GenTree* opB) FieldSeqNode* fldSeq = opB->AsIntCon()->gtFieldSeq; if (fldSeq != nullptr) { - return ExtendPtrVN(opA, fldSeq); + return ExtendPtrVN(opA, fldSeq, opB->AsIntCon()->IconValue()); } } return NoVN; } -ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq) +ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t offset) { assert(fldSeq != nullptr); @@ -4597,7 +4628,8 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq) } else if (funcApp.m_func == VNF_PtrToStatic) { - res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeq)); + res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeq), + VNForIntPtrCon(ConstantValue(funcApp.m_args[2]) + offset)); } else if (funcApp.m_func == VNF_PtrToArrElem) { @@ -4849,7 +4881,7 @@ void 
Compiler::fgValueNumberLocalStore(GenTree* storeNode, unsigned lclSize = lvaLclExactSize(lclDefNode->GetLclNum()); ValueNumPair newLclValue; - if ((storeSize == lclSize) && (offset == 0)) + if (vnStore->LoadStoreIsEntire(lclSize, offset, storeSize)) { newLclValue = normalize ? vnStore->VNPairForLoadStoreBitcast(value, varDsc->TypeGet(), lclSize) : value; } @@ -4862,7 +4894,10 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, newLclValue = vnStore->VNPairForStore(oldLclValue, lclSize, offset, storeSize, value); } - // We maintain this invariant only for local locations. + // Any out-of-bounds stores should have made the local address-exposed. + assert(newLclValue.BothDefined()); + + // We normalize types stored in local locations because things outside VN itself look at them. assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc)) || !normalize); varDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLclValue; @@ -4884,6 +4919,127 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, } } +void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset) +{ + assert(fieldSeq != nullptr); + + if (fieldSeq == FieldSeqStore::NotAField()) + { + loadTree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadTree->TypeGet())); + return; + } + + // Two cases: + // + // 1) Instance field / "complex" static: heap[field][baseAddr][offset + load size]. + // 2) "Simple" static: heap[field][offset + load size]. 
+ // + var_types fieldType; + unsigned fieldSize; + ValueNum fieldSelectorVN = + vnStore->VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType, &fieldSize); + + ValueNum fieldMapVN = ValueNumStore::NoVN; + ValueNum fieldValueSelectorVN = ValueNumStore::NoVN; + if (baseAddr != nullptr) + { + fieldMapVN = + vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); + fieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); + } + else + { + fieldMapVN = fgCurMemoryVN[GcHeap]; + fieldValueSelectorVN = fieldSelectorVN; + } + + ValueNum fieldValueVN = + vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); + + // Finally, account for the struct fields and type mismatches. + var_types loadType = loadTree->TypeGet(); + unsigned loadSize = loadTree->OperIsBlk() ? loadTree->AsBlk()->Size() : genTypeSize(loadTree); + ValueNum loadValueVN = vnStore->VNForLoad(VNK_Liberal, fieldValueVN, fieldSize, loadType, offset, loadSize); + + loadTree->gtVNPair.SetLiberal(loadValueVN); + loadTree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, loadType)); +} + +void Compiler::fgValueNumberFieldStore(GenTree* storeNode, + GenTree* baseAddr, + FieldSeqNode* fieldSeq, + ssize_t offset, + unsigned storeSize, + ValueNum value) +{ + assert(fieldSeq != nullptr); + + if (fieldSeq == FieldSeqStore::NotAField()) + { + fgMutateGcHeap(storeNode DEBUGARG("NotAField field store")); + return; + } + + // Two cases: + // 1) Instance field / "complex" static: heap[field][baseAddr][offset + load size] = value. + // 2) "Simple" static: heap[field][offset + load size] = value. + // + unsigned fieldSize; + var_types fieldType; + ValueNum fieldSelectorVN = vnStore->VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType, &fieldSize); + + ValueNum fieldMapVN = ValueNumStore::NoVN; + ValueNum fieldValueSelectorVN = ValueNumStore::NoVN; + if (baseAddr != nullptr) + { + // Construct the "field map" VN. 
It represents memory state of the first field of all objects + // on the heap. This is our primary map. + fieldMapVN = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); + fieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); + } + else + { + fieldMapVN = fgCurMemoryVN[GcHeap]; + fieldValueSelectorVN = fieldSelectorVN; + } + + ValueNum newFieldValueVN = ValueNumStore::NoVN; + if (vnStore->LoadStoreIsEntire(fieldSize, offset, storeSize)) + { + // For memory locations (as opposed to locals), we do not normalize types. + newFieldValueVN = value; + } + else + { + ValueNum oldFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); + newFieldValueVN = vnStore->VNForStore(oldFieldValueVN, fieldSize, offset, storeSize, value); + } + + if (newFieldValueVN != ValueNumStore::NoVN) + { + // Construct the new field map... + ValueNum newFieldMapVN = vnStore->VNForMapStore(fieldMapVN, fieldValueSelectorVN, newFieldValueVN); + + // ...and a new value for the heap. + ValueNum newHeapVN = ValueNumStore::NoVN; + if (baseAddr != nullptr) + { + newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], fieldSelectorVN, newFieldMapVN); + } + else + { + newHeapVN = newFieldMapVN; + } + + recordGcHeapStore(storeNode, newHeapVN DEBUGARG("StoreField")); + } + else + { + // For out-of-bounds stores, the heap has to be invalidated as other fields may be affected. 
+ fgMutateGcHeap(storeNode DEBUGARG("out-of-bounds store to a field")); + } +} + var_types ValueNumStore::TypeOfVN(ValueNum vn) { if (vn == NoVN) @@ -6478,6 +6634,9 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr) case VNF_CastOvf: vnDumpCast(comp, vn); break; + case VNF_BitCast: + vnDumpBitCast(comp, &funcApp); + break; case VNF_ZeroObj: vnDumpZeroObj(comp, &funcApp); break; @@ -6689,6 +6848,16 @@ void ValueNumStore::vnDumpCast(Compiler* comp, ValueNum castVN) } } +void ValueNumStore::vnDumpBitCast(Compiler* comp, VNFuncApp* bitCast) +{ + var_types srcType = TypeOfVN(bitCast->m_args[0]); + var_types castToType = static_cast(ConstantValue(bitCast->m_args[1])); + + printf("BitCast<%s <- %s>(", varTypeName(castToType), varTypeName(srcType)); + comp->vnPrint(bitCast->m_args[0], 0); + printf(")"); +} + void ValueNumStore::vnDumpZeroObj(Compiler* comp, VNFuncApp* zeroObj) { printf("ZeroObj("); @@ -8051,9 +8220,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet())); } - GenTree* arg = lhs->AsOp()->gtOp1; - unsigned lhsSize = lhs->AsIndir()->Size(); - + GenTree* arg = lhs->AsOp()->gtOp1; VNFuncApp funcApp; ValueNum argVN = arg->gtVNPair.GetLiberal(); bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormalValue(argVN), &funcApp); @@ -8061,24 +8228,17 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) GenTreeLclVarCommon* lclVarTree = nullptr; bool isEntire = false; ssize_t offset = 0; + unsigned storeSize = lhs->AsIndir()->Size(); GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = nullptr; if (argIsVNFunc && (funcApp.m_func == VNF_PtrToStatic)) { - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); - assert(fldSeq != nullptr); // We should never see an empty sequence here. + baseAddr = nullptr; // All VNF_PtrToStatic statics are currently "simple". 
+ fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); + offset = vnStore->ConstantValue(funcApp.m_args[2]); - if (fldSeq != FieldSeqStore::NotAField()) - { - ValueNum newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, - rhsVNPair.GetLiberal(), lhs->TypeGet()); - recordGcHeapStore(tree, newHeapVN DEBUGARG("static field store")); - } - else - { - fgMutateGcHeap(tree DEBUGARG("indirect store at NotAField PtrToStatic address")); - } + fgValueNumberFieldStore(tree, baseAddr, fldSeq, offset, storeSize, rhsVNPair.GetLiberal()); } // Is the LHS an array index expression? else if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem) @@ -8101,70 +8261,12 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) rhsVNPair.GetLiberal(), lhs->TypeGet()); recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign")); } - else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) + else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { - assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); - - // The value number from the rhs of the assignment - ValueNum storeVal = rhsVNPair.GetLiberal(); - ValueNum newHeapVN = ValueNumStore::NoVN; - - // We will check that the final field in the sequence matches 'indType'. - var_types indType = lhs->TypeGet(); - - if (baseAddr != nullptr) - { - // Instance field / "complex" static: heap[field][baseAddr][struct fields...] = storeVal. - - var_types firstFieldType; - ValueNum firstFieldSelectorVN = - vnStore->VNForFieldSelector(fldSeq->GetFieldHandle(), &firstFieldType); - - // Construct the "field map" VN. It represents memory state of the first field - // of all objects on the heap. This is our primary map. 
- ValueNum fldMapVN = - vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], firstFieldSelectorVN); - - ValueNum firstFieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); - - ValueNum newFirstFieldValueVN = ValueNumStore::NoVN; - // Optimization: avoid traversting the maps for the value of the first field if - // we do not need it, which is the case if the rest of the field sequence is empty. - if (fldSeq->GetNext() == nullptr) - { - newFirstFieldValueVN = vnStore->VNApplySelectorsAssignTypeCoerce(storeVal, indType); - } - else - { - // Construct the ValueNumber for fldMap[baseAddr]. This (struct) - // map represents the specific field we're looking to store to. - ValueNum firstFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, - firstFieldValueSelectorVN); - - // Construct the maps updating the struct fields in the sequence. - newFirstFieldValueVN = - vnStore->VNApplySelectorsAssign(VNK_Liberal, firstFieldValueVN, fldSeq->GetNext(), - storeVal, indType); - } - - // Finally, construct the new field map... - ValueNum newFldMapVN = - vnStore->VNForMapStore(fldMapVN, firstFieldValueSelectorVN, newFirstFieldValueVN); - - // ...and a new value for the heap. - newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], firstFieldSelectorVN, newFldMapVN); - } - else - { - // "Simple" static: heap[field][struct fields...] = storeVal. - newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, - storeVal, indType); - } - - // Update the GcHeap value. 
- recordGcHeapStore(tree, newHeapVN DEBUGARG("StoreField")); + assert(fldSeq != FieldSeqStore::NotAField()); + fgValueNumberFieldStore(tree, baseAddr, fldSeq, offset, storeSize, rhsVNPair.GetLiberal()); } - else if (arg->DefinesLocalAddr(this, lhsSize, &lclVarTree, &isEntire, &offset)) + else if (arg->DefinesLocalAddr(this, storeSize, &lclVarTree, &isEntire, &offset)) { unsigned lclNum = lclVarTree->GetLclNum(); fldSeq = FieldSeqStore::NotAField(); @@ -8181,10 +8283,10 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) { rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); offset = 0; - lhsSize = lvaLclExactSize(lclVarTree->GetLclNum()); + storeSize = lvaLclExactSize(lclVarTree->GetLclNum()); } - fgValueNumberLocalStore(tree, lclVarTree, lhsSize, offset, rhsVNPair); + fgValueNumberLocalStore(tree, lclVarTree, storeSize, offset, rhsVNPair); } else { @@ -8567,7 +8669,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) FieldSeqNode* zeroOffsetFldSeq = nullptr; if ((typ == TYP_BYREF) && GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFldSeq)) { - ValueNum addrExtended = vnStore->ExtendPtrVN(lcl, zeroOffsetFldSeq); + ValueNum addrExtended = vnStore->ExtendPtrVN(lcl, zeroOffsetFldSeq, 0); if (addrExtended != ValueNumStore::NoVN) { assert(lcl->gtVNPair.BothEqual()); @@ -8612,7 +8714,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) FieldSeqNode* zeroOffsetFldSeq = nullptr; if ((typ == TYP_BYREF) && GetZeroOffsetFieldMap()->Lookup(lclFld, &zeroOffsetFldSeq)) { - ValueNum addrExtended = vnStore->ExtendPtrVN(lclFld, zeroOffsetFldSeq); + ValueNum addrExtended = vnStore->ExtendPtrVN(lclFld, zeroOffsetFldSeq, 0); if (addrExtended != ValueNumStore::NoVN) { lclFld->gtVNPair.SetBoth(addrExtended); @@ -8719,7 +8821,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) FieldSeqNode* zeroOffsetFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq)) { - ValueNum addrExtended = vnStore->ExtendPtrVN(arg->AsIndir()->Addr(), 
zeroOffsetFieldSeq); + ValueNum addrExtended = vnStore->ExtendPtrVN(arg->AsIndir()->Addr(), zeroOffsetFieldSeq, 0); if (addrExtended != ValueNumStore::NoVN) { // We don't care about lib/cons differences for addresses. @@ -8800,8 +8902,9 @@ void Compiler::fgValueNumberTree(GenTree* tree) assert(addrNvnp.BothEqual() && (addrXvnp == vnStore->VNPForEmptyExcSet())); ValueNum boxAddrVN = addrNvnp.GetLiberal(); ValueNum fieldSeqVN = vnStore->VNForFieldSeq(nullptr); + ValueNum offsetVN = vnStore->VNForIntPtrCon(-TARGET_POINTER_SIZE); ValueNum staticAddrVN = - vnStore->VNForFunc(tree->TypeGet(), VNF_PtrToStatic, boxAddrVN, fieldSeqVN); + vnStore->VNForFunc(tree->TypeGet(), VNF_PtrToStatic, boxAddrVN, fieldSeqVN, offsetVN); tree->gtVNPair = ValueNumPair(staticAddrVN, staticAddrVN); } else // TODO-VNTypes: this code needs to encode the types of the indirections. @@ -8838,15 +8941,20 @@ void Compiler::fgValueNumberTree(GenTree* tree) // We will "evaluate" this as part of the assignment. else if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0) { - unsigned loadSize = tree->AsIndir()->Size(); - ssize_t offset = 0; + var_types loadType = tree->TypeGet(); + ssize_t offset = 0; + unsigned loadSize = tree->AsIndir()->Size(); FieldSeqNode* localFldSeq = nullptr; - VNFuncApp funcApp; + VNFuncApp funcApp{VNF_COUNT}; - // Is it a local or a heap address? - if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq, &offset) && - lvaInSsa(lclVarTree->GetLclNum()) && lclVarTree->HasSsaName()) + // TODO-1stClassStructs: delete layout-less "IND(struct)" nodes and the "loadSize == 0" condition. 
+ if (loadSize == 0) + { + tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadType)); + } + else if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq, &offset) && + lvaInSsa(lclVarTree->GetLclNum()) && lclVarTree->HasSsaName()) { unsigned ssaNum = lclVarTree->GetSsaNum(); LclVarDsc* varDsc = lvaGetDesc(lclVarTree); @@ -8863,93 +8971,25 @@ void Compiler::fgValueNumberTree(GenTree* tree) unsigned lclSize = lvaLclExactSize(lclVarTree->GetLclNum()); offset = offset + lclVarTree->GetLclOffs(); - // TODO-1stClassStructs-CQ: delete layout-less struct nodes and the "loadSize == 0" condition. - if (loadSize != 0) - { - tree->gtVNPair = vnStore->VNPairForLoad(lclVNPair, lclSize, tree->TypeGet(), offset, - loadSize); - } - else - { - tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); - } + tree->gtVNPair = vnStore->VNPairForLoad(lclVNPair, lclSize, tree->TypeGet(), offset, loadSize); } - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToStatic)) { - var_types indType = tree->TypeGet(); - ValueNum fieldSeqVN = funcApp.m_args[1]; - - FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN); - - if (fldSeqForStaticVar != FieldSeqStore::NotAField()) - { - assert(fldSeqForStaticVar != nullptr); - - ValueNum selectedStaticVar; - // We model statics as indices into the GcHeap (which is a subset of ByrefExposed). 
- size_t structSize = 0; - selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], - fldSeqForStaticVar, &structSize); - selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize); + fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); + offset = vnStore->ConstantValue(funcApp.m_args[2]); - tree->gtVNPair.SetLiberal(selectedStaticVar); - tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, indType)); - } - else - { - JITDUMP(" *** Missing field sequence info for VNF_PtrToStatic value GT_IND\n"); - tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); // a new unique value number - } - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); + // Note VNF_PtrToStatic statics are currently always "simple". + fgValueNumberFieldLoad(tree, /* baseAddr */ nullptr, fldSeq, offset); } else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToArrElem)) { fgValueNumberArrIndexVal(tree, &funcApp, addrXvnp); } - else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq)) + else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { - assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); - - // The size of the ultimate value we will select, if it is of a struct type. - size_t structSize = 0; - ValueNum valueVN = ValueNumStore::NoVN; - - if (baseAddr != nullptr) - { - // Instance field / "complex" static: heap[field][baseAddr][struct fields...]. - - // Get the selector for the first field. - var_types firstFieldType; - ValueNum firstFieldSelectorVN = - vnStore->VNForFieldSelector(fldSeq->GetFieldHandle(), &firstFieldType, &structSize); - - ValueNum fldMapVN = - vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], firstFieldSelectorVN); - - ValueNum firstFieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); - - // Construct the value number for fldMap[baseAddr]. 
- ValueNum firstFieldValueVN = - vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, firstFieldValueSelectorVN); - - // Finally, account for the rest of the fields in the sequence. - valueVN = - vnStore->VNApplySelectors(VNK_Liberal, firstFieldValueVN, fldSeq->GetNext(), &structSize); - } - else - { - // "Simple" static: heap[static][struct fields...]. - valueVN = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, &structSize); - } - - valueVN = vnStore->VNApplySelectorsTypeCheck(valueVN, tree->TypeGet(), structSize); - tree->gtVNPair.SetLiberal(valueVN); - - // The conservative value is a new, unique VN. - tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); + assert(fldSeq != FieldSeqStore::NotAField()); + fgValueNumberFieldLoad(tree, baseAddr, fldSeq, offset); } else // We don't know where the address points, so it is an ByrefExposed load. { @@ -8957,8 +8997,9 @@ void Compiler::fgValueNumberTree(GenTree* tree) ValueNum loadVN = fgValueNumberByrefExposedLoad(typ, addrVN); tree->gtVNPair.SetLiberal(loadVN); tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } + + tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } // To be able to propagate exception sets, we give location nodes the "Void" VN. @@ -9763,6 +9804,28 @@ ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, return {castLibVN, castConVN}; } +ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types type) +{ + // BitCast(BitCast(x)) => BitCast(x). + VNFuncApp srcVNFunc; + if (GetVNFunc(srcVN, &srcVNFunc) && (srcVNFunc.m_func == VNF_BitCast)) + { + srcVN = srcVNFunc.m_args[0]; + } + + if (TypeOfVN(srcVN) == type) + { + return srcVN; + } + + // TODO-PhysicalVN: document the intended semantics for small types... 
+ // Also document the overall strategy w.r.t bitcasts and how they were + // chosen over identity stores/selections. + assert((type != TYP_STRUCT) || (TypeOfVN(srcVN) != TYP_STRUCT)); + + return VNForFunc(type, VNF_BitCast, srcVN, VNForIntCon(type)); +} + void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc) { unsigned nArgs = ValueNumStore::VNFuncArity(vnf); diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 79f38d3b84a24c..27449871a2de57 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -390,6 +390,7 @@ class ValueNumStore // This block of methods gets value numbers for constants of primitive types. ValueNum VNForIntCon(INT32 cnsVal); + ValueNum VNForIntPtrCon(ssize_t cnsVal); ValueNum VNForLongCon(INT64 cnsVal); ValueNum VNForFloatCon(float cnsVal); ValueNum VNForDoubleCon(double cnsVal); @@ -602,7 +603,7 @@ class ValueNumStore unsigned DecodePhysicalSelector(ValueNum selector, unsigned* pSize); - ValueNum VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, size_t* pStructSize = nullptr); + ValueNum VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, unsigned* pSize); // These functions parallel the ones above, except that they take liberal/conservative VN pairs // as arguments, and return such a pair (the pair of the function applied to the liberal args, and @@ -683,7 +684,12 @@ class ValueNumStore unsigned storeSize, ValueNumPair value); - ValueNum VNForLoadStoreBitcast(ValueNumKind vnk, ValueNum value, var_types indType, unsigned indSize); + bool LoadStoreIsEntire(unsigned locationSize, ssize_t offset, unsigned indSize) const + { + return (offset == 0) && (locationSize == indSize); + } + + ValueNum VNForLoadStoreBitcast(ValueNum value, var_types indType, unsigned indSize); ValueNumPair VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize); @@ -716,6 +722,8 @@ class ValueNumStore bool srcIsUnsigned = 
false, bool hasOverflowCheck = false); + ValueNum VNForBitCast(ValueNum srcVN, var_types castToType); + bool IsVNNotAField(ValueNum vn); ValueNum VNForFieldSeq(FieldSeqNode* fieldSeq); @@ -729,7 +737,7 @@ class ValueNumStore ValueNum ExtendPtrVN(GenTree* opA, GenTree* opB); // If "opA" has a PtrToLoc, PtrToArrElem, or PtrToStatic application as its value numbers, returns the VN for the // pointer form extended with "fieldSeq"; or else NoVN. - ValueNum ExtendPtrVN(GenTree* opA, FieldSeqNode* fieldSeq); + ValueNum ExtendPtrVN(GenTree* opA, FieldSeqNode* fieldSeq, ssize_t offset); // Queries on value numbers. // All queries taking value numbers require that those value numbers are valid, that is, that @@ -1082,6 +1090,8 @@ class ValueNumStore // Prints the cast's representation mirroring GT_CAST's dump format. void vnDumpCast(Compiler* comp, ValueNum castVN); + void vnDumpBitCast(Compiler* comp, VNFuncApp* bitCast); + // Requires "zeroObj" to be a VNF_ZeroObj. Prints its representation. void vnDumpZeroObj(Compiler* comp, VNFuncApp* zeroObj); diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 80fa93c2f63741..07b1b788434cdd 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -9,18 +9,21 @@ ValueNumFuncDef(MemOpaque, 1, false, false, false) // Args: 0: loop num ValueNumFuncDef(MapStore, 4, false, false, false) // Args: 0: map, 1: index (e. g. field handle), 2: value being stored, 3: loop num. ValueNumFuncDef(MapPhysicalStore, 3, false, false, false) // Args: 0: map, 1: "physical selector": offset and size, 2: value being stored +ValueNumFuncDef(BitCast, 2, false, false, false) // Args: 0: VN of the arg, 1: VN of the target type +ValueNumFuncDef(ZeroObj, 1, false, false, false) // Args: 0: VN of the class handle. ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. ValueNumFuncDef(PtrToLoc, 2, false, true, false) // Pointer (byref) to a local variable. 
Args: VN's of: 0: var num, 1: FieldSeq. ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: FieldSeq. -ValueNumFuncDef(PtrToStatic, 2, false, true, false) // Pointer (byref) to a static variable (or possibly a field thereof, if the static variable is a struct). - // Args: 0: (VN of) the box's address if the static is "boxed", 1: the field sequence, of which the first element is the static itself. +ValueNumFuncDef(PtrToStatic, 3, false, true, false) // Pointer (byref) to a static variable (or possibly a field thereof, if the static variable is a struct). + // Args: 0: (VN of) the box's address if the static is "boxed", + // 1: (VN of) the field sequence, of which the first element is the static itself. + // 2: (VN of) offset for the constituent struct fields ValueNumFuncDef(Phi, 2, false, false, false) // A phi function. Only occurs as arg of PhiDef or PhiMemoryDef. Arguments are SSA numbers of var being defined. ValueNumFuncDef(PhiDef, 3, false, false, false) // Args: 0: local var # (or -1 for memory), 1: SSA #, 2: VN of definition. // Wouldn't need this if I'd made memory a regular local variable... ValueNumFuncDef(PhiMemoryDef, 2, false, false, false) // Args: 0: VN for basic block pointer, 1: VN of definition -ValueNumFuncDef(ZeroObj, 1, false, false, false) // Zero-initialized struct. Args: 0: VN of the class handle. ValueNumFuncDef(InitVal, 1, false, false, false) // An input arg, or init val of a local Args: 0: a constant VN. 
From e8758c65ece1be32cc615515cf61958fc86ca118 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Wed, 20 Apr 2022 12:11:25 +0300 Subject: [PATCH 05/17] Physical VN -- arrays --- src/coreclr/jit/compiler.h | 41 +--- src/coreclr/jit/valuenum.cpp | 343 +++++++++++++------------------- src/coreclr/jit/valuenumfuncs.h | 3 +- 3 files changed, 141 insertions(+), 246 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 9bb221671fb1f9..9b1f249d7d9784 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4692,43 +4692,9 @@ class Compiler // tree node). void fgValueNumber(); - // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. - // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. - // The 'indType' is the indirection type of the lhs of the assignment and will typically - // match the element type of the array or fldSeq. When this type doesn't match - // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] - // - ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - FieldSeqNode* fldSeq, - ValueNum rhsVN, - var_types indType); - - // Requires that "tree" is a GT_IND marked as an array index, and that its address argument - // has been parsed to yield the other input arguments. If evaluation of the address - // can raise exceptions, those should be captured in the exception set "addrXvnp". - // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. - // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique - // VN for the conservative VN.) Also marks the tree's argument as the address of an array element. - // The type tree->TypeGet() will typically match the element type of the array or fldSeq. 
- // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN - // - ValueNum fgValueNumberArrIndexVal(GenTree* tree, - CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - ValueNumPair addrXvnp, - FieldSeqNode* fldSeq); - - // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown - // by evaluating the array index expression "tree". Returns the value number resulting from - // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the - // "GT_IND" that does the dereference, and it is given the returned value number. - ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); + void fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc); - // Compute the value number for a byref-exposed load of the given type via the given pointerVN. - ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); + void fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value); void fgValueNumberLocalStore(GenTree* storeNode, GenTreeLclVarCommon* lclDefNode, @@ -4746,6 +4712,9 @@ class Compiler unsigned storeSize, ValueNum value); + // Compute the value number for a byref-exposed load of the given type via the given pointerVN. + ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); + unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 5ef4039874941b..30240aae3041b2 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -4382,6 +4382,9 @@ ValueNum ValueNumStore::VNForStore(ValueNum locationValue, { assert(storeSize > 0); + // The caller is expected to handle identity stores, applying the appropriate normalization policy. 
+ assert(!LoadStoreIsEntire(locationSize, offset, storeSize)); + unsigned storeOffset = static_cast(offset); if ((offset < 0) || (locationSize < (storeOffset + storeSize))) @@ -4393,12 +4396,6 @@ ValueNum ValueNumStore::VNForStore(ValueNum locationValue, return NoVN; } - // Note how we handle identity (whole/entire) stores via selection here. - // This is to allow the caller to apply its own policy in case of type - // mismatches between the underlying location and value being stored. - // Currently, this policy is to normalize for locals and leave all other - // locations (possibly) denormalized. - // JITDUMP(" VNForStore:\n"); return VNForMapPhysicalStore(locationValue, storeOffset, storeSize, value); } @@ -4633,8 +4630,13 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t } else if (funcApp.m_func == VNF_PtrToArrElem) { + VNFuncApp offsFunc; + GetVNFunc(funcApp.m_args[3], &offsFunc); + ValueNum fieldSeqVN = FieldSeqVNAppend(offsFunc.m_args[0], fldSeq); + ValueNum offsetVN = VNForIntPtrCon(ConstantValue(offsFunc.m_args[1]) + offset); + res = VNForFunc(TYP_BYREF, VNF_PtrToArrElem, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], - FieldSeqVNAppend(funcApp.m_args[3], fldSeq)); + VNForFunc(TYP_I_IMPL, VNF_ArrElemOffset, fieldSeqVN, offsetVN)); } if (res != NoVN) { @@ -4644,223 +4646,138 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t return res; } -ValueNum Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - FieldSeqNode* fldSeq, - ValueNum rhsVN, - var_types indType) +void Compiler::fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc) { - bool invalidateArray = false; - ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); - var_types arrElemType = DecodeElemType(elemTypeEq); - ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); - 
ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); - ValueNum hAtArrTypeAtArrAtInx = vnStore->VNForMapSelect(VNK_Liberal, arrElemType, hAtArrTypeAtArr, inxVN); + assert(loadTree->OperIsIndir() && (addrFunc->m_func == VNF_PtrToArrElem)); - ValueNum newValAtInx = ValueNumStore::NoVN; - ValueNum newValAtArr = ValueNumStore::NoVN; - ValueNum newValAtArrType = ValueNumStore::NoVN; - - if (fldSeq == FieldSeqStore::NotAField()) - { - // This doesn't represent a proper array access - JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrIndexAssign\n"); + CORINFO_CLASS_HANDLE elemTypeEq = CORINFO_CLASS_HANDLE(vnStore->ConstantValue(addrFunc->m_args[0])); + ValueNum arrVN = addrFunc->m_args[1]; + ValueNum inxVN = addrFunc->m_args[2]; + VNFuncApp offsFunc; + vnStore->GetVNFunc(addrFunc->m_args[3], &offsFunc); + FieldSeqNode* fieldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); + ssize_t offset = vnStore->ConstantValue(offsFunc.m_args[1]); - // Store a new unique value for newValAtArrType - newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_MEM); - invalidateArray = true; - } - else + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + if (fieldSeq == FieldSeqStore::NotAField()) { - // Note that this does the right thing if "fldSeq" is null -- returns last "rhs" argument. - // This is the value that should be stored at "arr[inx]". - newValAtInx = vnStore->VNApplySelectorsAssign(VNK_Liberal, hAtArrTypeAtArrAtInx, fldSeq, rhsVN, indType); - - // TODO-VNTypes: the validation below is a workaround for logic in ApplySelectorsAssignTypeCoerce - // not handling some cases correctly. Remove once ApplySelectorsAssignTypeCoerce has been fixed. - var_types arrElemFldType = - (fldSeq != nullptr) ? 
eeGetFieldType(fldSeq->GetTail()->GetFieldHandle()) : arrElemType; - - if (indType != arrElemFldType) - { - // Mismatched types: Store between different types (indType into array of arrElemFldType) - // - - JITDUMP(" *** Mismatched types in fgValueNumberArrIndexAssign\n"); - - // Store a new unique value for newValAtArrType - newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_MEM); - invalidateArray = true; - } + loadTree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadTree->TypeGet())); + return; } - if (!invalidateArray) - { - newValAtArr = vnStore->VNForMapStore(hAtArrTypeAtArr, inxVN, newValAtInx); - newValAtArrType = vnStore->VNForMapStore(hAtArrType, arrVN, newValAtArr); - } + // The VN inputs are required to be non-exceptional values. + assert(arrVN == vnStore->VNNormalValue(arrVN)); + assert(inxVN == vnStore->VNNormalValue(inxVN)); -#ifdef DEBUG - if (verbose) - { - printf(" hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), ", hAtArrType, fgCurMemoryVN[GcHeap]); + // Heap[elemTypeEq][arrVN][inx][offset + size]. + var_types elemType = DecodeElemType(elemTypeEq); + ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); + JITDUMP(" Array element load: elemTypeEq is " FMT_VN " for %s[]\n", elemTypeEqVN, + (elemType == TYP_STRUCT) ? 
eeGetClassName(elemTypeEq) : varTypeName(elemType)); - if (arrElemType == TYP_STRUCT) - { - printf("%s[]).\n", eeGetClassName(elemTypeEq)); - } - else - { - printf("%s[]).\n", varTypeName(arrElemType)); - } - printf(" hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ")\n", hAtArrTypeAtArr, - hAtArrType, arrVN); - printf(" hAtArrTypeAtArrAtInx " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), inx=" FMT_VN "):%s\n", - hAtArrTypeAtArrAtInx, hAtArrTypeAtArr, inxVN, ValueNumStore::VNMapTypeName(arrElemType)); + ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); + JITDUMP(" GcHeap[elemTypeEq: " FMT_VN "] is " FMT_VN "\n", elemTypeEqVN, hAtArrType); - if (!invalidateArray) - { - printf(" newValAtInd " FMT_VN " is ", newValAtInx); - vnStore->vnDump(this, newValAtInx); - printf("\n"); + ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); + JITDUMP(" GcHeap[elemTypeEq][array: " FMT_VN "] is " FMT_VN "\n", arrVN, hAtArrTypeAtArr); - printf(" newValAtArr " FMT_VN " is ", newValAtArr); - vnStore->vnDump(this, newValAtArr); - printf("\n"); - } + ValueNum wholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemType, hAtArrTypeAtArr, inxVN); + JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] is " FMT_VN "\n", inxVN, wholeElem); - printf(" newValAtArrType " FMT_VN " is ", newValAtArrType); - vnStore->vnDump(this, newValAtArrType); - printf("\n"); - } -#endif // DEBUG + unsigned elemSize = (elemType == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemType); + var_types loadType = loadTree->TypeGet(); + unsigned loadSize = loadTree->AsIndir()->Size(); + ValueNum loadValueVN = vnStore->VNForLoad(VNK_Liberal, wholeElem, elemSize, loadType, offset, loadSize); - return vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], elemTypeEqVN, newValAtArrType); + loadTree->gtVNPair.SetLiberal(loadValueVN); + loadTree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, loadType)); } -ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* pFuncApp, ValueNumPair addrXvnp) +void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value) { - assert(vnStore->IsVNHandle(pFuncApp->m_args[0])); - CORINFO_CLASS_HANDLE arrElemTypeEQ = CORINFO_CLASS_HANDLE(vnStore->ConstantValue(pFuncApp->m_args[0])); - ValueNum arrVN = pFuncApp->m_args[1]; - ValueNum inxVN = pFuncApp->m_args[2]; - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(pFuncApp->m_args[3]); - return fgValueNumberArrIndexVal(tree, arrElemTypeEQ, arrVN, inxVN, addrXvnp, fldSeq); -} + assert(addrFunc->m_func == VNF_PtrToArrElem); -ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree, - CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - ValueNumPair addrXvnp, - FieldSeqNode* fldSeq) -{ - assert(tree == nullptr || tree->OperIsIndir()); + CORINFO_CLASS_HANDLE elemTypeEq = CORINFO_CLASS_HANDLE(vnStore->ConstantValue(addrFunc->m_args[0])); + ValueNum arrVN = addrFunc->m_args[1]; + ValueNum inxVN = addrFunc->m_args[2]; + VNFuncApp offsFunc; + vnStore->GetVNFunc(addrFunc->m_args[3], &offsFunc); + FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); + ssize_t offset = vnStore->ConstantValue(offsFunc.m_args[1]); - // The VN inputs are required to be non-exceptional values. 
- assert(arrVN == vnStore->VNNormalValue(arrVN)); - assert(inxVN == vnStore->VNNormalValue(inxVN)); + bool invalidateArray = false; + var_types elemType = DecodeElemType(elemTypeEq); + ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); + JITDUMP(" Array element store: elemTypeEq is " FMT_VN " for %s[]\n", elemTypeEqVN, + (elemType == TYP_STRUCT) ? eeGetClassName(elemTypeEq) : varTypeName(elemType)); - var_types elemTyp = DecodeElemType(elemTypeEq); - var_types indType = (tree == nullptr) ? elemTyp : tree->TypeGet(); - ValueNum selectedElem; - unsigned elemWidth = elemTyp == TYP_STRUCT ? info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemTyp); + ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); + JITDUMP(" GcHeap[elemTypeEq: " FMT_VN "] is " FMT_VN "\n", elemTypeEqVN, hAtArrType); - if ((fldSeq == FieldSeqStore::NotAField()) || (genTypeSize(indType) > elemWidth)) - { - // This doesn't represent a proper array access - JITDUMP(" *** Not a proper arrray access encountered in fgValueNumberArrIndexVal\n"); + ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); + JITDUMP(" GcHeap[elemTypeEq][array: " FMT_VN "] is " FMT_VN "\n", arrVN, hAtArrTypeAtArr); - // a new unique value number - selectedElem = vnStore->VNForExpr(compCurBB, indType); + ValueNum newWholeElem = ValueNumStore::NoVN; + ValueNum newValAtArr = ValueNumStore::NoVN; + ValueNum newValAtArrType = ValueNumStore::NoVN; -#ifdef DEBUG - if (verbose) - { - printf(" IND of PtrToArrElem is unique VN " FMT_VN ".\n", selectedElem); - } -#endif // DEBUG + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. 
+ if (fldSeq == FieldSeqStore::NotAField()) + { + // This doesn't represent a proper array access + JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrayElemStore\n"); - if (tree != nullptr) - { - tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(selectedElem, selectedElem), addrXvnp); - } + // Store a new unique value for newValAtArrType + newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_MEM); + invalidateArray = true; } else { - ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); - ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); - ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); - ValueNum wholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemTyp, hAtArrTypeAtArr, inxVN); + unsigned elemSize = + (elemType == TYP_STRUCT) ? info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemType); -#ifdef DEBUG - if (verbose) + // This is the value that should be stored at "arr[inx]". + if (vnStore->LoadStoreIsEntire(elemSize, offset, storeSize)) { - printf(" hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), ", hAtArrType, fgCurMemoryVN[GcHeap]); - if (elemTyp == TYP_STRUCT) - { - printf("%s[]).\n", eeGetClassName(elemTypeEq)); - } - else - { - printf("%s[]).\n", varTypeName(elemTyp)); - } - - printf(" hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ").\n", - hAtArrTypeAtArr, hAtArrType, arrVN); - - printf(" wholeElem " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), ind=" FMT_VN ").\n", wholeElem, - hAtArrTypeAtArr, inxVN); + // For memory locations (as opposed to locals), we do not normalize types. 
+ newWholeElem = value; } -#endif // DEBUG - - selectedElem = wholeElem; - size_t elemStructSize = 0; - if (fldSeq) + else { - selectedElem = vnStore->VNApplySelectors(VNK_Liberal, wholeElem, fldSeq, &elemStructSize); - elemTyp = vnStore->TypeOfVN(selectedElem); - } - selectedElem = vnStore->VNApplySelectorsTypeCheck(selectedElem, indType, elemStructSize); - selectedElem = vnStore->VNWithExc(selectedElem, addrXvnp.GetLiberal()); + ValueNum oldWholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemType, hAtArrTypeAtArr, inxVN); + JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] is " FMT_VN "\n", inxVN, oldWholeElem); -#ifdef DEBUG - if (verbose && (selectedElem != wholeElem)) - { - printf(" selectedElem is " FMT_VN " after applying selectors.\n", selectedElem); + newWholeElem = vnStore->VNForStore(oldWholeElem, elemSize, offset, storeSize, value); } -#endif // DEBUG - if (tree != nullptr) + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the type check below. + var_types arrElemFldType = + (fldSeq != nullptr) ? 
eeGetFieldType(fldSeq->GetTail()->GetFieldHandle()) : elemType; + + if ((storeNode->gtGetOp1()->TypeGet() != arrElemFldType) || (newWholeElem == ValueNumStore::NoVN)) { - tree->gtVNPair.SetLiberal(selectedElem); - tree->gtVNPair.SetConservative(vnStore->VNUniqueWithExc(tree->TypeGet(), addrXvnp.GetConservative())); + // Mismatched types: Store between different types (indType into array of arrElemFldType) + JITDUMP(" *** Mismatched types in fgValueNumberArrayElemStore\n"); + + // Store a new unique value for newValAtArrType + newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_MEM); + invalidateArray = true; } } - return selectedElem; -} - -ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN) -{ - if (type == TYP_STRUCT) - { - // We can't assign a value number for a read of a struct as we can't determine - // how many bytes will be read by this load, so return a new unique value number - // - return vnStore->VNForExpr(compCurBB, TYP_STRUCT); - } - else + if (!invalidateArray) { - ValueNum memoryVN = fgCurMemoryVN[ByrefExposed]; - // The memoization for VNFunc applications does not factor in the result type, so - // VNF_ByrefExposedLoad takes the loaded type as an explicit parameter. 
- ValueNum typeVN = vnStore->VNForIntCon(type); - ValueNum loadVN = - vnStore->VNForFunc(type, VNF_ByrefExposedLoad, typeVN, vnStore->VNNormalValue(pointerVN), memoryVN); - return loadVN; + JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] = " FMT_VN ":\n", inxVN, newWholeElem); + newValAtArr = vnStore->VNForMapStore(hAtArrTypeAtArr, inxVN, newWholeElem); + + JITDUMP(" GcHeap[elemTypeEq][array: " FMT_VN "] = " FMT_VN ":\n", arrVN, newValAtArr); + newValAtArrType = vnStore->VNForMapStore(hAtArrType, arrVN, newValAtArr); } + + JITDUMP(" GcHeap[elemTypeEq: " FMT_VN "] = " FMT_VN ":\n", elemTypeEqVN, newValAtArrType); + ValueNum newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], elemTypeEqVN, newValAtArrType); + + recordGcHeapStore(storeNode, newHeapVN DEBUGARG("array element store")); } void Compiler::fgValueNumberLocalStore(GenTree* storeNode, @@ -5040,6 +4957,27 @@ void Compiler::fgValueNumberFieldStore(GenTree* storeNode, } } +ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN) +{ + if (type == TYP_STRUCT) + { + // We can't assign a value number for a read of a struct as we can't determine + // how many bytes will be read by this load, so return a new unique value number + // + return vnStore->VNForExpr(compCurBB, TYP_STRUCT); + } + else + { + ValueNum memoryVN = fgCurMemoryVN[ByrefExposed]; + // The memoization for VNFunc applications does not factor in the result type, so + // VNF_ByrefExposedLoad takes the loaded type as an explicit parameter. + ValueNum typeVN = vnStore->VNForIntCon(type); + ValueNum loadVN = + vnStore->VNForFunc(type, VNF_ByrefExposedLoad, typeVN, vnStore->VNNormalValue(pointerVN), memoryVN); + return loadVN; + } +} + var_types ValueNumStore::TypeOfVN(ValueNum vn) { if (vn == NoVN) @@ -8240,26 +8178,9 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) fgValueNumberFieldStore(tree, baseAddr, fldSeq, offset, storeSize, rhsVNPair.GetLiberal()); } - // Is the LHS an array index expression? 
- else if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem) + else if (argIsVNFunc && (funcApp.m_func == VNF_PtrToArrElem)) { - CORINFO_CLASS_HANDLE elemTypeEq = - CORINFO_CLASS_HANDLE(vnStore->ConstantValue(funcApp.m_args[0])); - ValueNum arrVN = funcApp.m_args[1]; - ValueNum inxVN = funcApp.m_args[2]; - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]); -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigns to an array element:\n"); - } -#endif // DEBUG - - ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, - rhsVNPair.GetLiberal(), lhs->TypeGet()); - recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign")); + fgValueNumberArrayElemStore(tree, &funcApp, storeSize, rhsVNPair.GetLiberal()); } else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { @@ -8984,7 +8905,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) } else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToArrElem)) { - fgValueNumberArrIndexVal(tree, &funcApp, addrXvnp); + fgValueNumberArrayElemLoad(tree, &funcApp); } else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { @@ -9447,9 +9368,11 @@ void Compiler::fgValueNumberArrIndexAddr(GenTreeArrAddr* arrAddr) FieldSeqNode* fldSeq = nullptr; GetZeroOffsetFieldMap()->Lookup(arrAddr, &fldSeq); - ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq); + ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq); + ValueNum offsetVN = vnStore->VNForIntPtrCon(0); + ValueNum elemOffsVN = vnStore->VNForFunc(TYP_I_IMPL, VNF_ArrElemOffset, fldSeqVN, offsetVN); - ValueNum arrAddrVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN); + ValueNum arrAddrVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, elemOffsVN); ValueNumPair arrAddrVNP = ValueNumPair(arrAddrVN, arrAddrVN); arrAddr->gtVNPair = vnStore->VNPWithExc(arrAddrVNP, 
vnStore->VNPExceptionSet(arrAddr->Addr()->gtVNPair)); } @@ -11124,7 +11047,9 @@ void Compiler::fgDebugCheckValueNumberedTree(GenTree* tree) break; case VNF_PtrToArrElem: - fullFldSeq = vnStore->FieldSeqVNToFieldSeq(vnFunc.m_args[3]); + VNFuncApp offsFunc; + vnStore->GetVNFunc(vnFunc.m_args[3], &offsFunc); + fullFldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); break; default: diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 07b1b788434cdd..80ea2e38bbae4a 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -14,7 +14,8 @@ ValueNumFuncDef(ZeroObj, 1, false, false, false) // Args: 0: VN of th ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. ValueNumFuncDef(PtrToLoc, 2, false, true, false) // Pointer (byref) to a local variable. Args: VN's of: 0: var num, 1: FieldSeq. -ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: FieldSeq. +ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: "ArrElemOffset" VN. +ValueNumFuncDef(ArrElemOffset, 2, false, false, false) // Args: 0: (VN of) the field sequence, 1: (VN of) the offset ValueNumFuncDef(PtrToStatic, 3, false, true, false) // Pointer (byref) to a static variable (or possibly a field thereof, if the static variable is a struct). // Args: 0: (VN of) the box's address if the static is "boxed", // 1: (VN of) the field sequence, of which the first element is the static itself. 
From 670a5cde040af74fdb68c70df3da9637c48de302 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Wed, 20 Apr 2022 12:12:43 +0300 Subject: [PATCH 06/17] Deleting VNApplySelectors[Assign] --- src/coreclr/jit/valuenum.cpp | 251 +++-------------------------------- src/coreclr/jit/valuenum.h | 25 ---- 2 files changed, 19 insertions(+), 257 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 30240aae3041b2..8b774a643767ee 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -4103,232 +4103,6 @@ ValueNum ValueNumStore::VNForExpr(BasicBlock* block, var_types typ) return resultVN; } -//------------------------------------------------------------------------ -// VNApplySelectors: Find the value number corresponding to map[fieldSeq...]. -// -// Will construct the value number which is the result of selecting from "map" -// with all the fields (VNs of their handles): map[f0][f1]...[fN], indexing -// from outer to inner. The type of the returned number will be that of "f0", -// and all the "inner" maps in the indexing (map[f0][f1], ...) will also have -// the types of their selector fields. -// -// Essentially, this is a helper method that calls VNForMapSelect for the fields -// and provides some logging. -// -// Arguments: -// vnk - the value number kind to use when recursing through phis -// map - the map to select from -// fieldSeq - the fields to use as selectors -// wbFinalStructSize - optional [out] parameter for the size of the last struct -// field in the sequence -// -// Return Value: -// Value number corresponding to indexing "map" with all the fields. -// If the field sequence is empty ("nullptr"), will simply return "map". 
-// -ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, - ValueNum map, - FieldSeqNode* fieldSeq, - size_t* wbFinalStructSize) -{ - for (FieldSeqNode* field = fieldSeq; field != nullptr; field = field->GetNext()) - { - assert(field != FieldSeqStore::NotAField()); - - JITDUMP(" VNApplySelectors:\n"); - var_types fieldType; - unsigned fieldSize; - ValueNum fldHndVN = VNForFieldSelector(field->GetFieldHandle(), &fieldType, &fieldSize); - - if (wbFinalStructSize != nullptr) - { - *wbFinalStructSize = fieldSize; - } - - map = VNForMapSelect(vnk, fieldType, map, fldHndVN); - } - - return map; -} - -//------------------------------------------------------------------------ -// VNApplySelectorsTypeCheck: Constructs VN for an indirect read. -// -// Returns VN corresponding to the value of "IND(indType (ADDR(value))". -// May return a "new, unique" VN for un-analyzable reads (those of structs -// or out-of-bounds ones), or "value" cast to "indType", or "value" itself -// if there was no type mismatch. This function is intended to be used after -// VNApplySelectors has determined the value a particular location has, that -// is "value", and it is being read through an indirection of "indType". -// -// Arguments: -// value - (VN of) value the location has -// indType - the type through which the location is being read -// valueStructSize - size of "value" if is of a struct type -// -// Return Value: -// The constructed value number (see description). -// -// Notes: -// TODO-Bug: it is known that this function does not currently handle -// all cases correctly. E. g. it incorrectly returns CAST(float <- int) -// for cases like IND(float ADDR(int)). It is also too conservative for -// reads of SIMD types (treats them as un-analyzable). 
-// -ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum value, var_types indType, size_t valueStructSize) -{ - var_types typeOfValue = TypeOfVN(value); - - // Check if the typeOfValue is matching/compatible - - if (indType != typeOfValue) - { - // We are trying to read from an 'elem' of type 'elemType' using 'indType' read - - size_t elemTypSize = (typeOfValue == TYP_STRUCT) ? valueStructSize : genTypeSize(typeOfValue); - size_t indTypeSize = genTypeSize(indType); - - if (indTypeSize > elemTypSize) - { - // Reading beyong the end of "value", return a new unique value number. - value = VNMakeNormalUnique(value); - - JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (reading beyond the end)\n"); - } - else if (varTypeIsStruct(indType)) - { - // We do not know how wide this indirection is - return a new unique value number. - value = VNMakeNormalUnique(value); - - JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (indType is TYP_STRUCT)\n"); - } - else - { - // We are trying to read "value" of type "typeOfValue" using "indType" read. - // Insert a cast - this handles small types, i. e. "IND(byte ADDR(int))". - value = VNForCast(value, indType, typeOfValue); - } - } - - return value; -} - -//------------------------------------------------------------------------ -// VNApplySelectorsAssignTypeCoerce: Compute the value number corresponding to `value` -// being written using an indirection of 'dstIndType'. -// -// Arguments: -// value - value number for the value being stored; -// dstIndType - type of the indirection storing the value to the memory; -// -// Return Value: -// The value number corresponding to memory after the assignment. -// -// Notes: It may insert a cast to dstIndType or return a unique value number for an incompatible indType. -// -ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum value, var_types dstIndType) -{ - var_types srcType = TypeOfVN(value); - - // Check if the srcType is matching/compatible. 
- if (dstIndType != srcType) - { - bool isConstant = IsVNConstant(value); - if (isConstant && (srcType == genActualType(dstIndType))) - { - // (i.e. We recorded a constant of TYP_INT for a TYP_BYTE field) - } - else - { - // We are trying to write an 'elem' of type 'elemType' using 'indType' store - - if (varTypeIsStruct(dstIndType)) - { - // return a new unique value number - value = VNMakeNormalUnique(value); - - JITDUMP(" *** Mismatched types in VNApplySelectorsAssignTypeCoerce (indType is TYP_STRUCT)\n"); - } - else - { - // We are trying to write an 'elem' of type 'elemType' using 'indType' store - - // insert a cast of elem to 'indType' - value = VNForCast(value, dstIndType, srcType); - - JITDUMP(" Cast to %s inserted in VNApplySelectorsAssignTypeCoerce (elemTyp is %s)\n", - varTypeName(dstIndType), varTypeName(srcType)); - } - } - } - - return value; -} - -//------------------------------------------------------------------------ -// VNApplySelectorsAssign: Compute the value number corresponding to "map" but with -// the element at "fieldSeq" updated to be equal to "'value' written as 'indType'"; -// this is the new memory value for an assignment of "value" into the memory at -// location "fieldSeq" that occurs in the current block (so long as the selectors -// into that memory occupy disjoint locations, which is true for GcHeap). -// -// Arguments: -// vnk - identifies whether to recurse to Conservative or Liberal value numbers -// when recursing through phis -// map - value number for the field map before the assignment -// value - value number for the value being stored (to the given field) -// dstIndType - type of the indirection storing the value -// -// Return Value: -// The value number corresponding to memory ("map") after the assignment. 
-// -ValueNum ValueNumStore::VNApplySelectorsAssign( - ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum value, var_types dstIndType) -{ - if (fieldSeq == nullptr) - { - return VNApplySelectorsAssignTypeCoerce(value, dstIndType); - } - else - { - assert(fieldSeq != FieldSeqStore::NotAField()); - - if (fieldSeq->GetNext() == nullptr) - { - JITDUMP(" VNApplySelectorsAssign:\n"); - } - - var_types fieldType; - ValueNum fldHndVN = VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType); - - ValueNum valueAfter; - if (fieldSeq->GetNext() != nullptr) - { - ValueNum fseqMap = VNForMapSelect(vnk, fieldType, map, fldHndVN); - valueAfter = VNApplySelectorsAssign(vnk, fseqMap, fieldSeq->GetNext(), value, dstIndType); - } - else - { - valueAfter = VNApplySelectorsAssignTypeCoerce(value, dstIndType); - } - - return VNForMapStore(map, fldHndVN, valueAfter); - } -} - -ValueNumPair ValueNumStore::VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType) -{ - size_t structSize = 0; - ValueNum liberalVN = VNApplySelectors(VNK_Liberal, map.GetLiberal(), fieldSeq, &structSize); - liberalVN = VNApplySelectorsTypeCheck(liberalVN, indType, structSize); - - structSize = 0; - ValueNum conservVN = VNApplySelectors(VNK_Conservative, map.GetConservative(), fieldSeq, &structSize); - conservVN = VNApplySelectorsTypeCheck(conservVN, indType, structSize); - - return ValueNumPair(liberalVN, conservVN); -} - ValueNum ValueNumStore::VNForLoad(ValueNumKind vnk, ValueNum locationValue, unsigned locationSize, @@ -4437,7 +4211,20 @@ ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNum value, var_types indType, else { assert(genTypeSize(indType) == indSize); - value = VNApplySelectorsTypeCheck(value, indType, indSize); + + // TODO-PhysicalVN: remove this pessimization. 
+ if (varTypeIsSIMD(indType))
+ {
+ JITDUMP(" *** Mismatched types in VNForLoadStoreBitcast (indType is SIMD)\n");
+ value = VNForExpr(m_pComp->compCurBB, indType);
+ }
+ else
+ {
+ // We are trying to read "value" of type "typeOfValue" using "indType" read.
+ // Insert a cast - this handles small types, i. e. "IND(byte ADDR(int))".
+ // TODO-Bug: this does not handle "IND(float ADDR(int))" correctly.
+ value = VNForCast(value, indType, typeOfValue);
+ }
 }
 }
@@ -4464,7 +4251,7 @@ ValueNumPair ValueNumStore::VNPairForLoadStoreBitcast(ValueNumPair value, var_ty
 ValueNum ValueNumStore::VNForEmptyMap(var_types type)
 {
- // TODO-PhysicalVN: use one single VN here instead of allocation new ones.
+ // TODO-PhysicalVN: use one single VN here instead of allocating new ones.
 return VNForExpr(nullptr, type);
 }
@@ -9727,7 +9514,7 @@ ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair,
 return {castLibVN, castConVN};
 }
-ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types type)
+ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types castToType)
 {
 // BitCast(BitCast(x)) => BitCast(x).
 VNFuncApp srcVNFunc;
@@ -9736,7 +9523,7 @@ ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types type)
 srcVN = srcVNFunc.m_args[0];
 }
- if (TypeOfVN(srcVN) == type)
+ if (TypeOfVN(srcVN) == castToType)
 {
 return srcVN;
 }
@@ -9744,9 +9531,9 @@ ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types type)
 // TODO-PhysicalVN: document the intended semantics for small types...
 // Also document the overall strategy w.r.t bitcasts and how they were
 // chosen over identity stores/selections.
- assert((type != TYP_STRUCT) || (TypeOfVN(srcVN) != TYP_STRUCT)); + assert((castToType != TYP_STRUCT) || (TypeOfVN(srcVN) != TYP_STRUCT)); - return VNForFunc(type, VNF_BitCast, srcVN, VNForIntCon(type)); + return VNForFunc(castToType, VNF_BitCast, srcVN, VNForIntCon(castToType)); } void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc) diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 27449871a2de57..e845dc6568de75 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -645,20 +645,6 @@ class ValueNumStore // This controls extra tracing of the "evaluation" of "VNF_MapSelect" functions. #define FEATURE_VN_TRACE_APPLY_SELECTORS 1 - ValueNum VNApplySelectors(ValueNumKind vnk, - ValueNum map, - FieldSeqNode* fieldSeq, - size_t* wbFinalStructSize = nullptr); - - ValueNum VNApplySelectorsTypeCheck(ValueNum value, var_types indType, size_t valueStructSize); - - ValueNum VNApplySelectorsAssign( - ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum value, var_types dstIndType); - - ValueNum VNApplySelectorsAssignTypeCoerce(ValueNum value, var_types dstIndType); - - ValueNumPair VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType); - ValueNum VNForLoad(ValueNumKind vnk, ValueNum locationValue, unsigned locationSize, @@ -697,17 +683,6 @@ class ValueNumStore ValueNumPair VNPairForEmptyMap(var_types type); - ValueNumPair VNPairApplySelectorsAssign(ValueNumPair map, - FieldSeqNode* fieldSeq, - ValueNumPair value, - var_types dstIndType) - { - return ValueNumPair(VNApplySelectorsAssign(VNK_Liberal, map.GetLiberal(), fieldSeq, value.GetLiberal(), - dstIndType), - VNApplySelectorsAssign(VNK_Conservative, map.GetConservative(), fieldSeq, - value.GetConservative(), dstIndType)); - } - // Compute the ValueNumber for a cast ValueNum VNForCast(ValueNum srcVN, var_types castToType, From b1950c97bbbf80caf5201c0d35cd6076a8314a12 Mon Sep 17 00:00:00 2001 From: 
SingleAccretion Date: Mon, 25 Apr 2022 17:15:05 +0300 Subject: [PATCH 07/17] Delete VNForEmptyMap Not needed in the "identity stores are bitcasts" scheme. --- src/coreclr/jit/valuenum.cpp | 13 ------------- src/coreclr/jit/valuenum.h | 4 ---- 2 files changed, 17 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 8b774a643767ee..156a031b51fdbf 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -4249,19 +4249,6 @@ ValueNumPair ValueNumStore::VNPairForLoadStoreBitcast(ValueNumPair value, var_ty return ValueNumPair(liberalVN, conservVN); } -ValueNum ValueNumStore::VNForEmptyMap(var_types type) -{ - // TODO-PhysicalVN: use one single VN here instead of allocating new ones. - return VNForExpr(nullptr, type); -} - -ValueNumPair ValueNumStore::VNPairForEmptyMap(var_types type) -{ - ValueNum map = VNForEmptyMap(type); - - return ValueNumPair(map, map); -} - //------------------------------------------------------------------------ // IsVNNotAField: Is the value number a "NotAField" field sequence? 
// diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index e845dc6568de75..43e639641e6936 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -679,10 +679,6 @@ class ValueNumStore ValueNumPair VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize); - ValueNum VNForEmptyMap(var_types type); - - ValueNumPair VNPairForEmptyMap(var_types type); - // Compute the ValueNumber for a cast ValueNum VNForCast(ValueNum srcVN, var_types castToType, From 10e18072e9e2af8d18a3a05ac8b18000649f73f1 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Wed, 20 Apr 2022 13:51:04 +0300 Subject: [PATCH 08/17] Add function headers --- src/coreclr/jit/valuenum.cpp | 259 ++++++++++++++++++++++++++++++----- src/coreclr/jit/valuenum.h | 16 ++- 2 files changed, 237 insertions(+), 38 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 156a031b51fdbf..5eec0d843c55ee 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -2279,11 +2279,10 @@ ValueNum ValueNumStore::VNForFunc( } //------------------------------------------------------------------------------ -// VNForMapStore : Evaluate VNF_MapStore with the given arguments. -// +// VNForMapStore: Create the VN for a precise store (to a precise map). 
// // Arguments: -// map - Map value number +// map - (VN of) the precise map // index - Index value number // value - New value for map[index] // @@ -2292,6 +2291,8 @@ ValueNum ValueNumStore::VNForFunc( // ValueNum ValueNumStore::VNForMapStore(ValueNum map, ValueNum index, ValueNum value) { + assert(MapIsPrecise(map)); + BasicBlock* const bb = m_pComp->compCurBB; BasicBlock::loopNumber const loopNum = bb->bbNatLoopNum; ValueNum const result = VNForFunc(TypeOfVN(map), VNF_MapStore, map, index, value, loopNum); @@ -2308,8 +2309,22 @@ ValueNum ValueNumStore::VNForMapStore(ValueNum map, ValueNum index, ValueNum val return result; } +//------------------------------------------------------------------------------ +// VNForMapPhysicalStore: Create the VN for a physical store (to a physical map). +// +// Arguments: +// map - (VN of) the physical map +// offset - The store offset +// size - Size of "value" (in bytes) +// value - The value being stored +// +// Return Value: +// Value number for "map" with "map[offset:offset + size - 1]" set to "value". +// ValueNum ValueNumStore::VNForMapPhysicalStore(ValueNum map, unsigned offset, unsigned size, ValueNum value) { + assert(MapIsPhysical(map)); + ValueNum selector = EncodePhysicalSelector(offset, size); ValueNum result = VNForFunc(TypeOfVN(map), VNF_MapPhysicalStore, map, selector, value); @@ -2321,25 +2336,21 @@ ValueNum ValueNumStore::VNForMapPhysicalStore(ValueNum map, unsigned offset, uns } //------------------------------------------------------------------------------ -// VNForMapSelect : Evaluate VNF_MapSelect with the given arguments. -// +// VNForMapSelect: Select value from a precise map. // // Arguments: -// vnk - Value number kind -// type - Value type +// vnk - Value number kind (see "VNForMapSelectWork" notes) +// type - The type to select // map - Map value number // index - Index value number // // Return Value: -// Value number for the result of the evaluation. 
-// -// Notes: -// This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate -// "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number -// (liberal/conservative) to read from the SSA def referenced in the phi argument. +// Value number corresponding to "map[index]". // ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNum map, ValueNum index) { + assert(MapIsPrecise(map)); + int budget = m_mapSelectBudget; bool usedRecursiveVN = false; ValueNum result = VNForMapSelectWork(vnk, type, map, index, &budget, &usedRecursiveVN); @@ -2354,8 +2365,25 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNu return result; } +//------------------------------------------------------------------------------ +// VNForMapPhysicalSelect: Select value from a physical map. +// +// Arguments: +// vnk - Value number kind (see the notes for "VNForMapSelect") +// type - The type to select +// map - (VN of) the physical map +// offset - Offset to select from +// size - Size to select +// +// Return Value: +// Value number corresponding to "map[offset:offset + size - 1]". The value +// number returned can be of a type different from "type", as physical maps +// do not have canonical types, only sizes. +// ValueNum ValueNumStore::VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size) { + assert(MapIsPhysical(map)); + ValueNum selector = EncodePhysicalSelector(offset, size); int budget = m_mapSelectBudget; bool usedRecursiveVN = false; @@ -2376,7 +2404,6 @@ ValueNum ValueNumStore::VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, //------------------------------------------------------------------------------ // VNForMapSelectWork : A method that does the work for VNForMapSelect and may call itself recursively. 
// -// // Arguments: // vnk - Value number kind // type - Value type @@ -2400,16 +2427,10 @@ ValueNum ValueNumStore::VNForMapSelectWork( TailCall: // This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here. -#ifdef DEBUG assert(map != NoVN && index != NoVN); assert(map == VNNormalValue(map)); // Arguments carry no exceptions. assert(index == VNNormalValue(index)); // Arguments carry no exceptions. - // Currently, all precise maps represent memory, and all physical - typed values. - const bool mapIsPrecise = (TypeOfVN(map) == TYP_HEAP) || (TypeOfVN(map) == TYP_MEM); - const bool mapIsPhysical = !mapIsPrecise; -#endif // DEBUG - *pUsedRecursiveVN = false; #ifdef DEBUG @@ -2458,7 +2479,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( { if (funcApp.m_func == VNF_MapStore) { - assert(mapIsPrecise); + assert(MapIsPrecise(map)); // select(store(m, i, v), i) == v if (funcApp.m_args[1] == index) @@ -2492,7 +2513,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( } else if (funcApp.m_func == VNF_MapPhysicalStore) { - assert(mapIsPhysical); + assert(MapIsPhysical(map)); #if FEATURE_VN_TRACE_APPLY_SELECTORS JITDUMP(" select("); @@ -2548,7 +2569,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( } else if (funcApp.m_func == VNF_BitCast) { - assert(mapIsPhysical); + assert(MapIsPhysical(map)); #if FEATURE_VN_TRACE_APPLY_SELECTORS JITDUMP(" select(bitcast<%s>(" FMT_VN ")) ==> select(" FMT_VN ")\n", varTypeName(TypeOfVN(funcApp.m_args[0])), funcApp.m_args[0], funcApp.m_args[0]); @@ -2559,7 +2580,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( } else if (funcApp.m_func == VNF_ZeroObj) { - assert(mapIsPhysical); + assert(MapIsPhysical(map)); // TODO-CQ: support selection of TYP_STRUCT here. if (type != TYP_STRUCT) @@ -2707,6 +2728,16 @@ ValueNum ValueNumStore::VNForMapSelectWork( } } +//------------------------------------------------------------------------ +// EncodePhysicalSelector: Get the VN representing a physical selector. 
+// +// Arguments: +// offset - The offset to encode +// size - The size to encode +// +// Return Value: +// VN encoding the "{ offset, size }" tuple. +// ValueNum ValueNumStore::EncodePhysicalSelector(unsigned offset, unsigned size) { assert(size != 0); @@ -2714,6 +2745,17 @@ ValueNum ValueNumStore::EncodePhysicalSelector(unsigned offset, unsigned size) return VNForLongCon(static_cast<uint64_t>(offset) | (static_cast<uint64_t>(size) << 32)); } +//------------------------------------------------------------------------ +// DecodePhysicalSelector: Get the components of a physical selector from the +// VN representing one. +// +// Arguments: +// selector - The VN of the selector obtained via "EncodePhysicalSelector" +// pSize - [out] parameter for the size +// +// Return Value: +// The offset. +// unsigned ValueNumStore::DecodePhysicalSelector(ValueNum selector, unsigned* pSize) { uint64_t value = ConstantValue<uint64_t>(selector); @@ -4103,6 +4145,22 @@ ValueNum ValueNumStore::VNForExpr(BasicBlock* block, var_types typ) return resultVN; } +//------------------------------------------------------------------------ +// VNForLoad: Get the VN for a load from a location (physical map). +// +// Arguments: +// vnk - The kind of VN to select (see "VNForMapSelectWork" notes) +// locationValue - (VN of) the value location has +// locationSize - Size of the location +// loadType - Type being loaded +// offset - In-location offset being loaded from +// loadSize - Number of bytes being loaded +// +// Return Value: +// Value number representing "locationValue[offset:offset + loadSize - 1]", +// normalized to the same actual type as "loadType". Handles out-of-bounds +// loads by returning a "new, unique" VN. +// ValueNum ValueNumStore::VNForLoad(ValueNumKind vnk, ValueNum locationValue, unsigned locationSize, 
- loadValue = VNForLoadStoreBitcast(loadValue, loadType, loadSize); + loadValue = VNForLoadStoreBitCast(loadValue, loadType, loadSize); + + assert(genActualType(TypeOfVN(loadValue)) == genActualType(loadType)); return loadValue; } +//------------------------------------------------------------------------ +// VNPairForLoad: VNForLoad applied to a ValueNumPair. +// ValueNumPair ValueNumStore::VNPairForLoad( ValueNumPair locationValue, unsigned locationSize, var_types loadType, ssize_t offset, unsigned loadSize) { @@ -4148,6 +4211,24 @@ ValueNumPair ValueNumStore::VNPairForLoad( return ValueNumPair(liberalVN, conservVN); } +//------------------------------------------------------------------------ +// VNForStore: Get the VN for a store to a location (physical map). +// +// Arguments: +// locationValue - (VN of) the value location had before the store +// locationSize - Size of the location +// offset - In-location offset being stored to +// storeSize - Number of bytes being stored +// value - (VN of) the value being stored +// +// Return Value: +// Value number for "locationValue" with "storeSize" bytes starting at +// "offset" set to "value". "NoVN" in case of an out-of-bounds store +// (the caller is expected to explicitly handle that). +// +// Notes: +// Does not handle "entire" (whole/identity) stores. +// ValueNum ValueNumStore::VNForStore(ValueNum locationValue, unsigned locationSize, ssize_t offset, @@ -4174,6 +4255,9 @@ ValueNum ValueNumStore::VNForStore(ValueNum locationValue, return VNForMapPhysicalStore(locationValue, storeOffset, storeSize, value); } +//------------------------------------------------------------------------ +// VNPairForStore: VNForStore applied to a ValueNumPair. 
+// ValueNumPair ValueNumStore::VNPairForStore(ValueNumPair locationValue, unsigned locationSize, ssize_t offset, @@ -4195,7 +4279,28 @@ ValueNumPair ValueNumStore::VNPairForStore(ValueNumPair locationValue, return ValueNumPair(liberalVN, conservVN); } -ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNum value, var_types indType, unsigned indSize) +//------------------------------------------------------------------------ +// VNForLoadStoreBitCast: Normalize a value number to the desired type. +// +// Arguments: +// value - (VN of) the value needed normalization +// indType - The type to normalize to +// indSize - The size of "indType" and "value" (relevant for structs) +// +// Return Value: +// Value number the logical "BitCast(value)". +// +// Notes: +// As far as the physical maps are concerned, all values with the same +// size "are equal". However, both IR and the rest of VN do distinguish +// between "4 bytes of TYP_INT" and "4 bytes of TYP_FLOAT". This method +// is called in cases where that gap needs to be bridged and the value +// "normalized" to the appropriate type. Notably, this normalization is +// only performed for primitives -- TYP_STRUCTs of different handles but +// same size are treated as equal (intentionally so -- this is good from +// CQ, TP and simplicity standpoints). +// +ValueNum ValueNumStore::VNForLoadStoreBitCast(ValueNum value, var_types indType, unsigned indSize) { var_types typeOfValue = TypeOfVN(value); @@ -4233,9 +4338,12 @@ ValueNum ValueNumStore::VNForLoadStoreBitcast(ValueNum value, var_types indType, return value; } -ValueNumPair ValueNumStore::VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize) +//------------------------------------------------------------------------ +// VNPairForLoadStoreBitCast: VNForLoadStoreBitCast applied to a ValueNumPair. 
+// +ValueNumPair ValueNumStore::VNPairForLoadStoreBitCast(ValueNumPair value, var_types indType, unsigned indSize) { - ValueNum liberalVN = VNForLoadStoreBitcast(value.GetLiberal(), indType, indSize); + ValueNum liberalVN = VNForLoadStoreBitCast(value.GetLiberal(), indType, indSize); ValueNum conservVN; if (value.BothEqual()) { @@ -4243,7 +4351,7 @@ ValueNumPair ValueNumStore::VNPairForLoadStoreBitcast(ValueNumPair value, var_ty } else { - conservVN = VNForLoadStoreBitcast(value.GetConservative(), indType, indSize); + conservVN = VNForLoadStoreBitCast(value.GetConservative(), indType, indSize); } return ValueNumPair(liberalVN, conservVN); @@ -4420,6 +4528,16 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t return res; } +//------------------------------------------------------------------------ +// fgValueNumberArrayElemLoad: Value number a load from an array element. +// +// Arguments: +// loadTree - The indirection tree performing the load +// addrFunc - The "VNF_PtrToArrElem" function representing the address +// +// Notes: +// Only assigns normal VNs to "loadTree". +// void Compiler::fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc) { assert(loadTree->OperIsIndir() && (addrFunc->m_func == VNF_PtrToArrElem)); @@ -4467,6 +4585,16 @@ void Compiler::fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc loadTree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, loadType)); } +//------------------------------------------------------------------------ +// fgValueNumberArrayElemStore: Update the current heap state after a store +// to an array element. 
+// +// Arguments: +// storeNode - The ASG node performing the store +// addrFunc - The "VNF_PtrToArrElem" function representing the address +// storeSize - The number of bytes being stored +// value - (VN of) the value being stored +// void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value) { assert(addrFunc->m_func == VNF_PtrToArrElem); @@ -4554,6 +4682,22 @@ void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFu recordGcHeapStore(storeNode, newHeapVN DEBUGARG("array element store")); } +//------------------------------------------------------------------------ +// fgValueNumberLocalStore: Assign VNs to the SSA definition corresponding +// to a local store. +// +// Or update the current heap state in case the local was address-exposed. +// +// Arguments: +// storeNode - The node performing the store +// lclDefNode - The local node representating the SSA definition +// storeSize - The number of bytes being stored +// offset - The offset, relative to the local, of the target location +// value - (VN of) the value being stored +// normalize - Whether "value" should be normalized to the local's type +// (in case the store overwrites the entire variable) before +// being written to the SSA descriptor +// void Compiler::fgValueNumberLocalStore(GenTree* storeNode, GenTreeLclVarCommon* lclDefNode, unsigned storeSize, @@ -4574,7 +4718,7 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, ValueNumPair newLclValue; if (vnStore->LoadStoreIsEntire(lclSize, offset, storeSize)) { - newLclValue = normalize ? vnStore->VNPairForLoadStoreBitcast(value, varDsc->TypeGet(), lclSize) : value; + newLclValue = normalize ? 
vnStore->VNPairForLoadStoreBitCast(value, varDsc->TypeGet(), lclSize) : value; } else { @@ -4610,6 +4754,18 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, } } +//------------------------------------------------------------------------ +// fgValueNumberFieldLoad: Value number a class/static field load. +// +// Arguments: +// loadTree - The indirection tree performing the load +// baseAddr - The "base address" of the field (see "GenTree::IsFieldAddr") +// fieldSeq - The field sequence representing the address +// offset - The offset, relative to the field, being loaded from +// +// Notes: +// Only assigns normal VNs to "loadTree". +// void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset) { assert(fieldSeq != nullptr); @@ -4656,6 +4812,18 @@ void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, Fiel loadTree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, loadType)); } +//------------------------------------------------------------------------ +// fgValueNumberFieldStore: Update the current heap state after a store to +// a class/static field. 
+// +// Arguments: +// storeNode - The ASG node performing the store +// baseAddr - The "base address" of the field (see "GenTree::IsFieldAddr") +// fieldSeq - The field sequence representing the address +// offset - The offset, relative to the field, of the target location +// storeSize - The number of bytes being stored +// value - The value being stored +// void Compiler::fgValueNumberFieldStore(GenTree* storeNode, GenTree* baseAddr, FieldSeqNode* fieldSeq, @@ -4752,7 +4920,7 @@ ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointe } } -var_types ValueNumStore::TypeOfVN(ValueNum vn) +var_types ValueNumStore::TypeOfVN(ValueNum vn) const { if (vn == NoVN) { @@ -8108,7 +8276,7 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) if (isEntire) { - newLhsLclVNPair = vnStore->VNPairForLoadStoreBitcast(rhsVNPair, lhsVarDsc->TypeGet(), + newLhsLclVNPair = vnStore->VNPairForLoadStoreBitCast(rhsVNPair, lhsVarDsc->TypeGet(), lhsLclSize); } else @@ -9501,9 +9669,33 @@ ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, return {castLibVN, castConVN}; } +//------------------------------------------------------------------------ +// VNForBitCast: Get the VN representing bitwise reinterpretation of types. +// +// Arguments: +// srcVN - (VN of) the value being cast from +// castToType - The type being cast to +// +// Return Value: +// The value number representing "IND(ADDR(srcVN))". Notably, +// this includes the special sign/zero-extension semantic for small types. +// +// Notes: +// Bitcasts play a very significant role of representing identity (entire) +// selections and stores in the physical maps: when we have to "normalize" +// the types, we need something that represents a "nop" in the selection +// process, and "VNF_BitCast" is that function. See also the notes for +// "VNForLoadStoreBitCast". +// ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types castToType) { // BitCast(BitCast(x)) => BitCast(x). 
+ // This ensures we do not end up with pathologically long chains of + // bitcasts in physical maps. We could do a similar optimization in + // "VNForMapPhysical[Store|Select]"; we presume that's not worth it, + // and it is better TP-wise to skip bitcasts "lazily" when doing the + // selection, as the scenario where they are expected to be common, + // single-field structs, implies short selection chains. VNFuncApp srcVNFunc; if (GetVNFunc(srcVN, &srcVNFunc) && (srcVNFunc.m_func == VNF_BitCast)) { @@ -9515,9 +9707,6 @@ ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types castToType) return srcVN; } - // TODO-PhysicalVN: document the intended semantics for small types... - // Also document the overall strategy w.r.t bitcasts and how they were - // chosen over identity stores/selections. assert((castToType != TYP_STRUCT) || (TypeOfVN(srcVN) != TYP_STRUCT)); return VNForFunc(castToType, VNF_BitCast, srcVN, VNForIntCon(castToType)); diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 43e639641e6936..e18ba8a92fb0eb 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -599,6 +599,16 @@ class ValueNumStore ValueNum VNForMapPhysicalStore(ValueNum map, unsigned offset, unsigned size, ValueNum value); + bool MapIsPrecise(ValueNum map) const + { + return (TypeOfVN(map) == TYP_HEAP) || (TypeOfVN(map) == TYP_MEM); + } + + bool MapIsPhysical(ValueNum map) const + { + return !MapIsPrecise(map); + } + ValueNum EncodePhysicalSelector(unsigned offset, unsigned size); unsigned DecodePhysicalSelector(ValueNum selector, unsigned* pSize); @@ -675,9 +685,9 @@ class ValueNumStore return (offset == 0) && (locationSize == indSize); } - ValueNum VNForLoadStoreBitcast(ValueNum value, var_types indType, unsigned indSize); + ValueNum VNForLoadStoreBitCast(ValueNum value, var_types indType, unsigned indSize); - ValueNumPair VNPairForLoadStoreBitcast(ValueNumPair value, var_types indType, unsigned indSize); + ValueNumPair 
VNPairForLoadStoreBitCast(ValueNumPair value, var_types indType, unsigned indSize); // Compute the ValueNumber for a cast ValueNum VNForCast(ValueNum srcVN, @@ -716,7 +726,7 @@ class ValueNumStore // not true. // Returns TYP_UNKNOWN if the given value number has not been given a type. - var_types TypeOfVN(ValueNum vn); + var_types TypeOfVN(ValueNum vn) const; // Returns BasicBlock::MAX_LOOP_NUM if the given value number's loop nest is unknown or ill-defined. BasicBlock::loopNumber LoopOfVN(ValueNum vn); From f97c1ad33b99bdd87a762c703faa73c8fb481386 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Mon, 25 Apr 2022 16:55:42 +0300 Subject: [PATCH 09/17] fgValueNumberLocalStore: fix arg ordering --- src/coreclr/jit/compiler.h | 10 +-- src/coreclr/jit/valuenum.cpp | 150 +++++++++++++++++------------------ 2 files changed, 80 insertions(+), 80 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 9b1f249d7d9784..fee5d2356c226f 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4692,17 +4692,17 @@ class Compiler // tree node). 
void fgValueNumber(); - void fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc); - - void fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value); - void fgValueNumberLocalStore(GenTree* storeNode, GenTreeLclVarCommon* lclDefNode, + ssize_t offset, unsigned storeSize, - ssize_t storeOffset, ValueNumPair value, bool normalize = true); + void fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc); + + void fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value); + void fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset); void fgValueNumberFieldStore(GenTree* storeNode, diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 5eec0d843c55ee..0ab1096569067c 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -4528,6 +4528,78 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t return res; } +//------------------------------------------------------------------------ +// fgValueNumberLocalStore: Assign VNs to the SSA definition corresponding +// to a local store. +// +// Or update the current heap state in case the local was address-exposed. 
+// +// Arguments: +// storeNode - The node performing the store +// lclDefNode - The local node representating the SSA definition +// offset - The offset, relative to the local, of the target location +// storeSize - The number of bytes being stored +// value - (VN of) the value being stored +// normalize - Whether "value" should be normalized to the local's type +// (in case the store overwrites the entire variable) before +// being written to the SSA descriptor +// +void Compiler::fgValueNumberLocalStore(GenTree* storeNode, + GenTreeLclVarCommon* lclDefNode, + ssize_t offset, + unsigned storeSize, + ValueNumPair value, + bool normalize) +{ + // Should not have been recorded as updating the GC heap. + assert(!GetMemorySsaMap(GcHeap)->Lookup(storeNode)); + + LclVarDsc* varDsc = lvaGetDesc(lclDefNode); + unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclDefNode); + + if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) + { + unsigned lclSize = lvaLclExactSize(lclDefNode->GetLclNum()); + + ValueNumPair newLclValue; + if (vnStore->LoadStoreIsEntire(lclSize, offset, storeSize)) + { + newLclValue = normalize ? vnStore->VNPairForLoadStoreBitCast(value, varDsc->TypeGet(), lclSize) : value; + } + else + { + assert((lclDefNode->gtFlags & GTF_VAR_USEASG) != 0); + // The "lclDefNode" node will be labeled with the SSA number of its "use" identity + // (we looked in a side table above for its "def" identity). Look up that value. + ValueNumPair oldLclValue = varDsc->GetPerSsaData(lclDefNode->GetSsaNum())->m_vnPair; + newLclValue = vnStore->VNPairForStore(oldLclValue, lclSize, offset, storeSize, value); + } + + // Any out-of-bounds stores should have made the local address-exposed. + assert(newLclValue.BothDefined()); + + // We normalize types stored in local locations because things outside VN itself look at them. 
+ assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc)) || !normalize); + + varDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLclValue; + + JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(storeNode), lclDefNode->GetLclNum(), + lclDefSsaNum); + JITDUMPEXEC(vnpPrint(newLclValue, 1)); + JITDUMP("\n"); + } + else if (varDsc->IsAddressExposed()) + { + ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); + recordAddressExposedLocalStore(storeNode, heapVN DEBUGARG("local assign")); + } + else + { + JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not tracked\n", + dspTreeID(storeNode), lclDefNode->GetLclNum()); + } +} + //------------------------------------------------------------------------ // fgValueNumberArrayElemLoad: Value number a load from an array element. // @@ -4682,78 +4754,6 @@ void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFu recordGcHeapStore(storeNode, newHeapVN DEBUGARG("array element store")); } -//------------------------------------------------------------------------ -// fgValueNumberLocalStore: Assign VNs to the SSA definition corresponding -// to a local store. -// -// Or update the current heap state in case the local was address-exposed. 
-// -// Arguments: -// storeNode - The node performing the store -// lclDefNode - The local node representating the SSA definition -// storeSize - The number of bytes being stored -// offset - The offset, relative to the local, of the target location -// value - (VN of) the value being stored -// normalize - Whether "value" should be normalized to the local's type -// (in case the store overwrites the entire variable) before -// being written to the SSA descriptor -// -void Compiler::fgValueNumberLocalStore(GenTree* storeNode, - GenTreeLclVarCommon* lclDefNode, - unsigned storeSize, - ssize_t offset, - ValueNumPair value, - bool normalize) -{ - // Should not have been recorded as updating the GC heap. - assert(!GetMemorySsaMap(GcHeap)->Lookup(storeNode)); - - LclVarDsc* varDsc = lvaGetDesc(lclDefNode); - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclDefNode); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) - { - unsigned lclSize = lvaLclExactSize(lclDefNode->GetLclNum()); - - ValueNumPair newLclValue; - if (vnStore->LoadStoreIsEntire(lclSize, offset, storeSize)) - { - newLclValue = normalize ? vnStore->VNPairForLoadStoreBitCast(value, varDsc->TypeGet(), lclSize) : value; - } - else - { - assert((lclDefNode->gtFlags & GTF_VAR_USEASG) != 0); - // The "lclDefNode" node will be labeled with the SSA number of its "use" identity - // (we looked in a side table above for its "def" identity). Look up that value. - ValueNumPair oldLclValue = varDsc->GetPerSsaData(lclDefNode->GetSsaNum())->m_vnPair; - newLclValue = vnStore->VNPairForStore(oldLclValue, lclSize, offset, storeSize, value); - } - - // Any out-of-bounds stores should have made the local address-exposed. - assert(newLclValue.BothDefined()); - - // We normalize types stored in local locations because things outside VN itself look at them. 
- assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc)) || !normalize); - - varDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLclValue; - - JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(storeNode), lclDefNode->GetLclNum(), - lclDefSsaNum); - JITDUMPEXEC(vnpPrint(newLclValue, 1)); - JITDUMP("\n"); - } - else if (varDsc->IsAddressExposed()) - { - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(storeNode, heapVN DEBUGARG("local assign")); - } - else - { - JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not tracked\n", - dspTreeID(storeNode), lclDefNode->GetLclNum()); - } -} - //------------------------------------------------------------------------ // fgValueNumberFieldLoad: Value number a class/static field load. // @@ -8059,7 +8059,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) case GT_LCL_VAR: { GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon(); - fgValueNumberLocalStore(tree, lcl, lvaLclExactSize(lcl->GetLclNum()), 0, rhsVNPair, /* normalize */ false); + fgValueNumberLocalStore(tree, lcl, 0, lvaLclExactSize(lcl->GetLclNum()), rhsVNPair, /* normalize */ false); } break; @@ -8080,7 +8080,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. 
bool normalize = ((lclFld->gtFlags & GTF_VAR_USEASG) != 0); - fgValueNumberLocalStore(tree, lclFld, storeSize, storeOffset, rhsVNPair, normalize); + fgValueNumberLocalStore(tree, lclFld, storeOffset, storeSize, rhsVNPair, normalize); } break; @@ -8149,7 +8149,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) storeSize = lvaLclExactSize(lclVarTree->GetLclNum()); } - fgValueNumberLocalStore(tree, lclVarTree, storeSize, offset, rhsVNPair); + fgValueNumberLocalStore(tree, lclVarTree, offset, storeSize, rhsVNPair); } else { From caa1998c7cf061c2b53f25ecc18bec968c99a527 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 29 Apr 2022 20:08:10 +0300 Subject: [PATCH 10/17] Add BitCast folding --- src/coreclr/jit/valuenum.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 0ab1096569067c..dddf24ff51727b 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -9709,6 +9709,12 @@ ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types castToType) assert((castToType != TYP_STRUCT) || (TypeOfVN(srcVN) != TYP_STRUCT)); + // TODO-CQ: implement constant folding for BitCast. 
+ if (srcVNFunc.m_func == VNF_ZeroObj) + { + return VNZeroForType(castToType); + } + return VNForFunc(castToType, VNF_BitCast, srcVN, VNForIntCon(castToType)); } From 221fe64528184ffced0794424c689fecfc54878f Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sat, 30 Apr 2022 00:06:35 +0300 Subject: [PATCH 11/17] Fix block assignment numbering --- src/coreclr/jit/valuenum.cpp | 79 +++++++++++++----------------------- 1 file changed, 29 insertions(+), 50 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index dddf24ff51727b..31887d788cc62d 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -4564,7 +4564,7 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, ValueNumPair newLclValue; if (vnStore->LoadStoreIsEntire(lclSize, offset, storeSize)) { - newLclValue = normalize ? vnStore->VNPairForLoadStoreBitCast(value, varDsc->TypeGet(), lclSize) : value; + newLclValue = value; } else { @@ -4578,8 +4578,12 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, // Any out-of-bounds stores should have made the local address-exposed. assert(newLclValue.BothDefined()); - // We normalize types stored in local locations because things outside VN itself look at them. - assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc)) || !normalize); + if (normalize) + { + // We normalize types stored in local locations because things outside VN itself look at them. 
+ newLclValue = vnStore->VNPairForLoadStoreBitCast(newLclValue, varDsc->TypeGet(), lclSize); + assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc))); + } varDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLclValue; @@ -8238,77 +8242,52 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFunc.m_args[1]); } - bool isNewUniq = false; - ValueNumPair newLhsLclVNPair = ValueNumPair(); + ValueNumPair rhsVNPair = ValueNumPair(); if (tree->OperIsInitBlkOp()) { - ValueNum lclVarVN = ValueNumStore::NoVN; + ValueNum initObjVN = ValueNumStore::NoVN; if (isEntire && rhs->IsIntegralConst(0)) { // Note that it is possible to see pretty much any kind of type for the local // (not just TYP_STRUCT) here because of the ASG(BLK(ADDR(LCL_VAR/FLD)), 0) form. - lclVarVN = (lhsVarDsc->TypeGet() == TYP_STRUCT) ? vnStore->VNForZeroObj(lhsVarDsc->GetStructHnd()) - : vnStore->VNZeroForType(lhsVarDsc->TypeGet()); + initObjVN = (lhsVarDsc->TypeGet() == TYP_STRUCT) ? vnStore->VNForZeroObj(lhsVarDsc->GetStructHnd()) + : vnStore->VNZeroForType(lhsVarDsc->TypeGet()); } else { // Non-zero block init is very rare so we'll use a simple, unique VN here. - lclVarVN = vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet()); - isNewUniq = true; + initObjVN = vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet()); + + // TODO-PhysicalVN: remove this pessimization, it was added to avoid diffs. + // There is no need to invalidate the whole local. + offset = 0; + storeSize = lhsLclSize; } - newLhsLclVNPair.SetBoth(lclVarVN); + rhsVNPair.SetBoth(initObjVN); } else { assert(tree->OperIsCopyBlkOp()); + rhsVNPair = vnStore->VNPNormalPair(rhs->gtVNPair); + // TODO-PhysicalVN: with the physical VN scheme, we no longer need this check. 
- if (fgValueNumberBlockAssignmentTypeCheck(lhsVarDsc, lhsFldSeq, rhs)) + if (!fgValueNumberBlockAssignmentTypeCheck(lhsVarDsc, lhsFldSeq, rhs)) { - ValueNumPair rhsVNPair = vnStore->VNPNormalPair(rhs->gtVNPair); - - // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. - if (lhs->TypeIs(TYP_STRUCT) && (vnStore->TypeOfVN(rhsVNPair.GetLiberal()) != TYP_STRUCT)) - { - rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_STRUCT)); - } - - if (isEntire) - { - newLhsLclVNPair = vnStore->VNPairForLoadStoreBitCast(rhsVNPair, lhsVarDsc->TypeGet(), - lhsLclSize); - } - else - { - ValueNumPair oldLhsLclVNPair = lhsVarDsc->GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; - newLhsLclVNPair = vnStore->VNPairForStore(oldLhsLclVNPair, lhsLclSize, offset, - storeSize, rhsVNPair); - } + // Invalidate the whole local. + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet())); + offset = 0; + storeSize = lhsLclSize; } - else + // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. 
+ else if (lhs->TypeIs(TYP_STRUCT) && (vnStore->TypeOfVN(rhsVNPair.GetLiberal()) != TYP_STRUCT)) { - newLhsLclVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet())); - isNewUniq = true; + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet())); } } - lhsVarDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsLclVNPair; - -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigned VN to local var V%02u/%d: ", lhsLclNum, lclDefSsaNum); - if (isNewUniq) - { - printf("new uniq "); - } - vnpPrint(newLhsLclVNPair, 1); - printf("\n"); - } -#endif // DEBUG + fgValueNumberLocalStore(tree, lclVarTree, offset, storeSize, rhsVNPair); } else if (lclVarTree->HasSsaName()) { From fc03ae6c872f1768319c927e48214503285b2a6b Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sat, 30 Apr 2022 19:01:13 +0300 Subject: [PATCH 12/17] Add more TODO-PhysicalVN notes --- src/coreclr/jit/morph.cpp | 3 +-- src/coreclr/jit/valuenum.cpp | 29 ++--------------------------- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 0ce01cc31aadbc..143df1e47f51c0 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -9922,8 +9922,7 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne // false otherwise. // // Notes: -// This check is needed to avoid accessing LCL_VARs with incorrect -// CORINFO_FIELD_HANDLE that would confuse VN optimizations. +// TODO-PhysicalVN: with the physical VN scheme, this method is no longer needed. 
// bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 31887d788cc62d..fe8fc5aa68f164 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -8342,35 +8342,16 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) // Whether "src"'s exact type matches that of the destination location. // // Notes: -// Currently this method only handles local destinations, it should be expanded to support more -// locations (static/instance fields, array elements) once/if "fgValueNumberBlockAssignment" -// supports them. +// TODO-PhysicalVN: with the physical VN scheme, this method is no longer needed. // bool Compiler::fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src) { if (dstFldSeq == FieldSeqStore::NotAField()) { - // We don't have proper field sequence information for the lhs - assume arbitrary aliasing. JITDUMP(" *** Missing field sequence info for Dst/LHS of COPYBLK\n"); return false; } - // With unsafe code or nested structs, we can end up with IR that has - // mismatched struct types on the LHS and RHS. We need to maintain the - // invariant that a node's VN corresponds exactly to its type. Failure - // to do so is a correctness problem. For example: - // - // S1 s1 = { ... }; // s1 = map - // S1.F0 = 0; // s1 = map[F0 := 0] - // S2 s2 = s1; // s2 = map[F0 := 0] (absent below checks) - // s2.F1 = 1; // s2 = map[F0 := 0][F1 := 1] - // s1 = s2; // s1 = map[F0 := 0][F1 := 1] - // - // int r = s1.F0; // map[F0 := 0][F1 := 1][F0] => map[F0 := 0][F0] => 0 - // - // If F1 and F0 physically alias (exist at the same offset, say), the above - // represents an incorrect optimization. 
- var_types dstLocationType = TYP_UNDEF; CORINFO_CLASS_HANDLE dstLocationStructHnd = NO_CLASS_HANDLE; if (dstFldSeq == nullptr) @@ -8399,18 +8380,12 @@ bool Compiler::fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, Field return false; } - // They're equal, and they're primitives. Allow, for now. TYP_SIMD is tentatively - // allowed here as well as, currently, there are no two vector types with public - // fields both that could reasonably alias each other. + // They're equal, and they're primitives. Allow. if (dstLocationType != TYP_STRUCT) { return true; } - // Figure out what the source's type really is. Note that this will miss - // struct fields of struct locals currently ("src" for them is an IND(struct)). - // Note as well that we're relying on the invariant that "node type == node's - // VN type" here (it would be expensive to recover the handle from "src"'s VN). CORINFO_CLASS_HANDLE srcValueStructHnd = gtGetStructHandleIfPresent(src); if (srcValueStructHnd == NO_CLASS_HANDLE) { From 535b406562a818eded45bda59b6cff508dedce5b Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sat, 30 Apr 2022 20:56:40 +0300 Subject: [PATCH 13/17] Update the documentation on maps and selection --- src/coreclr/jit/valuenum.h | 165 +++++++++++++++++++++---------------- 1 file changed, 93 insertions(+), 72 deletions(-) diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index e18ba8a92fb0eb..841b1ffde40f49 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -16,104 +16,126 @@ // // Finally, we will also use value numbers to express control-flow-dependent assertions. Some test may // imply that after the test, something new is known about a value: that an object reference is non-null -// after a dereference (since control flow continued because no exception was thrown); that an integer value -// is restricted to some subrange in after a comparison test; etc. 
+// after a dereference (since control flow continued because no exception was thrown); that an integer
+// value is restricted to some subrange after a comparison test; etc.
// In addition to classical numbering, this implementation also performs disambiguation of heap writes,
-// using memory SSA and the guarantee that two managed field accesses will not alias (if the fields do
-// not overlap). This is the sole reason for the existence of field sequences - providing information
-// to numbering on which addresses represent "proper", non-aliasing, bases for loads and stores. Note
-// that a field sequence is *only* about the address - it is the VN's responsibility to properly "give up"
-// on reads and stores that are too wide or of otherwise incompatible types - this logic is encapsulated
-// in the methods VNApplySelectorsTypeCheck and VNApplySelectorsAssignTypeCoerce. Note as well that this
-// design, based on symbols, not offsets, is why the compiler must take such great care in maintaining the
-// field sequences throughout its many transformations - accessing a field of type A "through" a field from
-// a distinct type B will result in effectively undefined behavior. We say this is by design for classes
-// (i. e. that people using Unsafe.As(object) need to *really* know what they're doing) and fix up places
-// where the compiler itself produces these type mismatches with structs (which can arise due to "reinterpret
-// casts" or morph's transforms which fold copies of structs with bitwise compatible layouts).
+// using memory SSA and the following aliasing model:
+//
+// 1. Arrays of different types do not alias - taking into account the array compatibility rules, i. e.
+//    "int[] <-> uint[]" and such being allowed.
+// 2. Different static fields do not alias (meaning mutable overlapping RVA statics are not supported).
+// 3. Different class fields do not alias. 
Struct fields are allowed to alias - this supports code that
+//    does reinterpretation of structs (e. g. "Unsafe.As(...)"), but makes it UB
+//    to alias reference types in the same manner (including via explicit layout).
+//
+// The no aliasing rule for fields should be interpreted to mean that "ld[s]fld[a] FieldOne" cannot refer
+// to the same location as "ld[s]fld[a] FieldTwo". The aliasing model above reflects the fact that type safety
+// rules in .NET largely only apply to reference types, while struct locations can be and often are treated
+// by user code (and, importantly, the compiler itself) as simple blobs of bytes.
 //
 // Abstractly, numbering maintains states of memory in "maps", which are indexed into with various "selectors",
-// loads reading from said maps and stores recording new states for them (note that as with everything VN, the
-// "maps" are immutable, thus an update is performed via deriving a new map from an existing one).
+// loads reading from said maps and stores recording new states for them (note that as with everything VN,
+// the "maps" are immutable, thus an update is performed via deriving a new map from an existing one).
+//
+// Due to the fact we allow struct field access to alias, but still want to optimize it, our model has two
+// types of maps and selectors: precise and physical. Precise maps allow arbitrary selectors, and if those
+// are known to be distinct values (e. g. different constants), the values they select are also presumed to
+// represent distinct locations. Physical maps, on the other hand, can only have one type of selector: "the
+// physical selector", representing offset of the location and its size (in bytes), where both must be known
+// at compile time. Naturally, different physical selectors can refer to overlapping locations.
+//
+// The following "VNFunc"s are relevant when it comes to map numbering:
+//
+// 1. 
"MapSelect" - represents a "value" taken from another map at a given index: "map[index] => value". It is +// the "VNForMapSelect[Work]" method that represents the core of the selection infrastructure: it performs +// various reductions based on the maps (listed below) being selected from, before "giving up" and creating +// a new "MapSelect" VN. "MapSelect"s are used for both precise and physical maps. +// 2. "Phi[Memory]Def" - the PHI function applied to multiple reaching definitions for a given block. PHIs can +// be reduced by the selection process: "Phi(d:1, d:2, ...)[index]" is evaluated as "Phi(d:1[index], ...)", +// so if all the inner selections ("d:n[index]") agree, that value is returned as the selected one. +// 3. "MapStore" - this is the precise "update" map, it represents a map after a "set" operation at some index. +// MapStore VNs naturally "chain" together, the next map representing an update of the previous, and will be +// traversed by the selection process as long as the store indices are constant, and different from the one +// being selected (meaning they represent distinct locations): "map[F0 := V0][F1 := V1][F0]" => "V0". +// 4. "MapPhysicalStore" - the physical equivalent to "MapStore", can only be indexed with physical selectors, +// with the selection rules taking into account aliasability of physical locations. +// 5. "BitCast" - the physical map representing "identity" selection ("map[0:sizeof(map) - 1]"). Exists because +// physical maps themselves do not have a strong type identity (the physical selector only cares about size). +// but the VN/IR at large do. Is a no-op in the selection process. 
One can notice that we could have chosen +// to represent this concept with an identity "MapPhysicalStore", however, a different "VNFunc" was ultimately +// chosen due to it being easier to reason about and a little cheaper, with the expectation that "BitCast"s +// would be reasonably common - the scenario they are meant to handle are stores/loads to/from structs with +// one field, where the location can be referenced from the IR as both TYP_STRUCT and the field's type. +// +// We give "placeholder" types (TYP_UNDEF and TYP_UNKNOWN as TYP_MEM and TYP_HEAP) to maps that do not represent +// values found in IR, which are currently all precise (though that is not a requirement of the model). +// +// We choose to maintain the following invariants with regards to types of physical locations: +// +// 1. Tree VNs are always "normalized on load" - their types are made to match (via bitcasts). We presume this +// makes the rest of the compiler code simpler, as it will not have to reason about "TYP_INT" trees having +// "TYP_FLOAT" value numbers. This normalization is currently not always done; that should be fixed. +// 2. Types of locals are "normalized on store" - this is different from the rest of physical locations, as not +// only VN looks at these value numbers (stored in SSA descriptors), and similar to the tree case, we presume +// it is simpler to reason about matching types. +// 3. Types of all other locations (array elements and fields) are not normalized - these only appear in the VN +// itself as physical maps / values. // -// Memory states are represented with value numbers corresponding to the following VNFunc's: -// 1. MemOpaque - for opaque memory states, usually caused by writing to byrefs, or other -// non-analyzable memory operations such as atomics or volatile writes (and reads!). -// 2. PhiMemoryDef - this is simply the PHI function applied to multiple reaching memory -// definitions for a given block. -// 3. 
MapStore - this is the "update" map, it represents a map after a "set" operation at a -// given index. Note that the value stored can itself be a map (memory state), for example, -// with the store to a field of a struct that itself is a field of a class, the value -// recorded for the struct is a MapStore($StructInClassFieldMap, $ValueField, $Value). -// Because of this, these VNs need to have proper types, those being the types that the -// maps they represent updated states of have (in the example above, the VN would have -// a "struct" type, for example). MapStore VNs naturally "chain" together, the next map -// representing an update of the previous, and VNForMapSelect, when it tries to find -// the value traverses through these chains, as long as the indices are constant (meaning -// that they represent distinct locations, e. g. different fields). -// 4. MapSelect - if MapStore is an "update/assign" operation, MapSelect is the "read" one. -// It represents a "value" (which, as with MapStore, can itself can be a memory region) -// taken from another map at a given index. As such, it must have a type that corresponds -// to the "index/selector", in practical terms - the field's type. +// Note as well how we handle type identity for structs: we canonicalize on their size. This has the significant +// consequence that any two equally-sized structs can be given the same value number, even if they have different +// ABI characteristics or GC layout. The primary motivations for this are throughout and simplicity, however, we +// would also like the compiler at large to treat structs with compatible layouts as equivalent, so that we can +// propagate copies between them freely. // -// Note that we give "placeholder" types (TYP_UNDEF and TYP_UNKNOWN as TYP_MEM and TYP_HEAP) -// to maps that do not represent values found in IR. This is just to avoid confusion and -// facilitate more precise validating checks. 
// -// Let's review the following snippet to demonstrate how the MapSelect/MapStore machinery works -// together to deliver the results that it does. Say we have this snippet of (C#) code: +// Let's review the following snippet to demonstrate how the MapSelect/MapStore machinery works. Say we have this +// snippet of (C#) code: // // int Procedure(OneClass obj, AnotherClass subj, int objVal, int subjVal) // { -// obj.StructField.AnotherStructField.ScalarField = objVal; +// obj.StructField.ScalarField = objVal; // subj.OtherScalarField = subjVal; // -// return obj.StructField.AnotherStructField.ScalarField + subj.OtherScalarField; +// return obj.StructField.ScalarField + subj.OtherScalarField; // } // -// On entry, we assign some VN to the GcHeap (VN mostly only cares about GcHeap, so from now on -// the term "heap" will be used to mean GcHeap), $Heap. +// On entry, we assign some VN to the GcHeap (VN mostly only cares about GcHeap, so from now on the term "heap" +// will be used to mean GcHeap), $Heap. // -// A store to the ScalarField is seen. Now, the value numbering of fields is done in the following -// pattern for maps that it builds: [$Heap][$FirstField][$Object][$SecondField]...[$FieldToStoreInto]. -// It may seem odd that the indexing is done first for the field, and only then for the object, but -// the reason for that is the fact that it enables MapStores to $Heap to refer to distinct selectors, -// thus enabling the traversal through the map updates when looking for the values that were stored. -// Were $Object VNs used for this, the traversal could not be performed, as two numerically different VNs -// can, obviously, refer to the same object. +// A store to the ScalarField is seen. Now, the value numbering of fields is done in the following pattern for +// maps that it builds: [$Heap][$FirstField][$Object][offset:offset + size of the load]. 
It may seem odd that +// the indexing is done first for the field, and only then for the object, but the reason for that is the fact +// that it enables MapStores to $Heap to refer to distinct selectors, thus enabling the traversal through the +// map updates when looking for the values that were stored. Were $Object VNs used for this, the traversal could +// not be performed, as two numerically different VNs can, obviously, refer to the same object. // // With that in mind, the following maps are first built for the store ("field VNs" - VNs for handles): // -// $StructFieldMap = MapSelect($Heap, $StructField) -// $StructFieldForObjMap = MapSelect($StructFieldMap, $Obj) -// $AnotherStructFieldMap = MapSelect($StructFieldForObjMap, $AnotherStructField) -// $ScalarFieldMap = MapSelect($AnotherStructFieldMap, $ScalarField) +// $StructFieldMap = MapSelect($Heap, $StructField) +// $StructFieldForObjMap = MapSelect($StructFieldMap, $Obj) // -// The building of maps for the individual fields in the sequence [AnotherStructField, ScalarField] is -// usually performed by VNApplySelectors, which is just a convenience method that loops over all the -// fields, calling VNForMapSelect on them. 
Now that we know where to store, the store maps are built: +// Now that we know where to store, the store maps are built: // -// $NewAnotherStructFieldMap = MapStore($AnotherStructFieldMap, $ScalarField, $ObjVal) -// $NewStructFieldForObjMap = MapStore($StructFieldForObjMap, $AnotherStructField, $NewAnotherStructFieldMap) -// $NewStructFieldMap = MapStore($StructFieldMap, $Obj, $NewStructFieldForObjMap) -// $NewHeap = MapStore($Heap, $StructField, $NewStructFieldMap) +// $ScalarFieldSelector = PhysicalSelector(offsetof(StructField) + offsetof(ScalarField), sizeof(ScalarField)) +// $NewStructFieldForObjMap = MapPhysicalStore($StructFieldForObjMap, $ScalarFieldSelector, $ObjVal) +// $NewStructFieldMap = MapStore($StructFieldMap, $Obj, $NewStructFieldForObjMap) +// $NewHeap = MapStore($Heap, $StructField, $NewStructFieldMap) // -// Notice that the maps are built in the opposite order, as we must first know the value of the "narrower" -// map to store into the "wider" map (this is implemented via recursion in VNApplySelectorsAssign). +// Notice that the maps are built in the opposite order, as we must first know the value of the "narrower" map to +// store into the "wider" map. // -// Similarly, the numbering is performed for "subj.OtherScalarField = subjVal", and the heap state updated -// (say to $NewHeapWithSubj). Now when we call VNForMapSelect to find out the stored values when numbering -// the reads, the following traversal is performed (the '[]' operator representing VNForMapSelect): +// Similarly, the numbering is performed for "subj.OtherScalarField = subjVal", and the heap state updated (say to +// $NewHeapWithSubj). 
Now when we call "VNForMapSelect" to find out the stored values when numbering the reads, the +// following traversal is performed: // // $obj.StructField.AnotherStructField.ScalarField -// = $NewHeapWithSubj[$StructField][$Obj][$AnotherStructField][ScalarField]: +// = $NewHeapWithSubj[$StructField][$Obj][$ScalarFieldSelector]: // "$NewHeapWithSubj.Index == $StructField" => false (not the needed map). // "IsConst($NewHeapWithSubj.Index) && IsConst($StructField)" => true (can continue, non-aliasing indices). // "$NewHeap.Index == $StructField" => true, Value is $NewStructFieldMap. // "$NewStructFieldMap.Index == $Obj" => true, Value is $NewStructFieldForObjMap. -// "$NewStructFieldForObjMap.Index == $AnotherStructField" => true, Value is $NewAnotherStructFieldMap. -// "$NewAnotherStructFieldMap.Index == $ScalarField" => true, Value is $ObjVal (found the value!). +// "$NewStructFieldForObjMap.Index == $ScalarFieldSelector" => true, Value is $ObjVal (found it!). // // And similarly for the $SubjVal - we end up with a nice $Add($ObjVal, $SubjVal) feeding the return. // @@ -121,8 +143,7 @@ // modeled as straight indicies into the heap (MapSelect($Heap, $Field) returns the value of the field for them), // arrays - like fields, but with the primiary selector being not the first field, but the "equivalence class" of // an array, i. e. the type of its elements, taking into account things like "int[]" being legally aliasable as -// "uint[]". MapSelect & MapStore are also used to number local fields, with the primary selectors being (VNs of) -// their local numbers. +// "uint[]". Physical maps are used to number local fields. 
/*****************************************************************************/ #ifndef _VALUENUM_H_ From 64b3cf62f007bb1d9a9bb34a9d3c4f72218acf2e Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sun, 1 May 2022 17:36:21 +0300 Subject: [PATCH 14/17] Use a 'switch' in VNForMapSelectWork --- src/coreclr/jit/valuenum.cpp | 359 ++++++++++++++++---------------- src/coreclr/jit/valuenumfuncs.h | 11 +- 2 files changed, 189 insertions(+), 181 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index fe8fc5aa68f164..b14455cb7bb065 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -2477,236 +2477,247 @@ ValueNum ValueNumStore::VNForMapSelectWork( VNFuncApp funcApp; if (GetVNFunc(map, &funcApp)) { - if (funcApp.m_func == VNF_MapStore) + switch (funcApp.m_func) { - assert(MapIsPrecise(map)); - - // select(store(m, i, v), i) == v - if (funcApp.m_args[1] == index) + case VNF_MapStore: { + assert(MapIsPrecise(map)); + + // select(store(m, i, v), i) == v + if (funcApp.m_args[1] == index) + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" AX1: select([" FMT_VN "]store(" FMT_VN ", " FMT_VN ", " FMT_VN "), " FMT_VN - ") ==> " FMT_VN ".\n", - funcApp.m_args[0], map, funcApp.m_args[1], funcApp.m_args[2], index, funcApp.m_args[2]); + JITDUMP(" AX1: select([" FMT_VN "]store(" FMT_VN ", " FMT_VN ", " FMT_VN "), " FMT_VN + ") ==> " FMT_VN ".\n", + funcApp.m_args[0], map, funcApp.m_args[1], funcApp.m_args[2], index, funcApp.m_args[2]); #endif - m_pComp->optRecordLoopMemoryDependence(m_pComp->compCurTree, m_pComp->compCurBB, funcApp.m_args[0]); - return funcApp.m_args[2]; - } - // i # j ==> select(store(m, i, v), j) == select(m, j) - // Currently the only source of distinctions is when both indices are constants. - else if (IsVNConstant(index) && IsVNConstant(funcApp.m_args[1])) - { - assert(funcApp.m_args[1] != index); // we already checked this above. 
+ m_pComp->optRecordLoopMemoryDependence(m_pComp->compCurTree, m_pComp->compCurBB, funcApp.m_args[0]); + return funcApp.m_args[2]; + } + // i # j ==> select(store(m, i, v), j) == select(m, j) + // Currently the only source of distinctions is when both indices are constants. + else if (IsVNConstant(index) && IsVNConstant(funcApp.m_args[1])) + { + assert(funcApp.m_args[1] != index); // we already checked this above. #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN - ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN ") remaining budget is %d.\n", - index, funcApp.m_args[1], map, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], - index, funcApp.m_args[0], index, *pBudget); + JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN + ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN ") remaining budget is %d.\n", + index, funcApp.m_args[1], map, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], + index, funcApp.m_args[0], index, *pBudget); #endif - // This is the equivalent of the recursive tail call: - // return VNForMapSelect(vnk, typ, funcApp.m_args[0], index); - // Make sure we capture any exceptions from the "i" and "v" of the store... - map = funcApp.m_args[0]; - goto TailCall; + // This is the equivalent of the recursive tail call: + // return VNForMapSelect(vnk, typ, funcApp.m_args[0], index); + // Make sure we capture any exceptions from the "i" and "v" of the store... 
+ map = funcApp.m_args[0]; + goto TailCall; + } } - } - else if (funcApp.m_func == VNF_MapPhysicalStore) - { - assert(MapIsPhysical(map)); + break; + + case VNF_MapPhysicalStore: + { + assert(MapIsPhysical(map)); #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" select("); - JITDUMPEXEC(m_pComp->vnPrint(map, 1)); - JITDUMP(", "); - JITDUMPEXEC(vnDumpPhysicalSelector(index)); - JITDUMP(")"); + JITDUMP(" select("); + JITDUMPEXEC(m_pComp->vnPrint(map, 1)); + JITDUMP(", "); + JITDUMPEXEC(vnDumpPhysicalSelector(index)); + JITDUMP(")"); #endif - ValueNum storeSelector = funcApp.m_args[1]; + ValueNum storeSelector = funcApp.m_args[1]; - if (index == storeSelector) - { + if (index == storeSelector) + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" ==> " FMT_VN "\n", funcApp.m_args[2]); + JITDUMP(" ==> " FMT_VN "\n", funcApp.m_args[2]); #endif - return funcApp.m_args[2]; - } + return funcApp.m_args[2]; + } - unsigned selectSize; - unsigned selectOffset = DecodePhysicalSelector(index, &selectSize); + unsigned selectSize; + unsigned selectOffset = DecodePhysicalSelector(index, &selectSize); - unsigned storeSize; - unsigned storeOffset = DecodePhysicalSelector(storeSelector, &storeSize); + unsigned storeSize; + unsigned storeOffset = DecodePhysicalSelector(storeSelector, &storeSize); - unsigned selectEndOffset = selectOffset + selectSize; // Exclusive. - unsigned storeEndOffset = storeOffset + storeSize; // Exclusive. + unsigned selectEndOffset = selectOffset + selectSize; // Exclusive. + unsigned storeEndOffset = storeOffset + storeSize; // Exclusive. 
- if ((storeOffset <= selectOffset) && (selectEndOffset <= storeEndOffset)) - { + if ((storeOffset <= selectOffset) && (selectEndOffset <= storeEndOffset)) + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" ==> enclosing, selecting inner, remaining budget is %d\n", *pBudget); + JITDUMP(" ==> enclosing, selecting inner, remaining budget is %d\n", *pBudget); #endif - map = funcApp.m_args[2]; - index = EncodePhysicalSelector(selectOffset - storeOffset, selectSize); - goto TailCall; - } + map = funcApp.m_args[2]; + index = EncodePhysicalSelector(selectOffset - storeOffset, selectSize); + goto TailCall; + } - // If it was disjoint with the location being selected, continue the linear search. - if ((storeEndOffset <= selectOffset) || (selectEndOffset <= storeOffset)) - { + // If it was disjoint with the location being selected, continue the linear search. + if ((storeEndOffset <= selectOffset) || (selectEndOffset <= storeOffset)) + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" ==> disjoint, remaining budget is %d\n", *pBudget); + JITDUMP(" ==> disjoint, remaining budget is %d\n", *pBudget); #endif - map = funcApp.m_args[0]; - goto TailCall; - } - else - { + map = funcApp.m_args[0]; + goto TailCall; + } + else + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" ==> aliasing!\n"); + JITDUMP(" ==> aliasing!\n"); #endif + } } - } - else if (funcApp.m_func == VNF_BitCast) - { - assert(MapIsPhysical(map)); + break; + + case VNF_BitCast: + assert(MapIsPhysical(map)); #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" select(bitcast<%s>(" FMT_VN ")) ==> select(" FMT_VN ")\n", - varTypeName(TypeOfVN(funcApp.m_args[0])), funcApp.m_args[0], funcApp.m_args[0]); + JITDUMP(" select(bitcast<%s>(" FMT_VN ")) ==> select(" FMT_VN ")\n", + varTypeName(TypeOfVN(funcApp.m_args[0])), funcApp.m_args[0], funcApp.m_args[0]); #endif // FEATURE_VN_TRACE_APPLY_SELECTORS - map = funcApp.m_args[0]; - goto TailCall; - } - else if (funcApp.m_func == VNF_ZeroObj) - { - assert(MapIsPhysical(map)); 
+ map = funcApp.m_args[0]; + goto TailCall; - // TODO-CQ: support selection of TYP_STRUCT here. - if (type != TYP_STRUCT) - { - return VNZeroForType(type); - } - } - else if (funcApp.m_func == VNF_PhiDef || funcApp.m_func == VNF_PhiMemoryDef) - { - unsigned lclNum = BAD_VAR_NUM; - bool isMemory = false; - VNFuncApp phiFuncApp; - bool defArgIsFunc = false; - if (funcApp.m_func == VNF_PhiDef) - { - lclNum = unsigned(funcApp.m_args[0]); - defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp); - } - else - { - assert(funcApp.m_func == VNF_PhiMemoryDef); - isMemory = true; - defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp); - } - if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi) - { - // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh. - // Get the first argument of the phi. + case VNF_ZeroObj: + assert(MapIsPhysical(map)); - // We need to be careful about breaking infinite recursion. Record the outer select. - m_fixedPointMapSels.Push(VNDefFuncApp<2>(VNF_MapSelect, map, index)); + // TODO-CQ: support selection of TYP_STRUCT here. 
+ if (type != TYP_STRUCT) + { + return VNZeroForType(type); + } + break; - assert(IsVNConstant(phiFuncApp.m_args[0])); - unsigned phiArgSsaNum = ConstantValue(phiFuncApp.m_args[0]); - ValueNum phiArgVN; - if (isMemory) + case VNF_PhiDef: + case VNF_PhiMemoryDef: + { + unsigned lclNum = BAD_VAR_NUM; + bool isMemory = false; + VNFuncApp phiFuncApp; + bool defArgIsFunc = false; + if (funcApp.m_func == VNF_PhiDef) { - phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + lclNum = unsigned(funcApp.m_args[0]); + defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp); } else { - phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + assert(funcApp.m_func == VNF_PhiMemoryDef); + isMemory = true; + defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp); } - if (phiArgVN != ValueNumStore::NoVN) + if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi) { - bool allSame = true; - ValueNum argRest = phiFuncApp.m_args[1]; - ValueNum sameSelResult = - VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, pUsedRecursiveVN); - - // It is possible that we just now exceeded our budget, if so we need to force an early exit - // and stop calling VNForMapSelectWork - if (*pBudget <= 0) + // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh. + // Get the first argument of the phi. + + // We need to be careful about breaking infinite recursion. Record the outer select. + m_fixedPointMapSels.Push(VNDefFuncApp<2>(VNF_MapSelect, map, index)); + + assert(IsVNConstant(phiFuncApp.m_args[0])); + unsigned phiArgSsaNum = ConstantValue(phiFuncApp.m_args[0]); + ValueNum phiArgVN; + if (isMemory) { - // We don't have any budget remaining to verify that all phiArgs are the same - // so setup the default failure case now. 
- allSame = false; + phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); } - - while (allSame && argRest != ValueNumStore::NoVN) + else { - ValueNum cur = argRest; - VNFuncApp phiArgFuncApp; - if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi) - { - cur = phiArgFuncApp.m_args[0]; - argRest = phiArgFuncApp.m_args[1]; - } - else - { - argRest = ValueNumStore::NoVN; // Cause the loop to terminate. - } - assert(IsVNConstant(cur)); - phiArgSsaNum = ConstantValue(cur); - if (isMemory) - { - phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); - } - else - { - phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); - } - if (phiArgVN == ValueNumStore::NoVN) + phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + } + if (phiArgVN != ValueNumStore::NoVN) + { + bool allSame = true; + ValueNum argRest = phiFuncApp.m_args[1]; + ValueNum sameSelResult = + VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, pUsedRecursiveVN); + + // It is possible that we just now exceeded our budget, if so we need to force an early exit + // and stop calling VNForMapSelectWork + if (*pBudget <= 0) { + // We don't have any budget remaining to verify that all phiArgs are the same + // so setup the default failure case now. allSame = false; } - else - { - bool usedRecursiveVN = false; - ValueNum curResult = - VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, &usedRecursiveVN); - *pUsedRecursiveVN |= usedRecursiveVN; - if (sameSelResult == ValueNumStore::RecursiveVN) + while (allSame && argRest != ValueNumStore::NoVN) + { + ValueNum cur = argRest; + VNFuncApp phiArgFuncApp; + if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi) + { + cur = phiArgFuncApp.m_args[0]; + argRest = phiArgFuncApp.m_args[1]; + } + else + { + argRest = ValueNumStore::NoVN; // Cause the loop to terminate. 
+ } + assert(IsVNConstant(cur)); + phiArgSsaNum = ConstantValue(cur); + if (isMemory) { - sameSelResult = curResult; + phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); } - if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult) + else + { + phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + } + if (phiArgVN == ValueNumStore::NoVN) { allSame = false; } + else + { + bool usedRecursiveVN = false; + ValueNum curResult = + VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, &usedRecursiveVN); + + *pUsedRecursiveVN |= usedRecursiveVN; + if (sameSelResult == ValueNumStore::RecursiveVN) + { + sameSelResult = curResult; + } + if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult) + { + allSame = false; + } + } } - } - if (allSame && sameSelResult != ValueNumStore::RecursiveVN) - { - // Make sure we're popping what we pushed. - assert(FixedPointMapSelsTopHasValue(map, index)); - m_fixedPointMapSels.Pop(); - - // To avoid exponential searches, we make sure that this result is memo-ized. - // The result is always valid for memoization if we didn't rely on RecursiveVN to get it. - // If RecursiveVN was used, we are processing a loop and we can't memo-ize this intermediate - // result if, e.g., this block is in a multi-entry loop. - if (!*pUsedRecursiveVN) + if (allSame && sameSelResult != ValueNumStore::RecursiveVN) { - GetVNFunc2Map()->Set(fstruct, sameSelResult); - } + // Make sure we're popping what we pushed. + assert(FixedPointMapSelsTopHasValue(map, index)); + m_fixedPointMapSels.Pop(); + + // To avoid exponential searches, we make sure that this result is memo-ized. + // The result is always valid for memoization if we didn't rely on RecursiveVN to get it. + // If RecursiveVN was used, we are processing a loop and we can't memo-ize this intermediate + // result if, e.g., this block is in a multi-entry loop. 
+ if (!*pUsedRecursiveVN) + { + GetVNFunc2Map()->Set(fstruct, sameSelResult); + } - return sameSelResult; + return sameSelResult; + } + // Otherwise, fall through to creating the select(phi(m1, m2), x) function application. } - // Otherwise, fall through to creating the select(phi(m1, m2), x) function application. + // Make sure we're popping what we pushed. + assert(FixedPointMapSelsTopHasValue(map, index)); + m_fixedPointMapSels.Pop(); } - // Make sure we're popping what we pushed. - assert(FixedPointMapSelsTopHasValue(map, index)); - m_fixedPointMapSels.Pop(); } + break; + + default: + break; } } diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 80ea2e38bbae4a..b5e23ca8513723 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -7,11 +7,14 @@ // clang-format off ValueNumFuncDef(MemOpaque, 1, false, false, false) // Args: 0: loop num +ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. ValueNumFuncDef(MapStore, 4, false, false, false) // Args: 0: map, 1: index (e. g. field handle), 2: value being stored, 3: loop num. ValueNumFuncDef(MapPhysicalStore, 3, false, false, false) // Args: 0: map, 1: "physical selector": offset and size, 2: value being stored ValueNumFuncDef(BitCast, 2, false, false, false) // Args: 0: VN of the arg, 1: VN of the target type ValueNumFuncDef(ZeroObj, 1, false, false, false) // Args: 0: VN of the class handle. -ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. +ValueNumFuncDef(PhiDef, 3, false, false, false) // Args: 0: local var # (or -1 for memory), 1: SSA #, 2: VN of definition. +ValueNumFuncDef(PhiMemoryDef, 2, false, false, false) // Args: 0: VN for basic block pointer, 1: VN of definition +ValueNumFuncDef(Phi, 2, false, false, false) // A phi function. Only occurs as arg of PhiDef or PhiMemoryDef. Arguments are SSA numbers of var being defined. 
ValueNumFuncDef(PtrToLoc, 2, false, true, false) // Pointer (byref) to a local variable. Args: VN's of: 0: var num, 1: FieldSeq. ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: "ArrElemOffset" VN. @@ -21,14 +24,8 @@ ValueNumFuncDef(PtrToStatic, 3, false, true, false) // Pointer (byref) t // 1: (VN of) the field sequence, of which the first element is the static itself. // 2: (VN of) offset for the constituent struct fields -ValueNumFuncDef(Phi, 2, false, false, false) // A phi function. Only occurs as arg of PhiDef or PhiMemoryDef. Arguments are SSA numbers of var being defined. -ValueNumFuncDef(PhiDef, 3, false, false, false) // Args: 0: local var # (or -1 for memory), 1: SSA #, 2: VN of definition. -// Wouldn't need this if I'd made memory a regular local variable... -ValueNumFuncDef(PhiMemoryDef, 2, false, false, false) // Args: 0: VN for basic block pointer, 1: VN of definition ValueNumFuncDef(InitVal, 1, false, false, false) // An input arg, or init val of a local Args: 0: a constant VN. - - ValueNumFuncDef(Cast, 2, false, false, false) // VNF_Cast: Cast Operation changes the representations size and unsigned-ness. // Args: 0: Source for the cast operation. // 1: Constant integer representing the operation . 
From 0c6af715efbf1889fff9ada11bf6080e5098a9be Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sat, 30 Apr 2022 21:20:59 +0300 Subject: [PATCH 15/17] Fix formatting --- src/coreclr/jit/gentree.cpp | 24 ++++---- src/coreclr/jit/gentree.h | 13 ++--- src/coreclr/jit/importer.cpp | 4 +- src/coreclr/jit/lclmorph.cpp | 2 +- src/coreclr/jit/morph.cpp | 8 +-- src/coreclr/jit/valuenum.cpp | 107 ++++++++++++++++------------------- src/coreclr/jit/valuenum.h | 25 +++----- 7 files changed, 81 insertions(+), 102 deletions(-) diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 92b88b2252ada4..eedf2b60e4fdb7 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -16026,7 +16026,8 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo else if (AsOp()->gtOp1->OperGet() == GT_IND) { GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1; - return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire, pOffset); + return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire, + pOffset); } else if (AsOp()->gtOp1->OperIsBlk()) { @@ -16074,11 +16075,8 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo return false; } -bool GenTree::DefinesLocalAddr(Compiler* comp, - unsigned width, - GenTreeLclVarCommon** pLclVarTree, - bool* pIsEntire, - ssize_t* pOffset) +bool GenTree::DefinesLocalAddr( + Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire, ssize_t* pOffset) { if (OperIs(GT_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR)) { @@ -16811,9 +16809,9 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL)) { assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr)); - baseAddr = this; - fldSeq = AsIntCon()->gtFieldSeq; - offset = AsIntCon()->IconValue(); + baseAddr = 
this; + fldSeq = AsIntCon()->gtFieldSeq; + offset = AsIntCon()->IconValue(); } else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq)) { @@ -18136,8 +18134,9 @@ FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(n { } -FieldSeqNode* FieldSeqStore::CreateSingleton( - CORINFO_FIELD_HANDLE fieldHnd, size_t offset, FieldSeqNode::FieldKind fieldKind) +FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, + size_t offset, + FieldSeqNode::FieldKind fieldKind) { FieldSeqNode fsn(fieldHnd, nullptr, offset, fieldKind); FieldSeqNode* res = nullptr; @@ -18195,8 +18194,7 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b) } FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind) - : m_next(next) - , m_offset(offset) + : m_next(next), m_offset(offset) { uintptr_t handleValue = reinterpret_cast(fieldHnd); diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index db4bd09e6acb67..4f0b6baff2a2a8 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -1915,8 +1915,10 @@ struct GenTree // tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to // true or false, depending on whether the assignment writes to the entirety of the local // variable, or just a portion of it. - bool DefinesLocal( - Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr, ssize_t* pOffset = nullptr); + bool DefinesLocal(Compiler* comp, + GenTreeLclVarCommon** pLclVarTree, + bool* pIsEntire = nullptr, + ssize_t* pOffset = nullptr); bool IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, @@ -1955,11 +1957,8 @@ struct GenTree // operation. 
Returns "true" if "this" is an address of (or within) // a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null, // sets "*pIsEntire" to true if this assignment writes the full width of the local. - bool DefinesLocalAddr(Compiler* comp, - unsigned width, - GenTreeLclVarCommon** pLclVarTree, - bool* pIsEntire, - ssize_t* pOffset = nullptr); + bool DefinesLocalAddr( + Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire, ssize_t* pOffset = nullptr); // These are only used for dumping. // The GetRegNum() is only valid in LIR, but the dumping methods are not easily diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index bf58f30b62fecf..10f34bc7db5e77 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1416,8 +1416,8 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); - typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), - OFFSETOF__CORINFO_TypedReference__type); + typeFieldOffset->gtFieldSeq = + GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), OFFSETOF__CORINFO_TypedReference__type); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index acd65c27bb9663..7bfb53142c8e22 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -309,7 +309,7 @@ class LocalAddressVisitor final : public GenTreeVisitor { FieldSeqStore* fieldSeqStore = compiler->GetFieldSeqStore(); FieldSeqNode* fieldSeqNode = fieldSeqStore->CreateSingleton(field->gtFldHnd, field->gtFldOffset); - m_fieldSeq = fieldSeqStore->Append(val.m_fieldSeq, fieldSeqNode); + m_fieldSeq = 
fieldSeqStore->Append(val.m_fieldSeq, fieldSeqNode); } else { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 143df1e47f51c0..1c480ba0e34502 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -3465,11 +3465,11 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); - destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(), - OFFSETOF__CORINFO_TypedReference__dataPtr)); + destPtrSlot->SetFieldSeq( + GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(), OFFSETOF__CORINFO_TypedReference__dataPtr)); destPtrSlot->gtFlags |= GTF_VAR_DEF; - destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), - OFFSETOF__CORINFO_TypedReference__type)); + destTypeSlot->SetFieldSeq( + GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), OFFSETOF__CORINFO_TypedReference__type)); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index b14455cb7bb065..07d4cda53e2944 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -1782,7 +1782,7 @@ ValueNum ValueNumStore::VNForIntPtrCon(ssize_t cnsVal) { #ifdef HOST_64BIT return VNForLongCon(cnsVal); -#else // !HOST_64BIT +#else // !HOST_64BIT return VNForIntCon(cnsVal); #endif // !HOST_64BIT } @@ -2380,7 +2380,8 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNu // number returned can be of a type different from "type", as physical maps // do not have canonical types, only sizes. 
// -ValueNum ValueNumStore::VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size) +ValueNum ValueNumStore::VNForMapPhysicalSelect( + ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size) { assert(MapIsPhysical(map)); @@ -2428,7 +2429,7 @@ ValueNum ValueNumStore::VNForMapSelectWork( // This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here. assert(map != NoVN && index != NoVN); - assert(map == VNNormalValue(map)); // Arguments carry no exceptions. + assert(map == VNNormalValue(map)); // Arguments carry no exceptions. assert(index == VNNormalValue(index)); // Arguments carry no exceptions. *pUsedRecursiveVN = false; @@ -2492,7 +2493,8 @@ ValueNum ValueNumStore::VNForMapSelectWork( funcApp.m_args[0], map, funcApp.m_args[1], funcApp.m_args[2], index, funcApp.m_args[2]); #endif - m_pComp->optRecordLoopMemoryDependence(m_pComp->compCurTree, m_pComp->compCurBB, funcApp.m_args[0]); + m_pComp->optRecordLoopMemoryDependence(m_pComp->compCurTree, m_pComp->compCurBB, + funcApp.m_args[0]); return funcApp.m_args[2]; } // i # j ==> select(store(m, i, v), j) == select(m, j) @@ -2502,7 +2504,8 @@ ValueNum ValueNumStore::VNForMapSelectWork( assert(funcApp.m_args[1] != index); // we already checked this above. #if FEATURE_VN_TRACE_APPLY_SELECTORS JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN - ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN ") remaining budget is %d.\n", + ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN + ") remaining budget is %d.\n", index, funcApp.m_args[1], map, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], index, funcApp.m_args[0], index, *pBudget); #endif @@ -2697,8 +2700,10 @@ ValueNum ValueNumStore::VNForMapSelectWork( m_fixedPointMapSels.Pop(); // To avoid exponential searches, we make sure that this result is memo-ized. 
- // The result is always valid for memoization if we didn't rely on RecursiveVN to get it. - // If RecursiveVN was used, we are processing a loop and we can't memo-ize this intermediate + // The result is always valid for memoization if we didn't rely on RecursiveVN to get + // it. + // If RecursiveVN was used, we are processing a loop and we can't memo-ize this + // intermediate // result if, e.g., this block is in a multi-entry loop. if (!*pUsedRecursiveVN) { @@ -4216,8 +4221,8 @@ ValueNumPair ValueNumStore::VNPairForLoad( ValueNumPair locationValue, unsigned locationSize, var_types loadType, ssize_t offset, unsigned loadSize) { ValueNum liberalVN = VNForLoad(VNK_Liberal, locationValue.GetLiberal(), locationSize, loadType, offset, loadSize); - ValueNum conservVN = VNForLoad(VNK_Conservative, locationValue.GetConservative(), locationSize, loadType, offset, - loadSize); + ValueNum conservVN = + VNForLoad(VNK_Conservative, locationValue.GetConservative(), locationSize, loadType, offset, loadSize); return ValueNumPair(liberalVN, conservVN); } @@ -4240,11 +4245,8 @@ ValueNumPair ValueNumStore::VNPairForLoad( // Notes: // Does not handle "entire" (whole/identity) stores. // -ValueNum ValueNumStore::VNForStore(ValueNum locationValue, - unsigned locationSize, - ssize_t offset, - unsigned storeSize, - ValueNum value) +ValueNum ValueNumStore::VNForStore( + ValueNum locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNum value) { assert(storeSize > 0); @@ -4269,11 +4271,8 @@ ValueNum ValueNumStore::VNForStore(ValueNum locationValue, //------------------------------------------------------------------------ // VNPairForStore: VNForStore applied to a ValueNumPair. 
// -ValueNumPair ValueNumStore::VNPairForStore(ValueNumPair locationValue, - unsigned locationSize, - ssize_t offset, - unsigned storeSize, - ValueNumPair value) +ValueNumPair ValueNumStore::VNPairForStore( + ValueNumPair locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNumPair value) { ValueNum liberalVN = VNForStore(locationValue.GetLiberal(), locationSize, offset, storeSize, value.GetLiberal()); ValueNum conservVN; @@ -4283,8 +4282,8 @@ ValueNumPair ValueNumStore::VNPairForStore(ValueNumPair locationValue, } else { - conservVN = VNForStore(locationValue.GetConservative(), locationSize, offset, storeSize, - value.GetConservative()); + conservVN = + VNForStore(locationValue.GetConservative(), locationSize, offset, storeSize, value.GetConservative()); } return ValueNumPair(liberalVN, conservVN); @@ -4663,9 +4662,9 @@ void Compiler::fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc ValueNum wholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemType, hAtArrTypeAtArr, inxVN); JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] is " FMT_VN "\n", inxVN, wholeElem); - unsigned elemSize = (elemType == TYP_STRUCT) ? info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemType); - var_types loadType = loadTree->TypeGet(); - unsigned loadSize = loadTree->AsIndir()->Size(); + unsigned elemSize = (elemType == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemType); + var_types loadType = loadTree->TypeGet(); + unsigned loadSize = loadTree->AsIndir()->Size(); ValueNum loadValueVN = vnStore->VNForLoad(VNK_Liberal, wholeElem, elemSize, loadType, offset, loadSize); loadTree->gtVNPair.SetLiberal(loadValueVN); @@ -4694,9 +4693,9 @@ void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFu FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); ssize_t offset = vnStore->ConstantValue(offsFunc.m_args[1]); - bool invalidateArray = false; - var_types elemType = DecodeElemType(elemTypeEq); - ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); + bool invalidateArray = false; + var_types elemType = DecodeElemType(elemTypeEq); + ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); JITDUMP(" Array element store: elemTypeEq is " FMT_VN " for %s[]\n", elemTypeEqVN, (elemType == TYP_STRUCT) ? eeGetClassName(elemTypeEq) : varTypeName(elemType)); @@ -4740,8 +4739,7 @@ void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFu } // TODO-PhysicalVN: with the physical VN scheme, we no longer need the type check below. - var_types arrElemFldType = - (fldSeq != nullptr) ? eeGetFieldType(fldSeq->GetTail()->GetFieldHandle()) : elemType; + var_types arrElemFldType = (fldSeq != nullptr) ? 
eeGetFieldType(fldSeq->GetTail()->GetFieldHandle()) : elemType; if ((storeNode->gtGetOp1()->TypeGet() != arrElemFldType) || (newWholeElem == ValueNumStore::NoVN)) { @@ -4798,15 +4796,13 @@ void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, Fiel // var_types fieldType; unsigned fieldSize; - ValueNum fieldSelectorVN = - vnStore->VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType, &fieldSize); + ValueNum fieldSelectorVN = vnStore->VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType, &fieldSize); ValueNum fieldMapVN = ValueNumStore::NoVN; ValueNum fieldValueSelectorVN = ValueNumStore::NoVN; if (baseAddr != nullptr) { - fieldMapVN = - vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); + fieldMapVN = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); fieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); } else @@ -4815,8 +4811,7 @@ void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, Fiel fieldValueSelectorVN = fieldSelectorVN; } - ValueNum fieldValueVN = - vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); + ValueNum fieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); // Finally, account for the struct fields and type mismatches. 
var_types loadType = loadTree->TypeGet(); @@ -4839,12 +4834,8 @@ void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, Fiel // storeSize - The number of bytes being stored // value - The value being stored // -void Compiler::fgValueNumberFieldStore(GenTree* storeNode, - GenTree* baseAddr, - FieldSeqNode* fieldSeq, - ssize_t offset, - unsigned storeSize, - ValueNum value) +void Compiler::fgValueNumberFieldStore( + GenTree* storeNode, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset, unsigned storeSize, ValueNum value) { assert(fieldSeq != nullptr); @@ -4868,7 +4859,7 @@ void Compiler::fgValueNumberFieldStore(GenTree* storeNode, { // Construct the "field map" VN. It represents memory state of the first field of all objects // on the heap. This is our primary map. - fieldMapVN = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); + fieldMapVN = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); fieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); } else @@ -4886,7 +4877,7 @@ void Compiler::fgValueNumberFieldStore(GenTree* storeNode, else { ValueNum oldFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); - newFieldValueVN = vnStore->VNForStore(oldFieldValueVN, fieldSize, offset, storeSize, value); + newFieldValueVN = vnStore->VNForStore(oldFieldValueVN, fieldSize, offset, storeSize, value); } if (newFieldValueVN != ValueNumStore::NoVN) @@ -6921,11 +6912,11 @@ const char* ValueNumStore::VNFuncNameArr[] = { } static const char* s_reservedNameArr[] = { - "$VN.Recursive", // -2 RecursiveVN - "$VN.No", // -1 NoVN - "$VN.Null", // 0 VNForNull() - "$VN.Void", // 1 VNForVoid() - "$VN.EmptyExcSet" // 2 VNForEmptyExcSet() + "$VN.Recursive", // -2 RecursiveVN + "$VN.No", // -1 NoVN + "$VN.Null", // 0 VNForNull() + "$VN.Void", // 1 VNForVoid() + "$VN.EmptyExcSet" // 2 VNForEmptyExcSet() }; // Returns the string name 
of "vn" when it is a reserved value number, nullptr otherwise @@ -8160,7 +8151,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) if ((fldSeq == FieldSeqStore::NotAField()) || ((fldSeq == nullptr) && !isEntire)) { rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); - offset = 0; + offset = 0; storeSize = lvaLclExactSize(lclVarTree->GetLclNum()); } @@ -8238,8 +8229,8 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) } else if (lhs->OperIs(GT_LCL_FLD)) { - storeSize = lhs->AsLclFld()->GetSize(); - lhsFldSeq = lhs->AsLclFld()->GetFieldSeq(); + storeSize = lhs->AsLclFld()->GetSize(); + lhsFldSeq = lhs->AsLclFld()->GetFieldSeq(); } else { @@ -8267,7 +8258,7 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) else { // Non-zero block init is very rare so we'll use a simple, unique VN here. - initObjVN = vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet()); + initObjVN = vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet()); // TODO-PhysicalVN: remove this pessimization, it was added to avoid diffs. // There is no need to invalidate the whole local. @@ -8534,9 +8525,9 @@ void Compiler::fgValueNumberTree(GenTree* tree) { LclVarDsc* varDsc = lvaGetDesc(lclNum); ValueNumPair lclVarValue = varDsc->GetPerSsaData(lclFld->GetSsaNum())->m_vnPair; - lclFld->gtVNPair = vnStore->VNPairForLoad(lclVarValue, lvaLclExactSize(lclNum), - lclFld->TypeGet(), lclFld->GetLclOffs(), - lclFld->GetSize()); + lclFld->gtVNPair = + vnStore->VNPairForLoad(lclVarValue, lvaLclExactSize(lclNum), lclFld->TypeGet(), + lclFld->GetLclOffs(), lclFld->GetSize()); // If we have byref field, we may have a zero-offset sequence to add. 
FieldSeqNode* zeroOffsetFldSeq = nullptr; @@ -8795,9 +8786,9 @@ void Compiler::fgValueNumberTree(GenTree* tree) } else { - ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; - unsigned lclSize = lvaLclExactSize(lclVarTree->GetLclNum()); - offset = offset + lclVarTree->GetLclOffs(); + ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; + unsigned lclSize = lvaLclExactSize(lclVarTree->GetLclNum()); + offset = offset + lclVarTree->GetLclOffs(); tree->gtVNPair = vnStore->VNPairForLoad(lclVNPair, lclSize, tree->TypeGet(), offset, loadSize); } diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 841b1ffde40f49..957278cc77cd82 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -683,23 +683,14 @@ class ValueNumStore ssize_t offset, unsigned loadSize); - ValueNumPair VNPairForLoad(ValueNumPair locationValue, - unsigned locationSize, - var_types loadType, - ssize_t offset, - unsigned loadSize); - - ValueNum VNForStore(ValueNum locationValue, - unsigned locationSize, - ssize_t offset, - unsigned storeSize, - ValueNum value); - - ValueNumPair VNPairForStore(ValueNumPair locationValue, - unsigned locationSize, - ssize_t offset, - unsigned storeSize, - ValueNumPair value); + ValueNumPair VNPairForLoad( + ValueNumPair locationValue, unsigned locationSize, var_types loadType, ssize_t offset, unsigned loadSize); + + ValueNum VNForStore( + ValueNum locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNum value); + + ValueNumPair VNPairForStore( + ValueNumPair locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNumPair value); bool LoadStoreIsEntire(unsigned locationSize, ssize_t offset, unsigned indSize) const { From d1d5aea5040c3cd5f5a7acaaddc3f654b108c126 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Mon, 2 May 2022 00:34:59 +0300 Subject: [PATCH 16/17] Fix the CAST integer> bug 10 diffs across CLR and libraries tests, all correctness fixes. 
--- src/coreclr/jit/valuenum.cpp | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 07d4cda53e2944..ef71522ff40509 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -4319,9 +4319,6 @@ ValueNum ValueNumStore::VNForLoadStoreBitCast(ValueNum value, var_types indType, if ((typeOfValue == TYP_STRUCT) || (indType == TYP_STRUCT)) { value = VNForBitCast(value, indType); - JITDUMP(" VNForLoadStoreBitcast returns "); - JITDUMPEXEC(m_pComp->vnPrint(value, 1)); - JITDUMP("\n"); } else { @@ -4333,14 +4330,20 @@ ValueNum ValueNumStore::VNForLoadStoreBitCast(ValueNum value, var_types indType, JITDUMP(" *** Mismatched types in VNForLoadStoreBitcast (indType is SIMD)\n"); value = VNForExpr(m_pComp->compCurBB, indType); } + else if (!varTypeIsFloating(typeOfValue) && !varTypeIsFloating(indType)) + { + // TODO-PhysicalVN: implement constant folding for bitcasts and remove this special case. + value = VNForCast(value, indType, TypeOfVN(value)); + } else { - // We are trying to read "value" of type "typeOfValue" using "indType" read. - // Insert a cast - this handles small types, i. e. "IND(byte ADDR(int))". - // TODO-Bug: this does not not handle "IND(float ADDR(int))" correctly. 
- value = VNForCast(value, indType, typeOfValue); + value = VNForBitCast(value, indType); } } + + JITDUMP(" VNForLoadStoreBitcast returns "); + JITDUMPEXEC(m_pComp->vnPrint(value, 1)); + JITDUMP("\n"); } assert(genActualType(TypeOfVN(value)) == genActualType(indType)); @@ -9658,15 +9661,18 @@ ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types castToType) srcVN = srcVNFunc.m_args[0]; } - if (TypeOfVN(srcVN) == castToType) + var_types srcType = TypeOfVN(srcVN); + + if (srcType == castToType) { return srcVN; } - assert((castToType != TYP_STRUCT) || (TypeOfVN(srcVN) != TYP_STRUCT)); + assert((castToType != TYP_STRUCT) || (srcType != TYP_STRUCT)); - // TODO-CQ: implement constant folding for BitCast. - if (srcVNFunc.m_func == VNF_ZeroObj) + // TODO-PhysicalVN: implement proper constant folding for BitCast. + if ((srcVNFunc.m_func == VNF_ZeroObj) || + ((castToType != TYP_STRUCT) && (srcType != TYP_STRUCT) && (srcVN == VNZeroForType(srcType)))) { return VNZeroForType(castToType); } From 051224d6e86163a84aa5f8694298f4daf2d9efb7 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Thu, 5 May 2022 14:24:34 +0300 Subject: [PATCH 17/17] Correct documentation mistakes --- src/coreclr/jit/valuenum.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 957278cc77cd82..b0cbc1e0653b0b 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -104,7 +104,7 @@ // will be used to mean GcHeap), $Heap. // // A store to the ScalarField is seen. Now, the value numbering of fields is done in the following pattern for -// maps that it builds: [$Heap][$FirstField][$Object][offset:offset + size of the load]. It may seem odd that +// maps that it builds: [$Heap][$FirstField][$Object][offset:offset + size of the store]. 
It may seem odd that // the indexing is done first for the field, and only then for the object, but the reason for that is the fact // that it enables MapStores to $Heap to refer to distinct selectors, thus enabling the traversal through the // map updates when looking for the values that were stored. Were $Object VNs used for this, the traversal could @@ -117,7 +117,7 @@ // // Now that we know where to store, the store maps are built: // -// $ScalarFieldSelector = PhysicalSelector(offsetof(StructField) + offsetof(ScalarField), sizeof(ScalarField)) +// $ScalarFieldSelector = PhysicalSelector(offsetof(ScalarField), sizeof(ScalarField)) // $NewStructFieldForObjMap = MapPhysicalStore($StructFieldForObjMap, $ScalarFieldSelector, $ObjVal) // $NewStructFieldMap = MapStore($StructFieldMap, $Obj, $NewStructFieldForObjMap) // $NewHeap = MapStore($Heap, $StructField, $NewStructFieldMap)