diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 273c6f045123df..75def5b0e6a45e 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -676,11 +676,11 @@ void BasicBlock::dspKind() const break; case BBJ_EHFILTERRET: - printf(" -> %s (fltret)", dspBlockNum(bbTarget)); + printf(" -> %s (fltret)", dspBlockNum(GetTarget())); break; case BBJ_EHCATCHRET: - printf(" -> %s (cret)", dspBlockNum(bbTarget)); + printf(" -> %s (cret)", dspBlockNum(GetTarget())); break; case BBJ_THROW: @@ -694,28 +694,28 @@ void BasicBlock::dspKind() const case BBJ_ALWAYS: if (HasFlag(BBF_KEEP_BBJ_ALWAYS)) { - printf(" -> %s (ALWAYS)", dspBlockNum(bbTarget)); + printf(" -> %s (ALWAYS)", dspBlockNum(GetTarget())); } else { - printf(" -> %s (always)", dspBlockNum(bbTarget)); + printf(" -> %s (always)", dspBlockNum(GetTarget())); } break; case BBJ_LEAVE: - printf(" -> %s (leave)", dspBlockNum(bbTarget)); + printf(" -> %s (leave)", dspBlockNum(GetTarget())); break; case BBJ_CALLFINALLY: - printf(" -> %s (callf)", dspBlockNum(bbTarget)); + printf(" -> %s (callf)", dspBlockNum(GetTarget())); break; case BBJ_CALLFINALLYRET: - printf(" -> %s (callfr)", dspBlockNum(bbTarget)); + printf(" -> %s (callfr)", dspBlockNum(GetTarget())); break; case BBJ_COND: - printf(" -> %s,%s (cond)", dspBlockNum(bbTrueTarget), dspBlockNum(bbFalseTarget)); + printf(" -> %s,%s (cond)", dspBlockNum(GetTrueTarget()), dspBlockNum(GetFalseTarget())); break; case BBJ_SWITCH: @@ -857,11 +857,16 @@ void BasicBlock::TransferTarget(BasicBlock* from) SetEhf(from->GetEhfTargets()); from->bbEhfTargets = nullptr; // Make sure nobody uses the descriptor after this. break; + + // TransferTarget may be called after setting the source block of `from`'s + // successor edges to this block. + // This means calling GetTarget/GetTrueTarget/GetFalseTarget would trigger asserts. + // Avoid this by accessing the edges directly. 
case BBJ_COND: - SetCond(from->GetTrueTarget(), from->GetFalseTarget()); + SetCond(from->bbTrueEdge, from->bbFalseEdge); break; case BBJ_ALWAYS: - SetKindAndTarget(from->GetKind(), from->GetTarget()); + SetKindAndTargetEdge(BBJ_ALWAYS, from->bbTargetEdge); CopyFlags(from, BBF_NONE_QUIRK); break; case BBJ_CALLFINALLY: @@ -869,10 +874,10 @@ void BasicBlock::TransferTarget(BasicBlock* from) case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - SetKindAndTarget(from->GetKind(), from->GetTarget()); + SetKindAndTargetEdge(from->GetKind(), from->bbTargetEdge); break; default: - SetKindAndTarget(from->GetKind()); // Clear the target + SetKindAndTargetEdge(from->GetKind()); // Clear the target break; } assert(KindIs(from->GetKind())); @@ -985,7 +990,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const // BasicBlock* BasicBlock::GetUniqueSucc() const { - return KindIs(BBJ_ALWAYS) ? bbTarget : nullptr; + return KindIs(BBJ_ALWAYS) ? GetTarget() : nullptr; } // Static vars. @@ -1145,7 +1150,7 @@ unsigned BasicBlock::NumSucc() const return 1; case BBJ_COND: - if (bbTrueTarget == bbFalseTarget) + if (bbTrueEdge == bbFalseEdge) { return 1; } @@ -1199,18 +1204,18 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - return bbTarget; + return GetTarget(); case BBJ_COND: if (i == 0) { - return bbFalseTarget; + return GetFalseTarget(); } else { assert(i == 1); - assert(bbFalseTarget != bbTrueTarget); - return bbTrueTarget; + assert(bbTrueEdge != bbFalseEdge); + return GetTrueTarget(); } case BBJ_EHFINALLYRET: @@ -1270,7 +1275,7 @@ unsigned BasicBlock::NumSucc(Compiler* comp) return 1; case BBJ_COND: - if (bbTrueTarget == bbFalseTarget) + if (bbTrueEdge == bbFalseEdge) { return 1; } @@ -1309,8 +1314,8 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) { case BBJ_EHFILTERRET: // Handler is the (sole) normal successor of the filter. 
- assert(comp->fgFirstBlockOfHandler(this) == bbTarget); - return bbTarget; + assert(comp->fgFirstBlockOfHandler(this) == GetTarget()); + return GetTarget(); case BBJ_EHFINALLYRET: assert(bbEhfTargets != nullptr); @@ -1322,18 +1327,18 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: - return bbTarget; + return GetTarget(); case BBJ_COND: if (i == 0) { - return bbFalseTarget; + return GetFalseTarget(); } else { assert(i == 1); - assert(bbFalseTarget != bbTrueTarget); - return bbTrueTarget; + assert(bbTrueEdge != bbFalseEdge); + return GetTrueTarget(); } case BBJ_SWITCH: @@ -1585,15 +1590,10 @@ BasicBlock* BasicBlock::New(Compiler* compiler) return block; } -BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind, BasicBlock* target /* = nullptr */) +BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind) { BasicBlock* block = BasicBlock::New(compiler); - - // In some cases, we don't know a block's jump target during initialization, so don't check the jump kind/target - // yet. - // The checks will be done any time the jump kind/target is read or written to after initialization. - block->bbKind = kind; - block->bbTarget = target; + block->bbKind = kind; if (block->KindIs(BBJ_THROW)) { diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 208eb07e583fa0..12f97668967088 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -309,27 +309,15 @@ class PredBlockList // BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab. // It is an error (with assert) to yield a nullptr BasicBlock* in this array. -// `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr +// `m_edgeEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr // (meaning, no actual iteration will happen). 
// class BBArrayIterator { - // Quirk: Some BasicBlock kinds refer to their successors with BasicBlock pointers, - // while others use FlowEdge pointers. Eventually, every type will use FlowEdge pointers. - // For now, support iterating with both types. - union { - BasicBlock* const* m_bbEntry; - FlowEdge* const* m_edgeEntry; - }; - - bool iterateEdges; + FlowEdge* const* m_edgeEntry; public: - BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry), iterateEdges(false) - { - } - - BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry), iterateEdges(true) + BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry) { } @@ -337,14 +325,14 @@ class BBArrayIterator BBArrayIterator& operator++() { - assert(m_bbEntry != nullptr); - ++m_bbEntry; + assert(m_edgeEntry != nullptr); + ++m_edgeEntry; return *this; } bool operator!=(const BBArrayIterator& i) const { - return m_bbEntry != i.m_bbEntry; + return m_edgeEntry != i.m_edgeEntry; } }; @@ -506,6 +494,185 @@ enum class BasicBlockVisit // clang-format on +//------------------------------------------------------------------------- +// FlowEdge -- control flow edge +// +// In compiler terminology the control flow between two BasicBlocks +// is typically referred to as an "edge". Most well known are the +// backward branches for loops, which are often called "back-edges". +// +// "struct FlowEdge" is the type that represents our control flow edges. +// This type is a linked list of zero or more "edges". +// (The list of zero edges is represented by NULL.) +// Every BasicBlock has a field called bbPreds of this type. This field +// represents the list of "edges" that flow into this BasicBlock. +// The FlowEdge type only stores the BasicBlock* of the source for the +// control flow edge. The destination block for the control flow edge +// is implied to be the block which contained the bbPreds field. 
+// +// For a switch branch target there may be multiple "edges" that have +// the same source block (and destination block). We need to count the +// number of these edges so that during optimization we will know when +// we have zero of them. Rather than have extra FlowEdge entries we +// track this via the DupCount property. +// +// When we have Profile weight for the BasicBlocks we can usually compute +// the number of times each edge was executed by examining the adjacent +// BasicBlock weights. As we are doing for BasicBlocks, we call the number +// of times that a control flow edge was executed the "edge weight". +// In order to compute the edge weights we need to use a bounded range +// for every edge weight. These two fields, 'm_edgeWeightMin' and 'm_edgeWeightMax' +// are used to hold a bounded range. Most often these will converge such +// that both values are the same and that value is the exact edge weight. +// Sometimes we are left with a range of possible values between [Min..Max] +// which represents an inexact edge weight. +// +// The bbPreds list is initially created by Compiler::fgLinkBasicBlocks() +// and is incrementally kept up to date. +// +// The edge weights are computed by Compiler::fgComputeEdgeWeights() +// the edge weights are used to straighten conditional branches +// by Compiler::fgReorderBlocks() +// +struct FlowEdge +{ +private: + // The next predecessor edge in the list, nullptr for end of list. + FlowEdge* m_nextPredEdge; + + // The source of the control flow + BasicBlock* m_sourceBlock; + + // The destination of the control flow + BasicBlock* m_destBlock; + + // Edge weights + weight_t m_edgeWeightMin; + weight_t m_edgeWeightMax; + + // Likelihood that m_sourceBlock transfers control along this edge. 
+ // Values in range [0..1] + weight_t m_likelihood; + + // The count of duplicate "edges" (used for switch stmts or degenerate branches) + unsigned m_dupCount; + + // True if likelihood has been set + bool m_likelihoodSet; + +public: + FlowEdge(BasicBlock* sourceBlock, BasicBlock* destBlock, FlowEdge* rest) + : m_nextPredEdge(rest) + , m_sourceBlock(sourceBlock) + , m_destBlock(destBlock) + , m_edgeWeightMin(0) + , m_edgeWeightMax(0) + , m_likelihood(0) + , m_dupCount(0) + , m_likelihoodSet(false) + { + } + + FlowEdge* getNextPredEdge() const + { + return m_nextPredEdge; + } + + FlowEdge** getNextPredEdgeRef() + { + return &m_nextPredEdge; + } + + void setNextPredEdge(FlowEdge* newEdge) + { + m_nextPredEdge = newEdge; + } + + BasicBlock* getSourceBlock() const + { + assert(m_sourceBlock != nullptr); + return m_sourceBlock; + } + + void setSourceBlock(BasicBlock* newBlock) + { + assert(newBlock != nullptr); + m_sourceBlock = newBlock; + } + + BasicBlock* getDestinationBlock() const + { + assert(m_destBlock != nullptr); + return m_destBlock; + } + + void setDestinationBlock(BasicBlock* newBlock) + { + assert(newBlock != nullptr); + m_destBlock = newBlock; + } + + weight_t edgeWeightMin() const + { + return m_edgeWeightMin; + } + + weight_t edgeWeightMax() const + { + return m_edgeWeightMax; + } + + // These two methods are used to set new values for edge weights. 
+ // They return false if the newWeight is not between the current [min..max] + // when slop is non-zero we allow for the case where our weights might be off by 'slop' + // + bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); + bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); + void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst); + + weight_t getLikelihood() const + { + return m_likelihood; + } + + void setLikelihood(weight_t likelihood) + { + assert(likelihood >= 0.0); + assert(likelihood <= 1.0); + m_likelihoodSet = true; + m_likelihood = likelihood; + } + + void clearLikelihood() + { + m_likelihood = 0.0; + m_likelihoodSet = false; + } + + bool hasLikelihood() const + { + return m_likelihoodSet; + } + + weight_t getLikelyWeight() const; + + unsigned getDupCount() const + { + return m_dupCount; + } + + void incrementDupCount() + { + m_dupCount++; + } + + void decrementDupCount() + { + assert(m_dupCount >= 1); + m_dupCount--; + } +}; + //------------------------------------------------------------------------ // BasicBlock: describes a basic block in the flowgraph. 
// @@ -525,19 +692,19 @@ struct BasicBlock : private LIR::Range /* The following union describes the jump target(s) of this block */ union { - unsigned bbTargetOffs; // PC offset (temporary only) - BasicBlock* bbTarget; // basic block - BasicBlock* bbTrueTarget; // BBJ_COND jump target when its condition is true (alias for bbTarget) - BBswtDesc* bbSwtTargets; // switch descriptor - BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor + unsigned bbTargetOffs; // PC offset (temporary only) + FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) + FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge) + BBswtDesc* bbSwtTargets; // switch descriptor + BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor }; - // Points to the successor of a BBJ_COND block if bbTrueTarget is not taken - BasicBlock* bbFalseTarget; + // Successor edge of a BBJ_COND block if bbTrueEdge is not taken + FlowEdge* bbFalseEdge; public: static BasicBlock* New(Compiler* compiler); - static BasicBlock* New(Compiler* compiler, BBKinds kind, BasicBlock* target = nullptr); + static BasicBlock* New(Compiler* compiler, BBKinds kind); static BasicBlock* New(Compiler* compiler, BBehfDesc* ehfTargets); static BasicBlock* New(Compiler* compiler, BBswtDesc* swtTargets); static BasicBlock* New(Compiler* compiler, BBKinds kind, unsigned targetOffs); @@ -623,100 +790,135 @@ struct BasicBlock : private LIR::Range return bbTargetOffs; } - void SetKindAndTarget(BBKinds kind, unsigned targetOffs) - { - bbKind = kind; - bbTargetOffs = targetOffs; - assert(KindIs(BBJ_ALWAYS, BBJ_COND, BBJ_LEAVE)); - } - bool HasTarget() const { - // These block types should always have bbTarget set + // These block types should always have bbTargetEdge set return KindIs(BBJ_ALWAYS, BBJ_CALLFINALLY, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET, BBJ_EHFILTERRET, BBJ_LEAVE); } BasicBlock* GetTarget() const { - // Only block kinds that use `bbTarget` 
can access it, and it must be non-null. + return GetTargetEdge()->getDestinationBlock(); + } + + FlowEdge* GetTargetEdge() const + { + // Only block kinds that use `bbTargetEdge` can access it, and it must be non-null. assert(HasInitializedTarget()); - return bbTarget; + assert(bbTargetEdge->getSourceBlock() == this); + assert(bbTargetEdge->getDestinationBlock() != nullptr); + return bbTargetEdge; } - void SetTarget(BasicBlock* target) + void SetTargetEdge(FlowEdge* targetEdge) { // SetKindAndTarget() nulls target for non-jump kinds, - // so don't use SetTarget() to null bbTarget without updating bbKind. - bbTarget = target; + // so don't use SetTargetEdge() to null bbTargetEdge without updating bbKind. + bbTargetEdge = targetEdge; assert(HasInitializedTarget()); + assert(bbTargetEdge->getSourceBlock() == this); + assert(bbTargetEdge->getDestinationBlock() != nullptr); } BasicBlock* GetTrueTarget() const + { + return GetTrueEdge()->getDestinationBlock(); + } + + FlowEdge* GetTrueEdge() const { assert(KindIs(BBJ_COND)); - assert(bbTrueTarget != nullptr); - return bbTrueTarget; + assert(bbTrueEdge != nullptr); + assert(bbTrueEdge->getSourceBlock() == this); + assert(bbTrueEdge->getDestinationBlock() != nullptr); + return bbTrueEdge; } - void SetTrueTarget(BasicBlock* target) + void SetTrueEdge(FlowEdge* trueEdge) { assert(KindIs(BBJ_COND)); - assert(target != nullptr); - bbTrueTarget = target; + bbTrueEdge = trueEdge; + assert(bbTrueEdge != nullptr); + assert(bbTrueEdge->getSourceBlock() == this); + assert(bbTrueEdge->getDestinationBlock() != nullptr); } bool TrueTargetIs(const BasicBlock* target) const { - assert(KindIs(BBJ_COND)); - assert(bbTrueTarget != nullptr); - return (bbTrueTarget == target); + return (GetTrueTarget() == target); + } + + bool TrueEdgeIs(const FlowEdge* targetEdge) const + { + return (GetTrueEdge() == targetEdge); } BasicBlock* GetFalseTarget() const + { + return GetFalseEdge()->getDestinationBlock(); + } + + FlowEdge* GetFalseEdge() const { 
assert(KindIs(BBJ_COND)); - assert(bbFalseTarget != nullptr); - return bbFalseTarget; + assert(bbFalseEdge != nullptr); + assert(bbFalseEdge->getSourceBlock() == this); + assert(bbFalseEdge->getDestinationBlock() != nullptr); + return bbFalseEdge; } - void SetFalseTarget(BasicBlock* target) + void SetFalseEdge(FlowEdge* falseEdge) { assert(KindIs(BBJ_COND)); - assert(target != nullptr); - bbFalseTarget = target; + bbFalseEdge = falseEdge; + assert(bbFalseEdge != nullptr); + assert(bbFalseEdge->getSourceBlock() == this); + assert(bbFalseEdge->getDestinationBlock() != nullptr); } bool FalseTargetIs(const BasicBlock* target) const { - assert(KindIs(BBJ_COND)); - assert(bbFalseTarget != nullptr); - return (bbFalseTarget == target); + return (GetFalseTarget() == target); } - void SetCond(BasicBlock* trueTarget, BasicBlock* falseTarget) + bool FalseEdgeIs(const FlowEdge* targetEdge) const { - assert(trueTarget != nullptr); - bbKind = BBJ_COND; - bbTrueTarget = trueTarget; - bbFalseTarget = falseTarget; + return (GetFalseEdge() == targetEdge); } - // Set both the block kind and target. This can clear `bbTarget` when setting - // block kinds that don't use `bbTarget`. - void SetKindAndTarget(BBKinds kind, BasicBlock* target = nullptr) + void SetCond(FlowEdge* trueEdge, FlowEdge* falseEdge) { - bbKind = kind; - bbTarget = target; + bbKind = BBJ_COND; + SetTrueEdge(trueEdge); + SetFalseEdge(falseEdge); + } - // If bbKind indicates this block has a jump, bbTarget cannot be null. + // In most cases, a block's true and false targets are known by the time SetCond is called. + // To simplify the few cases where the false target isn't available until later, + // overload SetCond to initialize only the true target. + // This simplifies, for example, lowering switch blocks into jump sequences. + void SetCond(FlowEdge* trueEdge) + { + bbKind = BBJ_COND; + SetTrueEdge(trueEdge); + } + + // Set both the block kind and target edge. 
This can clear `bbTargetEdge` when setting + // block kinds that don't use `bbTargetEdge`. + void SetKindAndTargetEdge(BBKinds kind, FlowEdge* targetEdge = nullptr) + { + bbKind = kind; + bbTargetEdge = targetEdge; + + // If bbKind indicates this block has a jump, bbTargetEdge cannot be null. // You shouldn't use this to set a BBJ_COND, BBJ_SWITCH, or BBJ_EHFINALLYRET. - assert(HasTarget() ? HasInitializedTarget() : (bbTarget == nullptr)); + assert(HasTarget() ? HasInitializedTarget() : (bbTargetEdge == nullptr)); } bool HasInitializedTarget() const { assert(HasTarget()); - return (bbTarget != nullptr); + return (bbTargetEdge != nullptr); } bool TargetIs(const BasicBlock* target) const @@ -762,19 +964,13 @@ struct BasicBlock : private LIR::Range bbEhfTargets = ehfTarget; } - // BBJ_CALLFINALLYRET uses the `bbTarget` field. However, also treat it specially: + // BBJ_CALLFINALLYRET uses the `bbTargetEdge` field. However, also treat it specially: // for callers that know they want a continuation, use this function instead of the // general `GetTarget()` to allow asserting on the block kind. BasicBlock* GetFinallyContinuation() const { assert(KindIs(BBJ_CALLFINALLYRET)); - return bbTarget; - } - - void SetFinallyContinuation(BasicBlock* finallyContinuation) - { - assert(KindIs(BBJ_CALLFINALLYRET)); - bbTarget = finallyContinuation; + return GetTarget(); } #ifdef DEBUG @@ -783,21 +979,21 @@ struct BasicBlock : private LIR::Range BasicBlock* GetTargetRaw() const { assert(HasTarget()); - return bbTarget; + return (bbTargetEdge == nullptr) ? nullptr : bbTargetEdge->getDestinationBlock(); } // Return the BBJ_COND true target; it might be null. Only used during dumping. BasicBlock* GetTrueTargetRaw() const { assert(KindIs(BBJ_COND)); - return bbTrueTarget; + return (bbTrueEdge == nullptr) ? nullptr : bbTrueEdge->getDestinationBlock(); } // Return the BBJ_COND false target; it might be null. Only used during dumping. 
BasicBlock* GetFalseTargetRaw() const { assert(KindIs(BBJ_COND)); - return bbFalseTarget; + return (bbFalseEdge == nullptr) ? nullptr : bbFalseEdge->getDestinationBlock(); } #endif // DEBUG @@ -1576,22 +1772,9 @@ struct BasicBlock : private LIR::Range // need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at // `m_succs` or at the switch table successor array. - BasicBlock* m_succs[2]; - - // Quirk: Some BasicBlock kinds refer to their successors with BasicBlock pointers, - // while others use FlowEdge pointers. Eventually, every type will use FlowEdge pointers. - // For now, support iterating with both types. - union { - BasicBlock* const* m_begin; - FlowEdge* const* m_beginEdge; - }; - - union { - BasicBlock* const* m_end; - FlowEdge* const* m_endEdge; - }; - - bool iterateEdges; + FlowEdge* m_succs[2]; + FlowEdge* const* m_begin; + FlowEdge* const* m_end; public: BBSuccList(const BasicBlock* block); @@ -1932,7 +2115,6 @@ inline BBArrayIterator BBEhfSuccList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - iterateEdges = false; switch (block->bbKind) { @@ -1950,24 +2132,24 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - m_succs[0] = block->bbTarget; + m_succs[0] = block->GetTargetEdge(); m_begin = &m_succs[0]; m_end = &m_succs[1]; break; case BBJ_COND: - m_succs[0] = block->bbFalseTarget; + m_succs[0] = block->GetFalseEdge(); m_begin = &m_succs[0]; // If both fall-through and branch successors are identical, then only include // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()). 
- if (block->TrueTargetIs(block->GetFalseTarget())) + if (block->TrueEdgeIs(block->GetFalseEdge())) { m_end = &m_succs[1]; } else { - m_succs[1] = block->bbTrueTarget; + m_succs[1] = block->GetTrueEdge(); m_end = &m_succs[2]; } break; @@ -1978,26 +2160,22 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) // been computed. if (block->GetEhfTargets() == nullptr) { - m_beginEdge = nullptr; - m_endEdge = nullptr; + m_begin = nullptr; + m_end = nullptr; } else { - m_beginEdge = block->GetEhfTargets()->bbeSuccs; - m_endEdge = block->GetEhfTargets()->bbeSuccs + block->GetEhfTargets()->bbeCount; + m_begin = block->GetEhfTargets()->bbeSuccs; + m_end = block->GetEhfTargets()->bbeSuccs + block->GetEhfTargets()->bbeCount; } - - iterateEdges = true; break; case BBJ_SWITCH: // We don't use the m_succs in-line data for switches; use the existing jump table in the block. assert(block->bbSwtTargets != nullptr); assert(block->bbSwtTargets->bbsDstTab != nullptr); - m_beginEdge = block->bbSwtTargets->bbsDstTab; - m_endEdge = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount; - - iterateEdges = true; + m_begin = block->bbSwtTargets->bbsDstTab; + m_end = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount; break; default: @@ -2009,12 +2187,12 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) inline BBArrayIterator BasicBlock::BBSuccList::begin() const { - return (iterateEdges ? BBArrayIterator(m_beginEdge) : BBArrayIterator(m_begin)); + return BBArrayIterator(m_begin); } inline BBArrayIterator BasicBlock::BBSuccList::end() const { - return (iterateEdges ? 
BBArrayIterator(m_endEdge) : BBArrayIterator(m_end)); + return BBArrayIterator(m_end); } // We have a simpler struct, BasicBlockList, which is simply a singly-linked @@ -2034,206 +2212,23 @@ struct BasicBlockList } }; -//------------------------------------------------------------------------- -// FlowEdge -- control flow edge -// -// In compiler terminology the control flow between two BasicBlocks -// is typically referred to as an "edge". Most well known are the -// backward branches for loops, which are often called "back-edges". -// -// "struct FlowEdge" is the type that represents our control flow edges. -// This type is a linked list of zero or more "edges". -// (The list of zero edges is represented by NULL.) -// Every BasicBlock has a field called bbPreds of this type. This field -// represents the list of "edges" that flow into this BasicBlock. -// The FlowEdge type only stores the BasicBlock* of the source for the -// control flow edge. The destination block for the control flow edge -// is implied to be the block which contained the bbPreds field. -// -// For a switch branch target there may be multiple "edges" that have -// the same source block (and destination block). We need to count the -// number of these edges so that during optimization we will know when -// we have zero of them. Rather than have extra FlowEdge entries we -// track this via the DupCount property. -// -// When we have Profile weight for the BasicBlocks we can usually compute -// the number of times each edge was executed by examining the adjacent -// BasicBlock weights. As we are doing for BasicBlocks, we call the number -// of times that a control flow edge was executed the "edge weight". -// In order to compute the edge weights we need to use a bounded range -// for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax' -// are used to hold a bounded range. 
Most often these will converge such -// that both values are the same and that value is the exact edge weight. -// Sometimes we are left with a rage of possible values between [Min..Max] -// which represents an inexact edge weight. -// -// The bbPreds list is initially created by Compiler::fgLinkBasicBlocks() -// and is incrementally kept up to date. -// -// The edge weight are computed by Compiler::fgComputeEdgeWeights() -// the edge weights are used to straighten conditional branches -// by Compiler::fgReorderBlocks() -// -struct FlowEdge -{ -private: - // The next predecessor edge in the list, nullptr for end of list. - FlowEdge* m_nextPredEdge; - - // The source of the control flow - BasicBlock* m_sourceBlock; - - // The destination of the control flow - BasicBlock* m_destBlock; - - // Edge weights - weight_t m_edgeWeightMin; - weight_t m_edgeWeightMax; - - // Likelihood that m_sourceBlock transfers control along this edge. - // Values in range [0..1] - weight_t m_likelihood; - - // The count of duplicate "edges" (used for switch stmts or degenerate branches) - unsigned m_dupCount; - - // True if likelihood has been set - bool m_likelihoodSet; - -public: - FlowEdge(BasicBlock* sourceBlock, BasicBlock* destBlock, FlowEdge* rest) - : m_nextPredEdge(rest) - , m_sourceBlock(sourceBlock) - , m_destBlock(destBlock) - , m_edgeWeightMin(0) - , m_edgeWeightMax(0) - , m_likelihood(0) - , m_dupCount(0) - , m_likelihoodSet(false) - { - } - - FlowEdge* getNextPredEdge() const - { - return m_nextPredEdge; - } - - FlowEdge** getNextPredEdgeRef() - { - return &m_nextPredEdge; - } - - void setNextPredEdge(FlowEdge* newEdge) - { - m_nextPredEdge = newEdge; - } - - BasicBlock* getSourceBlock() const - { - assert(m_sourceBlock != nullptr); - return m_sourceBlock; - } - - void setSourceBlock(BasicBlock* newBlock) - { - assert(newBlock != nullptr); - m_sourceBlock = newBlock; - } +// FlowEdge implementations (that are required to be defined after the declaration of BasicBlock) - 
BasicBlock* getDestinationBlock() const - { - assert(m_destBlock != nullptr); - return m_destBlock; - } - - void setDestinationBlock(BasicBlock* newBlock) - { - assert(newBlock != nullptr); - m_destBlock = newBlock; - } - - weight_t edgeWeightMin() const - { - return m_edgeWeightMin; - } - - weight_t edgeWeightMax() const - { - return m_edgeWeightMax; - } - - // These two methods are used to set new values for edge weights. - // They return false if the newWeight is not between the current [min..max] - // when slop is non-zero we allow for the case where our weights might be off by 'slop' - // - bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); - bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); - void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst); - - weight_t getLikelihood() const - { - return m_likelihood; - } - - void setLikelihood(weight_t likelihood) - { - assert(likelihood >= 0.0); - assert(likelihood <= 1.0); - m_likelihoodSet = true; - m_likelihood = likelihood; - } - - void clearLikelihood() - { - m_likelihood = 0.0; - m_likelihoodSet = false; - } - - bool hasLikelihood() const - { - return m_likelihoodSet; - } - - weight_t getLikelyWeight() const - { - assert(m_likelihoodSet); - return m_likelihood * m_sourceBlock->bbWeight; - } - - unsigned getDupCount() const - { - return m_dupCount; - } - - void incrementDupCount() - { - m_dupCount++; - } - - void decrementDupCount() - { - assert(m_dupCount >= 1); - m_dupCount--; - } -}; +inline weight_t FlowEdge::getLikelyWeight() const +{ + assert(m_likelihoodSet); + return m_likelihood * m_sourceBlock->bbWeight; +} // BasicBlock iterator implementations (that are required to be defined after the declaration of FlowEdge) inline BasicBlock* BBArrayIterator::operator*() const { - if (iterateEdges) - { - assert(m_edgeEntry != nullptr); - FlowEdge* edgeTarget = *m_edgeEntry; - 
assert(edgeTarget != nullptr); - assert(edgeTarget->getDestinationBlock() != nullptr); - return edgeTarget->getDestinationBlock(); - } - - assert(m_bbEntry != nullptr); - BasicBlock* bTarget = *m_bbEntry; - assert(bTarget != nullptr); - return bTarget; + assert(m_edgeEntry != nullptr); + FlowEdge* edgeTarget = *m_edgeEntry; + assert(edgeTarget != nullptr); + assert(edgeTarget->getDestinationBlock() != nullptr); + return edgeTarget->getDestinationBlock(); } // Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and FlowEdge) diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis index 95dd3dc305689b..d87d36fcf1410b 100644 --- a/src/coreclr/jit/clrjit.natvis +++ b/src/coreclr/jit/clrjit.natvis @@ -21,7 +21,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u - BB{bbNum,d}->BB{bbTarget->bbNum,d}; {bbKind,en} + BB{bbNum,d}->BB{bbTargetEdge->m_destBlock->bbNum,d}; {bbKind,en} BB{bbNum,d}; {bbKind,en}; {bbSwtTargets->bbsCount} cases BB{bbNum,d}; {bbKind,en}; {bbEhfTargets->bbeCount} succs BB{bbNum,d}; {bbKind,en} diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 0b984ea2ef2089..ecd29a59783ac6 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -332,7 +332,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) #endif // !FEATURE_EH_FUNCLETS // The BBJ_CALLFINALLYRET is used because the BBJ_CALLFINALLY can't point to the - // jump target using bbTarget - that is already used to point + // jump target using bbTargetEdge - that is already used to point // to the finally block. So just skip past the BBJ_CALLFINALLYRET unless the // block is RETLESS. 
if (!block->HasFlag(BBF_RETLESS_CALL)) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index f3ebaf3e57ef12..cc679290a23d48 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -5080,34 +5080,31 @@ class Compiler void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); - BasicBlock* fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion, BasicBlock* jumpDest = nullptr); + BasicBlock* fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion); - BasicBlock* fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion, BasicBlock* jumpDest = nullptr); + BasicBlock* fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion); - BasicBlock* fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, BasicBlock* jumpDest = nullptr, bool updateSideEffects = false); + BasicBlock* fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, bool updateSideEffects = false); BasicBlock* fgNewBBinRegion(BBKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, - BasicBlock* jumpDest = nullptr, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBKinds jumpKind, BasicBlock* srcBlk, - BasicBlock* jumpDest = nullptr, bool runRarely = false, bool insertAtEnd = false); - BasicBlock* fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest = nullptr); + BasicBlock* fgNewBBinRegion(BBKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, - bool putInTryRegion, - BasicBlock* jumpDest = nullptr); + bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 073a5cde7e2c3a..2b7883a3cc7fdc 
100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -664,27 +664,27 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) return VisitEHSuccs(comp, func); case BBJ_CALLFINALLY: - RETURN_ON_ABORT(func(bbTarget)); + RETURN_ON_ABORT(func(GetTarget())); return ::VisitEHSuccs(comp, this, func); case BBJ_CALLFINALLYRET: // These are "pseudo-blocks" and control never actually flows into them // (codegen directly jumps to its successor after finally calls). - return func(bbTarget); + return func(GetTarget()); case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: case BBJ_ALWAYS: - RETURN_ON_ABORT(func(bbTarget)); + RETURN_ON_ABORT(func(GetTarget())); return VisitEHSuccs(comp, func); case BBJ_COND: - RETURN_ON_ABORT(func(bbFalseTarget)); + RETURN_ON_ABORT(func(GetFalseTarget())); - if (bbTrueTarget != bbFalseTarget) + if (!TrueEdgeIs(GetFalseEdge())) { - RETURN_ON_ABORT(func(bbTrueTarget)); + RETURN_ON_ABORT(func(GetTrueTarget())); } return VisitEHSuccs(comp, func); @@ -744,14 +744,14 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) case BBJ_EHFILTERRET: case BBJ_LEAVE: case BBJ_ALWAYS: - return func(bbTarget); + return func(GetTarget()); case BBJ_COND: - RETURN_ON_ABORT(func(bbFalseTarget)); + RETURN_ON_ABORT(func(GetFalseTarget())); - if (bbTrueTarget != bbFalseTarget) + if (!TrueEdgeIs(GetFalseEdge())) { - RETURN_ON_ABORT(func(bbTrueTarget)); + RETURN_ON_ABORT(func(GetTrueTarget())); } return BasicBlockVisit::Continue; diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index d1a7bc7fda298a..625b306e600237 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -206,17 +206,10 @@ bool Compiler::fgEnsureFirstBBisScratch() assert(fgFirstBBScratch == nullptr); - BasicBlock* block = BasicBlock::New(this, BBJ_ALWAYS, fgFirstBB); - block->SetFlags(BBF_NONE_QUIRK); + BasicBlock* block; if (fgFirstBB != nullptr) { - // If we have profile data the new block 
will inherit fgFirstBlock's weight - if (fgFirstBB->hasProfileWeight()) - { - block->inheritWeight(fgFirstBB); - } - // The first block has an implicit ref count which we must // remove. Note the ref count could be greater than one, if // the first block is not scratch and is targeted by a @@ -224,14 +217,24 @@ bool Compiler::fgEnsureFirstBBisScratch() assert(fgFirstBB->bbRefs >= 1); fgFirstBB->bbRefs--; + block = BasicBlock::New(this); + + // If we have profile data the new block will inherit fgFirstBlock's weight + if (fgFirstBB->hasProfileWeight()) + { + block->inheritWeight(fgFirstBB); + } + // The new scratch bb will fall through to the old first bb FlowEdge* const edge = fgAddRefPred(fgFirstBB, block); edge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, edge); fgInsertBBbefore(fgFirstBB, block); } else { noway_assert(fgLastBB == nullptr); + block = BasicBlock::New(this, BBJ_ALWAYS); fgFirstBB = block; fgLastBB = block; } @@ -239,7 +242,7 @@ bool Compiler::fgEnsureFirstBBisScratch() noway_assert(fgLastBB != nullptr); // Set the expected flags - block->SetFlags(BBF_INTERNAL | BBF_IMPORTED); + block->SetFlags(BBF_INTERNAL | BBF_IMPORTED | BBF_NONE_QUIRK); // This new first BB has an implicit ref, and no others. // @@ -357,7 +360,7 @@ void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. 
- block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); block->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY // Any block with a throw is rare @@ -645,9 +648,9 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas case BBJ_LEAVE: // This function can be called before import, so we still have BBJ_LEAVE { assert(block->TargetIs(oldTarget)); - block->SetTarget(newTarget); - FlowEdge* const oldEdge = fgRemoveRefPred(oldTarget, block); - fgAddRefPred(newTarget, block, oldEdge); + fgRemoveRefPred(block->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(newTarget, block, block->GetTargetEdge()); + block->SetTargetEdge(newEdge); break; } @@ -655,44 +658,50 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas if (block->TrueTargetIs(oldTarget)) { - if (block->FalseTargetIs(oldTarget)) + FlowEdge* const oldEdge = block->GetTrueEdge(); + + if (block->FalseEdgeIs(oldEdge)) { // fgRemoveRefPred returns nullptr for BBJ_COND blocks with two flow edges to target fgRemoveConditionalJump(block); assert(block->KindIs(BBJ_ALWAYS)); assert(block->TargetIs(oldTarget)); - block->SetTarget(newTarget); - } - else - { - block->SetTrueTarget(newTarget); } // fgRemoveRefPred should have removed the flow edge - FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block); - assert(oldEdge != nullptr); + fgRemoveRefPred(oldEdge); + assert(oldEdge->getDupCount() == 0); // TODO-NoFallThrough: Proliferate weight from oldEdge // (as a quirk, we avoid doing so for the true target to reduce diffs for now) FlowEdge* const newEdge = fgAddRefPred(newTarget, block); + if (block->KindIs(BBJ_ALWAYS)) { newEdge->setLikelihood(1.0); + block->SetTargetEdge(newEdge); } - else if (oldEdge->hasLikelihood()) + else { - newEdge->setLikelihood(oldEdge->getLikelihood()); + assert(block->KindIs(BBJ_COND)); + block->SetTrueEdge(newEdge); + + if (oldEdge->hasLikelihood()) + { + 
newEdge->setLikelihood(oldEdge->getLikelihood()); + } } } else { assert(block->FalseTargetIs(oldTarget)); + FlowEdge* const oldEdge = block->GetFalseEdge(); // fgRemoveRefPred should have removed the flow edge - FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block); - assert(oldEdge != nullptr); - block->SetFalseTarget(newTarget); - fgAddRefPred(newTarget, block, oldEdge); + fgRemoveRefPred(oldEdge); + assert(oldEdge->getDupCount() == 0); + FlowEdge* const newEdge = fgAddRefPred(newTarget, block, oldEdge); + block->SetFalseEdge(newEdge); } break; @@ -2965,10 +2974,10 @@ void Compiler::fgLinkBasicBlocks() { BasicBlock* const trueTarget = fgLookupBB(curBBdesc->GetTargetOffs()); BasicBlock* const falseTarget = curBBdesc->Next(); - curBBdesc->SetTrueTarget(trueTarget); - curBBdesc->SetFalseTarget(falseTarget); - fgAddRefPred(trueTarget, curBBdesc); - fgAddRefPred(falseTarget, curBBdesc); + FlowEdge* const trueEdge = fgAddRefPred(trueTarget, curBBdesc); + FlowEdge* const falseEdge = fgAddRefPred(falseTarget, curBBdesc); + curBBdesc->SetTrueEdge(trueEdge); + curBBdesc->SetFalseEdge(falseEdge); if (trueTarget->bbNum <= curBBdesc->bbNum) { @@ -2991,10 +3000,10 @@ void Compiler::fgLinkBasicBlocks() assert(!(curBBdesc->IsLast() && jumpsToNext)); BasicBlock* const jumpDest = jumpsToNext ? curBBdesc->Next() : fgLookupBB(curBBdesc->GetTargetOffs()); - // Redundantly use SetKindAndTarget() instead of SetTarget() just this once, - // so we don't break the HasInitializedTarget() invariant of SetTarget(). - curBBdesc->SetKindAndTarget(curBBdesc->GetKind(), jumpDest); - fgAddRefPred(jumpDest, curBBdesc); + // Redundantly use SetKindAndTargetEdge() instead of SetTargetEdge() just this once, + // so we don't break the HasInitializedTarget() invariant of SetTargetEdge(). 
+ FlowEdge* const newEdge = fgAddRefPred(jumpDest, curBBdesc); + curBBdesc->SetKindAndTargetEdge(curBBdesc->GetKind(), newEdge); if (curBBdesc->GetTarget()->bbNum <= curBBdesc->bbNum) { @@ -3569,7 +3578,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F noway_assert(codeAddr == codeEndp); - /* Finally link up the bbTarget of the blocks together */ + /* Finally link up the targets of the blocks together */ fgLinkBasicBlocks(); @@ -3887,10 +3896,10 @@ void Compiler::fgFindBasicBlocks() if (block->KindIs(BBJ_EHFILTERRET)) { // Mark catch handler as successor. - block->SetTarget(hndBegBB); FlowEdge* const newEdge = fgAddRefPred(hndBegBB, block); newEdge->setLikelihood(1.0); - assert(block->GetTarget()->bbCatchTyp == BBCT_FILTER_HANDLER); + block->SetTargetEdge(newEdge); + assert(hndBegBB->bbCatchTyp == BBCT_FILTER_HANDLER); break; } } @@ -4223,9 +4232,9 @@ void Compiler::fgFixEntryFlowForOSR() fgEnsureFirstBBisScratch(); assert(fgFirstBB->KindIs(BBJ_ALWAYS) && fgFirstBB->JumpsToNext()); fgRemoveRefPred(fgFirstBB->GetTarget(), fgFirstBB); - fgFirstBB->SetKindAndTarget(BBJ_ALWAYS, fgOSREntryBB); - FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); - edge->setLikelihood(1.0); + FlowEdge* const newEdge = fgAddRefPred(fgOSREntryBB, fgFirstBB); + newEdge->setLikelihood(1.0); + fgFirstBB->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // We don't know the right weight for this block, since // execution of the method was interrupted within the @@ -4813,19 +4822,18 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // Remove flags from the old block that are no longer possible. curr->RemoveFlags(BBF_HAS_JMP | BBF_RETLESS_CALL); + // Default to fallthrough, and add the arc for that. + FlowEdge* const newEdge = fgAddRefPred(newBlock, curr); + newEdge->setLikelihood(1.0); + // Transfer the kind and target. 
Do this after the code above, to avoid null-ing out the old targets used by the - // above code (and so newBlock->bbNext is valid, so SetCond() can initialize bbFalseTarget if newBlock is a - // BBJ_COND). + // above code. newBlock->TransferTarget(curr); - // Default to fallthrough, and add the arc for that. - curr->SetKindAndTarget(BBJ_ALWAYS, newBlock); + curr->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); curr->SetFlags(BBF_NONE_QUIRK); assert(curr->JumpsToNext()); - FlowEdge* const newEdge = fgAddRefPred(newBlock, curr); - newEdge->setLikelihood(1.0); - return newBlock; } @@ -5048,15 +5056,14 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // an immediately following block of a BBJ_SWITCH (which has // no fall-through path). For this case, simply insert a new // fall-through block after 'curr'. - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, this will be unnecessary for BBJ_COND - newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */, /* jumpDest */ succ); + // TODO-NoFallThrough: Once false target can diverge from bbNext, this will be unnecessary for BBJ_COND + newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */); newBlock->SetFlags(BBF_NONE_QUIRK); - assert(newBlock->JumpsToNext()); } else { // The new block always jumps to 'succ' - newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, /* jumpDest */ succ, /* isRunRarely */ curr->isRunRarely()); + newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, /* isRunRarely */ curr->isRunRarely()); } newBlock->CopyFlags(curr, succ->GetFlagsRaw() & BBF_BACKWARD_JUMP); @@ -5069,6 +5076,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // And 'succ' has 'newBlock' as a new predecessor. FlowEdge* const newEdge = fgAddRefPred(succ, newBlock); newEdge->setLikelihood(1.0); + newBlock->SetTargetEdge(newEdge); // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. 
@@ -5371,7 +5379,7 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) assert(!bPrev->FalseTargetIs(block)); /* Check if both sides of the BBJ_COND now jump to the same block */ - if (bPrev->TrueTargetIs(bPrev->GetFalseTarget())) + if (bPrev->TrueEdgeIs(bPrev->GetFalseEdge())) { fgRemoveConditionalJump(bPrev); } @@ -5447,16 +5455,19 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->KindIs(BBJ_COND) && bSrc->FalseTargetIs(bDst) && !bSrc->NextIs(bDst)) { // Add a new block after bSrc which jumps to 'bDst' - jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true, bDst); - bSrc->SetFalseTarget(jmpBlk); - fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); + jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); + FlowEdge* oldEdge = bSrc->GetFalseEdge(); + fgReplacePred(oldEdge, jmpBlk); + jmpBlk->SetTargetEdge(oldEdge); + assert(jmpBlk->TargetIs(bDst)); + + FlowEdge* newEdge = fgAddRefPred(jmpBlk, bSrc, oldEdge); + bSrc->SetFalseEdge(newEdge); // When adding a new jmpBlk we will set the bbWeight and bbFlags // if (fgHaveValidEdgeWeights && fgHaveProfileWeights()) { - FlowEdge* const newEdge = fgGetPredForBlock(jmpBlk, bSrc); - jmpBlk->bbWeight = (newEdge->edgeWeightMin() + newEdge->edgeWeightMax()) / 2; if (bSrc->bbWeight == BB_ZERO_WEIGHT) { @@ -5494,8 +5505,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) } } - fgReplacePred(bDst, bSrc, jmpBlk); - JITDUMP("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->GetTarget()->bbNum, bSrc->bbNum); } @@ -6052,14 +6061,11 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r * Insert a BasicBlock before the given block. 
*/ -BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, - BasicBlock* block, - bool extendRegion, - BasicBlock* jumpDest /* = nullptr */) +BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in - BasicBlock* newBlk = BasicBlock::New(this, jumpKind, jumpDest); + BasicBlock* newBlk = BasicBlock::New(this, jumpKind); newBlk->SetFlags(BBF_INTERNAL); fgInsertBBbefore(block, newBlk); @@ -6094,14 +6100,11 @@ BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, * Insert a BasicBlock after the given block. */ -BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, - BasicBlock* block, - bool extendRegion, - BasicBlock* jumpDest /* = nullptr */) +BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in - BasicBlock* newBlk = BasicBlock::New(this, jumpKind, jumpDest); + BasicBlock* newBlk = BasicBlock::New(this, jumpKind); newBlk->SetFlags(BBF_INTERNAL); fgInsertBBafter(block, newBlk); @@ -6141,7 +6144,6 @@ BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, // tree - tree that will be wrapped into a statement and // inserted in the new block. // debugInfo - debug info to propagate into the new statement. -// jumpDest - the jump target of the new block. Defaults to nullptr. // updateSideEffects - update side effects for the whole statement. 
// // Return Value: @@ -6150,14 +6152,10 @@ BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, // Notes: // The new block will have BBF_INTERNAL flag and EH region will be extended // -BasicBlock* Compiler::fgNewBBFromTreeAfter(BBKinds jumpKind, - BasicBlock* block, - GenTree* tree, - DebugInfo& debugInfo, - BasicBlock* jumpDest /* = nullptr */, - bool updateSideEffects /* = false */) +BasicBlock* Compiler::fgNewBBFromTreeAfter( + BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, bool updateSideEffects /* = false */) { - BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true, jumpDest); + BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true); newBlock->SetFlags(BBF_INTERNAL); Statement* stmt = fgNewStmtFromTree(tree, debugInfo); fgInsertStmtAtEnd(newBlock, stmt); @@ -6579,7 +6577,6 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, // [0..compHndBBtabCount]. // nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere // in the requested region. -// jumpDest - the jump target of the new block. Defaults to nullptr. // putInFilter - put the new block in the filter region given by hndIndex, as described above. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. 
Note: this is currently only @@ -6592,7 +6589,6 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, - BasicBlock* jumpDest /* = nullptr */, bool putInFilter /* = false */, bool runRarely /* = false */, bool insertAtEnd /* = false */) @@ -6718,7 +6714,7 @@ _FoundAfterBlk:; bbKindNames[jumpKind], tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd), afterBlk->bbNum); - return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion, jumpDest); + return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion); } //------------------------------------------------------------------------ @@ -6729,7 +6725,6 @@ _FoundAfterBlk:; // Arguments: // jumpKind - the jump kind of the new block to create. // srcBlk - insert the new block in the same EH region as this block, and closely after it if possible. -// jumpDest - the jump target of the new block. Defaults to nullptr. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only // implemented when inserting into the main function (not into any EH region). 
@@ -6739,7 +6734,6 @@ _FoundAfterBlk:; BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* srcBlk, - BasicBlock* jumpDest /* = nullptr */, bool runRarely /* = false */, bool insertAtEnd /* = false */) { @@ -6758,7 +6752,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk); } - return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, jumpDest, putInFilter, runRarely, insertAtEnd); + return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd); } //------------------------------------------------------------------------ @@ -6768,14 +6762,13 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, // // Arguments: // jumpKind - the jump kind of the new block to create. -// jumpDest - the jump target of the new block. Defaults to nullptr. // // Return Value: // The new block. -BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* = nullptr */) +BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind) { - return fgNewBBinRegion(jumpKind, 0, 0, nullptr, jumpDest, /* putInFilter */ false, /* runRarely */ false, + return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } @@ -6794,7 +6787,6 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* // set its handler index to the most nested handler region enclosing that 'try' region. // Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try' // index to the most nested 'try' region enclosing that handler region. -// jumpDest - the jump target of the new block. Defaults to nullptr. // // Return Value: // The new block. 
@@ -6802,13 +6794,12 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* BasicBlock* Compiler::fgNewBBinRegionWorker(BBKinds jumpKind, BasicBlock* afterBlk, unsigned regionIndex, - bool putInTryRegion, - BasicBlock* jumpDest /* = nullptr */) + bool putInTryRegion) { /* Insert the new block */ BasicBlock* afterBlkNext = afterBlk->Next(); (void)afterBlkNext; // prevent "unused variable" error from GCC - BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false, jumpDest); + BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false); if (putInTryRegion) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 6a2068169e1a54..4e545a649f990d 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -134,7 +134,7 @@ void Compiler::fgDebugCheckUpdate() // Check for an unnecessary jumps to the next block. // A conditional branch should never jump to the next block as it can be folded into a BBJ_ALWAYS. - if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget())) + if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge())) { noway_assert(!"Unnecessary jump to the next block!"); } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 4c781cbc0c222c..54a36e5c0d1697 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -167,12 +167,12 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() fgPrepareCallFinallyRetForRemoval(leaveBlock); fgRemoveBlock(leaveBlock, /* unreachable */ true); - currentBlock->SetKindAndTarget(BBJ_ALWAYS, postTryFinallyBlock); - currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY - // Ref count updates. 
- fgAddRefPred(postTryFinallyBlock, currentBlock); - fgRemoveRefPred(firstBlock, currentBlock); + fgRemoveRefPred(currentBlock->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(postTryFinallyBlock, currentBlock); + + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); + currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY // Cleanup the postTryFinallyBlock fgCleanupContinuation(postTryFinallyBlock); @@ -524,8 +524,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->SetKindAndTarget(BBJ_ALWAYS, continuation); - fgAddRefPred(continuation, block); + FlowEdge* const newEdge = fgAddRefPred(continuation, block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } } @@ -1093,9 +1093,9 @@ PhaseStatus Compiler::fgCloneFinally() GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - newBlock->SetKindAndTarget(BBJ_ALWAYS, normalCallFinallyReturn); - fgAddRefPred(normalCallFinallyReturn, newBlock); + FlowEdge* const newEdge = fgAddRefPred(normalCallFinallyReturn, newBlock); + newBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } else { @@ -1135,13 +1135,13 @@ PhaseStatus Compiler::fgCloneFinally() fgPrepareCallFinallyRetForRemoval(leaveBlock); fgRemoveBlock(leaveBlock, /* unreachable */ true); + // Ref count updates. + fgRemoveRefPred(currentBlock->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(firstCloneBlock, currentBlock); + // This call returns to the expected spot, so retarget it to branch to the clone. - currentBlock->SetKindAndTarget(BBJ_ALWAYS, firstCloneBlock); currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY - - // Ref count updates. 
- fgAddRefPred(firstCloneBlock, currentBlock); - fgRemoveRefPred(firstBlock, currentBlock); + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Make sure iteration isn't going off the deep end. assert(leaveBlock != endCallFinallyRangeBlock); @@ -1757,8 +1757,8 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, JITDUMP("Redirecting branch in " FMT_BB " from " FMT_BB " to " FMT_BB ".\n", block->bbNum, callFinally->bbNum, canonicalCallFinally->bbNum); - block->SetTarget(canonicalCallFinally); - fgAddRefPred(canonicalCallFinally, block); + FlowEdge* const newEdge = fgAddRefPred(canonicalCallFinally, block); + block->SetTargetEdge(newEdge); assert(callFinally->bbRefs > 0); fgRemoveRefPred(callFinally, block); @@ -2103,19 +2103,20 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, assert(predBlock->KindIs(BBJ_COND)); assert(predBlock->FalseTargetIs(nonCanonicalBlock)); - BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true, canonicalBlock); - predBlock->SetFalseTarget(newBlock); + BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true); JITDUMP("*** " FMT_BB " now falling through to empty " FMT_BB " and then to " FMT_BB "\n", predBlock->bbNum, newBlock->bbNum, canonicalBlock->bbNum); // Remove the old flow - fgRemoveRefPred(nonCanonicalBlock, predBlock); + fgRemoveRefPred(predEdge); // Wire up the new flow - fgAddRefPred(newBlock, predBlock, predEdge); + FlowEdge* const falseEdge = fgAddRefPred(newBlock, predBlock, predEdge); + predBlock->SetFalseEdge(falseEdge); - fgAddRefPred(canonicalBlock, newBlock, predEdge); + FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, newBlock, predEdge); + newBlock->SetTargetEdge(newEdge); // If nonCanonicalBlock has only one pred, all its flow transfers. 
// If it has multiple preds, then we need edge counts or likelihoods @@ -2149,17 +2150,17 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, fgRemoveRefPred(nonCanonicalBlock, predBlock); // Wire up the new flow + FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, predBlock, predEdge); + if (predBlock->KindIs(BBJ_ALWAYS)) { assert(predBlock->TargetIs(nonCanonicalBlock)); - predBlock->SetTarget(canonicalBlock); + predBlock->SetTargetEdge(newEdge); } else { assert(predBlock->KindIs(BBJ_COND)); assert(predBlock->TrueTargetIs(nonCanonicalBlock)); - predBlock->SetTrueTarget(canonicalBlock); + predBlock->SetTrueEdge(newEdge); } - - fgAddRefPred(canonicalBlock, predBlock, predEdge); } diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 479e8b5fb1442a..0f1192288152b7 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -151,7 +151,7 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE // their successor edge should never have a duplicate count over 1. // assert(blockPred->KindIs(BBJ_COND)); - assert(blockPred->TrueTargetIs(blockPred->GetFalseTarget())); + assert(blockPred->TrueEdgeIs(blockPred->GetFalseEdge())); flow->setLikelihood(1.0); } else @@ -214,6 +214,11 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE // When initializing preds, ensure edge likelihood is set, // such that this edge is as likely as any other successor edge + // Note: We probably shouldn't call NumSucc on a new BBJ_COND block. + // NumSucc compares bbTrueEdge and bbFalseEdge to determine if this BBJ_COND block has only one successor, + // but these members are uninitialized. Aside from the fact that this compares uninitialized memory, + // we don't know if the true and false targets are the same in NumSucc until both edges exist. + // TODO: Move this edge likelihood logic to fgLinkBasicBlocks. 
// const unsigned numSucc = blockPred->NumSucc(); assert(numSucc > 0); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 54003637e7e36e..8497e3c904164a 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -675,13 +675,13 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorIsIntegralConst(0)) { - m_compiler->fgRemoveRefPred(block->GetTrueTarget(), block); - block->SetKindAndTarget(BBJ_ALWAYS, block->Next()); + m_compiler->fgRemoveRefPred(block->GetTrueEdge()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); block->SetFlags(BBF_NONE_QUIRK); } else { - m_compiler->fgRemoveRefPred(block->GetFalseTarget(), block); + m_compiler->fgRemoveRefPred(block->GetFalseEdge()); block->SetKind(BBJ_ALWAYS); } } @@ -1533,9 +1533,9 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) JITDUMP("\nConvert bbKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->SetKindAndTarget(BBJ_ALWAYS, bottomBlock); FlowEdge* const newEdge = fgAddRefPred(bottomBlock, block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); if (block == InlineeCompiler->fgLastBB) { @@ -1551,11 +1551,12 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) // Insert inlinee's blocks into inliner's block list. 
assert(topBlock->KindIs(BBJ_ALWAYS)); assert(topBlock->TargetIs(bottomBlock)); + FlowEdge* const oldEdge = fgRemoveRefPred(bottomBlock, topBlock); + FlowEdge* const newEdge = fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, oldEdge); + topBlock->SetNext(InlineeCompiler->fgFirstBB); - topBlock->SetTarget(topBlock->Next()); + topBlock->SetTargetEdge(newEdge); topBlock->SetFlags(BBF_NONE_QUIRK); - FlowEdge* const oldEdge = fgRemoveRefPred(bottomBlock, topBlock); - fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, oldEdge); InlineeCompiler->fgLastBB->SetNext(bottomBlock); // diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 18b9ea157b86dc..a0065dd2c6af39 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -132,7 +132,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->RemoveFlags(BBF_REMOVED | BBF_INTERNAL); block->SetFlags(BBF_IMPORTED); - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); block->bbSetRunRarely(); } else @@ -623,8 +623,8 @@ PhaseStatus Compiler::fgPostImportationCleanup() // What follows is similar to fgNewBBInRegion, but we can't call that // here as the oldTryEntry is no longer in the main bb list. - newTryEntry = BasicBlock::New(this, BBJ_ALWAYS, tryEntryPrev->Next()); - newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL | BBF_NONE_QUIRK); + newTryEntry = BasicBlock::New(this); + newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL); newTryEntry->bbRefs = 0; // Set the right EH region indices on this new block. @@ -643,12 +643,14 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. 
if (bbIsHandlerBeg(newTryEntry->Next())) { - newTryEntry->SetKindAndTarget(BBJ_THROW); + newTryEntry->SetKindAndTargetEdge(BBJ_THROW); } else { FlowEdge* const newEdge = fgAddRefPred(newTryEntry->Next(), newTryEntry); newEdge->setLikelihood(1.0); + newTryEntry->SetFlags(BBF_NONE_QUIRK); + newTryEntry->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n", @@ -774,7 +776,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() fromBlock->SetFlags(BBF_INTERNAL); newBlock->RemoveFlags(BBF_DONT_REMOVE); addedBlocks++; - FlowEdge* const normalTryEntryEdge = fgGetPredForBlock(newBlock, fromBlock); + FlowEdge* const normalTryEntryEdge = fromBlock->GetTargetEdge(); GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT); GenTree* const compareEntryStateToZero = @@ -782,9 +784,9 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->SetCond(toBlock, newBlock); FlowEdge* const osrTryEntryEdge = fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); + fromBlock->SetCond(osrTryEntryEdge, normalTryEntryEdge); // Not sure what the correct edge likelihoods are just yet; // for now we'll say the OSR path is the likely one. 
@@ -833,9 +835,9 @@ PhaseStatus Compiler::fgPostImportationCleanup() if (entryJumpTarget != osrEntry) { - fgFirstBB->SetTarget(entryJumpTarget); FlowEdge* const oldEdge = fgRemoveRefPred(osrEntry, fgFirstBB); - fgAddRefPred(entryJumpTarget, fgFirstBB, oldEdge); + FlowEdge* const newEdge = fgAddRefPred(entryJumpTarget, fgFirstBB, oldEdge); + fgFirstBB->SetTargetEdge(newEdge); JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB " via step blocks.\n", @@ -1286,24 +1288,31 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: - block->SetKindAndTarget(bNext->GetKind(), bNext->GetTarget()); + { + /* Update the predecessor list for bNext's target */ + FlowEdge* const targetEdge = bNext->GetTargetEdge(); + fgReplacePred(targetEdge, block); - /* Update the predecessor list for 'bNext->bbTarget' */ - fgReplacePred(bNext->GetTarget(), bNext, block); + block->SetKindAndTargetEdge(bNext->GetKind(), targetEdge); break; + } case BBJ_COND: - block->SetCond(bNext->GetTrueTarget(), bNext->GetFalseTarget()); - - /* Update the predecessor list for 'bNext->bbTrueTarget' */ - fgReplacePred(bNext->GetTrueTarget(), bNext, block); + { + /* Update the predecessor list for bNext's true target */ + FlowEdge* const trueEdge = bNext->GetTrueEdge(); + FlowEdge* const falseEdge = bNext->GetFalseEdge(); + fgReplacePred(trueEdge, block); - /* Update the predecessor list for 'bNext->bbFalseTarget' if it is different than 'bNext->bbTrueTarget' */ - if (!bNext->TrueTargetIs(bNext->GetFalseTarget())) + /* Update the predecessor list for bNext's false target if it is different from the true target */ + if (trueEdge != falseEdge) { - fgReplacePred(bNext->GetFalseTarget(), bNext, block); + fgReplacePred(falseEdge, block); } + + block->SetCond(trueEdge, falseEdge); break; + } case BBJ_EHFINALLYRET: block->SetEhf(bNext->GetEhfTargets()); @@ -1557,23 +1566,25 @@ bool 
Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc } // Optimize the JUMP to empty unconditional JUMP to go to the new target + FlowEdge* const newEdge = fgAddRefPred(bDest->GetTarget(), block, fgRemoveRefPred(bDest, block)); + switch (block->GetKind()) { case BBJ_ALWAYS: case BBJ_CALLFINALLYRET: - block->SetTarget(bDest->GetTarget()); + block->SetTargetEdge(newEdge); break; case BBJ_COND: if (block->TrueTargetIs(bDest)) { assert(!block->FalseTargetIs(bDest)); - block->SetTrueTarget(bDest->GetTarget()); + block->SetTrueEdge(newEdge); } else { assert(block->FalseTargetIs(bDest)); - block->SetFalseTarget(bDest->GetTarget()); + block->SetFalseEdge(newEdge); } break; @@ -1581,8 +1592,6 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc unreached(); } - fgAddRefPred(bDest->GetTarget(), block, fgRemoveRefPred(bDest, block)); - return true; } return false; @@ -1642,7 +1651,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) else { // TODO-NoFallThrough: Once BBJ_COND blocks have pointers to their false branches, - // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block->bbTarget. + // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block's target. 
if (bPrev->bbFallsThrough() && !block->JumpsToNext()) { break; @@ -1998,10 +2007,10 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } // Change the switch jump into a BBJ_ALWAYS - block->SetKindAndTarget(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]); for (unsigned i = 1; i < jmpCnt; ++i) { - fgRemoveRefPred(jmpTab[i]->getDestinationBlock(), block); + fgRemoveRefPred(jmpTab[i]); } return true; @@ -2060,9 +2069,9 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) fgSetStmtSeq(switchStmt); } - BasicBlock* const trueTarget = block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock(); - BasicBlock* const falseTarget = block->GetSwitchTargets()->bbsDstTab[1]->getDestinationBlock(); - block->SetCond(trueTarget, falseTarget); + FlowEdge* const trueEdge = block->GetSwitchTargets()->bbsDstTab[0]; + FlowEdge* const falseEdge = block->GetSwitchTargets()->bbsDstTab[1]; + block->SetCond(trueEdge, falseEdge); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -2475,11 +2484,9 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // fgRemoveRefPred(target, block); - FlowEdge* const targetTrueEdge = fgGetPredForBlock(target->GetTrueTarget(), target); - FlowEdge* const targetFalseEdge = fgGetPredForBlock(target->GetFalseTarget(), target); - block->SetCond(target->GetTrueTarget(), target->GetFalseTarget()); - fgAddRefPred(block->GetTrueTarget(), block, targetTrueEdge); - fgAddRefPred(block->GetFalseTarget(), block, targetFalseEdge); + FlowEdge* const trueEdge = fgAddRefPred(target->GetTrueTarget(), block, target->GetTrueEdge()); + FlowEdge* const falseEdge = fgAddRefPred(target->GetFalseTarget(), block, target->GetFalseEdge()); + block->SetCond(trueEdge, falseEdge); JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), modified " FMT_BB "\n", block->bbNum, target->bbNum, block->bbNum); @@ 
-2509,7 +2516,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* void Compiler::fgRemoveConditionalJump(BasicBlock* block) { assert(block->KindIs(BBJ_COND)); - assert(block->TrueTargetIs(block->GetFalseTarget())); + assert(block->TrueEdgeIs(block->GetFalseEdge())); BasicBlock* target = block->GetTrueTarget(); @@ -2629,7 +2636,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) * block are counted twice so we have to remove one of them */ noway_assert(target->countOfInEdges() > 1); - fgRemoveRefPred(target, block); + fgRemoveRefPred(block->GetTargetEdge()); } //------------------------------------------------------------- @@ -2889,13 +2896,11 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->CopyFlags(bDest, BBF_COPY_PROPAGATE); - bJump->SetCond(bDestNormalTarget, bJump->Next()); - /* Update bbRefs and bbPreds */ // bJump now falls through into the next block // - fgAddRefPred(bJump->GetFalseTarget(), bJump); + FlowEdge* const falseEdge = fgAddRefPred(bJump->Next(), bJump); // bJump no longer jumps to bDest // @@ -2903,7 +2908,9 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // bJump now jumps to bDest's normal jump target // - fgAddRefPred(bDestNormalTarget, bJump); + FlowEdge* const trueEdge = fgAddRefPred(bDestNormalTarget, bJump); + + bJump->SetCond(trueEdge, falseEdge); if (weightJump > 0) { @@ -3049,11 +3056,9 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. 
// - block->SetCond(dominantTarget, newBlock); FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; - assert(blockToNewBlockEdge->getSourceBlock() == block); - assert(blockToTargetEdge->getSourceBlock() == block); + block->SetCond(blockToTargetEdge, blockToNewBlockEdge); // Update profile data // @@ -3522,11 +3527,11 @@ bool Compiler::fgReorderBlocks(bool useProfile) assert(test->OperIsConditionalJump()); test->AsOp()->gtOp1 = gtReverseCond(test->AsOp()->gtOp1); - BasicBlock* newFalseTarget = block->GetTrueTarget(); - BasicBlock* newTrueTarget = block->GetFalseTarget(); - block->SetTrueTarget(newTrueTarget); - block->SetFalseTarget(newFalseTarget); - assert(block->CanRemoveJumpToTarget(newFalseTarget, this)); + FlowEdge* const newFalseEdge = block->GetTrueEdge(); + FlowEdge* const newTrueEdge = block->GetFalseEdge(); + block->SetTrueEdge(newTrueEdge); + block->SetFalseEdge(newFalseEdge); + assert(block->CanRemoveJumpToTarget(block->GetFalseTarget(), this)); } else { @@ -4583,10 +4588,10 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(condTest->gtOper == GT_JTRUE); condTest->AsOp()->gtOp1 = gtReverseCond(condTest->AsOp()->gtOp1); - BasicBlock* trueTarget = bPrev->GetTrueTarget(); - BasicBlock* falseTarget = bPrev->GetFalseTarget(); - bPrev->SetTrueTarget(falseTarget); - bPrev->SetFalseTarget(trueTarget); + FlowEdge* const trueEdge = bPrev->GetTrueEdge(); + FlowEdge* const falseEdge = bPrev->GetFalseEdge(); + bPrev->SetTrueEdge(falseEdge); + bPrev->SetFalseEdge(trueEdge); // may need to rethread // @@ -4987,13 +4992,15 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh if (bDest->KindIs(BBJ_COND) && !bDest->NextIs(bDest->GetFalseTarget())) { BasicBlock* const bDestFalseTarget = bDest->GetFalseTarget(); - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true, bDestFalseTarget); - bDest->SetFalseTarget(bFixup); + BasicBlock* const 
bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); bFixup->inheritWeight(bDestFalseTarget); fgRemoveRefPred(bDestFalseTarget, bDest); - fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestFalseTarget, bFixup); + FlowEdge* const falseEdge = fgAddRefPred(bFixup, bDest); + bDest->SetFalseEdge(falseEdge); + + FlowEdge* const newEdge = fgAddRefPred(bDestFalseTarget, bFixup); + bFixup->SetTargetEdge(newEdge); } } } @@ -5021,10 +5028,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh } // Optimize the Conditional JUMP to go to the new target - block->SetTrueTarget(bNext->GetTarget()); - block->SetFalseTarget(bNext->Next()); - - fgAddRefPred(bNext->GetTarget(), block, fgRemoveRefPred(bNext->GetTarget(), bNext)); + fgRemoveRefPred(block->GetFalseEdge()); + fgRemoveRefPred(bNext->GetTargetEdge()); + block->SetFalseEdge(block->GetTrueEdge()); + FlowEdge* const newEdge = fgAddRefPred(bNext->GetTarget(), block, bNext->GetTargetEdge()); + block->SetTrueEdge(newEdge); /* Unlink bNext from the BasicBlock list; note that we can @@ -5036,7 +5044,6 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh to the final target by the time we're done here. */ - fgRemoveRefPred(bNext, block); fgUnlinkBlockForRemoval(bNext); /* Mark the block as removed */ @@ -5669,13 +5676,13 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->SetKindAndTarget(BBJ_ALWAYS, crossJumpTarget); - if (commSucc != nullptr) { fgRemoveRefPred(commSucc, predBlock); } - fgAddRefPred(crossJumpTarget, predBlock); + + FlowEdge* const newEdge = fgAddRefPred(crossJumpTarget, predBlock); + predBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } // We changed things @@ -5844,7 +5851,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. 
- if (!block->KindIs(BBJ_COND) || block->TrueTargetIs(block->GetFalseTarget())) + if (!block->KindIs(BBJ_COND) || block->TrueEdgeIs(block->GetFalseEdge())) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index a1c9da833fd3f4..1499042cb8b42e 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -507,12 +507,12 @@ void BlockCountInstrumentor::RelocateProbes() // if (criticalPreds.Height() > 0) { - BasicBlock* const intermediary = - m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block); + BasicBlock* const intermediary = m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); intermediary->SetFlags(BBF_IMPORTED | BBF_MARKED | BBF_NONE_QUIRK); intermediary->inheritWeight(block); FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary); newEdge->setLikelihood(1.0); + intermediary->SetTargetEdge(newEdge); SetModifiedFlow(); while (criticalPreds.Height() > 0) @@ -1679,12 +1679,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // if (criticalPreds.Height() > 0) { - BasicBlock* intermediary = - m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block); + BasicBlock* intermediary = m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); intermediary->SetFlags(BBF_IMPORTED | BBF_NONE_QUIRK); intermediary->inheritWeight(block); FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary); newEdge->setLikelihood(1.0); + intermediary->SetTargetEdge(newEdge); NewRelocatedProbe(intermediary, probe->source, probe->target, &leader); SetModifiedFlow(); @@ -4033,7 +4033,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // // This can happen because bome BBJ_LEAVE blocks may have been missed during // our spanning tree walk since we don't know where all the finallies can return - // to just yet (specially, in WalkSpanningTree, we may not add the bbTarget of + // to 
just yet (specially, in WalkSpanningTree, we may not add the target of // a BBJ_LEAVE to the worklist). // // Worst case those missed blocks dominate other blocks so we can't limit diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index e315e33015e138..7683f3b8d07521 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -190,7 +190,7 @@ void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block) //------------------------------------------------------------------------ // AssignLikelihoodJump: update edge likelihood for a block that always -// transfers control to bbTarget +// transfers control to its target block // // Arguments; // block -- block in question diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index c393648c843ef4..b621739a41cddb 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -266,17 +266,11 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // I want to create: // top -> poll -> bottom (lexically) // so that we jump over poll to get to bottom. - BasicBlock* top = block; - BBKinds oldJumpKind = top->GetKind(); + BasicBlock* top = block; BasicBlock* poll = fgNewBBafter(BBJ_ALWAYS, top, true); bottom = fgNewBBafter(top->GetKind(), poll, true); - poll->SetTarget(bottom); - assert(poll->JumpsToNext()); - - bottom->TransferTarget(top); - // Update block flags const BasicBlockFlags originalFlags = top->GetFlagsRaw() | BBF_GC_SAFE_POINT; @@ -300,7 +294,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } // Remove the last statement from Top and add it to Bottom if necessary. 
- if ((oldJumpKind == BBJ_COND) || (oldJumpKind == BBJ_RETURN) || (oldJumpKind == BBJ_THROW)) + if (top->KindIs(BBJ_COND, BBJ_RETURN, BBJ_THROW)) { Statement* stmt = top->firstStmt(); while (stmt->GetNextStmt() != nullptr) @@ -364,38 +358,47 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } #endif - top->SetCond(bottom, poll); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. - fgAddRefPred(bottom, poll); - fgAddRefPred(bottom, top); - fgAddRefPred(poll, top); + FlowEdge* const trueEdge = fgAddRefPred(bottom, top); + FlowEdge* const falseEdge = fgAddRefPred(poll, top); + + FlowEdge* const newEdge = fgAddRefPred(bottom, poll); + poll->SetTargetEdge(newEdge); + assert(poll->JumpsToNext()); // Replace Top with Bottom in the predecessor list of all outgoing edges from Bottom // (1 for unconditional branches, 2 for conditional branches, N for switches). - switch (oldJumpKind) + switch (top->GetKind()) { case BBJ_RETURN: case BBJ_THROW: // no successors break; + case BBJ_COND: // replace predecessor in true/false successors. noway_assert(!bottom->IsLast()); - fgReplacePred(bottom->GetFalseTarget(), top, bottom); - fgReplacePred(bottom->GetTrueTarget(), top, bottom); + fgReplacePred(top->GetFalseEdge(), bottom); + fgReplacePred(top->GetTrueEdge(), bottom); break; case BBJ_ALWAYS: case BBJ_CALLFINALLY: - fgReplacePred(bottom->GetTarget(), top, bottom); + fgReplacePred(top->GetTargetEdge(), bottom); break; + case BBJ_SWITCH: NO_WAY("SWITCH should be a call rather than an inlined poll."); break; + default: NO_WAY("Unknown block type for updating predecessor lists."); + break; } + bottom->TransferTarget(top); + top->SetCond(trueEdge, falseEdge); + if (compCurBB == top) { compCurBB = bottom; @@ -1625,9 +1628,9 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. 
- block->SetKindAndTarget(BBJ_ALWAYS, genReturnBB); FlowEdge* const newEdge = fgAddRefPred(genReturnBB, block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); #ifdef DEBUG if (verbose) @@ -2097,9 +2100,9 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->SetKindAndTarget(BBJ_ALWAYS, constReturnBlock); FlowEdge* const newEdge = comp->fgAddRefPred(constReturnBlock, returnBlock); newEdge->setLikelihood(1.0); + returnBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Remove GT_RETURN since constReturnBlock returns the constant. assert(returnBlock->lastStmt()->GetRootNode()->OperIs(GT_RETURN)); @@ -2758,15 +2761,14 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) /* Allocate a new basic block */ - BasicBlock* newHead = BasicBlock::New(this, BBJ_ALWAYS, block); + BasicBlock* newHead = BasicBlock::New(this); newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); newHead->inheritWeight(block); newHead->bbRefs = 0; fgInsertBBbefore(block, newHead); // insert the new block in the block list - assert(newHead->JumpsToNext()); - fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH - // block. + fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH + // block. // Distribute the pred list between newHead and block. Incoming edges coming from outside // the handler go to the prolog. 
Edges coming from with the handler are back-edges, and @@ -2782,11 +2784,13 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) switch (predBlock->GetKind()) { case BBJ_CALLFINALLY: + { noway_assert(predBlock->TargetIs(block)); - predBlock->SetTarget(newHead); - fgRemoveRefPred(block, predBlock); - fgAddRefPred(newHead, predBlock); + fgRemoveRefPred(predBlock->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(newHead, predBlock); + predBlock->SetTargetEdge(newEdge); break; + } default: // The only way into the handler is via a BBJ_CALLFINALLY (to a finally handler), or @@ -2797,10 +2801,10 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) } } - assert(nullptr == fgGetPredForBlock(block, newHead)); - fgAddRefPred(block, newHead); - - assert(newHead->HasFlag(BBF_INTERNAL)); + assert(fgGetPredForBlock(block, newHead) == nullptr); + FlowEdge* const newEdge = fgAddRefPred(block, newHead); + newHead->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); + assert(newHead->JumpsToNext()); } //------------------------------------------------------------------------ @@ -3374,7 +3378,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks() assert((add->acdKind == SCK_FAIL_FAST) || (bbThrowIndex(srcBlk) == add->acdData)); assert(add->acdKind != SCK_NONE); - BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk, /* jumpDest */ nullptr, + BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk, /* runRarely */ true, /* insertAtEnd */ true); // Update the descriptor @@ -3438,7 +3442,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks() #endif // DEBUG // Mark the block as added by the compiler and not removable by future flow - // graph optimizations. Note that no bbTarget points to these blocks. + // graph optimizations. Note that no target block points to these blocks. 
// newBlk->SetFlags(BBF_IMPORTED | BBF_DONT_REMOVE); diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index adab37fc0835c9..30d8b2dbd42cb9 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -194,7 +194,7 @@ inline AssertionIndex GetAssertionIndex(unsigned index) class AssertionInfo { - // true if the assertion holds on the bbNext edge instead of the bbTarget edge (for GT_JTRUE nodes) + // true if the assertion holds on the false edge instead of the true edge (for GT_JTRUE nodes) unsigned short m_isNextEdgeAssertion : 1; // 1-based index of the assertion unsigned short m_assertionIndex : 15; diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 39f7b8f09d2764..d6a9ac01df9694 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -319,21 +319,11 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // Fallback basic block GenTree* fallbackValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), call); - BasicBlock* fallbackBb = - fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, nullcheckBb->Next(), true); - - assert(fallbackBb->JumpsToNext()); - fallbackBb->SetFlags(BBF_NONE_QUIRK); - - // Set nullcheckBb's true jump target - nullcheckBb->SetTrueTarget(fallbackBb); + BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, true); // Fast-path basic block GenTree* fastpathValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), fastPathValueClone); - BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo, block); - - // Set nullcheckBb's false jump target - nullcheckBb->SetFalseTarget(fastPathBb); + BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo); BasicBlock* sizeCheckBb = nullptr; if (needsSizeCheck) @@ -375,42 +365,60 @@ bool 
Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm GenTree* jtrue = gtNewOperNode(GT_JTRUE, TYP_VOID, sizeCheck); // sizeCheckBb fails - jump to fallbackBb - sizeCheckBb = fgNewBBFromTreeAfter(BBJ_COND, prevBb, jtrue, debugInfo, fallbackBb); - sizeCheckBb->SetFalseTarget(nullcheckBb); + sizeCheckBb = fgNewBBFromTreeAfter(BBJ_COND, prevBb, jtrue, debugInfo); } // // Update preds in all new blocks // fgRemoveRefPred(block, prevBb); - fgAddRefPred(block, fastPathBb); - fgAddRefPred(block, fallbackBb); + + { + FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb); + fastPathBb->SetTargetEdge(newEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb); + fallbackBb->SetTargetEdge(newEdge); + assert(fallbackBb->JumpsToNext()); + fallbackBb->SetFlags(BBF_NONE_QUIRK); + } + assert(prevBb->KindIs(BBJ_ALWAYS)); if (needsSizeCheck) { // sizeCheckBb is the first block after prevBb - prevBb->SetTarget(sizeCheckBb); - fgAddRefPred(sizeCheckBb, prevBb); + FlowEdge* const newEdge = fgAddRefPred(sizeCheckBb, prevBb); + prevBb->SetTargetEdge(newEdge); + // sizeCheckBb flows into nullcheckBb in case if the size check passes - fgAddRefPred(nullcheckBb, sizeCheckBb); + { + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, sizeCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(nullcheckBb, sizeCheckBb); + sizeCheckBb->SetTrueEdge(trueEdge); + sizeCheckBb->SetFalseEdge(falseEdge); + } + // fallbackBb is reachable from both nullcheckBb and sizeCheckBb - fgAddRefPred(fallbackBb, nullcheckBb); - fgAddRefPred(fallbackBb, sizeCheckBb); // fastPathBb is only reachable from successful nullcheckBb - fgAddRefPred(fastPathBb, nullcheckBb); } else { // nullcheckBb is the first block after prevBb - prevBb->SetTarget(nullcheckBb); - fgAddRefPred(nullcheckBb, prevBb); + FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, prevBb); + prevBb->SetTargetEdge(newEdge); + // No size check, nullcheckBb jumps to fast path - fgAddRefPred(fastPathBb, 
nullcheckBb); // fallbackBb is only reachable from nullcheckBb (jump destination) - fgAddRefPred(fallbackBb, nullcheckBb); } + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fastPathBb, nullcheckBb); + nullcheckBb->SetTrueEdge(trueEdge); + nullcheckBb->SetFalseEdge(falseEdge); + // // Re-distribute weights (see '[weight: X]' on the diagrams above) // TODO: consider marking fallbackBb as rarely-taken @@ -699,11 +707,10 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // fallbackBb GenTree* fallbackValueDef = gtNewStoreLclVarNode(finalLclNum, slowHelper); - BasicBlock* fallbackBb = - fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, block, true); + BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, true); GenTree* fastPathValueDef = gtNewStoreLclVarNode(finalLclNum, gtCloneExpr(finalLcl)); - BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, block, true); + BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true); *callUse = finalLcl; @@ -713,14 +720,20 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // // Update preds in all new blocks // - fgAddRefPred(fallbackBb, tlsRootNullCondBB); - fgAddRefPred(fastPathBb, tlsRootNullCondBB); + FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, tlsRootNullCondBB); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, tlsRootNullCondBB); + tlsRootNullCondBB->SetTrueEdge(trueEdge); + tlsRootNullCondBB->SetFalseEdge(falseEdge); - fgAddRefPred(block, fallbackBb); - fgAddRefPred(block, fastPathBb); + { + FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb); + fallbackBb->SetTargetEdge(newEdge); + } - tlsRootNullCondBB->SetTrueTarget(fastPathBb); - tlsRootNullCondBB->SetFalseTarget(fallbackBb); + { + FlowEdge* const 
newEdge = fgAddRefPred(block, fastPathBb); + fastPathBb->SetTargetEdge(newEdge); + } // Inherit the weights block->inheritWeight(prevBb); @@ -730,9 +743,9 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // fallback will just execute first time fallbackBb->bbSetRunRarely(); - fgRemoveRefPred(block, prevBb); - fgAddRefPred(tlsRootNullCondBB, prevBb); - prevBb->SetTarget(tlsRootNullCondBB); + fgRemoveRefPred(prevBb->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(tlsRootNullCondBB, prevBb); + prevBb->SetTargetEdge(newEdge); // All blocks are expected to be in the same EH region assert(BasicBlock::sameEHRegion(prevBb, block)); @@ -1056,7 +1069,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* // fallbackBb GenTree* fallbackValueDef = gtNewStoreLclVarNode(threadStaticBlockLclNum, call); BasicBlock* fallbackBb = - fgNewBBFromTreeAfter(BBJ_ALWAYS, threadStaticBlockNullCondBB, fallbackValueDef, debugInfo, block, true); + fgNewBBFromTreeAfter(BBJ_ALWAYS, threadStaticBlockNullCondBB, fallbackValueDef, debugInfo, true); // fastPathBb if (isGCThreadStatic) @@ -1071,32 +1084,42 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* GenTree* fastPathValueDef = gtNewStoreLclVarNode(threadStaticBlockLclNum, gtCloneExpr(threadStaticBlockBaseLclValueUse)); - BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, block, true); - - // Set maxThreadStaticBlocksCondBB's jump targets - maxThreadStaticBlocksCondBB->SetTrueTarget(fallbackBb); - maxThreadStaticBlocksCondBB->SetFalseTarget(threadStaticBlockNullCondBB); - - // Set threadStaticBlockNullCondBB's jump targets - threadStaticBlockNullCondBB->SetTrueTarget(fastPathBb); - threadStaticBlockNullCondBB->SetFalseTarget(fallbackBb); + BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true); // // Update preds in all new blocks // 
assert(prevBb->KindIs(BBJ_ALWAYS)); - prevBb->SetTarget(maxThreadStaticBlocksCondBB); - fgRemoveRefPred(block, prevBb); - fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); + fgRemoveRefPred(prevBb->GetTargetEdge()); - fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); - fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); + { + FlowEdge* const newEdge = fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); + prevBb->SetTargetEdge(newEdge); + } + + { + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); + FlowEdge* const falseEdge = fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); + maxThreadStaticBlocksCondBB->SetTrueEdge(trueEdge); + maxThreadStaticBlocksCondBB->SetFalseEdge(falseEdge); + } - fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB); - fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB); + { + FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB); + threadStaticBlockNullCondBB->SetTrueEdge(trueEdge); + threadStaticBlockNullCondBB->SetFalseEdge(falseEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb); + fastPathBb->SetTargetEdge(newEdge); + } - fgAddRefPred(block, fastPathBb); - fgAddRefPred(block, fallbackBb); + { + FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb); + fallbackBb->SetTargetEdge(newEdge); + } // Inherit the weights block->inheritWeight(prevBb); @@ -1376,14 +1399,12 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G GenTree* isInitedCmp = gtNewOperNode(GT_EQ, TYP_INT, isInitedActualValueNode, isInitedExpectedValue); isInitedCmp->gtFlags |= GTF_RELOP_JMP_USED; BasicBlock* isInitedBb = - fgNewBBFromTreeAfter(BBJ_COND, prevBb, gtNewOperNode(GT_JTRUE, TYP_VOID, isInitedCmp), debugInfo, block); + fgNewBBFromTreeAfter(BBJ_COND, prevBb, gtNewOperNode(GT_JTRUE, TYP_VOID, isInitedCmp), debugInfo); 
// Fallback basic block // TODO-CQ: for JIT we can replace the original call with CORINFO_HELP_INITCLASS // that only accepts a single argument - BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, isInitedBb->Next(), true); - assert(helperCallBb->JumpsToNext()); - helperCallBb->SetFlags(BBF_NONE_QUIRK); + BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, true); GenTree* replacementNode = nullptr; if (retValKind == SHRV_STATIC_BASE_PTR) @@ -1443,22 +1464,32 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // // Unlink block and prevBb - fgRemoveRefPred(block, prevBb); + fgRemoveRefPred(prevBb->GetTargetEdge()); - // Block has two preds now: either isInitedBb or helperCallBb - fgAddRefPred(block, isInitedBb); - fgAddRefPred(block, helperCallBb); + { + // Block has two preds now: either isInitedBb or helperCallBb + FlowEdge* const newEdge = fgAddRefPred(block, helperCallBb); + helperCallBb->SetTargetEdge(newEdge); + assert(helperCallBb->JumpsToNext()); + helperCallBb->SetFlags(BBF_NONE_QUIRK); + } - // prevBb always flows into isInitedBb - assert(prevBb->KindIs(BBJ_ALWAYS)); - prevBb->SetTarget(isInitedBb); - prevBb->SetFlags(BBF_NONE_QUIRK); - assert(prevBb->JumpsToNext()); - fgAddRefPred(isInitedBb, prevBb); + { + // prevBb always flows into isInitedBb + assert(prevBb->KindIs(BBJ_ALWAYS)); + FlowEdge* const newEdge = fgAddRefPred(isInitedBb, prevBb); + prevBb->SetTargetEdge(newEdge); + prevBb->SetFlags(BBF_NONE_QUIRK); + assert(prevBb->JumpsToNext()); + } - // Both fastPathBb and helperCallBb have a single common pred - isInitedBb - isInitedBb->SetFalseTarget(helperCallBb); - fgAddRefPred(helperCallBb, isInitedBb); + { + // Both fastPathBb and helperCallBb have a single common pred - isInitedBb + FlowEdge* const trueEdge = fgAddRefPred(block, isInitedBb); + FlowEdge* const falseEdge = fgAddRefPred(helperCallBb, isInitedBb); + 
isInitedBb->SetTrueEdge(trueEdge); + isInitedBb->SetFalseEdge(falseEdge); + } // // Re-distribute weights @@ -1687,7 +1718,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // // Block 1: lengthCheckBb (we check that dstLen < srcLen) // - BasicBlock* lengthCheckBb = fgNewBBafter(BBJ_COND, prevBb, true, block); + BasicBlock* lengthCheckBb = fgNewBBafter(BBJ_COND, prevBb, true); lengthCheckBb->SetFlags(BBF_INTERNAL); // Set bytesWritten -1 by default, if the fast path is not taken we'll return it as the result. @@ -1709,9 +1740,8 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // In theory, we could just emit the const U8 data to the data section and use GT_BLK here // but that would be a bit less efficient since we would have to load the data from memory. // - BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true, lengthCheckBb->Next()); - assert(fastpathBb->JumpsToNext()); - fastpathBb->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); + BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true); + fastpathBb->SetFlags(BBF_INTERNAL); // The widest type we can use for loads const var_types maxLoadType = roundDownMaxType(srcLenU8); @@ -1764,19 +1794,32 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // Update preds in all new blocks // // block is no longer a predecessor of prevBb - fgRemoveRefPred(block, prevBb); - // prevBb flows into lengthCheckBb - assert(prevBb->KindIs(BBJ_ALWAYS)); - prevBb->SetTarget(lengthCheckBb); - prevBb->SetFlags(BBF_NONE_QUIRK); - assert(prevBb->JumpsToNext()); - fgAddRefPred(lengthCheckBb, prevBb); - // lengthCheckBb has two successors: block and fastpathBb - lengthCheckBb->SetFalseTarget(fastpathBb); - fgAddRefPred(fastpathBb, lengthCheckBb); - fgAddRefPred(block, lengthCheckBb); - // fastpathBb flows into block - fgAddRefPred(block, fastpathBb); + fgRemoveRefPred(prevBb->GetTargetEdge()); + + { + // prevBb flows 
into lengthCheckBb + assert(prevBb->KindIs(BBJ_ALWAYS)); + FlowEdge* const newEdge = fgAddRefPred(lengthCheckBb, prevBb); + prevBb->SetTargetEdge(newEdge); + prevBb->SetFlags(BBF_NONE_QUIRK); + assert(prevBb->JumpsToNext()); + } + + { + // lengthCheckBb has two successors: block and fastpathBb + FlowEdge* const trueEdge = fgAddRefPred(block, lengthCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fastpathBb, lengthCheckBb); + lengthCheckBb->SetTrueEdge(trueEdge); + lengthCheckBb->SetFalseEdge(falseEdge); + } + + { + // fastpathBb flows into block + FlowEdge* const newEdge = fgAddRefPred(block, fastpathBb); + fastpathBb->SetTargetEdge(newEdge); + assert(fastpathBb->JumpsToNext()); + fastpathBb->SetFlags(BBF_NONE_QUIRK); + } // // Re-distribute weights @@ -2344,8 +2387,8 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, // it's too late to rely on upstream phases to do this for us (unless we do optRepeat). GenTree* nullcheckOp = gtNewOperNode(GT_EQ, TYP_INT, tmpNode, gtNewNull()); nullcheckOp->gtFlags |= GTF_RELOP_JMP_USED; - BasicBlock* nullcheckBb = fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp), - debugInfo, lastBb, true); + BasicBlock* nullcheckBb = + fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp), debugInfo, true); // The very first statement in the whole expansion is to assign obj to tmp. // We assume it's the value we're going to return in most cases. 
@@ -2385,7 +2428,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, GenTree* mtCheck = gtNewOperNode(GT_EQ, TYP_INT, gtNewMethodTableLookup(gtCloneExpr(tmpNode)), expectedClsNode); mtCheck->gtFlags |= GTF_RELOP_JMP_USED; GenTree* jtrue = gtNewOperNode(GT_JTRUE, TYP_VOID, mtCheck); - typeChecksBbs[candidateId] = fgNewBBFromTreeAfter(BBJ_COND, lastTypeCheckBb, jtrue, debugInfo, lastBb, true); + typeChecksBbs[candidateId] = fgNewBBFromTreeAfter(BBJ_COND, lastTypeCheckBb, jtrue, debugInfo, true); lastTypeCheckBb = typeChecksBbs[candidateId]; // Insert the CSE node as the first statement in the block @@ -2407,13 +2450,13 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, { // fallback call is used only to throw InvalidCastException call->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; - fallbackBb = fgNewBBFromTreeAfter(BBJ_THROW, lastTypeCheckBb, call, debugInfo, nullptr, true); + fallbackBb = fgNewBBFromTreeAfter(BBJ_THROW, lastTypeCheckBb, call, debugInfo, true); } else if (typeCheckFailedAction == TypeCheckFailedAction::ReturnNull) { // if fallback call is not needed, we just assign null to tmp GenTree* fallbackTree = gtNewTempStore(tmpNum, gtNewNull()); - fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, lastBb, true); + fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true); } else { @@ -2424,7 +2467,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_CHKCASTCLASS_SPECIAL); } GenTree* fallbackTree = gtNewTempStore(tmpNum, call); - fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, lastBb, true); + fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true); } // Block 4: typeCheckSucceedBb @@ -2439,15 +2482,11 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, 
Statement* stmt, typeCheckSucceedTree = gtNewNothingNode(); } BasicBlock* typeCheckSucceedBb = - typeCheckNotNeeded ? nullptr - : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo, lastBb); + typeCheckNotNeeded ? nullptr : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo); // // Wire up the blocks // - firstBb->SetTarget(nullcheckBb); - nullcheckBb->SetTrueTarget(lastBb); - nullcheckBb->SetFalseTarget(typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]); // Tricky case - wire up multiple type check blocks (in most cases there is only one) for (int candidateId = 0; candidateId < numOfCandidates; candidateId++) @@ -2455,41 +2494,48 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, BasicBlock* curTypeCheckBb = typeChecksBbs[candidateId]; // All type checks jump straight to the typeCheckSucceedBb on success - curTypeCheckBb->SetTrueTarget(typeCheckSucceedBb); - fgAddRefPred(typeCheckSucceedBb, curTypeCheckBb); + FlowEdge* const trueEdge = fgAddRefPred(typeCheckSucceedBb, curTypeCheckBb); + curTypeCheckBb->SetTrueEdge(trueEdge); // or ... if (candidateId == numOfCandidates - 1) { // ... jump to the fallbackBb on last type check's failure - curTypeCheckBb->SetFalseTarget(fallbackBb); - fgAddRefPred(fallbackBb, curTypeCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, curTypeCheckBb); + curTypeCheckBb->SetFalseEdge(falseEdge); } else { // ... 
jump to the next type check on failure - curTypeCheckBb->SetFalseTarget(typeChecksBbs[candidateId + 1]); - fgAddRefPred(typeChecksBbs[candidateId + 1], curTypeCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(typeChecksBbs[candidateId + 1], curTypeCheckBb); + curTypeCheckBb->SetFalseEdge(falseEdge); } } - fgRemoveRefPred(lastBb, firstBb); - fgAddRefPred(nullcheckBb, firstBb); - fgAddRefPred(lastBb, nullcheckBb); - if (typeCheckNotNeeded) + fgRemoveRefPred(firstBb->GetTargetEdge()); + { - fgAddRefPred(fallbackBb, nullcheckBb); + FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, firstBb); + firstBb->SetTargetEdge(newEdge); } - else + { - fgAddRefPred(typeChecksBbs[0], nullcheckBb); - fgAddRefPred(lastBb, typeCheckSucceedBb); + FlowEdge* const trueEdge = fgAddRefPred(lastBb, nullcheckBb); + nullcheckBb->SetTrueEdge(trueEdge); } - if (!fallbackBb->KindIs(BBJ_THROW)) + if (typeCheckNotNeeded) + { + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, nullcheckBb); + nullcheckBb->SetFalseEdge(falseEdge); + } + else { - // if fallbackBb is BBJ_THROW then it has no successors - fgAddRefPred(lastBb, fallbackBb); + FlowEdge* const falseEdge = fgAddRefPred(typeChecksBbs[0], nullcheckBb); + nullcheckBb->SetFalseEdge(falseEdge); + + FlowEdge* const newEdge = fgAddRefPred(lastBb, typeCheckSucceedBb); + typeCheckSucceedBb->SetTargetEdge(newEdge); } // @@ -2521,12 +2567,18 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, } else { + assert(fallbackBb->KindIs(BBJ_ALWAYS)); + FlowEdge* const newEdge = fgAddRefPred(lastBb, fallbackBb); + fallbackBb->SetTargetEdge(newEdge); + fallbackBb->inheritWeightPercentage(lastTypeCheckBb, 100 - totalLikelihood); } + if (!typeCheckNotNeeded) { typeCheckSucceedBb->inheritWeightPercentage(typeChecksBbs[0], totalLikelihood); } + lastBb->inheritWeight(firstBb); // @@ -2537,12 +2589,12 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, assert(BasicBlock::sameEHRegion(firstBb, 
fallbackBb)); // call guarantees that obj is never null, we can drop the nullcheck - // by converting it to a BBJ_ALWAYS to typeCheckBb. + // by converting it to a BBJ_ALWAYS to its false target. if ((call->gtCallMoreFlags & GTF_CALL_M_CAST_OBJ_NONNULL) != 0) { fgRemoveStmt(nullcheckBb, nullcheckBb->lastStmt()); - nullcheckBb->SetKindAndTarget(BBJ_ALWAYS, typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]); - fgRemoveRefPred(lastBb, nullcheckBb); + fgRemoveRefPred(nullcheckBb->GetTrueEdge()); + nullcheckBb->SetKindAndTargetEdge(BBJ_ALWAYS, nullcheckBb->GetFalseEdge()); } // Bonus step: merge prevBb with nullcheckBb as they are likely to be mergeable diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index f9cb5af17925e7..1e6a573aa7b0de 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -739,7 +739,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. m_comp->fgRemoveAllRefPreds(m_startBlock->GetFalseTarget(), m_startBlock); - m_startBlock->SetKindAndTarget(BBJ_ALWAYS, m_startBlock->GetTrueTarget()); + m_startBlock->SetKindAndTargetEdge(BBJ_ALWAYS, m_startBlock->GetTrueEdge()); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index be84141270654a..68812364a456b6 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2020,13 +2020,14 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H { // Create extra basic block for the spill // - BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true, /* jumpDest */ hndBlk); + BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true); newBlk->SetFlags(BBF_IMPORTED | BBF_DONT_REMOVE | BBF_NONE_QUIRK); newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; FlowEdge* const newEdge = fgAddRefPred(hndBlk, newBlk); newEdge->setLikelihood(1.0); + 
newBlk->SetTargetEdge(newEdge); // Spill into a temp. unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); @@ -2493,7 +2494,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); block->SetFlags(BBF_FAILED_VERIFICATION); block->RemoveFlags(BBF_IMPORTED); @@ -4404,10 +4405,9 @@ void Compiler::impImportLeave(BasicBlock* block) callBlock = block; assert(callBlock->HasInitializedTarget()); - fgRemoveRefPred(callBlock->GetTarget(), callBlock); + fgRemoveRefPred(callBlock->GetTargetEdge()); - // callBlock will call the finally handler. Convert the BBJ_LEAVE to BBJ_CALLFINALLY. - callBlock->SetKindAndTarget(BBJ_CALLFINALLY, HBtab->ebdHndBeg); + // callBlock will call the finally handler. This will be set up later. if (endCatches) { @@ -4429,16 +4429,16 @@ void Compiler::impImportLeave(BasicBlock* block) // Calling the finally block. - // callBlock will call the finally handler - callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step, HBtab->ebdHndBeg); + // callBlock will call the finally handler. This will be set up later. + callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); // step's jump target shouldn't be set yet assert(!step->HasInitializedTarget()); // the previous call to a finally returns to this call (to the next finally in the chain) - step->SetTarget(callBlock); FlowEdge* const newEdge = fgAddRefPred(callBlock, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // The new block will inherit this block's weight. 
callBlock->inheritWeight(block); @@ -4486,10 +4486,9 @@ void Compiler::impImportLeave(BasicBlock* block) unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); - assert(callBlock->KindIs(BBJ_CALLFINALLY)); - assert(callBlock->TargetIs(HBtab->ebdHndBeg)); - FlowEdge* const newEdge = fgAddRefPred(callBlock->GetTarget(), callBlock); + FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock); newEdge->setLikelihood(1.0); + callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge); GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); @@ -4532,16 +4531,16 @@ void Compiler::impImportLeave(BasicBlock* block) // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. - BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step, leaveTarget); + BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->SetFlags(BBF_KEEP_BBJ_ALWAYS); // step's jump target shouldn't be set yet assert(!step->HasInitializedTarget()); - step->SetTarget(finalStep); { FlowEdge* const newEdge = fgAddRefPred(finalStep, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); } // The new block will inherit this block's weight. 
@@ -4574,6 +4573,7 @@ void Compiler::impImportLeave(BasicBlock* block) { FlowEdge* const newEdge = fgAddRefPred(leaveTarget, finalStep); newEdge->setLikelihood(1.0); + finalStep->SetTargetEdge(newEdge); } // Queue up the jump target for importing @@ -4690,10 +4690,11 @@ void Compiler::impImportLeave(BasicBlock* block) { fgRemoveRefPred(step->GetTarget(), step); } - step->SetTarget(exitBlock); // the previous step (maybe a call to a nested finally, or a nested catch - // exit) returns to this block + FlowEdge* const newEdge = fgAddRefPred(exitBlock, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // the previous step (maybe a call to a nested finally, or a nested catch + // exit) returns to this block // The new block will inherit this block's weight. exitBlock->inheritWeight(block); @@ -4728,17 +4729,16 @@ void Compiler::impImportLeave(BasicBlock* block) (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; - callBlock = - fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block, HBtab->ebdHndBeg); + callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. - fgRemoveRefPred(block->GetTarget(), block); - block->SetKindAndTarget(BBJ_ALWAYS, callBlock); + fgRemoveRefPred(block->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(callBlock, block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // The new block will inherit this block's weight. 
callBlock->inheritWeight(block); @@ -4758,10 +4758,9 @@ void Compiler::impImportLeave(BasicBlock* block) callBlock = block; assert(callBlock->HasInitializedTarget()); - fgRemoveRefPred(callBlock->GetTarget(), callBlock); + fgRemoveRefPred(callBlock->GetTargetEdge()); - // callBlock will call the finally handler. Convert the BBJ_LEAVE to BBJ_CALLFINALLY - callBlock->SetKindAndTarget(BBJ_CALLFINALLY, HBtab->ebdHndBeg); +// callBlock will call the finally handler. This will be set up later. #ifdef DEBUG if (verbose) @@ -4804,11 +4803,12 @@ void Compiler::impImportLeave(BasicBlock* block) BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); if (step == block) { - fgRemoveRefPred(step->GetTarget(), step); + fgRemoveRefPred(step->GetTargetEdge()); } - step->SetTarget(step2); + FlowEdge* const newEdge = fgAddRefPred(step2, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); step2->inheritWeight(block); step2->CopyFlags(block, BBF_RUN_RARELY); step2->SetFlags(BBF_IMPORTED); @@ -4841,16 +4841,16 @@ void Compiler::impImportLeave(BasicBlock* block) assert((step == block) || !step->HasInitializedTarget()); // callBlock will call the finally handler - callBlock = - fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step, HBtab->ebdHndBeg); + callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); if (step == block) { - fgRemoveRefPred(step->GetTarget(), step); + fgRemoveRefPred(step->GetTargetEdge()); } - step->SetTarget(callBlock); // the previous call to a finally returns to this call (to the next - // finally in the chain) + FlowEdge* const newEdge = fgAddRefPred(callBlock, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // the previous call to a finally returns to this call (to the next + // finally in the chain) // The new block will inherit this block's weight. 
callBlock->inheritWeight(block); @@ -4884,10 +4884,9 @@ void Compiler::impImportLeave(BasicBlock* block) } #endif - assert(callBlock->KindIs(BBJ_CALLFINALLY)); - assert(callBlock->TargetIs(HBtab->ebdHndBeg)); - FlowEdge* const newEdge = fgAddRefPred(callBlock->GetTarget(), callBlock); + FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock); newEdge->setLikelihood(1.0); + callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge); } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) @@ -4951,11 +4950,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == block) { - fgRemoveRefPred(step->GetTarget(), step); + fgRemoveRefPred(step->GetTargetEdge()); } - step->SetTarget(catchStep); + FlowEdge* const newEdge = fgAddRefPred(catchStep, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // The new block will inherit this block's weight. catchStep->inheritWeight(block); @@ -5008,9 +5008,9 @@ void Compiler::impImportLeave(BasicBlock* block) { fgRemoveRefPred(step->GetTarget(), step); } - step->SetTarget(leaveTarget); // this is the ultimate destination of the LEAVE FlowEdge* const newEdge = fgAddRefPred(leaveTarget, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // this is the ultimate destination of the LEAVE #ifdef DEBUG if (verbose) @@ -5069,10 +5069,11 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // will be treated as pair and handled correctly. 
if (block->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* dupBlock = BasicBlock::New(this, BBJ_CALLFINALLY, block->GetTarget()); + BasicBlock* dupBlock = BasicBlock::New(this); dupBlock->CopyFlags(block); - FlowEdge* const newEdge = fgAddRefPred(dupBlock->GetTarget(), dupBlock); + FlowEdge* const newEdge = fgAddRefPred(block->GetTarget(), dupBlock); newEdge->setLikelihood(1.0); + dupBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge); dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; @@ -5101,10 +5102,10 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) fgInitBBLookup(); - fgRemoveRefPred(block->GetTarget(), block); - block->SetKindAndTarget(BBJ_LEAVE, fgLookupBB(jmpAddr)); - FlowEdge* const newEdge = fgAddRefPred(block->GetTarget(), block); + fgRemoveRefPred(block->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(fgLookupBB(jmpAddr), block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_LEAVE, newEdge); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The @@ -5865,7 +5866,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. @@ -7181,14 +7182,14 @@ void Compiler::impImportBlockCode(BasicBlock* block) // We may have already modified `block`'s jump kind, if this is a re-importation. 
// bool jumpToNextOptimization = false; - if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget())) + if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge())) { JITDUMP(FMT_BB " always branches to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum, block->GetFalseTarget()->bbNum); fgRemoveRefPred(block->GetFalseTarget(), block); block->SetKind(BBJ_ALWAYS); - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense to + // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense to // set BBF_NONE_QUIRK block->SetFlags(BBF_NONE_QUIRK); @@ -7260,18 +7261,18 @@ void Compiler::impImportBlockCode(BasicBlock* block) { JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->GetTrueTarget()->bbNum); - fgRemoveRefPred(block->GetFalseTarget(), block); + fgRemoveRefPred(block->GetFalseEdge()); block->SetKind(BBJ_ALWAYS); } else { - // TODO-NoFallThrough: Update once bbFalseTarget can diverge from bbNext + // TODO-NoFallThrough: Update once false target can diverge from bbNext assert(block->NextIs(block->GetFalseTarget())); JITDUMP("\nThe block jumps to the next " FMT_BB "\n", block->Next()->bbNum); - fgRemoveRefPred(block->GetTrueTarget(), block); - block->SetKindAndTarget(BBJ_ALWAYS, block->Next()); + fgRemoveRefPred(block->GetTrueEdge()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense + // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense // to set BBF_NONE_QUIRK block->SetFlags(BBF_NONE_QUIRK); } @@ -7443,14 +7444,14 @@ void Compiler::impImportBlockCode(BasicBlock* block) // We may have already modified `block`'s jump kind, if this is a re-importation. 
// bool jumpToNextOptimization = false; - if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget())) + if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge())) { JITDUMP(FMT_BB " always branches to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum, block->GetFalseTarget()->bbNum); fgRemoveRefPred(block->GetFalseTarget(), block); block->SetKind(BBJ_ALWAYS); - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense to + // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense to // set BBF_NONE_QUIRK block->SetFlags(BBF_NONE_QUIRK); @@ -7535,7 +7536,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { // transform the basic block into a BBJ_ALWAYS - block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, curEdge); foundVal = true; } else diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index fbbe08d66fc3c6..0e98b7d44b472c 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -218,13 +218,12 @@ class IndirectCallTransformer // Arguments: // jumpKind - jump kind for the new basic block // insertAfter - basic block, after which compiler has to insert the new one. - // jumpDest - jump target for the new basic block. Defaults to nullptr. // // Return Value: // new basic block. 
- BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter, BasicBlock* jumpDest = nullptr) + BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter) { - BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true, jumpDest); + BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true); block->SetFlags(BBF_IMPORTED); return block; } @@ -272,32 +271,35 @@ class IndirectCallTransformer if (checkBlock != currBlock) { assert(currBlock->KindIs(BBJ_ALWAYS)); - currBlock->SetTarget(checkBlock); FlowEdge* const newEdge = compiler->fgAddRefPred(checkBlock, currBlock); newEdge->setLikelihood(1.0); + currBlock->SetTargetEdge(newEdge); } // checkBlock // Todo: get likelihoods right // assert(checkBlock->KindIs(BBJ_ALWAYS)); - checkBlock->SetCond(elseBlock, thenBlock); FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock); thenEdge->setLikelihood(0.5); FlowEdge* const elseEdge = compiler->fgAddRefPred(elseBlock, checkBlock); elseEdge->setLikelihood(0.5); + checkBlock->SetCond(elseEdge, thenEdge); // thenBlock - assert(thenBlock->TargetIs(remainderBlock)); { + assert(thenBlock->KindIs(BBJ_ALWAYS)); FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); newEdge->setLikelihood(1.0); + thenBlock->SetTargetEdge(newEdge); } // elseBlock { + assert(elseBlock->KindIs(BBJ_ALWAYS)); FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, elseBlock); newEdge->setLikelihood(1.0); + elseBlock->SetTargetEdge(newEdge); } } @@ -376,7 +378,7 @@ class IndirectCallTransformer { assert(checkIdx == 0); - checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock, currBlock->Next()); + checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock); checkBlock->SetFlags(BBF_NONE_QUIRK); GenTree* fatPointerMask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK); GenTree* fptrAddressCopy = compiler->gtCloneExpr(fptrAddress); @@ -395,7 +397,7 @@ class 
IndirectCallTransformer virtual void CreateThen(uint8_t checkIdx) { assert(remainderBlock != nullptr); - thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock, remainderBlock); + thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock); Statement* copyOfOriginalStmt = compiler->gtCloneStmt(stmt); compiler->fgInsertStmtAtEnd(thenBlock, copyOfOriginalStmt); } @@ -405,7 +407,7 @@ class IndirectCallTransformer // virtual void CreateElse() { - elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next()); + elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock); elseBlock->SetFlags(BBF_NONE_QUIRK); GenTree* fixedFptrAddress = GetFixedFptrAddress(); @@ -614,10 +616,12 @@ class IndirectCallTransformer weight_t checkLikelihoodWt = ((weight_t)checkLikelihood) / 100.0; // prevCheckBlock is expected to jump to this new check (if its type check doesn't succeed) - prevCheckBlock->SetCond(checkBlock, prevCheckBlock->Next()); + assert(prevCheckBlock->KindIs(BBJ_ALWAYS)); + assert(prevCheckBlock->JumpsToNext()); FlowEdge* const checkEdge = compiler->fgAddRefPred(checkBlock, prevCheckBlock); checkEdge->setLikelihood(checkLikelihoodWt); checkBlock->inheritWeightPercentage(currBlock, checkLikelihood); + prevCheckBlock->SetCond(checkEdge, prevCheckBlock->GetTargetEdge()); } // Find last arg with a side effect. 
All args with any effect @@ -1021,19 +1025,21 @@ class IndirectCallTransformer weight_t elseLikelihoodWt = max(1.0 - thenLikelihoodWt, 0.0); // thenBlock always jumps to remainderBlock - thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock, remainderBlock); + thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock); thenBlock->CopyFlags(currBlock, BBF_SPLIT_GAINED); thenBlock->inheritWeightPercentage(currBlock, thenLikelihood); // Also, thenBlock has a single pred - last checkBlock assert(checkBlock->KindIs(BBJ_ALWAYS)); - checkBlock->SetTarget(thenBlock); - checkBlock->SetFlags(BBF_NONE_QUIRK); - assert(checkBlock->JumpsToNext()); FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock); thenEdge->setLikelihood(thenLikelihoodWt); + checkBlock->SetTargetEdge(thenEdge); + checkBlock->SetFlags(BBF_NONE_QUIRK); + assert(checkBlock->JumpsToNext()); + FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); elseEdge->setLikelihood(elseLikelihoodWt); + thenBlock->SetTargetEdge(elseEdge); DevirtualizeCall(thenBlock, checkIdx); } @@ -1054,7 +1060,7 @@ class IndirectCallTransformer assert(elseLikelihood <= 100); weight_t elseLikelihoodDbl = ((weight_t)elseLikelihood) / 100.0; - elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next()); + elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock); elseBlock->CopyFlags(currBlock, BBF_SPLIT_GAINED); elseBlock->SetFlags(BBF_NONE_QUIRK); @@ -1062,9 +1068,11 @@ class IndirectCallTransformer // where we know the last check is always true (in case of "exact" GDV) if (!checkFallsThrough) { - checkBlock->SetCond(elseBlock, checkBlock->Next()); + assert(checkBlock->KindIs(BBJ_ALWAYS)); + assert(checkBlock->JumpsToNext()); FlowEdge* const checkEdge = compiler->fgAddRefPred(elseBlock, checkBlock); checkEdge->setLikelihood(elseLikelihoodDbl); + checkBlock->SetCond(checkEdge, checkBlock->GetTargetEdge()); } else { @@ -1077,6 +1085,7 @@ class IndirectCallTransformer 
// elseBlock always flows into remainderBlock FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, elseBlock); elseEdge->setLikelihood(1.0); + elseBlock->SetTargetEdge(elseEdge); // Remove everything related to inlining from the original call origCall->ClearInlineInfo(); @@ -1176,9 +1185,9 @@ class IndirectCallTransformer // Finally, rewire the cold block to jump to the else block, // not fall through to the check block. // - FlowEdge* const oldEdge = compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->SetKindAndTarget(BBJ_ALWAYS, elseBlock); - compiler->fgAddRefPred(elseBlock, coldBlock, oldEdge); + compiler->fgRemoveRefPred(coldBlock->GetTargetEdge()); + FlowEdge* const newEdge = compiler->fgAddRefPred(elseBlock, coldBlock, coldBlock->GetTargetEdge()); + coldBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } // When the current candidate hads sufficiently high likelihood, scan diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a4eacd9069db42..e7195b5c98cb2b 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1984,10 +1984,11 @@ bool Compiler::fgNormalizeEHCase1() { // ...then we want to insert an empty, non-removable block outside the try to be the new first block of the // handler. - BasicBlock* newHndStart = BasicBlock::New(this, BBJ_ALWAYS, handlerStart); + BasicBlock* newHndStart = BasicBlock::New(this); fgInsertBBbefore(handlerStart, newHndStart); FlowEdge* newEdge = fgAddRefPred(handlerStart, newHndStart); newEdge->setLikelihood(1.0); + newHndStart->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Handler begins have an extra implicit ref count. // BasicBlock::New has already handled this for newHndStart. @@ -2154,11 +2155,12 @@ bool Compiler::fgNormalizeEHCase2() // We've got multiple 'try' blocks starting at the same place! // Add a new first 'try' block for 'ehOuter' that will be outside 'eh'. 
- BasicBlock* newTryStart = BasicBlock::New(this, BBJ_ALWAYS, insertBeforeBlk); + BasicBlock* newTryStart = BasicBlock::New(this); newTryStart->bbRefs = 0; fgInsertBBbefore(insertBeforeBlk, newTryStart); FlowEdge* const newEdge = fgAddRefPred(insertBeforeBlk, newTryStart); newEdge->setLikelihood(1.0); + newTryStart->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // It's possible for a try to start at the beginning of a method. If so, we need // to adjust the implicit ref counts as we've just created a new first bb @@ -2346,7 +2348,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() // Create a new bb for the fake filter BasicBlock* handlerBb = eh->ebdHndBeg; - BasicBlock* filterBb = BasicBlock::New(this, BBJ_EHFILTERRET, handlerBb); + BasicBlock* filterBb = BasicBlock::New(this); // Now we need to spill CATCH_ARG (it should be the first thing evaluated) GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); @@ -2376,6 +2378,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() fgInsertBBbefore(handlerBb, filterBb); FlowEdge* const newEdge = fgAddRefPred(handlerBb, filterBb); newEdge->setLikelihood(1.0); + filterBb->SetKindAndTargetEdge(BBJ_EHFILTERRET, newEdge); fgNewStmtAtEnd(filterBb, retFilt, handlerBb->firstStmt()->GetDebugInfo()); filterBb->bbCatchTyp = BBCT_FILTER; @@ -2632,7 +2635,7 @@ bool Compiler::fgNormalizeEHCase3() // Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and // shares a 'last' pointer - BasicBlock* newLast = BasicBlock::New(this, BBJ_ALWAYS, insertAfterBlk->Next()); + BasicBlock* newLast = BasicBlock::New(this); newLast->bbRefs = 0; assert(insertAfterBlk != nullptr); fgInsertBBafter(insertAfterBlk, newLast); @@ -2683,6 +2686,7 @@ bool Compiler::fgNormalizeEHCase3() newLast->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); FlowEdge* const newEdge = fgAddRefPred(newLast, insertAfterBlk); newEdge->setLikelihood(1.0); + insertAfterBlk->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Move 
the insert pointer. More enclosing equivalent 'last' blocks will be inserted after this. insertAfterBlk = newLast; @@ -4325,8 +4329,8 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) #endif // FEATURE_EH_FUNCLETS // If this is a handler for a filter, the last block of the filter will end with - // a BBJ_EHFILTERRET block that has a bbTarget that jumps to the first block of - // its handler. So we need to update it to keep things in sync. + // a BBJ_EHFILTERRET block that jumps to the first block of its handler. + // So we need to update it to keep things in sync. // if (HBtab->HasFilter()) { @@ -4337,15 +4341,15 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) #ifdef DEBUG if (verbose) { - printf("EH#%u: Updating bbTarget for filter ret block: " FMT_BB " => " FMT_BB "\n", - ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum); + printf("EH#%u: Updating target for filter ret block: " FMT_BB " => " FMT_BB "\n", ehGetIndex(HBtab), + bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG - // Change the bbTarget for bFilterLast from the old first 'block' to the new first 'bPrev' - fgRemoveRefPred(bFilterLast->GetTarget(), bFilterLast); - bFilterLast->SetTarget(bPrev); + // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' + fgRemoveRefPred(bFilterLast->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(bPrev, bFilterLast); newEdge->setLikelihood(1.0); + bFilterLast->SetTargetEdge(newEdge); } } diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 96cd8bff015939..c6d37dc507b95a 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -860,17 +860,18 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* { for (unsigned i = 0; i < conds.Size(); ++i) { - BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true, slowPreheader); + BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); 
newBlk->inheritWeight(insertAfter); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetTrueTarget()->bbNum); - comp->fgAddRefPred(newBlk->GetTrueTarget(), newBlk); + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, slowPreheader->bbNum); + FlowEdge* const trueEdge = comp->fgAddRefPred(slowPreheader, newBlk); + newBlk->SetTrueEdge(trueEdge); if (insertAfter->KindIs(BBJ_COND)) { JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); - insertAfter->SetFalseTarget(newBlk); - comp->fgAddRefPred(newBlk, insertAfter); + FlowEdge* const falseEdge = comp->fgAddRefPred(newBlk, insertAfter); + insertAfter->SetFalseEdge(falseEdge); } JITDUMP("Adding conditions %u to " FMT_BB "\n", i, newBlk->bbNum); @@ -894,16 +895,18 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* } else { - BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true, slowPreheader); + BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetTrueTarget()->bbNum); - comp->fgAddRefPred(newBlk->GetTrueTarget(), newBlk); + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, slowPreheader->bbNum); + FlowEdge* const trueEdge = comp->fgAddRefPred(slowPreheader, newBlk); + newBlk->SetTrueEdge(trueEdge); - if (insertAfter->bbFallsThrough()) + if (insertAfter->KindIs(BBJ_COND)) { JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); - comp->fgAddRefPred(newBlk, insertAfter); + FlowEdge* const falseEdge = comp->fgAddRefPred(newBlk, insertAfter); + insertAfter->SetFalseEdge(falseEdge); } JITDUMP("Adding conditions to " FMT_BB "\n", newBlk->bbNum); @@ -1959,12 +1962,11 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // Make a new pre-header block for the fast loop. 
JITDUMP("Create new preheader block for fast loop\n"); - BasicBlock* fastPreheader = - fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true, /*jumpDest*/ loop->GetHeader()); + BasicBlock* fastPreheader = fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", fastPreheader->bbNum, preheader->bbNum); fastPreheader->bbWeight = fastPreheader->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; - if (fastPreheader->JumpsToNext()) + if (fastPreheader->NextIs(loop->GetHeader())) { fastPreheader->SetFlags(BBF_NONE_QUIRK); } @@ -1972,7 +1974,10 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex assert(preheader->KindIs(BBJ_ALWAYS)); assert(preheader->TargetIs(loop->GetHeader())); - fgReplacePred(loop->GetHeader(), preheader, fastPreheader); + FlowEdge* const oldEdge = preheader->GetTargetEdge(); + fgReplacePred(oldEdge, fastPreheader); + fastPreheader->SetTargetEdge(oldEdge); + JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", preheader->bbNum, loop->GetHeader()->bbNum, fastPreheader->bbNum, loop->GetHeader()->bbNum); @@ -2039,9 +2044,12 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // We haven't set the jump target yet assert(slowPreheader->KindIs(BBJ_ALWAYS)); assert(!slowPreheader->HasInitializedTarget()); - slowPreheader->SetTarget(slowHeader); - fgAddRefPred(slowHeader, slowPreheader); + { + FlowEdge* const newEdge = fgAddRefPred(slowHeader, slowPreheader); + slowPreheader->SetTargetEdge(newEdge); + } + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", slowPreheader->bbNum, slowHeader->bbNum); BasicBlock* condLast = optInsertLoopChoiceConditions(context, loop, slowPreheader, preheader); @@ -2049,14 +2057,18 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // Now redirect the old preheader to jump to the first new condition that // was inserted by the above function. 
assert(preheader->KindIs(BBJ_ALWAYS)); - preheader->SetTarget(preheader->Next()); - fgAddRefPred(preheader->Next(), preheader); + + { + FlowEdge* const newEdge = fgAddRefPred(preheader->Next(), preheader); + preheader->SetTargetEdge(newEdge); + } + preheader->SetFlags(BBF_NONE_QUIRK); // And make sure we insert a pred link for the final fallthrough into the fast preheader. assert(condLast->NextIs(fastPreheader)); - condLast->SetFalseTarget(fastPreheader); - fgAddRefPred(fastPreheader, condLast); + FlowEdge* const falseEdge = fgAddRefPred(fastPreheader, condLast); + condLast->SetFalseEdge(falseEdge); } //------------------------------------------------------------------------- diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 5d981ba5774228..29ca8148dbe90c 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -859,7 +859,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); - originalSwitchBB->SetKindAndTarget(BBJ_ALWAYS, jumpTab[0]->getDestinationBlock()); + originalSwitchBB->SetKindAndTargetEdge(BBJ_ALWAYS, jumpTab[0]); if (originalSwitchBB->JumpsToNext()) { @@ -949,7 +949,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // originalSwitchBB is now a BBJ_ALWAYS, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. assert(originalSwitchBB->KindIs(BBJ_ALWAYS)); - assert(originalSwitchBB->NextIs(afterDefaultCondBlock)); + assert(originalSwitchBB->TargetIs(afterDefaultCondBlock)); + assert(originalSwitchBB->JumpsToNext()); assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->GetSwitchTargets()->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. 
@@ -960,10 +961,10 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point // to afterDefaultCondBlock. comp->fgRemoveRefPred(jumpTab[jumpCnt - 1]); - comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]); + FlowEdge* const trueEdge = comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]); // Turn originalSwitchBB into a BBJ_COND. - originalSwitchBB->SetCond(defaultBB, afterDefaultCondBlock); + originalSwitchBB->SetCond(trueEdge, originalSwitchBB->GetTargetEdge()); bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt; @@ -1012,7 +1013,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) comp->fgRemoveRefPred(uniqueSucc); } - afterDefaultCondBlock->SetKindAndTarget(BBJ_ALWAYS, uniqueSucc->getDestinationBlock()); + afterDefaultCondBlock->SetKindAndTargetEdge(BBJ_ALWAYS, uniqueSucc); if (afterDefaultCondBlock->JumpsToNext()) { @@ -1065,10 +1066,10 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // If we haven't used the afterDefaultCondBlock yet, then use that. if (fUsedAfterDefaultCondBlock) { - BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true, currentBlock->Next()); + BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true); newBlock->SetFlags(BBF_NONE_QUIRK); - currentBlock->SetFalseTarget(newBlock); - comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor. + FlowEdge* const falseEdge = comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor. + currentBlock->SetFalseEdge(falseEdge); currentBlock = newBlock; currentBBRange = &LIR::AsRange(currentBlock); } @@ -1079,7 +1080,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } // Wire up the predecessor list for the "branch" case. 
- comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); + FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { @@ -1088,13 +1089,14 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->SetKindAndTarget(BBJ_ALWAYS, targetBlock); + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->SetCond(targetBlock, currentBlock->Next()); + // We will set the false edge in a later iteration of the loop, or after. + currentBlock->SetCond(newEdge); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1116,7 +1118,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. - comp->fgAddRefPred(currentBlock->Next(), currentBlock); + FlowEdge* const falseEdge = comp->fgAddRefPred(currentBlock->Next(), currentBlock); + currentBlock->SetFalseEdge(falseEdge); } if (!fUsedAfterDefaultCondBlock) @@ -1127,7 +1130,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->KindIs(BBJ_SWITCH)); - currentBlock->SetKindAndTarget(BBJ_ALWAYS, currentBlock->Next()); + FlowEdge* const newEdge = comp->fgAddRefPred(currentBlock->Next(), currentBlock); + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); currentBlock->RemoveFlags(BBF_DONT_REMOVE); comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. 
} @@ -1305,11 +1309,15 @@ bool Lowering::TryLowerSwitchToBitTest( comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); + // TODO: Use old edges to influence new edge likelihoods? + case0Edge = comp->fgAddRefPred(bbCase0, bbSwitch); + case1Edge = comp->fgAddRefPred(bbCase1, bbSwitch); + if (bbSwitch->NextIs(bbCase0)) { // GenCondition::C generates JC so we jump to bbCase1 when the bit is set bbSwitchCondition = GenCondition::C; - bbSwitch->SetCond(bbCase1, bbCase0); + bbSwitch->SetCond(case1Edge, case0Edge); } else { @@ -1317,13 +1325,9 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set bbSwitchCondition = GenCondition::NC; - bbSwitch->SetCond(bbCase0, bbCase1); + bbSwitch->SetCond(case0Edge, case1Edge); } - // TODO: Use old edges to influence new edge likelihoods? - comp->fgAddRefPred(bbCase0, bbSwitch); - comp->fgAddRefPred(bbCase1, bbSwitch); - var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG; GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 6ba44215a5f5da..b68f4518dfcba2 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6320,7 +6320,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->SetKindAndTarget(BBJ_THROW); + compCurBB->SetKindAndTargetEdge(BBJ_THROW); } if (isRootReplaced) @@ -7467,7 +7467,8 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. 
- block->SetKindAndTarget(BBJ_ALWAYS, fgEntryBB); + FlowEdge* const newEdge = fgAddRefPred(fgEntryBB, block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } else { @@ -7482,11 +7483,11 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa // block removal on it. // fgFirstBB->SetFlags(BBF_DONT_REMOVE); - block->SetKindAndTarget(BBJ_ALWAYS, fgFirstBB->Next()); + FlowEdge* const newEdge = fgAddRefPred(fgFirstBB->Next(), block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } // Finish hooking things up. - fgAddRefPred(block->GetTarget(), block); block->RemoveFlags(BBF_HAS_JMP); } @@ -13205,7 +13206,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) // JTRUE 0 - transform the basic block into a BBJ_ALWAYS bTaken = block->GetFalseTarget(); bNotTaken = block->GetTrueTarget(); - block->SetKindAndTarget(BBJ_ALWAYS, bTaken); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); block->SetFlags(BBF_NONE_QUIRK); } @@ -13373,13 +13374,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, curEdge); foundVal = true; } else { // Remove 'curEdge' - fgRemoveRefPred(curEdge->getDestinationBlock(), block); + fgRemoveRefPred(curEdge); } } @@ -14125,8 +14126,8 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else #endif // !TARGET_X86 { - block->SetKindAndTarget(BBJ_ALWAYS, genReturnBB); - fgAddRefPred(genReturnBB, block); + FlowEdge* const newEdge = fgAddRefPred(genReturnBB, block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); fgReturnCount--; } @@ -14628,16 +14629,24 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) assert(condBlock->bbWeight == remainderBlock->bbWeight); assert(block->KindIs(BBJ_ALWAYS)); - block->SetTarget(condBlock); - condBlock->SetTarget(elseBlock); - 
elseBlock->SetTarget(remainderBlock); + { + FlowEdge* const newEdge = fgAddRefPred(condBlock, block); + block->SetTargetEdge(newEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(elseBlock, condBlock); + condBlock->SetTargetEdge(newEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(remainderBlock, elseBlock); + elseBlock->SetTargetEdge(newEdge); + } + assert(condBlock->JumpsToNext()); assert(elseBlock->JumpsToNext()); - fgAddRefPred(condBlock, block); - fgAddRefPred(elseBlock, condBlock); - fgAddRefPred(remainderBlock, elseBlock); - condBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK); elseBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK); @@ -14654,17 +14663,20 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) // gtReverseCond(condExpr); - thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true, remainderBlock); + thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->SetFlags(propagateFlagsToAll); - condBlock->SetCond(elseBlock, thenBlock); if (!block->HasFlag(BBF_INTERNAL)) { thenBlock->RemoveFlags(BBF_INTERNAL); thenBlock->SetFlags(BBF_IMPORTED); } - fgAddRefPred(thenBlock, condBlock); - fgAddRefPred(remainderBlock, thenBlock); + FlowEdge* const newEdge = fgAddRefPred(remainderBlock, thenBlock); + thenBlock->SetTargetEdge(newEdge); + + assert(condBlock->TargetIs(elseBlock)); + FlowEdge* const falseEdge = fgAddRefPred(thenBlock, condBlock); + condBlock->SetCond(condBlock->GetTargetEdge(), falseEdge); thenBlock->inheritWeightPercentage(condBlock, qmark->ThenNodeLikelihood()); elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood()); @@ -14678,8 +14690,11 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) // bbj_cond(true) // gtReverseCond(condExpr); - condBlock->SetCond(remainderBlock, elseBlock); - fgAddRefPred(remainderBlock, condBlock); + + assert(condBlock->TargetIs(elseBlock)); + FlowEdge* const trueEdge = fgAddRefPred(remainderBlock, condBlock); + 
condBlock->SetCond(trueEdge, condBlock->GetTargetEdge()); + // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; @@ -14694,8 +14709,9 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) // +-->------------+ // bbj_cond(true) // - condBlock->SetCond(remainderBlock, elseBlock); - fgAddRefPred(remainderBlock, condBlock); + assert(condBlock->TargetIs(elseBlock)); + FlowEdge* const trueEdge = fgAddRefPred(remainderBlock, condBlock); + condBlock->SetCond(trueEdge, condBlock->GetTargetEdge()); elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood()); } diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index f9616636681b5c..713d9f17c58344 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -46,7 +46,7 @@ class OptBoolsDsc private: BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type - BasicBlock* m_b3; // m_b1->bbTarget. Null if m_b2 is not a return block. + BasicBlock* m_b3; // m_b1's target block. Null if m_b2 is not a return block. Compiler* m_comp; // The pointer to the Compiler instance @@ -89,7 +89,7 @@ class OptBoolsDsc // Notes: // m_b1 and m_b2 are set on entry. 
// -// Case 1: if b1.bbTarget == b2.bbTarget, it transforms +// Case 1: if b1->TargetIs(b2->GetTarget()), it transforms // B1 : brtrue(t1, Bx) // B2 : brtrue(t2, Bx) // B3 : @@ -107,7 +107,7 @@ class OptBoolsDsc // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // -// Case 2: if B2->FalseTargetIs(B1.bbTarget), it transforms +// Case 2: if B2->FalseTargetIs(B1->GetTarget()), it transforms // B1 : brtrue(t1, B3) // B2 : brtrue(t2, Bx) // B3 : @@ -123,7 +123,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock() m_t3 = nullptr; - // Check if m_b1 and m_b2 have the same bbTarget + // Check if m_b1 and m_b2 have the same target if (m_b1->TrueTargetIs(m_b2->GetTrueTarget())) { @@ -808,15 +808,20 @@ bool OptBoolsDsc::optOptimizeRangeTests() } // Re-direct firstBlock to jump to inRangeBb - m_comp->fgAddRefPred(inRangeBb, m_b1); + FlowEdge* const newEdge = m_comp->fgAddRefPred(inRangeBb, m_b1); + if (!cmp2IsReversed) { - m_b1->SetTrueTarget(inRangeBb); - m_b1->SetFalseTarget(notInRangeBb); + m_b1->SetFalseEdge(m_b1->GetTrueEdge()); + m_b1->SetTrueEdge(newEdge); + assert(m_b1->TrueTargetIs(inRangeBb)); + assert(m_b1->FalseTargetIs(notInRangeBb)); } else { - m_b1->SetFalseTarget(inRangeBb); + m_b1->SetFalseEdge(newEdge); + assert(m_b1->TrueTargetIs(notInRangeBb)); + assert(m_b1->FalseTargetIs(inRangeBb)); } // Remove the 2nd condition block as we no longer need it @@ -1012,8 +1017,8 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() m_comp->fgSetStmtSeq(s2); // Update the flow. - m_comp->fgRemoveRefPred(m_b1->GetTrueTarget(), m_b1); - m_b1->SetKindAndTarget(BBJ_ALWAYS, m_b1->GetFalseTarget()); + m_comp->fgRemoveRefPred(m_b1->GetTrueEdge()); + m_b1->SetKindAndTargetEdge(BBJ_ALWAYS, m_b1->GetFalseEdge()); m_b1->SetFlags(BBF_NONE_QUIRK); // Fixup flags. 
@@ -1266,22 +1271,19 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() { // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND - FlowEdge* edge1 = m_comp->fgGetPredForBlock(m_b1->GetTrueTarget(), m_b1); + FlowEdge* edge1 = m_b1->GetTrueEdge(); FlowEdge* edge2; if (m_sameTarget) { - edge2 = m_comp->fgGetPredForBlock(m_b2->GetTrueTarget(), m_b2); + edge2 = m_b2->GetTrueEdge(); } else { - edge2 = m_comp->fgGetPredForBlock(m_b2->GetFalseTarget(), m_b2); - - m_comp->fgRemoveRefPred(m_b1->GetTrueTarget(), m_b1); - - m_b1->SetTrueTarget(m_b2->GetTrueTarget()); - - m_comp->fgAddRefPred(m_b2->GetTrueTarget(), m_b1); + edge2 = m_b2->GetFalseEdge(); + m_comp->fgRemoveRefPred(m_b1->GetTrueEdge()); + FlowEdge* const newEdge = m_comp->fgAddRefPred(m_b2->GetTrueTarget(), m_b1); + m_b1->SetTrueEdge(newEdge); } assert(edge1 != nullptr); @@ -1307,7 +1309,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() assert(m_b2->KindIs(BBJ_RETURN)); assert(m_b1->FalseTargetIs(m_b2)); assert(m_b3 != nullptr); - m_b1->SetKindAndTarget(BBJ_RETURN); + m_b1->SetKindAndTargetEdge(BBJ_RETURN); } else { @@ -1322,11 +1324,12 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() { // Update bbRefs and bbPreds // - // Replace pred 'm_b2' for 'm_b2->bbFalseTarget' with 'm_b1' - // Remove pred 'm_b2' for 'm_b2->bbTrueTarget' - m_comp->fgReplacePred(m_b2->GetFalseTarget(), m_b2, m_b1); - m_comp->fgRemoveRefPred(m_b2->GetTrueTarget(), m_b2); - m_b1->SetFalseTarget(m_b2->GetFalseTarget()); + // Replace pred 'm_b2' for m_b2's false target with 'm_b1' + // Remove pred 'm_b2' for m_b2's true target + FlowEdge* falseEdge = m_b2->GetFalseEdge(); + m_comp->fgReplacePred(falseEdge, m_b1); + m_comp->fgRemoveRefPred(m_b2->GetTrueEdge()); + m_b1->SetFalseEdge(falseEdge); } // Get rid of the second block @@ -1361,7 +1364,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() // Notes: // m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry. 
// -// if B1.bbTarget == b3, it transforms +// if B1->TargetIs(b3), it transforms // B1 : brtrue(t1, B3) // B2 : ret(t2) // B3 : ret(0) diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 5f13e7a8c72afe..6975bcd27bf991 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -585,20 +585,24 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo case BBJ_CALLFINALLY: case BBJ_CALLFINALLYRET: case BBJ_LEAVE: + { + FlowEdge* newEdge; + // Determine if newBlk should be redirected to a different target from blk's target if (redirectMap->Lookup(blk->GetTarget(), &newTarget)) { // newBlk needs to be redirected to a new target - newBlk->SetKindAndTarget(blk->GetKind(), newTarget); + newEdge = fgAddRefPred(newTarget, newBlk); } else { // newBlk uses the same target as blk - newBlk->SetKindAndTarget(blk->GetKind(), blk->GetTarget()); + newEdge = fgAddRefPred(blk->GetTarget(), newBlk); } - fgAddRefPred(newBlk->GetTarget(), newBlk); + newBlk->SetKindAndTargetEdge(blk->GetKind(), newEdge); break; + } case BBJ_COND: { @@ -627,9 +631,9 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo falseTarget = blk->GetFalseTarget(); } - fgAddRefPred(trueTarget, newBlk); - fgAddRefPred(falseTarget, newBlk); - newBlk->SetCond(trueTarget, falseTarget); + FlowEdge* const trueEdge = fgAddRefPred(trueTarget, newBlk); + FlowEdge* const falseEdge = fgAddRefPred(falseTarget, newBlk); + newBlk->SetCond(trueEdge, falseEdge); break; } @@ -696,16 +700,18 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: + { // newBlk's jump target should not need to be redirected assert(!redirectMap->Lookup(blk->GetTarget(), &newTarget)); - newBlk->SetKindAndTarget(blk->GetKind(), blk->GetTarget()); - fgAddRefPred(newBlk->GetTarget(), newBlk); + FlowEdge* newEdge = fgAddRefPred(newBlk->GetTarget(), newBlk); + 
newBlk->SetKindAndTargetEdge(blk->GetKind(), newEdge); break; + } default: // blk doesn't have a jump destination assert(blk->NumSucc() == 0); - newBlk->SetKindAndTarget(blk->GetKind()); + newBlk->SetKindAndTargetEdge(blk->GetKind()); break; } @@ -1710,12 +1716,12 @@ void Compiler::optRedirectPrevUnrollIteration(FlowGraphNaturalLoop* loop, BasicB testCopyStmt->SetRootNode(sideEffList); } - fgRemoveRefPred(prevTestBlock->GetTrueTarget(), prevTestBlock); - fgRemoveRefPred(prevTestBlock->GetFalseTarget(), prevTestBlock); + fgRemoveRefPred(prevTestBlock->GetTrueEdge()); + fgRemoveRefPred(prevTestBlock->GetFalseEdge()); // Redirect exit edge from previous iteration to new entry. - prevTestBlock->SetKindAndTarget(BBJ_ALWAYS, target); - fgAddRefPred(target, prevTestBlock); + FlowEdge* const newEdge = fgAddRefPred(target, prevTestBlock); + prevTestBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); JITDUMP("Redirecting previously created exiting " FMT_BB " -> " FMT_BB "\n", prevTestBlock->bbNum, target->bbNum); @@ -1924,7 +1930,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) return false; } - // Since bTest is a BBJ_COND it will have a bbFalseTarget + // Since bTest is a BBJ_COND it will have a false target // BasicBlock* const bJoin = bTest->GetFalseTarget(); noway_assert(bJoin != nullptr); @@ -1946,7 +1952,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) } // It has to be a forward jump. Defer this check until after all the cheap checks - // are done, since it iterates forward in the block list looking for bbTarget. + // are done, since it iterates forward in the block list looking for block's target. // TODO-CQ: Check if we can also optimize the backwards jump as well. // if (!fgIsForwardBranch(block, block->GetTarget())) @@ -2136,10 +2142,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. 
- BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true, bJoin); - block->SetKindAndTarget(BBJ_ALWAYS, bNewCond); - block->SetFlags(BBF_NONE_QUIRK); - assert(block->JumpsToNext()); + BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); // Clone each statement in bTest and append to bNewCond. for (Statement* const stmt : bTest->Statements()) @@ -2198,12 +2201,17 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Update pred info // - bNewCond->SetFalseTarget(bTop); - fgAddRefPred(bJoin, bNewCond); - fgAddRefPred(bTop, bNewCond); + FlowEdge* const trueEdge = fgAddRefPred(bJoin, bNewCond); + FlowEdge* const falseEdge = fgAddRefPred(bTop, bNewCond); + bNewCond->SetTrueEdge(trueEdge); + bNewCond->SetFalseEdge(falseEdge); + + fgRemoveRefPred(block->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(bNewCond, block); - fgAddRefPred(bNewCond, block); - fgRemoveRefPred(bTest, block); + block->SetTargetEdge(newEdge); + block->SetFlags(BBF_NONE_QUIRK); + assert(block->JumpsToNext()); // Move all predecessor edges that look like loop entry edges to point to the new cloned condition // block, not the existing condition block. 
The idea is that if we only move `block` to point to @@ -2977,7 +2985,7 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop) insertBefore = header; } - BasicBlock* preheader = fgNewBBbefore(BBJ_ALWAYS, insertBefore, false, header); + BasicBlock* preheader = fgNewBBbefore(BBJ_ALWAYS, insertBefore, false); preheader->SetFlags(BBF_INTERNAL); fgSetEHRegionForNewPreheaderOrExit(preheader); @@ -2990,7 +2998,8 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop) JITDUMP("Created new preheader " FMT_BB " for " FMT_LP "\n", preheader->bbNum, loop->GetIndex()); - fgAddRefPred(header, preheader); + FlowEdge* const newEdge = fgAddRefPred(header, preheader); + preheader->SetTargetEdge(newEdge); for (FlowEdge* enterEdge : loop->EntryEdges()) { @@ -3093,26 +3102,27 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) BasicBlock* bottom = loop->GetLexicallyBottomMostBlock(); if (bottom->hasTryIndex() && (bottom->getTryIndex() == finallyBlock->getHndIndex()) && !bottom->hasHndIndex()) { - newExit = fgNewBBafter(BBJ_ALWAYS, bottom, true, exit); + newExit = fgNewBBafter(BBJ_ALWAYS, bottom, true); } else { // Otherwise just do the heavy-handed thing and insert it anywhere in the right region. 
- newExit = fgNewBBinRegion(BBJ_ALWAYS, finallyBlock->bbHndIndex, 0, nullptr, exit, /* putInFilter */ false, + newExit = fgNewBBinRegion(BBJ_ALWAYS, finallyBlock->bbHndIndex, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } } else #endif { - newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false, exit); + newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false); newExit->SetFlags(BBF_NONE_QUIRK); fgSetEHRegionForNewPreheaderOrExit(newExit); } newExit->SetFlags(BBF_INTERNAL); - fgAddRefPred(exit, newExit); + FlowEdge* const newEdge = fgAddRefPred(exit, newExit); + newExit->SetTargetEdge(newEdge); newExit->bbCodeOffs = exit->bbCodeOffs; diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index 27b94470962ef0..a22534c9fe2941 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -101,13 +101,12 @@ class PatchpointTransformer // Arguments: // jumpKind - jump kind for the new basic block // insertAfter - basic block, after which compiler has to insert the new one. - // jumpDest - jump target for the new basic block. Defaults to nullptr. // // Return Value: // new basic block. 
- BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter, BasicBlock* jumpDest = nullptr) + BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter) { - BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true, jumpDest); + BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true); block->SetFlags(BBF_IMPORTED); return block; } @@ -143,21 +142,21 @@ class PatchpointTransformer // Current block now becomes the test block BasicBlock* remainderBlock = compiler->fgSplitBlockAtBeginning(block); - BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block, block->Next()); + BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block); // Update flow and flags - block->SetCond(remainderBlock, helperBlock); block->SetFlags(BBF_INTERNAL); - helperBlock->SetFlags(BBF_BACKWARD_JUMP | BBF_NONE_QUIRK); FlowEdge* const falseEdge = compiler->fgAddRefPred(helperBlock, block); FlowEdge* const trueEdge = compiler->fgGetPredForBlock(remainderBlock, block); trueEdge->setLikelihood(HIGH_PROBABILITY / 100.0); falseEdge->setLikelihood((100 - HIGH_PROBABILITY) / 100.0); + block->SetCond(trueEdge, falseEdge); FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, helperBlock); newEdge->setLikelihood(1.0); + helperBlock->SetTargetEdge(newEdge); // Update weights remainderBlock->inheritWeight(block); @@ -238,7 +237,7 @@ class PatchpointTransformer } // Update flow - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); // Add helper call // diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 1008b81194f8d5..1a523bcfbeec7a 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -363,16 +363,18 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert(lastBlock->FalseTargetIs(blockIfTrue)); fgRemoveRefPred(blockIfTrue, firstBlock); BasicBlock* 
targetBlock = blockIfTrue; - blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock); + blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true); FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfTrue); skipPredRemoval = true; + blockIfTrue->SetTargetEdge(newEdge); } else { assert(lastBlock->FalseTargetIs(blockIfFalse)); BasicBlock* targetBlock = blockIfFalse; - blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock); + blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true); FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfFalse); + blockIfFalse->SetTargetEdge(newEdge); } } diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index c7af200c3de60f..79cc5548355401 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -9874,7 +9874,7 @@ class ValueNumberState return false; } - if (!predBlock->KindIs(BBJ_COND) || predBlock->TrueTargetIs(predBlock->GetFalseTarget())) + if (!predBlock->KindIs(BBJ_COND) || predBlock->TrueEdgeIs(predBlock->GetFalseEdge())) { return true; }