-
Notifications
You must be signed in to change notification settings - Fork 7
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Refactor TransormPropagator to allow specifying a position and propagating to part of the DAG #1775
Conversation
// Demonstrates restricting TransformPropagator to a subgraph via a
// MaxInfoSpanningTree::Selector: only tv0 (producer direction) and tv3
// (consumer direction) are allowed to receive the replayed transform.
TEST_F(NVFuserTest, FusionTransormPropagatorSelector_CUDA) {
  auto fusion = std::make_unique<Fusion>();
  FusionGuard fg(fusion.get());

  auto tv0 = makeSymbolicTensor(1);
  fusion->addInput(tv0);
  auto tv1 = makeSymbolicTensor(1);
  fusion->addInput(tv1);

  auto tv2 = add(tv0, tv1);

  auto tv3 = sin(tv2);
  auto tv4 = cos(tv2);

  fusion->addOutput(tv3);
  fusion->addOutput(tv4);

  // The transform to propagate: split tv2's sole dimension by 10.
  tv2->split(0, 10);

  // allowPasC gates producer-as-consumer replay; allowCasP gates
  // consumer-as-producer replay. Here each direction admits exactly one
  // tensor, so tv1 and tv4 must be left untouched by the traversal.
  struct Selector : public MaxInfoSpanningTree::Selector {
    TensorView* tv0;
    TensorView* tv3;
    virtual bool allowPasC(TensorView* from, TensorView* to) override {
      return to == tv0;
    }
    virtual bool allowCasP(TensorView* from, TensorView* to) override {
      return to == tv3;
    }
    Selector(TensorView* tv0, TensorView* tv3) : tv0(tv0), tv3(tv3) {}
  } selector(tv0, tv3);

  TransformPropagator propagator(tv2);
  MaxRootDomainInfoSpanningTree(tv2, &selector).traverse(&propagator);

  // tv0 and tv3 received the split (1 dim -> 2); tv1 and tv4 did not.
  TORCH_CHECK(tv0->nDims() == 2);
  TORCH_CHECK(tv1->nDims() == 1);
  TORCH_CHECK(tv2->nDims() == 2);
  TORCH_CHECK(tv3->nDims() == 2);
  TORCH_CHECK(tv4->nDims() == 1);
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please see this as an example of how to use selector.
// Demonstrates propagating only up to a given leaf position: with
// position 2, only the first two leaf domains of tv1 are replayed onto
// its producer; the remaining inner splits are ignored.
TEST_F(NVFuserTest, FusionTransormPropagatorPos_CUDA) {
  auto fusion = std::make_unique<Fusion>();
  FusionGuard fg(fusion.get());

  auto tv0 = makeConcreteTensor({22, 105});
  fusion->addInput(tv0);

  auto tv1 = sin(tv0);
  fusion->addOutput(tv1);

  // tv1 leaf domains after these splits: [11, 2, 7, 3, 5].
  tv1->split(0, 2);
  tv1->split(-1, 3);
  tv1->split(-1, 5);

  // Propagate only the transforms producing tv1's first 2 leaf domains.
  TransformPropagator propagator(tv1, 2);
  MaxRootDomainInfoSpanningTree(tv1, 2).traverse(&propagator);

  // tv0 picks up only the outer split(0, 2): [11, 2, 105].
  TORCH_CHECK(tv0->nDims() == 3);
  TORCH_CHECK(tv0->axis(0)->extent()->evaluateInt() == 11);
  TORCH_CHECK(tv0->axis(1)->extent()->evaluateInt() == 2);
  TORCH_CHECK(tv0->axis(2)->extent()->evaluateInt() == 105);
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please see this as an example for doing propagation with a leaf position.
// Demonstrates (a) plugging a custom Propagator into the spanning tree
// and (b) that the generated path is stored, so the same
// MaxRootDomainInfoSpanningTree can be traversed multiple times with
// different propagators and yield identical visit orders.
TEST_F(NVFuserTest, FusionMaxRootDomainInfoSpanningTreePrintTwice_CUDA) {
  auto fusion = std::make_unique<Fusion>();
  FusionGuard fg(fusion.get());

  auto tv0 = makeSymbolicTensor(3);
  fusion->addInput(tv0);

  auto tv1 = sum(tv0, {0});
  auto tv2 = neg(tv1);

  fusion->addOutput(tv2);

  tv1->split(0, 10);

  // A propagator that performs no replay; it only records each hop so
  // the traversal order can be compared textually.
  struct Printer : public MaxInfoSpanningTree::Propagator {
    std::stringstream ss;
    virtual void propagateTvPasC(TensorView* from, TensorView* to) override {
      ss << "propagateTvPasC" << std::endl;
      ss << "from: " << from << std::endl;
      ss << "to: " << to << std::endl;
    }
    virtual void propagateTvCasP(TensorView* from, TensorView* to) override {
      ss << "propagateTvCasP" << std::endl;
      ss << "from: " << from << std::endl;
      ss << "to: " << to << std::endl;
    }
  } printer1, printer2;
  // Leading newline so the recorded text lines up with the raw-string
  // `expect` below, which starts with a newline.
  printer1.ss << std::endl;
  printer2.ss << std::endl;

  // Traverse the SAME stored path twice with two different propagators.
  MaxRootDomainInfoSpanningTree path(tv1);
  path.traverse(&printer1);
  path.traverse(&printer2);

  auto expect = R"ESCAPE(
propagateTvPasC
from: T1_l[ rS8{( ceilDiv(i1, 10) )}, rS9{10}, iS4{i2}, iS5{i3} ]
to: T0_g[ iS0{i1}, iS1{i2}, iS2{i3} ]
propagateTvCasP
from: T1_l[ rS8{( ceilDiv(i1, 10) )}, rS9{10}, iS4{i2}, iS5{i3} ]
to: T2_g[ iS6{i2}, iS7{i3} ]
)ESCAPE";
  // Both traversals must have produced the identical transcript.
  TORCH_CHECK(printer1.ss.str() == expect);
  TORCH_CHECK(printer2.ss.str() == expect);
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please see this as an example of adding a custom propagator, and traversing the same path multiple times with a different propagator.
… transform-propagator-extension
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM!
while (!propagation.empty()) { | ||
auto next_hop = propagation.back(); | ||
propagation.pop_back(); | ||
auto allowCasP = [this](TensorView* from, TensorView* to) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Really interesting that you split these up.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This has a real use case in my computeAt PR. In #1743, I have
bool ConsumeAt::allowPasC(TensorView* from, TensorView* to) {
return consume_.count(to) > 0;
}
bool ConsumeAt::allowCasP(TensorView* from, TensorView* to) {
// If the producer is in the consume set, then the consumer must also be
// replayed to obtain a compatible loop structure so that this producer
// can be consumed in this loop.
return consume_.count(from) > 0 || consume_.count(to) > 0;
}
|
||
tv1->split(0, 10); | ||
|
||
struct Printer : public MaxInfoSpanningTree::Propagator { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Really cool.
@@ -850,7 +850,8 @@ void schedulePointwise(Fusion* fusion, const PointwiseParams& params) { | |||
} | |||
} | |||
|
|||
TransformPropagator(reference_tv).run(); | |||
TransformPropagator propagator(reference_tv); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Nit: Since this is the only variant used at the moment, should we maintain an alias for it or just wait until we see what variants are actually going to be used?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
For now, this is only two lines of code, so I think it is OK to leave it like this for now. If in the future, we need to repeatedly write some boilerplate code, for example, define some subclass of Selector
, etc., we could consider adding an alias for them.
I will merge this PR as is to unblock my computeAt work but feel free to follow up and I can resolve your suggestions in later PR.
TransformPropagator propagator(tv3); | ||
MaxRootDomainInfoSpanningTree(tv3).traverse(&propagator); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Would be nice to have a concise shortcut. Average users won't be concerned about the C++ interface, but it is still an interface for experts.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sure, I think the shortcuts will be built in scheduler utils. I think scheduler utils should be the concise place for easy to use interfaces.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I like the idea of having a shortcut in scheduler utils. But in unit tests, I am considering using TransformPropagatorWithCheck
to replace all the uses of TransformPropagator (see
#1791 (comment)), so probably in the unit tests, we will not have much chance to use that shortcut.
* Refactor TransormPropagator to allow specifying a position and propagating to part of the DAG (#1775) `MaxInfoPropagator` is renamed to `MaxInfoSpanningTree`, it now only does path-finding, and the propagation is in a separate class `MaxInfoSpanningTree::Propagator`. Same for `MaxRootDomainInfoPropagator`. `MaxInfoSpanningTree` and `MaxRootDomainInfoSpanningTree` now allow specifying a selector, which controls which subgraph should be included in path-finding. `MaxRootDomainInfoSpanningTree` also gets a few new constructors for convenience to use. `TransormPropagator` is now a subclass of `MaxInfoSpanningTree::Propagator`, so the way to use it has changed. Now `MaxInfoSpanningTree` and `MaxRootDomainInfoSpanningTree` will store the path after generation so that the same path can be traversed multiple times. This will be useful to support use cases like new `computeAt`. Pseudo-code: ```C++ void TensorView::computeAt(TensorView tv, int pos) { auto ComputeAtSubgraphSelector selector(this, tv); MaxRootDomainInfoSpanningTree path(tv, pos, &selector); TransformPropagator propagator(tv, pos); path.traverse(&propagator); ComputeAtPosPropagator ca_propagator(tv, pos); path.traverse(&ca_propagator); } ``` * Revert scheduling changes. Cleanup only. * Start drafting grid persistent kernels. * Extend mma dimension and layout checking to support strided batched matmul and tensor contractions (#1761) Co-authored-by: Christian Sarofeen <csarofeen@nvidia.com> * Fix FusionMaxRootDomainInfoSpanningTreePrintTwice_CUDA (#1781) * Fix div(Val, TensorView) (#1778) * Fix div(scalar, tensor) * lintrunner: clang-format * Adding sibling path for MaxInfoSpanningTree (#1776) The sibling path is required to generate consistent replay for some cases where `MaxInfoSpanningTree` is used with a selector. For example, when the producer of a Welford is excluded from the propagation section. See test `FusionTransformPropagateSelectorSibling_CUDA` for a detailed example. 
Besides, since we know that siblings should be transformed exactly the same, the sibling path is a perfect next hop for preserving information. If you want a spanning tree without a sibling path, you can override `allowSibling` as `return false` in your selector; * Save. * Disable register reuse across serial broadcast ops (#1787) Disable memory aliasing for inner sharing across serial broadcast. * Fix isIntegralType error msg (#1789) * Transform propagator skip replay when possible (#1782) This comment in the code describes what this PR is doing: ```C++ // Note: [Using multiple TransformPropagators] // There are cases that we use multiple TransformPropagators along different // spanning trees with different references in the same fusion. Some of these // spanning trees could overlap. In cases when there are overlapping nodes, // TransformPropagator needs to respect the replay of others, because the // current TransformPropagator might not contain the most amount of // information on how to do the correct transformation. The logic below tells // TransformPropagator to skip the replay when not necessary. ``` * Output allocate patch (#1790) Caching strides along with sizes. This is to support current expand, which introduces non-contiguous output tensor * Add SpanningTreePrinter (#1786) * New compute at interface (#1743) Rewrite of the compute at pass to rely on the new propagation mechanisms. * Fix TransformReplay::getMatchedLeafPosWithoutReplay* (#1791) * Some further cleanup for the new computeAt interface (#1793) Revert MaxProducerPosUpdater to old algo. * Use TransformPropagatorWithCheck in many tests (#1795) * validateDomain in TransformPropagator (#1796) * InlinePropagator please don't replay (#1797) This PR makes `InlinePropagator` just set compute-at positions. It will not replay any tensor. If you want to replay, please use `TransformPropagator` and friends to do so. 
Currently, `InlinePropagator` is already asserting no replay for standard and best effort compute at. So this PR is mostly about making most inlined compute at works as well. This PR also does a lot of cleanups to remove the word "replay" from comments and variable and function names from `InlinePropagator`. I also cleaned up `recordReplayedPos` and `retrieveReplayedPos`, now the logic is much easier to understand. * Coding style cleanups (#1798) Per offline discussion with @csarofeen, this PR does many renaming for better coding style: For all propagation-related things, I am now using the names `P2C` and `C2P` instead of `CasP` and `PasC`. Because "A as B" somewhat implies we want to replay A the same as B, but "B to A" sounds more general and is a better word for this case. Also, I modified the order of function arguments to match the order in its name. For example `PasC` should have `(producer, consumer)` or `(to, from)`, but not `(consumer, producer)` or `(from, to)`, and `C2P` should have `(consumer, producer)` or `(from, to)`, but not `(producer, consumer)` or `(to, from)`. * Add parsing support for `_to_copy` to handle AMP casts. (#1756) 1. Add support for _to_copy() to support AMP casts. 2. refactored cast, accept none for dtype 3. python tests Co-authored-by: jjsjann123 <jiej@nvidia.com> * MMA Rfactor support for cross-warp and cross-CTA split on K dimension (#1554) * Indexing refactor stage 2 : Remove reference tensor in predicate indexing logic (#1784) Co-authored-by: Christian Sarofeen <csarofeen@nvidia.com> * More cleanup on InlinePropagator (#1800) I just realized that `InlinePropagator` can be further simplified because it no longer replays. 
Since `InlinePropagator` is no longer doing replay, it is more like a "for each" problem rather than a propagation problem: For each tensor `tv`, if we already know what is the max position of `tv` that is mapped to the reference tensor's selected outer dimensions(stored in `mapped_reference_pos_` in the code), setting the CA position is a very local operation, and is as simple as checking `tv` itself and all its consumers to determine the inline position. `InlinePropagator` is not completely a "for each" problem only because the computation of `mapped_reference_pos_` is a propagation problem. This cleanup reorganizes the code of `InlinePropagator` so it is clear that `InlinePropagator` is nothing but a two-step process: Step 1: Do a propagation to find the `mapped_reference_pos_` for all tensors. Step 2: For each tensor, check itself and its consumers to determine the CA position. Conceptually, I would like to split step 1 with step 2. Because this split makes these concepts decoupled. Especially, this PR makes `mapped_reference_pos_` only contain info about the reference tensor, and is independent of the CA position (Currently, this is not true for best effort and most inlined computeAt without this PR). Now, in my view, `InlinePropagator` is conceptually very simple and easy to understand. In terms of implementation, step 1 and step 2 can be interleaved, because when we don't need to know the `mapped_reference_pos_` for `tv`'s consumer in order to compute the CA position of `tv`. So a one-pass traverse could do both step 1 and step 2 altogether. * Temporarily disable test requring large shared memory. (#1802) * Grouping grid allreduces across iterations (#1755) * Extend the grouped grid reduction kernel The kernel itself should work with an arbitrary number of inputs, but the underlying data structure, Tuple, still explicitly needs to be specialized for the number of values, which is currently limited to 8. 
* Check siblings in getMaxPosAll (#1805) * remove dead indexing code (#1806) * Broadcast in dim with expand (#1794) Fixes #1788 Added expand in broadcast_in_dim to support expanding to concrete size. Note that we are not supporting dynamic shape for concrete size at this moment. * spam nvrtc options (#1783) TORCH_WARN on nvrtc debug option impacting performance. Co-authored-by: Gao, Xiang <qasdfgtyuiop@gmail.com> Co-authored-by: S. Song <41357537+shmsong@users.noreply.github.com> Co-authored-by: Ivan Yashchuk <IvanYashchuk@users.noreply.github.com> Co-authored-by: Sergey Lebedev <sergeyle@nvidia.com> Co-authored-by: jjsjann123 <jiej@nvidia.com> Co-authored-by: Kevin Stephano <kevin.stephano@gmail.com> Co-authored-by: Naoya Maruyama <naoyam@users.noreply.github.com>
Syncing nvfuser devel branch to upstream master. https://github.com/csarofeen/pytorch/ Code changes includes: - TransformPropagator refactor: switched to Dijkstra instead of exhaustive enumeration on all possible paths to reduce compilation time on transform propagation; - Indexing refactor: remove reference tensor creation in all tensor indexing logic (csarofeen#1690) - (more) generic grouped grid reduction kernel; - Minor parser/fuser patches: 1. zero-dim tensor reduction support 3. no-op binary removal within fused graph 4. expand supported in fusion Squashed commits to WAR github API Commits that's actually in this PR from the devel branch: ``` a054b3e Refactor TransormPropagator to allow specifying a position and propagating to part of the DAG (csarofeen#1775) d67e1cd Indexing refactor stage 1: remove reference tensor creation in all tensor indexing logic (csarofeen#1690) 1b65299 Issue 1770 (csarofeen#1774) 35b0427 Avoid compilation errors like below: (csarofeen#1773) 452c773 Ignore reductions of zero-dim tensors per PyTorch conventions (csarofeen#1771) 31d6c56 TransformPropagator refactor (csarofeen#1769) 570c5a8 Merge pull request csarofeen#1767 from csarofeen/upstream_merge_0621 9d6c3d8 merging upstream 61305cd 0ed815f New TransformPropagator algorithm (csarofeen#1763) 6c19520 no-op binary removal (csarofeen#1764) ec7fa41 Proper propagation of IterType (csarofeen#1762) b263562 Fix dimensionality check (csarofeen#1759) 2d6343f More generic grouped grid reduction kernel (csarofeen#1740) 64e2b56 [nvfuser] prevent spamming warning message (pytorch#77777) (csarofeen#1758) 0c43162 [nvFuser] Improving bitwise ops support (pytorch#77158) (csarofeen#1757) b93a147 Parser expand (csarofeen#1754) ``` RUN_TORCHBENCH: nvfuser Pull Request resolved: pytorch#80355 Approved by: https://github.com/davidberard98
MaxInfoPropagator
is renamed toMaxInfoSpanningTree
, it now only does path-finding, and the propagation is in a separate classMaxInfoSpanningTree::Propagator
. Same forMaxRootDomainInfoPropagator
.MaxInfoSpanningTree
andMaxRootDomainInfoSpanningTree
now allow specifying a selector, which controls which subgraph should be included in path-finding.MaxRootDomainInfoSpanningTree
also gets a few new constructors for convenience to use.TransormPropagator
is now a subclass ofMaxInfoSpanningTree::Propagator
, so the way to use it has changed.Now
MaxInfoSpanningTree
andMaxRootDomainInfoSpanningTree
will store the path after generation so that the same path can be traversed multiple times. This will be useful to support use cases like newcomputeAt
. Pseudo-code: