Lower stream-parallelized matmul #5302
@@ -479,6 +479,10 @@ class ShardByStream : public Expr {
  }
};

// Creates a ShardByStream without needing the output TensorView. Returns the
// output TensorView.
TensorView* shardByStream(TensorView* in, Val* stream_index);

class ForLoop : public Expr {
 public:
  using Expr::Expr;

@@ -492,6 +496,8 @@ class ForLoop : public Expr {

  NVFUSER_DECLARE_CLONE_AND_CREATE

  static ForLoop* createFromIterDomain(Val* index, IterDomain* iter_domain);
Collaborator (Author):

I'm on the fence about this. The method is coupled with the ForLoop class, so I moved it here to save some typing. The downside is less access control, because createFromIterDomain could access private fields/methods of ForLoop.
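For readers following the trade-off, here is a minimal standalone sketch of the two placements being weighed; the stand-in types are illustrative, and only the two declarations mirror the diff:

```cpp
// Illustrative stand-ins; the real Val, IterDomain, and Expr live in nvFuser.
class Val;
class IterDomain;
class Expr {};

// Option A (this PR): a static member. It sits next to ForLoop and saves some
// typing at call sites, but it can reach ForLoop's private fields and methods.
class ForLoop : public Expr {
 public:
  static ForLoop* createFromIterDomain(Val* index, IterDomain* iter_domain);
};

// Option B (the previous free function): keeps ForLoop's encapsulation intact
// because it can only go through the public interface.
ForLoop* createForLoopFromIterDomain(Val* index, IterDomain* iter_domain);
```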
  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;
  const char* getOpString() const override {

@@ -519,6 +525,4 @@ class ForLoop : public Expr {
  }
};

ForLoop* createForLoopFromIterDomain(Val* index, IterDomain* iter_domain);

} // namespace nvfuser::hir
@@ -11,6 +11,7 @@
#include <host_ir/lower_to_communication.h>
#include <host_ir/lowering.h>
#include <host_ir/pass/insert_deallocations.h>
#include <multidevice/utils.h>
#include <runtime/executor_abstract.h>

namespace nvfuser {

@@ -36,6 +37,30 @@ void recomputeOutputTvs(Expr* e, IrCloner& ir_cloner) {
  }
}

// Finds the stream-parallelized IterDomain in the loop domain of a TensorView,
// or nullptr if not found. This is different from `getShardedIterDomain(tv,
// ParallelType::Stream)`, which searches the allocation domain. Consider
// unifying them into one function with an extra DomainType parameter.
IterDomain* findStreamIterDomain(TensorView* tv) {
  const std::vector<IterDomain*>& loop = tv->getLoopDomain();
  // FinalizeMultideviceDomains pass puts the stream IterDomain to the
  // front.
  if (!loop.empty() && loop.front()->isStream()) {
    return loop.front();
  }
  return nullptr;
}

// Finds the stream IterDomain in the outputs of a segment.
IterDomain* findStreamIterDomain(const std::vector<Val*>& outs) {
  for (auto* out : ir_utils::filterByType<TensorView>(outs)) {
Collaborator:

So we are finding the stream ID in any of the outputs of a segment? Why not use the above variation directly with any of the segment outputs, as they must have mapped stream IDs?

Collaborator (Author):

Because I'm not sure about CPU-scalar TensorViews from composite ops. But I should probably harden the check to enforce that every TensorView has a Stream IterDomain. Wdyt?

Collaborator:

In their blackbox state, it does not look like we can currently support SDPA ops, for example. So adding an assert makes sense to signal something is wrong. I guess this is something I need to fix in

Collaborator (Author):

Why not? At least, batch and/or head can be easily parallelized on stream without changing the implementation of the SDPA op, assuming ShardByStreams are added properly of course.
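A rough sketch of the hardened check floated above, assuming the existing `findStreamIterDomain(TensorView*)` helper, `ir_utils::filterByType`, and the `NVF_ERROR` macro; the CPU-scalar carve-out is a guess at how composite ops might be handled:

```cpp
// Hypothetical hardened variant: every non-CPU-scalar output TensorView must
// carry a stream IterDomain at the front of its loop domain.
IterDomain* findStreamIterDomain(const std::vector<Val*>& outs) {
  IterDomain* stream_id = nullptr;
  for (auto* out : ir_utils::filterByType<TensorView>(outs)) {
    if (out->isCpuScalar()) {
      // CPU-scalar TensorViews from composite ops may legitimately lack one.
      continue;
    }
    IterDomain* id = findStreamIterDomain(out);
    NVF_ERROR(
        id != nullptr,
        "Expected a stream IterDomain in the loop domain of ",
        out->toString());
    if (stream_id == nullptr) {
      stream_id = id;
    }
  }
  return stream_id;
}
```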
    if (auto* stream_id = findStreamIterDomain(out)) {
      return stream_id;
    }
  }
  return nullptr;
}

void lowerSegment(
    const SegmentedGroup& group,
    const AliasInfoMap& aliases,

@@ -72,15 +97,99 @@ void lowerSegment(
      }
    } break;
    case SchedulerType::ExprEval: {
      // push back segment's exprs into the container as top level
      // expressions
      for (auto* e : group.stablyOrderedExprs()) {
      // Pseudocode:
      // clang-format off
      // ```
      // clone all expressions and store the copies to a list
      // if no expressions are stream parallelized:
      //   append the list to the top level
      //   return
      // for each non-input TensorView:
      //   if it needs an out-of-loop allocation:
      //     create an Allocate and append it to the top level
      // create a new, empty for loop
      // for each cloned expression:
      //   for each input or output TensorView of that expression:
      //     shard it by stream if it's allocated outside the loop
      //   add the cloned expression to the loop body with the maybe-sharded inputs and outputs
      // ```
      // clang-format on
      std::vector<Expr*> cloned_exprs;
      cloned_exprs.reserve(group.exprs().size());
      for (Expr* e : group.stablyOrderedExprs()) {
        auto* e_clone = ir_cloner.clone(e);
        recomputeOutputTvs(e, ir_cloner);
        hic.pushBackTopLevelExprs(e_clone);
        cloned_exprs.push_back(e_clone);
      }

      std::vector<Val*> cloned_outs = ir_cloner.clone(group.outputs());
      // All expressions in the group are expected to be stream parallelized in
Member:

Do we enforce this constraint? If so, is there an assertion somewhere?

Collaborator (Author):

We don't, but we should. I'm waiting for an isResharding-like method to do that easily.
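Until an isResharding-like helper exists, a weaker guard along the following lines could at least catch segments whose expressions disagree on whether they are stream parallelized. This only checks the presence of a front stream IterDomain, not that the IterDomains are actually mapped, so treat it as a placeholder sketch:

```cpp
// Hypothetical placeholder assertion: all expressions in a segment should
// agree on whether their outputs carry a stream IterDomain in the loop domain.
void checkConsistentStreamParallelization(const std::vector<Expr*>& exprs) {
  std::optional<bool> has_stream;
  for (Expr* e : exprs) {
    for (auto* out : ir_utils::filterByType<TensorView>(e->outputs())) {
      const bool out_has_stream = (findStreamIterDomain(out) != nullptr);
      if (!has_stream.has_value()) {
        has_stream = out_has_stream;
        continue;
      }
      NVF_ERROR(
          *has_stream == out_has_stream,
          "Expressions in the same segment disagree on stream parallelization: ",
          e->toString());
    }
  }
}
```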
      // the same way. So it's safe to find the stream IterDomain from any of
      // them. Ideally, loop domains should be tied to expressions not
      // TensorViews.
      IterDomain* stream_id = findStreamIterDomain(cloned_outs);
      if (stream_id == nullptr) {
        for (Expr* e : cloned_exprs) {
          hic.pushBackTopLevelExprs(e);
        }
      } else {
        for (Expr* e : cloned_exprs) {
          for (auto* out : ir_utils::filterByType<TensorView>(e->outputs())) {
            if (getShardedIterDomain(out, ParallelType::Stream) == nullptr) {
              auto* allocate =
                  IrBuilder::create<kir::Allocate>(out, MemoryType::Global);
              hic.pushBackTopLevelExprs(allocate);
            }
          }
        }

        auto* stream_index = IrBuilder::create<Val>(DataType::Index);
        auto* for_loop =
            hir::ForLoop::createFromIterDomain(stream_index, stream_id);
        hic.pushBackTopLevelExprs(for_loop);

        std::unordered_map<Val*, Val*> replacement_map;
        for (Expr* e : cloned_exprs) {
          for (auto ins_or_out :
               {ir_utils::filterByType<TensorView>(e->inputs()),
                ir_utils::filterByType<TensorView>(e->outputs())}) {
            for (auto* tv : ins_or_out) {
              if (replacement_map.count(tv) > 0) {
                continue;
              }
              if (findStreamIterDomain(tv) != nullptr &&
                  getShardedIterDomain(tv, ParallelType::Stream) == nullptr) {
                // Loop is stream parallelized but allocation is not.
                TensorView* sharded_tv = hir::shardByStream(tv, stream_index);
                for_loop->body().push_back(sharded_tv->definition());
                replacement_map[tv] = sharded_tv;
              }
            }
          }

          std::vector<Val*> new_inputs;
          std::transform(
              e->inputs().begin(),
              e->inputs().end(),
              std::back_inserter(new_inputs),
              [&replacement_map](Val* input) {
                return getOrDefault(replacement_map, input, input);
              });
          std::vector<Val*> new_outputs;
          std::transform(
              e->outputs().begin(),
              e->outputs().end(),
              std::back_inserter(new_outputs),
              [&replacement_map](Val* output) {
                return getOrDefault(replacement_map, output, output);
              });
          Expr* new_e = e->newObjectFunc()(
              e->container(), new_inputs, new_outputs, e->attributes());
          for_loop->body().push_back(new_e);
        }
      }
    } break;
    default:
    default: {
      const int group_id = group.groupId();

      // Copy the input/output TensorViews to the container.

@@ -123,6 +232,7 @@ void lowerSegment(
          cloned_outs,
          cache_id);
      hic.pushBackTopLevelExprs(launch_kernel);
    }
  }
}
} // namespace
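To make the ExprEval branch above concrete, here is a standalone ATen analogue (not nvFuser host IR; the shapes and the use of at::matmul_out are assumptions) of what the lowering builds for a stream-parallelized matmul: the whole output is allocated outside the loop, and each iteration works on per-stream slices.

```cpp
#include <ATen/ATen.h>

int main() {
  const int64_t num_streams = 4;
  at::Tensor a = at::rand({num_streams, 16, 32});
  at::Tensor b = at::rand({32, 8});
  // Analogue of the out-of-loop kir::Allocate: the full output buffer.
  at::Tensor out = at::empty({num_streams, 16, 8});
  // Analogue of hir::ForLoop over the stream IterDomain.
  for (int64_t stream_index = 0; stream_index < num_streams; ++stream_index) {
    // Analogue of hir::shardByStream: per-iteration views of the
    // stream-parallelized tensors.
    at::Tensor a_s = a.select(0, stream_index);
    at::Tensor out_s = out.select(0, stream_index);
    // The cloned expression, re-created with the maybe-sharded inputs/outputs.
    at::matmul_out(out_s, a_s, b);
  }
  return 0;
}
```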
@@ -8,6 +8,8 @@
#pragma once

#include <vector>

#include <c10/core/Device.h>

namespace nvfuser {
This will be resolved using #5316?

No. It's one of the cases where `out`'s contiguity ought to be different from `in` due to the slicing effect.

Oh okay, got it! So in such cases the replay may in fact overwrite a correct contiguity, as most users of selfReplay create the new TensorDomain using the ops API, which sets the contiguity correctly. This is something we should consider for #5316.
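For illustration, a small standalone ATen snippet (not from the PR) of the slicing effect mentioned above: selecting an index along a non-outermost dimension of a contiguous allocation yields a non-contiguous view, which is why `out`'s contiguity cannot simply be copied from `in`.

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor in = at::rand({4, 6, 8}); // a contiguous allocation
  // Slice along dim 1, e.g. the stream-sharded axis in this illustration.
  at::Tensor out = in.select(/*dim=*/1, /*index=*/0);
  std::cout << in.is_contiguous() << "\n";  // 1
  // Shape {4, 8} with strides {48, 1}: no longer contiguous.
  std::cout << out.is_contiguous() << "\n"; // 0
  return 0;
}
```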