Commit
[Semi Auto] Refactor Replicated Rule (PaddlePaddle#56839)

* adapt general spmd rule
* polish details
* add new rules
* bugfix for set_partial
* bugfix
* unit tests
* adapt arguments for tensor and vector of tensor

Co-authored-by: Chen Weihang <chenweihang@baidu.com>
Showing 15 changed files with 1,207 additions and 25 deletions.
paddle/phi/infermeta/spmd_rules/default_data_parallel.cc (164 additions, 0 deletions)
@@ -0,0 +1,164 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/infermeta/spmd_rules/default_data_parallel.h"

#include "glog/logging.h"

#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/inferspmd_utils.h"
#include "paddle/phi/core/distributed/auto_parallel/utils.h"

namespace phi {
namespace distributed {

using phi::distributed::auto_parallel::str_join;

////////////////// Utils Functions //////////////////
std::vector<int64_t> GetDefaultDataParallelDimsmapping(
    const int64_t batch_axis_dim, const int ndim) {
  std::vector<int64_t> dims_mapping(ndim, -1);
  dims_mapping[0] = batch_axis_dim;
  return dims_mapping;
}
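// Example: with batch_axis_dim = 0 and ndim = 3 this helper returns
// {0, -1, -1}, i.e. the batch axis is sharded along mesh dimension 0 and
// every other axis is replicated.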

////////////////// InferMeta(Contains SPMD) Functions //////////////////

SpmdInfo DefaultDataParallelSpmdInferForward(
    const std::vector<const DistMetaTensor*>& ins,
    const std::vector<const DistMetaTensor*>& outs) {
  // Step1: Build the einsum notation for every input tensor's batch axis.
  int64_t ninputs = ins.size();
  int64_t noutputs = outs.size();
  std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
  std::string batch_axis = "b";

  for (int64_t i = 0; i < ninputs; ++i) {
    axes_sharding_info.push_back(
        {batch_axis, {ins[i]->dist_attr().dims_mapping()[0]}});
  }

  // Step2: Merge the batch-axis sharding across all inputs.
  std::unordered_map<std::string, int64_t> axis_to_dim_map =
      ShardingMergeForTensors(axes_sharding_info);
  int64_t batch_axis_dim = axis_to_dim_map[batch_axis];
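  // Example: if every input's batch axis is mapped to mesh dimension 0, the
  // merged batch_axis_dim is 0; if every input is replicated (-1), it stays -1.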

  // Step3: Infer each output's batch-axis dims mapping.
  std::vector<TensorDistAttr> output_dist_attrs;
  for (int64_t i = 0; i < noutputs; i++) {
    int ndim = outs[i]->dims().size();
    TensorDistAttr dist_attr_dst =
        CopyTensorDistAttrForOutput(ins[0]->dist_attr());
    std::vector<int64_t> dst_dims_mapping =
        GetDefaultDataParallelDimsmapping(batch_axis_dim, ndim);
    dist_attr_dst.set_dims_mapping(dst_dims_mapping);
    output_dist_attrs.emplace_back(dist_attr_dst);
  }

  // Step4: Infer each input's new batch-axis dims mapping from the merged axis.
  std::vector<TensorDistAttr> dst_input_dist_attrs;
  for (int64_t i = 0; i < ninputs; i++) {
    int ndim = ins[i]->dims().size();
    TensorDistAttr dist_attr_dst =
        CopyTensorDistAttrForOutput(ins[i]->dist_attr());
    std::vector<int64_t> dst_dims_mapping =
        GetDefaultDataParallelDimsmapping(batch_axis_dim, ndim);
    dist_attr_dst.set_dims_mapping(dst_dims_mapping);
    dst_input_dist_attrs.emplace_back(dist_attr_dst);
  }

  VLOG(4) << "DefaultDataParallelSpmd InferForward:";
  for (int64_t i = 0; i < ninputs; i++) {
    VLOG(4) << "Input" << std::to_string(i) << " shape: ["
            << str_join(phi::vectorize(ins[i]->dims())) << "] "
            << "src_dims_mapping: ["
            << str_join(ins[i]->dist_attr().dims_mapping()) << "] "
            << "dst_dims_mapping: ["
            << str_join(dst_input_dist_attrs[i].dims_mapping()) << "]";
  }

  for (int64_t i = 0; i < noutputs; i++) {
    VLOG(4) << "Output" << std::to_string(i) << " shape: ["
            << str_join(phi::vectorize(outs[i]->dims())) << "] "
            << "dst_dims_mapping: ["
            << str_join(output_dist_attrs[i].dims_mapping()) << "]";
  }

  return {dst_input_dist_attrs, output_dist_attrs};
}
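// Example for the forward rule: a single 3-D input with dims_mapping
// {0, -1, -1} and one 2-D output produce an output dims_mapping of {0, -1};
// the input keeps {0, -1, -1}.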

SpmdInfo DefaultDataParallelSpmdInferBackward(
    const std::vector<const DistMetaTensor*>& ins,
    const std::vector<const DistMetaTensor*>& outs) {
  // Step1: Build the einsum notation for every output tensor's batch axis.
  int64_t ninputs = ins.size();
  int64_t noutputs = outs.size();
  std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
  std::string batch_axis = "b";

  for (int64_t i = 0; i < noutputs; ++i) {
    axes_sharding_info.push_back(
        {batch_axis, {outs[i]->dist_attr().dims_mapping()[0]}});
  }

  // Step2: Merge the batch-axis sharding across all outputs.
  std::unordered_map<std::string, int64_t> axis_to_dim_map =
      ShardingMergeForTensors(axes_sharding_info);
  int64_t batch_axis_dim = axis_to_dim_map[batch_axis];

  // Step3: Infer each output's batch-axis dims mapping.
  std::vector<TensorDistAttr> output_dist_attrs;
  for (int64_t i = 0; i < noutputs; i++) {
    int ndim = outs[i]->dims().size();
    TensorDistAttr dist_attr_dst =
        CopyTensorDistAttrForOutput(outs[i]->dist_attr());
    std::vector<int64_t> dst_dims_mapping =
        GetDefaultDataParallelDimsmapping(batch_axis_dim, ndim);
    dist_attr_dst.set_dims_mapping(dst_dims_mapping);
    output_dist_attrs.emplace_back(dist_attr_dst);
  }

  // Step4: Infer each input's new batch-axis dims mapping from the merged axis.
  std::vector<TensorDistAttr> dst_input_dist_attrs;
  for (int64_t i = 0; i < ninputs; i++) {
    int ndim = ins[i]->dims().size();
    TensorDistAttr dist_attr_dst =
        CopyTensorDistAttrForOutput(ins[i]->dist_attr());
    std::vector<int64_t> dst_dims_mapping =
        GetDefaultDataParallelDimsmapping(batch_axis_dim, ndim);
    dist_attr_dst.set_dims_mapping(dst_dims_mapping);
    dst_input_dist_attrs.emplace_back(dist_attr_dst);
  }

  VLOG(4) << "DefaultDataParallelSpmd InferBackward:";
  for (int64_t i = 0; i < noutputs; i++) {
    VLOG(4) << "Output" << std::to_string(i) << " shape: ["
            << str_join(phi::vectorize(outs[i]->dims())) << "] "
            << "src_dims_mapping: ["
            << str_join(outs[i]->dist_attr().dims_mapping()) << "] "
            << "dst_dims_mapping: ["
            << str_join(output_dist_attrs[i].dims_mapping()) << "]";
  }

  for (int64_t i = 0; i < ninputs; i++) {
    VLOG(4) << "Input" << std::to_string(i) << " shape: ["
            << str_join(phi::vectorize(ins[i]->dims())) << "] "
            << "dst_dims_mapping: ["
            << str_join(dst_input_dist_attrs[i].dims_mapping()) << "]";
  }

  return {dst_input_dist_attrs, output_dist_attrs};
}
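// Example for the backward rule: a single 2-D output with dims_mapping
// {0, -1} merges to batch_axis_dim = 0, so a 3-D input receives
// dims_mapping {0, -1, -1}.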

}  // namespace distributed
}  // namespace phi
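The core behaviour of the rule is easiest to see in isolation. The snippet below is a minimal, self-contained sketch of the batch-axis merge and the default data-parallel mapping; the helper names are illustrative only, and it approximates rather than uses Paddle's ShardingMergeForTensors and DistMetaTensor machinery.

#include <cstdint>
#include <iostream>
#include <vector>

// Merge the batch-axis mesh dimension across tensors: a sharded value (>= 0)
// wins over replicated (-1). This only approximates the intent of the merge
// step for the single-axis case handled by the rule above.
int64_t MergeBatchAxis(const std::vector<int64_t>& batch_dims) {
  int64_t merged = -1;
  for (int64_t d : batch_dims) {
    if (d != -1) merged = d;
  }
  return merged;
}

// Default data-parallel mapping: batch axis sharded, all other axes replicated.
std::vector<int64_t> DataParallelDimsMapping(int64_t batch_axis_dim, int ndim) {
  std::vector<int64_t> dims_mapping(ndim, -1);
  dims_mapping[0] = batch_axis_dim;
  return dims_mapping;
}

int main() {
  // One tensor sharded on mesh dim 0 along its batch axis, one replicated.
  int64_t merged = MergeBatchAxis({0, -1});
  for (int64_t d : DataParallelDimsMapping(merged, 3)) {
    std::cout << d << ' ';  // prints: 0 -1 -1
  }
  std::cout << '\n';
  return 0;
}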