【Hackathon 6th Fundable Projects 2 No.23】 modernize-use-emplace (PaddlePaddle#63847)

* 4.24

* 4.24_2
ApricityXX authored and co63oc committed May 6, 2024
1 parent cb35a06 commit 89d5d3f
Showing 11 changed files with 17 additions and 24 deletions.
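
The commit applies clang-tidy's modernize-use-emplace check. A call such as push_back(std::make_pair(a, b)) or push_back(T(...)) first constructs a temporary object and then copies or moves it into the container, whereas emplace_back(a, b) forwards the arguments so the element is constructed in place. A minimal sketch of the two patterns this commit touches (the names are illustrative, not taken from the diff):

#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, int>> info;

  // Before: make_pair builds a temporary pair that is then moved into the vector.
  info.push_back(std::make_pair(std::string("axis"), 0));

  // After: the arguments are forwarded and the pair is constructed in place.
  info.emplace_back("axis", 0);

  // Default-constructed elements follow the same rule:
  std::vector<std::vector<int>> rows;
  rows.emplace_back();  // replaces rows.push_back({});
  return 0;
}

One caveat worth keeping in mind when reviewing such changes: emplace_back performs direct-initialization, so it can also invoke explicit constructors that push_back would reject. The replacements below are all argument-for-argument equivalents of the removed temporaries.
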
2 changes: 1 addition & 1 deletion paddle/fluid/pir/drr/src/rewrite_pattern.cc
@@ -544,7 +544,7 @@ MatchContextImpl DrrRewritePattern::CreateOperations(
size_t new_max_input_op_index = max_input_op_index + 1;
op_2_temp_program_index[new_op] = new_max_input_op_index;
if (new_max_input_op_index >= temp_program.size()) {
- temp_program.push_back({});
+ temp_program.emplace_back();
}
temp_program[new_max_input_op_index].push_back(new_op);
});
@@ -244,8 +244,8 @@ std::vector<ProcessMesh> GetSubMeshes(const ProcessMesh& process_mesh) {
std::vector<int64_t> sub_process_ids(process_ids.begin() + start_position,
process_ids.begin() + end_position);

- sub_process_meshes.emplace_back(ProcessMesh(
-     sub_process_mesh_shape, sub_process_ids, sub_process_mesh_dim_names));
+ sub_process_meshes.emplace_back(
+     sub_process_mesh_shape, sub_process_ids, sub_process_mesh_dim_names);
}
return sub_process_meshes;
}
@@ -80,10 +80,8 @@ void SameStatusReshardFunction::Eval(phi::DeviceContext* dev_ctx,

std::vector<std::pair<int64_t, int64_t>> p2p_pair;
for (size_t i = 0; i < out_process_ids.size(); ++i) {
- p2p_pair.emplace_back(
-     std::make_pair(in_process_ids[i], out_process_ids[i]));
+ p2p_pair.emplace_back(in_process_ids[i], out_process_ids[i]);
}

int64_t cur_global_rank = GetCurGlobalRank();
for (const auto& iter : p2p_pair) {
int64_t src = iter.first;
7 changes: 3 additions & 4 deletions paddle/phi/infermeta/spmd_rules/layer_norm.cc
@@ -241,10 +241,9 @@ SpmdInfo LayerNormInferSpmdReverse(const DistMetaTensor& x,
// tensor's dims mapping the same as output tensor's dims mapping.
// step2.1 merge dims mappings of output, mean, variance.
std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
- axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping));
- axes_sharding_info.emplace_back(std::make_pair(mean_axes, mean_dims_mapping));
- axes_sharding_info.emplace_back(
-     std::make_pair(variance_axes, variance_dims_mapping));
+ axes_sharding_info.emplace_back(out_axes, out_dims_mapping);
+ axes_sharding_info.emplace_back(mean_axes, mean_dims_mapping);
+ axes_sharding_info.emplace_back(variance_axes, variance_dims_mapping);
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors(axes_sharding_info);

6 changes: 2 additions & 4 deletions paddle/phi/infermeta/spmd_rules/rms_norm.cc
@@ -101,10 +101,8 @@ SpmdInfo RmsNormInferSpmdReverse(const DistMetaTensor& x,
std::string scale_axes(1, x_axes[x_ndim - 1]);

std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
- axes_sharding_info.emplace_back(
-     std::make_pair(out_axes, out_dims_mapping_src));
- axes_sharding_info.emplace_back(
-     std::make_pair(variance_axes, invvar_dims_mapping_src));
+ axes_sharding_info.emplace_back(out_axes, out_dims_mapping_src);
+ axes_sharding_info.emplace_back(variance_axes, invvar_dims_mapping_src);
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors(axes_sharding_info);

8 changes: 3 additions & 5 deletions paddle/phi/infermeta/spmd_rules/slice.cc
@@ -175,7 +175,7 @@ SpmdInfo SliceInferSpmdReverseBase(const DistMetaTensor& input,
// Step2.1: merge output shardings
std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
std::vector<int64_t> out_dims_mapping = output.dist_attr().dims_mapping();
- axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping));
+ axes_sharding_info.emplace_back(out_axes, out_dims_mapping);

std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors(axes_sharding_info);
@@ -303,10 +303,8 @@ SpmdInfo SliceGradInferBase(const DistMetaTensor& input,
// Step2: Sharding Propagation
// Step2.1: merge input shardings
std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
- axes_sharding_info.emplace_back(
-     std::make_pair(out_axes, out_dist_attr.dims_mapping()));
- axes_sharding_info.emplace_back(
-     std::make_pair(input_axes, input_dist_attr.dims_mapping()));
+ axes_sharding_info.emplace_back(out_axes, out_dist_attr.dims_mapping());
+ axes_sharding_info.emplace_back(input_axes, input_dist_attr.dims_mapping());
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors(axes_sharding_info);

2 changes: 1 addition & 1 deletion paddle/phi/infermeta/spmd_rules/split.cc
@@ -159,7 +159,7 @@ SpmdInfo SplitWithNumInferSpmdReverse(
std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
for (int i = 0; i < nouts; i++) {
std::vector<int64_t> out_dims_mapping = outs[i]->dist_attr().dims_mapping();
- axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping));
+ axes_sharding_info.emplace_back(out_axes, out_dims_mapping);
}
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors(axes_sharding_info);
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/spmd_rules/unbind.cc
@@ -127,7 +127,7 @@ SpmdInfo UnbindInferSpmdReverse(const DistMetaTensor& x,
std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info;
for (int i = 0; i < nouts; i++) {
std::vector<int64_t> out_dims_mapping = outs[i]->dist_attr().dims_mapping();
- axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping));
+ axes_sharding_info.emplace_back(out_axes, out_dims_mapping);
}
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors(axes_sharding_info);
2 changes: 1 addition & 1 deletion paddle/pir/src/core/parser/ir_parser.cc
@@ -252,7 +252,7 @@ std::vector<std::string> IrParser::ParseValueList() {
Token index_token = ConsumeToken();
while (index_token.val_ != ")") {
if (index_token.token_type_ == NULL_) {
value_index.push_back("null");
value_index.emplace_back("null");
} else {
std::string str = index_token.val_;
value_index.push_back(str);
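
This hunk is the string variant of the check: push_back("null") materializes a temporary std::string from the literal before moving it in, while emplace_back("null") constructs the string directly inside the vector. The neighboring push_back(str) is correctly left unchanged, since an existing lvalue gets copied either way. A minimal sketch (the demo function is hypothetical; the names mirror the parser code):

#include <string>
#include <vector>

void demo() {
  std::vector<std::string> value_index;
  value_index.emplace_back("null");  // std::string constructed in place from the literal
  std::string str = "some value";
  value_index.push_back(str);  // copying an lvalue: emplace_back would gain nothing
}
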
@@ -284,7 +284,7 @@ static inline bool IsStaticShape(const Value& value) {
symbol::ShapeOrDataDimExprs CreateShapeOrDataByDDim(const pir::DDim& dims) {
std::vector<symbol::DimExpr> dim_exprs;
for (int i = 0; i < dims.size(); ++i) {
- dim_exprs.emplace_back(symbol::DimExpr{dims.at(i)});
+ dim_exprs.emplace_back(dims.at(i));
}
return symbol::TensorShapeOrDataDimExprs{dim_exprs};
}
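
Here the explicit temporary symbol::DimExpr{dims.at(i)} is dropped and the raw int64_t dimension is forwarded to DimExpr's constructor instead. A sketch of the same shape with a stand-in type (Expr is hypothetical; symbol::DimExpr's real interface is not shown in this diff):

#include <cstdint>
#include <vector>

// Stand-in for a DimExpr-like wrapper constructible from an integer dimension.
struct Expr {
  explicit Expr(int64_t d) : value(d) {}
  int64_t value;
};

void demo(const std::vector<int64_t>& dims) {
  std::vector<Expr> exprs;
  for (int64_t d : dims) {
    // Before: emplace_back(Expr{d}) builds a temporary Expr, then moves it.
    // After: the int64_t is forwarded and Expr is constructed in place.
    // Note that emplace_back can call the constructor even though it is explicit.
    exprs.emplace_back(d);
  }
}
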
2 changes: 1 addition & 1 deletion paddle/testing/paddle_gtest_main.cc
@@ -35,7 +35,7 @@ int main(int argc, char** argv) { // NOLINT
#if defined(PADDLE_WITH_DISTRIBUTE) && !defined(PADDLE_WITH_PSLIB)
if (paddle::flags::FindFlag("max_body_size")) {
setenv("FLAGS_max_body_size", "2147483647", 1);
- envs.push_back("max_body_size");
+ envs.emplace_back("max_body_size");
}
#endif

