diff --git a/paddle/fluid/pir/drr/src/rewrite_pattern.cc b/paddle/fluid/pir/drr/src/rewrite_pattern.cc index f0e32c9689b12..7752881b76697 100644 --- a/paddle/fluid/pir/drr/src/rewrite_pattern.cc +++ b/paddle/fluid/pir/drr/src/rewrite_pattern.cc @@ -544,7 +544,7 @@ MatchContextImpl DrrRewritePattern::CreateOperations( size_t new_max_input_op_index = max_input_op_index + 1; op_2_temp_program_index[new_op] = new_max_input_op_index; if (new_max_input_op_index >= temp_program.size()) { - temp_program.push_back({}); + temp_program.emplace_back(); } temp_program[new_max_input_op_index].push_back(new_op); }); diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/reshard_utils.cc b/paddle/phi/core/distributed/auto_parallel/reshard/reshard_utils.cc index 73a367fac273d..01fbaf99c3c15 100644 --- a/paddle/phi/core/distributed/auto_parallel/reshard/reshard_utils.cc +++ b/paddle/phi/core/distributed/auto_parallel/reshard/reshard_utils.cc @@ -244,8 +244,8 @@ std::vector<ProcessMesh> GetSubMeshes(const ProcessMesh& process_mesh) { std::vector<int64_t> sub_process_ids(process_ids.begin() + start_position, process_ids.begin() + end_position); - sub_process_meshes.emplace_back(ProcessMesh( - sub_process_mesh_shape, sub_process_ids, sub_process_mesh_dim_names)); + sub_process_meshes.emplace_back( + sub_process_mesh_shape, sub_process_ids, sub_process_mesh_dim_names); } return sub_process_meshes; } diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc b/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc index 0a86275203b51..6452fe9d06929 100644 --- a/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc +++ b/paddle/phi/core/distributed/auto_parallel/reshard/same_status_reshard_function.cc @@ -80,10 +80,8 @@ void SameStatusReshardFunction::Eval(phi::DeviceContext* dev_ctx, std::vector<std::pair<int64_t, int64_t>> p2p_pair; for (size_t i = 0; i < out_process_ids.size(); ++i) { - p2p_pair.emplace_back( - 
std::make_pair(in_process_ids[i], out_process_ids[i])); + p2p_pair.emplace_back(in_process_ids[i], out_process_ids[i]); } - int64_t cur_global_rank = GetCurGlobalRank(); for (const auto& iter : p2p_pair) { int64_t src = iter.first; diff --git a/paddle/phi/infermeta/spmd_rules/layer_norm.cc b/paddle/phi/infermeta/spmd_rules/layer_norm.cc index 6ea65d106bc71..8949e39119a4e 100644 --- a/paddle/phi/infermeta/spmd_rules/layer_norm.cc +++ b/paddle/phi/infermeta/spmd_rules/layer_norm.cc @@ -241,10 +241,9 @@ SpmdInfo LayerNormInferSpmdReverse(const DistMetaTensor& x, // tensor's dims mapping the same as output tensor's dims mapping. // step2.1 merge dims mappings of output, mean, variance. std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info; - axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping)); - axes_sharding_info.emplace_back(std::make_pair(mean_axes, mean_dims_mapping)); - axes_sharding_info.emplace_back( - std::make_pair(variance_axes, variance_dims_mapping)); + axes_sharding_info.emplace_back(out_axes, out_dims_mapping); + axes_sharding_info.emplace_back(mean_axes, mean_dims_mapping); + axes_sharding_info.emplace_back(variance_axes, variance_dims_mapping); std::unordered_map<std::string, int64_t> axis_to_dim_map = ShardingMergeForTensors(axes_sharding_info); diff --git a/paddle/phi/infermeta/spmd_rules/rms_norm.cc b/paddle/phi/infermeta/spmd_rules/rms_norm.cc index 3a96e9b9ff3e7..7d4a670e05fcd 100644 --- a/paddle/phi/infermeta/spmd_rules/rms_norm.cc +++ b/paddle/phi/infermeta/spmd_rules/rms_norm.cc @@ -101,10 +101,8 @@ SpmdInfo RmsNormInferSpmdReverse(const DistMetaTensor& x, std::string scale_axes(1, x_axes[x_ndim - 1]); std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info; - axes_sharding_info.emplace_back( - std::make_pair(out_axes, out_dims_mapping_src)); - axes_sharding_info.emplace_back( - std::make_pair(variance_axes, invvar_dims_mapping_src)); + axes_sharding_info.emplace_back(out_axes, out_dims_mapping_src); + axes_sharding_info.emplace_back(variance_axes, invvar_dims_mapping_src); 
std::unordered_map<std::string, int64_t> axis_to_dim_map = ShardingMergeForTensors(axes_sharding_info); diff --git a/paddle/phi/infermeta/spmd_rules/slice.cc b/paddle/phi/infermeta/spmd_rules/slice.cc index 9daed3ce8c764..cde458df747e2 100644 --- a/paddle/phi/infermeta/spmd_rules/slice.cc +++ b/paddle/phi/infermeta/spmd_rules/slice.cc @@ -175,7 +175,7 @@ SpmdInfo SliceInferSpmdReverseBase(const DistMetaTensor& input, // Step2.1: merge output shardings std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info; std::vector<int64_t> out_dims_mapping = output.dist_attr().dims_mapping(); - axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping)); + axes_sharding_info.emplace_back(out_axes, out_dims_mapping); std::unordered_map<std::string, int64_t> axis_to_dim_map = ShardingMergeForTensors(axes_sharding_info); @@ -303,10 +303,8 @@ SpmdInfo SliceGradInferBase(const DistMetaTensor& input, // Step2: Sharding Propagation // Step2.1: merge input shardings std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info; - axes_sharding_info.emplace_back( - std::make_pair(out_axes, out_dist_attr.dims_mapping())); - axes_sharding_info.emplace_back( - std::make_pair(input_axes, input_dist_attr.dims_mapping())); + axes_sharding_info.emplace_back(out_axes, out_dist_attr.dims_mapping()); + axes_sharding_info.emplace_back(input_axes, input_dist_attr.dims_mapping()); std::unordered_map<std::string, int64_t> axis_to_dim_map = ShardingMergeForTensors(axes_sharding_info); diff --git a/paddle/phi/infermeta/spmd_rules/split.cc b/paddle/phi/infermeta/spmd_rules/split.cc index 1765aa9c40535..c9b98c1abe88d 100644 --- a/paddle/phi/infermeta/spmd_rules/split.cc +++ b/paddle/phi/infermeta/spmd_rules/split.cc @@ -159,7 +159,7 @@ SpmdInfo SplitWithNumInferSpmdReverse( std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info; for (int i = 0; i < nouts; i++) { std::vector<int64_t> out_dims_mapping = outs[i]->dist_attr().dims_mapping(); - axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping)); + axes_sharding_info.emplace_back(out_axes, out_dims_mapping); } std::unordered_map<std::string, int64_t> axis_to_dim_map = 
ShardingMergeForTensors(axes_sharding_info); diff --git a/paddle/phi/infermeta/spmd_rules/unbind.cc b/paddle/phi/infermeta/spmd_rules/unbind.cc index 0e869aad2674d..79634e8076771 100644 --- a/paddle/phi/infermeta/spmd_rules/unbind.cc +++ b/paddle/phi/infermeta/spmd_rules/unbind.cc @@ -127,7 +127,7 @@ SpmdInfo UnbindInferSpmdReverse(const DistMetaTensor& x, std::vector<std::pair<std::string, std::vector<int64_t>>> axes_sharding_info; for (int i = 0; i < nouts; i++) { std::vector<int64_t> out_dims_mapping = outs[i]->dist_attr().dims_mapping(); - axes_sharding_info.emplace_back(std::make_pair(out_axes, out_dims_mapping)); + axes_sharding_info.emplace_back(out_axes, out_dims_mapping); } std::unordered_map<std::string, int64_t> axis_to_dim_map = ShardingMergeForTensors(axes_sharding_info); diff --git a/paddle/pir/src/core/parser/ir_parser.cc b/paddle/pir/src/core/parser/ir_parser.cc index 866da48051a12..1085beeeae983 100644 --- a/paddle/pir/src/core/parser/ir_parser.cc +++ b/paddle/pir/src/core/parser/ir_parser.cc @@ -252,7 +252,7 @@ std::vector<std::string> IrParser::ParseValueList() { Token index_token = ConsumeToken(); while (index_token.val_ != ")") { if (index_token.token_type_ == NULL_) { - value_index.push_back("null"); + value_index.emplace_back("null"); } else { std::string str = index_token.val_; value_index.push_back(str); diff --git a/paddle/pir/src/dialect/shape/transforms/shape_optimization_pass.cc b/paddle/pir/src/dialect/shape/transforms/shape_optimization_pass.cc index 84ac6f57ab72a..a1ca2ba4a5416 100644 --- a/paddle/pir/src/dialect/shape/transforms/shape_optimization_pass.cc +++ b/paddle/pir/src/dialect/shape/transforms/shape_optimization_pass.cc @@ -284,7 +284,7 @@ static inline bool IsStaticShape(const Value& value) { symbol::ShapeOrDataDimExprs CreateShapeOrDataByDDim(const pir::DDim& dims) { std::vector<symbol::DimExpr> dim_exprs; for (int i = 0; i < dims.size(); ++i) { - dim_exprs.emplace_back(symbol::DimExpr{dims.at(i)}); + dim_exprs.emplace_back(dims.at(i)); } return symbol::TensorShapeOrDataDimExprs{dim_exprs}; } diff --git 
a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index 03de6b055fe7e..18e055831a406 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -35,7 +35,7 @@ int main(int argc, char** argv) { // NOLINT #if defined(PADDLE_WITH_DISTRIBUTE) && !defined(PADDLE_WITH_PSLIB) if (paddle::flags::FindFlag("max_body_size")) { setenv("FLAGS_max_body_size", "2147483647", 1); - envs.push_back("max_body_size"); + envs.emplace_back("max_body_size"); } #endif