[clang-tidy] NO.15 enable performance-inefficient-vector-operation (#…
enkilee authored Feb 20, 2024
1 parent fec05e5 commit d84616c
Showing 17 changed files with 28 additions and 1 deletion.
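
For context: the performance-inefficient-vector-operation check flags loops that append to a std::vector element by element without first reserving its capacity, so each file below gains a reserve() call sized to the container being copied. A minimal before/after sketch of the pattern (illustrative names only, not code from this commit):

#include <string>
#include <vector>

// Flagged by performance-inefficient-vector-operation: the vector may
// reallocate several times while it grows inside the loop.
std::vector<std::string> CollectSlow(const std::vector<std::string>& src) {
  std::vector<std::string> out;
  for (const auto& s : src) {
    out.push_back(s);
  }
  return out;
}

// Suggested fix: the final size is known up front, so reserve it once and
// avoid the intermediate reallocations.
std::vector<std::string> CollectFast(const std::vector<std::string>& src) {
  std::vector<std::string> out;
  out.reserve(src.size());
  for (const auto& s : src) {
    out.push_back(s);
  }
  return out;
}

reserve() only allocates capacity and does not change size(), so the push_back/emplace_back loops themselves are unchanged throughout this diff.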
1 change: 1 addition & 0 deletions paddle/fluid/framework/new_executor/interpreter/plan.cc
@@ -68,6 +68,7 @@ const std::vector<std::shared_ptr<Job>>& Plan::JobList() const {

const std::vector<std::string> Plan::JobTypes() const {
std::vector<std::string> res;
res.reserve(type_to_ir_program_.size());
for (auto kv : type_to_ir_program_) {
res.emplace_back(kv.first);
}
2 changes: 2 additions & 0 deletions paddle/fluid/jit/function_schema.cc
@@ -28,6 +28,7 @@ const std::string& Argument::Name() const { return name_; }

const std::vector<std::string> FunctionSchema::InputArgNames() const {
std::vector<std::string> input_arg_names;
input_arg_names.reserve(input_args.size());
for (auto& arg : input_args) {
input_arg_names.emplace_back(arg.Name());
}
@@ -36,6 +37,7 @@ const std::vector<std::string> FunctionSchema::InputArgNames() const {

const std::vector<std::string> FunctionSchema::OutputArgNames() const {
std::vector<std::string> output_arg_names;
output_arg_names.reserve(output_args.size());
for (auto& arg : output_args) {
output_arg_names.emplace_back(arg.Name());
}
6 changes: 6 additions & 0 deletions paddle/fluid/pir/dialect/operator/utils/utils.cc
@@ -188,41 +188,47 @@ static std::unordered_map<

if (element_type == AttrType::BOOL) {
std::vector<bool> vec_bools;
vec_bools.reserve(attr_vec.size());
for (auto vec_element : attr_vec) {
vec_bools.push_back(
vec_element.dyn_cast<pir::BoolAttribute>().data());
}
return VariantType{vec_bools};
} else if (element_type == AttrType::INT32) {
std::vector<int> vec_int32;
vec_int32.reserve(attr_vec.size());
for (auto vec_element : attr_vec) {
vec_int32.push_back(
vec_element.dyn_cast<pir::Int32Attribute>().data());
}
return VariantType{vec_int32};
} else if (element_type == AttrType::INT64) {
std::vector<int64_t> vec_int64;
vec_int64.reserve(attr_vec.size());
for (auto vec_element : attr_vec) {
vec_int64.push_back(
vec_element.dyn_cast<pir::Int64Attribute>().data());
}
return VariantType{vec_int64};
} else if (element_type == AttrType::FLOAT) {
std::vector<float> vec_float;
vec_float.reserve(attr_vec.size());
for (auto vec_element : attr_vec) {
vec_float.push_back(
vec_element.dyn_cast<pir::FloatAttribute>().data());
}
return VariantType{vec_float};
} else if (element_type == AttrType::DOUBLE) {
std::vector<double> vec_double;
vec_double.reserve(attr_vec.size());
for (auto vec_element : attr_vec) {
vec_double.push_back(
vec_element.dyn_cast<pir::DoubleAttribute>().data());
}
return VariantType{vec_double};
} else if (element_type == AttrType::STRING) {
std::vector<std::string> vec_string;
vec_string.reserve(attr_vec.size());
for (auto vec_element : attr_vec) {
vec_string.push_back(
vec_element.dyn_cast<pir::StrAttribute>().AsString());
1 change: 1 addition & 0 deletions paddle/fluid/pir/drr/src/rewrite_pattern.cc
@@ -75,6 +75,7 @@ bool DrrRewritePattern::PatternGraphMatch(
return false;
}
std::vector<const OpCall*> drr_output_sequence;
drr_output_sequence.reserve(bind_map.size());
std::vector<pir::Operation*> ir_output_sequence;
std::unordered_map<const OpCall*, pir::Operation*> output_op_map;
for (const auto& pair : bind_map) {
1 change: 1 addition & 0 deletions paddle/phi/backends/device_manager.cc
@@ -323,6 +323,7 @@ std::vector<std::string> DeviceManager::GetAllDeviceTypes() {
phi::AutoRDLock lock(&_global_device_manager_rw_lock);
auto& dev_impl_map = Instance().device_impl_map_;
std::vector<std::string> devices;
devices.reserve(dev_impl_map.size());
for (const auto& map_item : dev_impl_map) {
devices.push_back(map_item.first);
}
2 changes: 2 additions & 0 deletions paddle/phi/core/distributed/auto_parallel/dist_mapper.cc
@@ -26,6 +26,7 @@ void DistributedMapper::set_process_id_to_device_ids(
const std::map<int64_t, std::pair<std::string, std::vector<int64_t>>>&
process_id_to_device_ids) {
std::vector<std::string> device_mesh_names;
device_mesh_names.reserve(device_meshes_.size());
for (const auto& item : device_meshes_) {
device_mesh_names.push_back(item.first);
}
@@ -83,6 +84,7 @@ DistributedMapper DistributedMapper::from_proto(
proto.process_id_to_device_ids(i).device_mesh_name();
std::vector<int64_t> device_ids;
int num_devices = proto.process_id_to_device_ids(i).device_ids_size();
device_ids.reserve(num_devices);
for (int j = 0; j < num_devices; ++j) {
device_ids.push_back(proto.process_id_to_device_ids(i).device_ids(j));
}
@@ -76,6 +76,7 @@ void ReshardSToRWithPadding(DeviceContext* dev_ctx,

// Concat the result after split on correct axis.
std::vector<const DenseTensor*> concat_input_vec;
concat_input_vec.reserve(split_out_vec.size());
for (const auto& tensor : split_out_vec) {
concat_input_vec.emplace_back(&tensor);
}
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/spmd_rules/utils.cc
@@ -407,7 +407,7 @@ void AlignDimsSharding(std::vector<TensorDistAttr>* input_attrs_ptr,
for (auto pair : partial_dim_to_type) {
placements[pair.first] = std::make_shared<PartialStatus>(pair.second);
}
- new_input_attrs.emplace_back(FromPlacements(e, placements));
+ new_input_attrs.emplace_back(FromPlacements(e, placements)); // NOLINT
}
std::swap(input_attrs, new_input_attrs);
}
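
The // NOLINT added above suppresses clang-tidy diagnostics for that single line, presumably where a reserve-before-loop rewrite was not wanted. For illustration only (not code from this commit), the suppression can also name the specific check so that other checks still apply:

#include <vector>

std::vector<int> MakeValues() {
  std::vector<int> values;
  for (int i = 0; i < 3; ++i) {
    // Suppress only this check on this line; other checks still run.
    values.push_back(i);  // NOLINT(performance-inefficient-vector-operation)
  }
  return values;
}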
1 change: 1 addition & 0 deletions paddle/phi/kernels/cpu/overlap_add_grad_kernel.cc
@@ -137,6 +137,7 @@ void OverlapAddGradKernel(const Context& dev_ctx,
// Restore output dims when the number of dims is larger than 2.
if (out_grad_rank > 2) {
std::vector<int64_t> restored_x_grad_shape;
restored_x_grad_shape.reserve(preserved_dims.size());
for (int i = 0; i < preserved_dims.size(); i++) {
restored_x_grad_shape.push_back(preserved_dims[i]);
}
1 change: 1 addition & 0 deletions paddle/phi/kernels/cpu/overlap_add_kernel.cc
@@ -127,6 +127,7 @@ void OverlapAddKernel(const Context& dev_ctx,
// Restore output dims when the number of dims is larger than 2.
if (out_rank > 2) {
std::vector<int64_t> restored_out_shape;
restored_out_shape.reserve(preserved_dims.size());
for (int i = 0; i < preserved_dims.size(); i++) {
restored_out_shape.push_back(preserved_dims[i]);
}
1 change: 1 addition & 0 deletions paddle/phi/kernels/funcs/jit/benchmark.cc
@@ -113,6 +113,7 @@ void BenchAllImpls(const typename KernelTuple::attr_type& attr, Args... args) {
BenchFunc<KernelTuple, Args...> benchmark;
std::vector<std::pair<std::string, double>> infos;
auto funcs = jit::GetAllCandidateFuncsWithTypes<KernelTuple, PlaceType>(attr);
infos.reserve(funcs.size());
for (auto const& f : funcs) {
infos.push_back(std::make_pair(f.first, benchmark(f.second, args...)));
}
1 change: 1 addition & 0 deletions paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc
@@ -82,6 +82,7 @@ void SumCooGradCPUKernel(const Context& dev_ctx,
std::map<std::vector<IntT>, int64_t> indices_map;
for (auto j = 0; j < dout_indices.dims()[1]; ++j) {
std::vector<IntT> pos;
pos.reserve(dout_indices.dims()[0]);
for (int i = 0; i < dout_indices.dims()[0]; ++i) {
pos.push_back(dout_indices_data[j + i * dout_indices.dims()[1]]);
}
1 change: 1 addition & 0 deletions paddle/phi/kernels/stride/split_kernel.cc
@@ -56,6 +56,7 @@ void SplitWithNumStridedKernel(const Context& dev_ctx,
int axis_value = axis_scalar.to<int>();
auto input_axis_dim = x.dims().at(axis_value);
std::vector<int64_t> sections_vec;
sections_vec.reserve(num);
for (int i = 0; i < num; ++i) {
sections_vec.push_back(input_axis_dim / num);
}
1 change: 1 addition & 0 deletions paddle/testing/paddle_gtest_main.cc
@@ -26,6 +26,7 @@ int main(int argc, char** argv) { // NOLINT
paddle::memory::allocation::UseAllocatorStrategyGFlag();
testing::InitGoogleTest(&argc, argv);
std::vector<char*> new_argv;
new_argv.reserve(argc);
for (int i = 0; i < argc; ++i) {
new_argv.push_back(argv[i]);
}
1 change: 1 addition & 0 deletions test/cpp/fluid/framework/attribute_test.cc
@@ -296,6 +296,7 @@ TEST(Attribute, ProtoAttrToAttribute_scalars) {
proto_attr_scalars.set_type(paddle::framework::proto::SCALARS);

std::vector<paddle::experimental::Scalar> scalars;
scalars.reserve(10);
for (int i = 0; i < 10; i++) {
scalars.emplace_back(i);
}
1 change: 1 addition & 0 deletions test/cpp/fluid/framework/op_desc_test.cc
@@ -29,6 +29,7 @@ TEST(OpDesc, SetScalarsAttr) {
paddle::experimental::Scalar scalar(std::complex<double>(42.1, 42.1));

std::vector<paddle::experimental::Scalar> scalars;
scalars.reserve(4);
for (int i = 0; i < 4; i++) {
scalars.emplace_back(i);
}
5 changes: 5 additions & 0 deletions test/cpp/phi/kernels/test_fused_adam_kernel.cc
@@ -71,6 +71,7 @@ auto GenerateConstantTensorVectors(

static auto ToConstTensorPtrVector(const std::vector<DenseTensor> &tensors) {
std::vector<const DenseTensor *> results;
results.reserve(tensors.size());
for (const auto &t : tensors) {
results.push_back(&t);
}
@@ -80,6 +81,7 @@ static auto ToConstTensorPtrVector(const std::vector<DenseTensor> &tensors) {
static auto ToMutableTensorPtrVector(
std::vector<DenseTensor> &tensors) { // NOLINT
std::vector<DenseTensor *> results;
results.reserve(tensors.size());
for (auto &t : tensors) {
results.push_back(&t);
}
@@ -88,6 +90,7 @@ static auto ToMutableTensorPtrVector(

static auto ToMetaTensorVector(const std::vector<DenseTensor> &tensors) {
std::vector<MetaTensor> results;
results.reserve(tensors.size());
for (auto &t : tensors) {
results.emplace_back(t);
}
@@ -97,6 +100,7 @@ static auto ToMetaTensorVector(const std::vector<DenseTensor> &tensors) {
static auto ToConstMetaTensorPtrVector(
const std::vector<MetaTensor> &meta_tensors) {
std::vector<const MetaTensor *> results;
results.reserve(meta_tensors.size());
for (auto &t : meta_tensors) {
results.push_back(&t);
}
@@ -106,6 +110,7 @@ static auto ToConstMetaTensorPtrVector(
static auto ToMutableMetaTensorPtrVector(
std::vector<MetaTensor> &meta_tensors) { // NOLINT
std::vector<MetaTensor *> results;
results.reserve(meta_tensors.size());
for (auto &t : meta_tensors) {
results.push_back(&t);
}
