diff --git a/.clang-tidy b/.clang-tidy
index 98cfdf21cea70b..2ed67098e2a025 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -13,7 +13,7 @@ bugprone-exception-escape,
 -bugprone-forwarding-reference-overload,
 bugprone-inaccurate-erase,
 bugprone-incorrect-roundings,
--bugprone-infinite-loop,
+bugprone-infinite-loop,
 bugprone-integer-division,
 -bugprone-macro-repeated-side-effects,
 -bugprone-misplaced-operator-in-strlen-in-alloc,
@@ -28,7 +28,7 @@ bugprone-signed-char-misuse,
 -bugprone-sizeof-container,
 -bugprone-sizeof-expression,
 -bugprone-string-constructor,
--bugprone-string-integer-assignment,
+bugprone-string-integer-assignment,
 -bugprone-string-literal-with-embedded-nul,
 -bugprone-suspicious-enum-usage,
 -bugprone-suspicious-memset-usage,
diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc
index 2d713d392fe4f5..67f2eae2be5e68 100644
--- a/paddle/fluid/framework/ir/graph_helper.cc
+++ b/paddle/fluid/framework/ir/graph_helper.cc
@@ -297,21 +297,19 @@ std::vector<Node *> TopologyDfsSortOperations(const Graph &graph) {
 
   // traverse the graph
   int num_ops = static_cast<int>(op_queue.size());
-  while (num_ops) {
-    for (auto cur_op : op_queue) {
-      if (!cur_op || in_degree[cur_op] > 0) continue;
-      // visit this node
-      // put all the output var of this op valid.
-      for (auto *out_var : cur_op->outputs) {
-        if (!out_var) continue;
-        set_out_ops_ready(out_var);
-      }
-      VLOG(8) << "visit " << cur_op->Name();
-      nodes.push_back(cur_op);
-
-      cur_op = nullptr;
-      num_ops--;
-    }
+  for (auto cur_op : op_queue) {
+    if (!cur_op || in_degree[cur_op] > 0) continue;
+    // visit this node
+    // put all the output var of this op valid.
+    for (auto *out_var : cur_op->outputs) {
+      if (!out_var) continue;
+      set_out_ops_ready(out_var);
+    }
+    VLOG(8) << "visit " << cur_op->Name();
+    nodes.push_back(cur_op);
+
+    cur_op = nullptr;
+    num_ops--;
   }
 
   return nodes;
diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
index f6cd03489c73d7..d8d8e583e8f8eb 100644
--- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
@@ -281,7 +281,7 @@ std::string GenerateEngineKey(const std::set<std::string> &engine_inputs,
   engine_hash_key += precision;
 
   engine_hash_key += "#";
-  engine_hash_key += use_cuda_graph;
+  engine_hash_key += std::to_string(use_cuda_graph);
 
   auto engine_key = std::to_string(std::hash<std::string>()(engine_hash_key));
   VLOG(2) << "TRT engine hash key: " << engine_hash_key;
diff --git a/paddle/phi/infermeta/ternary.cc b/paddle/phi/infermeta/ternary.cc
index 10807b7a3a87a9..5915d81b9a0997 100644
--- a/paddle/phi/infermeta/ternary.cc
+++ b/paddle/phi/infermeta/ternary.cc
@@ -1588,7 +1588,8 @@ void QuantLinearInferMeta(const MetaTensor& x,
           in_mat_dims,
           w_dims0,
           common::make_ddim({w_dims0, w_dims1})));
-  output_dims.reserve(static_cast<size_t>(in_num_col_dims + 1));
+  output_dims.reserve(static_cast<size_t>(in_num_col_dims) +
+                      static_cast<size_t>(1));
   for (int i = 0; i < in_num_col_dims; ++i) {
     output_dims.push_back(in_dims[i]);
   }
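
A minimal standalone sketch (illustrative only, not Paddle code) of the bug class behind the tensorrt_subgraph_pass.cc hunk, which the newly enabled bugprone-string-integer-assignment check flags: appending an integral value to a std::string with operator+= converts it to a single char, so a raw byte is appended rather than its decimal text. The use_cuda_graph flag is assumed to be a bool here, matching the std::to_string() fix.

#include <iostream>
#include <string>

int main() {
  // Hypothetical stand-in for the flag hashed into the TRT engine key;
  // assumed to be a bool, as the std::to_string() change suggests.
  bool use_cuda_graph = true;

  std::string broken_key = "trt#";
  // What the check flags: the bool is implicitly converted to char,
  // so the byte 0x01 is appended, not the text "1".
  broken_key += use_cuda_graph;

  std::string fixed_key = "trt#";
  // The patched form appends the decimal representation "1".
  fixed_key += std::to_string(use_cuda_graph);

  std::cout << static_cast<int>(broken_key.back()) << '\n';  // 1  (control byte)
  std::cout << static_cast<int>(fixed_key.back()) << '\n';   // 49 ('1')
  return 0;
}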