[clang-tidy] NO.23 bugprone-branch-clone (PaddlePaddle#57522)
* clangtidyNo23

* fix

* fix
enkilee authored and iosmers committed Sep 21, 2023
1 parent 48754b7 commit 85c07c1
Showing 47 changed files with 79 additions and 142 deletions.
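For context, the bugprone-branch-clone check flags if/else-if chains (and switch statements) in which two or more branches have identical bodies. The diffs below resolve the findings in a few recurring ways: merging conditions with ||, collapsing or deleting branches whose bodies were exact duplicates, or adding // NOLINT where the duplication is intentional (for example, the bodies only coincide in certain build configurations or template instantiations). A minimal, hypothetical sketch (none of it from the Paddle tree) of the warning and the two main remediations:

    // Flagged: both branches return the same value.
    int classify(int x, bool flag) {
      if (x > 0) {
        return 1;
      } else if (flag) {
        return 1;
      }
      return 0;
    }

    // Fix 1: merge the conditions; behavior is unchanged.
    int classify_merged(int x, bool flag) {
      if (x > 0 || flag) {
        return 1;
      }
      return 0;
    }

    // Fix 2: keep the clone on purpose and silence the check.
    int classify_kept(int x, bool flag) {
      if (x > 0) {  // NOLINT(bugprone-branch-clone)
        return 1;   // kept separate deliberately (e.g. bodies differ per build config)
      } else if (flag) {
        return 1;
      }
      return 0;
    }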
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -4,7 +4,7 @@ bugprone-argument-comment,
-bugprone-assert-side-effect,
-bugprone-bad-signal-to-kill-thread,
-bugprone-bool-pointer-implicit-conversion,
-bugprone-branch-clone,
bugprone-branch-clone,
bugprone-copy-constructor-init,
-bugprone-dangling-handle,
-bugprone-dynamic-static-initializers,
@@ -51,7 +51,7 @@ ccl::CCLComm GetCCLComm(const Place& place, int global_gid) {
#else
return nullptr;
#endif
} else if (place.GetType() == phi::AllocationType::CUSTOM) {
} else if (place.GetType() == phi::AllocationType::CUSTOM) { // NOLINT
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
return static_cast<paddle::distributed::ProcessGroupCustom*>(pg)->XCCLComm(
place);
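The // NOLINT here is a suppression rather than a merge: when PADDLE_WITH_CUSTOM_DEVICE (and the communication-library macros in the branches above) are not defined, every branch collapses to the same return nullptr;, which is what the check flags, yet the branches genuinely differ in other build configurations. The same reasoning appears to apply to the preprocessor-guarded NOLINTs in fetch_op_handle.cc, tensor_util.cc, and memcpy.cc below. A hedged sketch of the pattern, with hypothetical macro and function names:

    void* make_foo_handle();  // assumed helpers, defined elsewhere
    void* make_bar_handle();

    void* get_handle(int kind) {
      if (kind == 1) {  // NOLINT(bugprone-branch-clone)
    #if defined(WITH_FOO)
        return make_foo_handle();
    #else
        return nullptr;  // without WITH_FOO this body matches the next branch
    #endif
      } else if (kind == 2) {
    #if defined(WITH_BAR)
        return make_bar_handle();
    #else
        return nullptr;
    #endif
      }
      return nullptr;
    }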
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/fetch_op_handle.cc
@@ -120,7 +120,7 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
static void TransData(const phi::DenseTensor &src_item,
phi::DenseTensor *dst_item) {
if (src_item.IsInitialized() && src_item.numel() > 0) {
if (platform::is_gpu_place(src_item.place())) {
if (platform::is_gpu_place(src_item.place())) { // NOLINT
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
TensorCopy(src_item, platform::CPUPlace(), dst_item);
#endif
5 changes: 2 additions & 3 deletions paddle/fluid/framework/downpour_worker.cc
@@ -362,9 +362,8 @@ void DownpourWorker::CopySparseTable() {
if (src_table == dest_table) {
continue;
} else if (!copy_table_config_.sparse_copy_by_feasign()) {
if (feasign_set_.find(src_table) == feasign_set_.end()) {
continue;
} else if (feasign_set_[src_table].empty()) {
if (feasign_set_.find(src_table) == feasign_set_.end() ||
feasign_set_[src_table].empty()) {
continue;
}
feanum = fleet_ptr_->CopyTable(src_table, dest_table);
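Where the cloned bodies are genuinely interchangeable (here both branches just continue), the commit merges the conditions instead. Note that || short-circuits, so feasign_set_[src_table] is still only evaluated after the find() succeeded, matching the original nested tests and avoiding operator[] inserting a default entry. A small sketch of the merged shape, with hypothetical names:

    #include <map>
    #include <set>

    void skip_empty_tables(const std::map<int, std::set<int>>& feasign_set,
                           const std::set<int>& tables) {
      for (int table : tables) {
        auto it = feasign_set.find(table);
        // One condition instead of two branches with identical `continue;` bodies.
        if (it == feasign_set.end() || it->second.empty()) {
          continue;  // nothing to copy for this table
        }
        // ... the copy work would go here ...
      }
    }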
2 changes: 1 addition & 1 deletion paddle/fluid/framework/executor_cache.cc
@@ -47,7 +47,7 @@ static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
execution_strategy.num_threads_ = 2;
break;
}
case platform::DeviceType::CUDA: {
case platform::DeviceType::CUDA: { // NOLINT
// NOTE: According experiments, one thread is faster in
// most model training.
execution_strategy.num_threads_ = 1;
9 changes: 4 additions & 5 deletions paddle/fluid/framework/io/fs.cc
@@ -399,13 +399,12 @@ void hdfs_mv(const std::string& src, const std::string& dest) {
}

int fs_select_internal(const std::string& path) {
if (fs_begin_with_internal(path, "hdfs:")) {
return 1;
} else if (fs_begin_with_internal(path, "afs:")) {
if (fs_begin_with_internal(path, "hdfs:") ||
fs_begin_with_internal(path, "afs:")) {
return 1;
} else {
return 0;
}

return 0;
}

std::shared_ptr<FILE> fs_open_read(const std::string& path,
4 changes: 1 addition & 3 deletions paddle/fluid/framework/ir/constant_folding_pass.cc
@@ -81,9 +81,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
std::unordered_map<std::string, int> map;
for (auto in_node : op_node->inputs) {
map[in_node->Name()] = 0;
if (!in_node->Var()->Persistable()) {
input_persis = false;
} else if (!in_node->inputs.empty()) {
if (!in_node->Var()->Persistable() || !in_node->inputs.empty()) {
input_persis = false;
}
}
5 changes: 2 additions & 3 deletions paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
@@ -400,9 +400,8 @@ void QuantDequantMkldnnPass::RemoveFakeOps(

if (fake_quantize_types.count(op_node->Name())) {
CollectFakeQuantizeOps(graph, op_node, &nodes2rm);
} else if (fake_dequantize_types.count(op_node->Name())) {
CollectFakeDequantizeOps(graph, op_node, &nodes2rm);
} else if (fake_quantize_dequantize_types.count(op_node->Name())) {
} else if (fake_dequantize_types.count(op_node->Name()) ||
fake_quantize_dequantize_types.count(op_node->Name())) {
CollectFakeDequantizeOps(graph, op_node, &nodes2rm);
} else if (onnx_format_quantize_dequantize_types.count(op_node->Name())) {
CollectQuantizeDequantizeOpsFromONNXFormat(graph, op_node, &nodes2rm);
@@ -88,9 +88,10 @@ void InterpreterCoreEventGarbageCollector::Add(

if (var->IsType<phi::DenseTensor>()) {
Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder(), event, ctx);
} else if (var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
} else if (
var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { // NOLINT
// TODO(xiongkun03) in old executor, this type of variable is not support
// eager deletion. so we just leave it here ?
} else if (var->IsType<LoDRankTable>()) {
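Here the cloned branches are intentionally empty: they exist only to document that this variable type is deliberately skipped by the garbage collector (see the TODO). Merging them away would lose that documentation, so the commit keeps the structure and silences the check; the fast garbage collector and both RecordStreamForGC functions below get the same treatment. A hypothetical sketch of an intentionally empty branch kept under NOLINT:

    void do_release();  // assumed helper

    void collect_example(int kind) {
      if (kind == 0) {
        do_release();
      } else if (kind == 1) {  // NOLINT(bugprone-branch-clone)
        // intentionally empty: this kind is not eagerly released yet (TODO)
      } else if (kind == 2) {
        // also intentionally empty; kept separate to document the cases
      }
    }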
@@ -34,9 +34,10 @@ void InterpreterCoreFastGarbageCollector::Add(Variable* var) {

if (var->IsType<phi::DenseTensor>()) {
Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder());
} else if (var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
} else if (
var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { // NOLINT
// TODO(xiongkun03) in old executor, this type of variable is not support
// eager deletion. so we just leave it here ?
} else if (var->IsType<LoDRankTable>()) {
@@ -267,10 +267,8 @@ phi::TensorBase* GetTensorFormVar(framework::Variable* var) {
return var->template GetMutable<phi::TensorArray>();
} else if (var->template IsType<framework::Strings>()) {
return var->template GetMutable<framework::Strings>();
} else if (var->template IsType<paddle::framework::RawTensor>()) {
return var->template GetMutable<paddle::framework::RawTensor>();
} else if (!var->IsInitialized()) {
// The following is for RAW type of var
} else if (var->template IsType<paddle::framework::RawTensor>() ||
!var->IsInitialized()) {
return var->template GetMutable<paddle::framework::RawTensor>();
} else {
PADDLE_THROW(platform::errors::Unimplemented(
7 changes: 4 additions & 3 deletions paddle/fluid/framework/new_executor/new_ir_interpreter.cc
@@ -758,9 +758,10 @@ void NewIRInterpreter::RecordStreamForGC(InstructionBase* instr) {

if (var->IsType<phi::DenseTensor>()) {
TensorRecordStream(*(var->GetMutable<phi::DenseTensor>()));
} else if (var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
} else if (
var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { // NOLINT
// do nothing
} else if (var->IsType<phi::SelectedRows>()) {
TensorRecordStream(
7 changes: 4 additions & 3 deletions paddle/fluid/framework/new_executor/program_interpreter.cc
@@ -1292,9 +1292,10 @@ void ProgramInterpreter::RecordStreamForGC(const Instruction& instr) {

if (var->IsType<phi::DenseTensor>()) {
TensorRecordStream(*(var->GetMutable<phi::DenseTensor>()));
} else if (var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
} else if (
var->IsType<
operators::reader::
OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { // NOLINT
// do nothing
} else if (var->IsType<phi::SelectedRows>()) {
TensorRecordStream(
9 changes: 2 additions & 7 deletions paddle/fluid/framework/operator.cc
@@ -2777,8 +2777,6 @@ void OperatorWithKernel::ParseInputDataType(
const phi::DenseTensor* t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::SelectedRows>()) {
t = &(var->Get<phi::SelectedRows>().value());
} else if (var->IsType<phi::SparseCooTensor>()) {
@@ -3221,11 +3219,8 @@ void OperatorWithKernel::BuildPhiKernelContext(
} else if (var->template IsType<framework::Strings>()) {
tensor_out = var->template GetMutable<framework::Strings>();
phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
} else if (var->template IsType<paddle::framework::RawTensor>()) {
tensor_out = var->template GetMutable<paddle::framework::RawTensor>();
phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
} else if (!var->IsInitialized()) {
// The following is for RAW type of var
} else if (var->template IsType<paddle::framework::RawTensor>() ||
!var->IsInitialized()) {
tensor_out = var->template GetMutable<paddle::framework::RawTensor>();
phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
} else {
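Two different clone shapes are fixed in operator.cc. The first hunk deletes an else-if whose condition repeats the var->IsType<phi::DenseTensor>() test immediately above it, so its body could never be reached; removing it cannot change behavior, and the same dead duplicate is dropped from batch_norm_op.cc, data_norm_op.cc, the fused BN ops, and inplace_abn_op.cc below. The second hunk merges the RawTensor and !IsInitialized() branches because both already produced the same GetMutable<RawTensor>() output. A minimal sketch of the unreachable-duplicate case:

    // Hypothetical example: the second test repeats the first condition,
    // so its branch is unreachable; deleting it is pure cleanup.
    int describe(int x) {
      if (x == 1) {
        return 10;
      } else if (x == 1) {  // never taken: exact duplicate of the test above
        return 10;
      } else if (x == 2) {
        return 20;
      }
      return 0;
    }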
10 changes: 4 additions & 6 deletions paddle/fluid/framework/parallel_executor.cc
@@ -693,7 +693,7 @@ ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,

// broadcast parameters from the 0th device to others:
auto need_broadcast = [&]() -> bool {
if (member_->build_strategy_.num_trainers_ > 1) {
if (member_->build_strategy_.num_trainers_ > 1) { // NOLINT
// 1. num_tariners would be grater than 1 for nccl distributed training.
return true;
} else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
@@ -936,11 +936,9 @@ void ParallelExecutor::BCastParamsToDevices(
auto share_memory = [&] { t->ShareDataWith(main_tensor); };

// FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
if (member_->build_strategy_.async_mode_) {
share_memory();
} else if (member_->use_all_reduce_ ||
member_->IsUseCUDA(member_->use_device_) ||
var == "@LR_DECAY_COUNTER@") {
if (member_->use_all_reduce_ ||
member_->IsUseCUDA(member_->use_device_) ||
var == "@LR_DECAY_COUNTER@") {
copy_memory();
} else {
share_memory();
6 changes: 4 additions & 2 deletions paddle/fluid/framework/tensor_util.cc
@@ -78,7 +78,8 @@ void TensorCopyImpl(const TENSOR& src,
auto size = src.numel() * phi::SizeOf(src.dtype());
#endif

if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
if (platform::is_cpu_place(src_place) &&
platform::is_cpu_place(dst_place)) { // NOLINT
memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
}
#ifdef PADDLE_WITH_CUSTOM_DEVICE
@@ -327,7 +328,8 @@ void TensorCopySync(const phi::DenseTensor& src,
return;
}
auto size = src.numel() * phi::SizeOf(src.dtype());
if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
if (platform::is_cpu_place(src_place) &&
platform::is_cpu_place(dst_place)) { // NOLINT
memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
}
#ifdef PADDLE_WITH_CUSTOM_DEVICE
7 changes: 2 additions & 5 deletions paddle/fluid/framework/var_desc.cc
@@ -386,11 +386,8 @@ struct SetVarAttrDescVisitor {
template <typename T>
void operator()(T &&v) {
using U = std::decay_t<decltype(v)>;
if (std::is_same<U, int>::value) {
set_attr_value(v);
} else if (std::is_same<U, std::string>::value) {
set_attr_value(v);
} else if (std::is_same<U, std::vector<int>>::value) {
if (std::is_same<U, int>::value || std::is_same<U, std::string>::value ||
std::is_same<U, std::vector<int>>::value) {
set_attr_value(v);
} else {
PADDLE_THROW(platform::errors::Unavailable(
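These type-trait branches are compile-time dispatch, but within any single instantiation of operator() they all execute the same set_attr_value(v) call, so the check reports them as clones; OR-ing the traits keeps the semantics for every instantiation. A sketch of the merged shape, with hypothetical names:

    #include <string>
    #include <type_traits>
    #include <vector>

    template <typename T>
    void set_value(const T&) { /* hypothetical sink, stands in for set_attr_value */ }

    template <typename T>
    void visit(T&& v) {
      using U = std::decay_t<decltype(v)>;
      // Within one instantiation every original branch made the same call,
      // so the traits can be OR-ed without changing behavior.
      if (std::is_same<U, int>::value || std::is_same<U, std::string>::value ||
          std::is_same<U, std::vector<int>>::value) {
        set_value(v);
      }
      // else: unsupported type; the real visitor throws here.
    }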
4 changes: 2 additions & 2 deletions paddle/fluid/inference/api/analysis_predictor.cc
@@ -2006,7 +2006,7 @@ std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
static_cast<void *>(scope), this->GetDeviceContexts()));
res->input_or_output_ = true;
res->SetName(name);
if (platform::is_cpu_place(place_)) {
if (platform::is_cpu_place(place_)) { // NOLINT
res->SetPlace(PaddlePlace::kCPU);
} else if (platform::is_ipu_place(place_)) {
// Currently, IPUPlace's tensor copy between cpu and ipu has been set in
@@ -2057,7 +2057,7 @@ std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
static_cast<void *>(scope), this->GetDeviceContexts()));
res->input_or_output_ = false;
res->SetName(name);
if (platform::is_cpu_place(place_)) {
if (platform::is_cpu_place(place_)) { // NOLINT
res->SetPlace(PaddlePlace::kCPU);
} else if (platform::is_ipu_place(place_)) {
// Currently, IPUPlace's tensor copy between cpu and ipu has been set in
2 changes: 1 addition & 1 deletion paddle/fluid/memory/memcpy.cc
@@ -743,7 +743,7 @@ void Copy<phi::Place, phi::Place>(phi::Place dst_place,
VLOG(4) << "memory::Copy " << num << " Bytes from " << src_place << " to "
<< dst_place;
if (src_place.GetType() == phi::AllocationType::CPU &&
dst_place.GetType() == phi::AllocationType::CPU) {
dst_place.GetType() == phi::AllocationType::CPU) { // NOLINT
std::memcpy(dst, src, num);
}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
4 changes: 0 additions & 4 deletions paddle/fluid/operators/batch_norm_op.cc
@@ -386,8 +386,6 @@ phi::KernelKey BatchNormGradOp::GetExpectedKernelType(
const phi::DenseTensor *t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
}
if (t == nullptr) {
PADDLE_THROW(
@@ -530,8 +528,6 @@ phi::KernelKey BatchNormDoubleGradOp::GetExpectedKernelType(
const phi::DenseTensor *t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
}
if (t == nullptr) {
PADDLE_THROW(
2 changes: 0 additions & 2 deletions paddle/fluid/operators/data_norm_op.cc
@@ -495,8 +495,6 @@ class DataNormGradOp : public framework::OperatorWithKernel {
const phi::DenseTensor *t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
}
if (t == nullptr) {
PADDLE_THROW(platform::errors::InvalidArgument(
15 changes: 2 additions & 13 deletions paddle/fluid/operators/detection/multiclass_nms_op.cc
@@ -101,11 +101,7 @@ class MultiClassNMSOp : public framework::OperatorWithKernel {
}
// Here the box_dims[0] is not the real dimension of output.
// It will be rewritten in the computing kernel.
if (score_size == 3) {
ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
} else {
ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
}
ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
if (!ctx->IsRuntime()) {
ctx->SetLoDLevel("Out", std::max(ctx->GetLoDLevel("BBoxes"), 1));
}
@@ -584,14 +580,7 @@ class MultiClassNMS2Op : public MultiClassNMSOp {

void InferShape(framework::InferShapeContext* ctx) const override {
MultiClassNMSOp::InferShape(ctx);

auto score_dims = ctx->GetInputDim("Scores");
auto score_size = score_dims.size();
if (score_size == 3) {
ctx->SetOutputDim("Index", {-1, 1});
} else {
ctx->SetOutputDim("Index", {-1, 1});
}
ctx->SetOutputDim("Index", {-1, 1});
if (!ctx->IsRuntime()) {
ctx->SetLoDLevel("Index", std::max(ctx->GetLoDLevel("BBoxes"), 1));
}
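In both hunks of this file the then and else bodies set exactly the same output shape, so the branch on score_size carried no information; the commit keeps a single SetOutputDim call and, in the second hunk, also drops the score_dims/score_size locals that only existed to feed the redundant test. A sketch of the reduction, with hypothetical names:

    struct Shape { int rows; int cols; };

    // Before:
    //   if (score_rank == 3) return {-1, box_width + 2};
    //   else                 return {-1, box_width + 2};
    // After: identical bodies make the condition irrelevant.
    Shape infer_out_shape(int box_width, int /*score_rank*/) {
      return {-1, box_width + 2};
    }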
2 changes: 0 additions & 2 deletions paddle/fluid/operators/fused/fused_bn_activation_op.cc
@@ -303,8 +303,6 @@ phi::KernelKey FusedBatchNormActGradOp::GetExpectedKernelType(
const phi::DenseTensor *t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
}
if (t == nullptr) {
PADDLE_THROW(
2 changes: 0 additions & 2 deletions paddle/fluid/operators/fused/fused_bn_add_activation_op.cc
@@ -267,8 +267,6 @@ phi::KernelKey FusedBatchNormAddActGradOp::GetExpectedKernelType(
const phi::DenseTensor *t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
}
if (t == nullptr) {
PADDLE_THROW(
@@ -248,7 +248,7 @@ class FusionGRUMKLDNNKernel : public framework::OpKernel<T> {
const bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

// BF16 does not support force output
if (!is_bf16 && force_fp32_output) {
if (!is_bf16 && force_fp32_output) { // NOLINT
RunKernel<float>(ctx);
} else {
RunKernel<T>(ctx);
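This // NOLINT (and the matching ones in the LSTM and multi-GRU kernels below) covers an instantiation-dependent clone: for T = float, RunKernel<float>(ctx) and RunKernel<T>(ctx) are the same call and the two branches become identical, although they differ for every other T. The multi_gru kernel's layer < layers - 1 branch is presumably the same situation with reorderOutputR2LtoL2R<T> versus <Tout>. A hedged sketch of the pattern, with hypothetical names:

    template <typename U>
    void run_kernel() { /* hypothetical kernel dispatch */ }

    template <typename T>
    void compute(bool force_fp32) {
      if (force_fp32) {  // NOLINT(bugprone-branch-clone)
        run_kernel<float>();
      } else {
        run_kernel<T>();  // identical to run_kernel<float>() when T is float
      }
    }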
@@ -329,7 +329,7 @@ class FusionLSTMMKLDNNKernel : public framework::OpKernel<T> {
const bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

// BF16 does not support force output
if (!is_bf16 && force_fp32_output) {
if (!is_bf16 && force_fp32_output) { // NOLINT
RunKernel<float>(ctx);
} else {
RunKernel<T>(ctx);
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc
@@ -688,7 +688,7 @@ class MultiGRUMKLDNNKernel : public framework::OpKernel<T> {
const bool force_fp32_output =
ctx.HasAttr("force_fp32_output") && ctx.Attr<bool>("force_fp32_output");

if (force_fp32_output) {
if (force_fp32_output) { // NOLINT
RunKernel<float>(ctx);
} else {
RunKernel<T>(ctx);
@@ -706,7 +706,7 @@ class MultiGRUMKLDNNKernel : public framework::OpKernel<T> {
auto gru_out_L2R = handler.executeSingleGru(input_mem, layer, L2R);
handler.reorderInputL2RtoR2L(input_mem, layer);
auto gru_out_R2L = handler.executeSingleGru(input_mem, layer, R2L);
if (layer < layers - 1)
if (layer < layers - 1) // NOLINT
handler.template reorderOutputR2LtoL2R<T>(gru_out_R2L, layer);
else
handler.template reorderOutputR2LtoL2R<Tout>(gru_out_R2L, layer);
2 changes: 0 additions & 2 deletions paddle/fluid/operators/inplace_abn_op.cc
Expand Up @@ -284,8 +284,6 @@ class InplaceABNGradOp : public framework::OperatorWithKernel {
const phi::DenseTensor* t = nullptr;
if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
} else if (var->IsType<phi::DenseTensor>()) {
t = &var->Get<phi::DenseTensor>();
}
if (t == nullptr) {
PADDLE_THROW(
2 changes: 1 addition & 1 deletion paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
Expand Up @@ -359,7 +359,7 @@ class FCMKLDNNKernel : public framework::OpKernel<T_in> {
bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";

IF_CHANGE_FC_TW_TYPENAME((std::is_same<T_in, uint8_t>::value), ([&] {
if (force_fp32_output) {
if (force_fp32_output) { // NOLINT
this->RunKernel<float, T_w>(ctx);
} else if (phi::funcs::is_int8<T_in>()) {
if (fuse_relu) {