From 3a0011262ab5024581b6d86bc37c8837847867d8 Mon Sep 17 00:00:00 2001
From: enkilee
Date: Wed, 20 Sep 2023 03:05:18 +0000
Subject: [PATCH 1/3] clangtidyNo23

---
 .clang-tidy | 2 +-
 .../collective/processgroup_comm_utils.cc | 2 +-
 paddle/fluid/framework/details/fetch_op_handle.cc | 2 +-
 paddle/fluid/framework/downpour_worker.cc | 5 ++---
 paddle/fluid/framework/executor_cache.cc | 2 +-
 paddle/fluid/framework/io/fs.cc | 9 ++++-----
 paddle/fluid/framework/ir/constant_folding_pass.cc | 4 +---
 .../ir/mkldnn/quant_dequant_mkldnn_pass.cc | 5 ++---
 .../garbage_collector/event_garbage_collector.cc | 7 ++++---
 .../garbage_collector/fast_garbage_collector.cc | 7 ++++---
 .../new_executor/interpreter/static_build.cc | 6 ++----
 .../framework/new_executor/new_ir_interpreter.cc | 7 ++++---
 .../framework/new_executor/program_interpreter.cc | 7 ++++---
 paddle/fluid/framework/operator.cc | 9 ++-------
 paddle/fluid/framework/parallel_executor.cc | 10 ++++------
 paddle/fluid/framework/tensor_util.cc | 6 ++++--
 paddle/fluid/framework/var_desc.cc | 7 ++-----
 paddle/fluid/inference/api/analysis_predictor.cc | 4 ++--
 paddle/fluid/memory/memcpy.cc | 2 +-
 paddle/fluid/operators/batch_norm_op.cc | 4 ----
 paddle/fluid/operators/data_norm_op.cc | 2 --
 .../fluid/operators/detection/multiclass_nms_op.cc | 13 +++----------
 .../fluid/operators/fused/fused_bn_activation_op.cc | 2 --
 .../operators/fused/fused_bn_add_activation_op.cc | 2 --
 .../operators/fused/mkldnn/fusion_gru_mkldnn_op.cc | 2 +-
 .../operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc | 2 +-
 .../operators/fused/mkldnn/multi_gru_mkldnn_op.cc | 4 ++--
 paddle/fluid/operators/inplace_abn_op.cc | 2 --
 paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc | 2 +-
 paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc | 2 --
 paddle/fluid/operators/reader/buffered_reader.cc | 6 ++----
 paddle/fluid/operators/sum_op.cc | 2 +-
 .../fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc | 5 ++---
 paddle/fluid/platform/place.cc | 6 +-----
 .../fluid/prim/api/manual_prim/static_prim_api.cc | 2 --
 paddle/fluid/pybind/eager_method.cc | 6 ++----
 paddle/fluid/pybind/eager_properties.cc | 6 ++----
 paddle/fluid/pybind/eager_utils.cc | 9 +++------
 paddle/fluid/pybind/inference_api.cc | 2 +-
 paddle/fluid/pybind/op_function_common.cc | 4 +---
 paddle/phi/core/compat/convert_utils.cc | 2 +-
 paddle/phi/core/kernel_factory.cc | 5 ++---
 paddle/phi/infermeta/unary.cc | 2 +-
 paddle/phi/kernels/cpu/diagonal_grad_kernel.cc | 6 ++----
 paddle/phi/kernels/cpu/generate_proposals_kernel.cc | 8 +-------
 paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc | 4 ++--
 paddle/phi/kernels/funcs/vol2col.cc | 4 ++--
 47 files changed, 80 insertions(+), 139 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 735763bfcef5f..34d98c1ec9c3e 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -4,7 +4,7 @@ bugprone-argument-comment,
 -bugprone-assert-side-effect,
 -bugprone-bad-signal-to-kill-thread,
 -bugprone-bool-pointer-implicit-conversion,
--bugprone-branch-clone,
+bugprone-branch-clone,
 bugprone-copy-constructor-init,
 -bugprone-dangling-handle,
 -bugprone-dynamic-static-initializers,
diff --git a/paddle/fluid/distributed/collective/processgroup_comm_utils.cc b/paddle/fluid/distributed/collective/processgroup_comm_utils.cc
index 94723906fccb1..eec697f523945 100644
--- a/paddle/fluid/distributed/collective/processgroup_comm_utils.cc
+++ b/paddle/fluid/distributed/collective/processgroup_comm_utils.cc
@@ -51,7 +51,7 @@ ccl::CCLComm GetCCLComm(const Place& place, int global_gid) {
 #else
     return nullptr;
 #endif
-  } else if (place.GetType() == phi::AllocationType::CUSTOM) {
+  } else if (place.GetType() == phi::AllocationType::CUSTOM) {  // NOLINT
 #if defined(PADDLE_WITH_CUSTOM_DEVICE)
     return static_cast<paddle::distributed::ProcessGroupCustom*>(pg)->XCCLComm(
         place);
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index 2a504b2a0fc2b..b71c476a2c95e 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -120,7 +120,7 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
 static void TransData(const phi::DenseTensor &src_item,
                       phi::DenseTensor *dst_item) {
   if (src_item.IsInitialized() && src_item.numel() > 0) {
-    if (platform::is_gpu_place(src_item.place())) {
+    if (platform::is_gpu_place(src_item.place())) {  // NOLINT
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
       TensorCopy(src_item, platform::CPUPlace(), dst_item);
 #endif
diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc
index 8a0406864cde7..e69a25bb32781 100644
--- a/paddle/fluid/framework/downpour_worker.cc
+++ b/paddle/fluid/framework/downpour_worker.cc
@@ -362,9 +362,8 @@ void DownpourWorker::CopySparseTable() {
     if (src_table == dest_table) {
       continue;
     } else if (!copy_table_config_.sparse_copy_by_feasign()) {
-      if (feasign_set_.find(src_table) == feasign_set_.end()) {
-        continue;
-      } else if (feasign_set_[src_table].empty()) {
+      if (feasign_set_.find(src_table) == feasign_set_.end() ||
+          feasign_set_[src_table].empty()) {
         continue;
       }
       feanum = fleet_ptr_->CopyTable(src_table, dest_table);
diff --git a/paddle/fluid/framework/executor_cache.cc b/paddle/fluid/framework/executor_cache.cc
index 1044f785451e0..b30cb6def3447 100644
--- a/paddle/fluid/framework/executor_cache.cc
+++ b/paddle/fluid/framework/executor_cache.cc
@@ -47,7 +47,7 @@ static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
       execution_strategy.num_threads_ = 2;
       break;
     }
-    case platform::DeviceType::CUDA: {
+    case platform::DeviceType::CUDA: {  // NOLINT
       // NOTE: According experiments, one thread is faster in
       // most model training.
      execution_strategy.num_threads_ = 1;
diff --git a/paddle/fluid/framework/io/fs.cc b/paddle/fluid/framework/io/fs.cc
index a39147a97cf7e..4a689409d412b 100644
--- a/paddle/fluid/framework/io/fs.cc
+++ b/paddle/fluid/framework/io/fs.cc
@@ -399,13 +399,12 @@ void hdfs_mv(const std::string& src, const std::string& dest) {
 }
 
 int fs_select_internal(const std::string& path) {
-  if (fs_begin_with_internal(path, "hdfs:")) {
-    return 1;
-  } else if (fs_begin_with_internal(path, "afs:")) {
+  if (fs_begin_with_internal(path, "hdfs:") ||
+      fs_begin_with_internal(path, "afs:")) {
     return 1;
+  } else {
+    return 0;
   }
-
-  return 0;
 }
 
 std::shared_ptr<FILE> fs_open_read(const std::string& path,
diff --git a/paddle/fluid/framework/ir/constant_folding_pass.cc b/paddle/fluid/framework/ir/constant_folding_pass.cc
index 3b3f23933fb6d..f8e0ac9475b5d 100644
--- a/paddle/fluid/framework/ir/constant_folding_pass.cc
+++ b/paddle/fluid/framework/ir/constant_folding_pass.cc
@@ -81,9 +81,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
     std::unordered_map<std::string, int> map;
     for (auto in_node : op_node->inputs) {
       map[in_node->Name()] = 0;
-      if (!in_node->Var()->Persistable()) {
-        input_persis = false;
-      } else if (!in_node->inputs.empty()) {
+      if (!in_node->Var()->Persistable() || !in_node->inputs.empty()) {
         input_persis = false;
       }
     }
diff --git a/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
index 8f19225dc53b4..655183dc712c0 100644
--- a/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc
@@ -400,9 +400,8 @@ void QuantDequantMkldnnPass::RemoveFakeOps(
 
     if (fake_quantize_types.count(op_node->Name())) {
       CollectFakeQuantizeOps(graph, op_node, &nodes2rm);
-    } else if (fake_dequantize_types.count(op_node->Name())) {
-      CollectFakeDequantizeOps(graph, op_node, &nodes2rm);
-    } else if (fake_quantize_dequantize_types.count(op_node->Name())) {
+    } else if (fake_dequantize_types.count(op_node->Name()) ||
+               fake_quantize_dequantize_types.count(op_node->Name())) {
       CollectFakeDequantizeOps(graph, op_node, &nodes2rm);
     } else if (onnx_format_quantize_dequantize_types.count(op_node->Name())) {
       CollectQuantizeDequantizeOpsFromONNXFormat(graph, op_node, &nodes2rm);
diff --git a/paddle/fluid/framework/new_executor/garbage_collector/event_garbage_collector.cc b/paddle/fluid/framework/new_executor/garbage_collector/event_garbage_collector.cc
index e826c94712568..e63164c020c36 100644
--- a/paddle/fluid/framework/new_executor/garbage_collector/event_garbage_collector.cc
+++ b/paddle/fluid/framework/new_executor/garbage_collector/event_garbage_collector.cc
@@ -88,9 +88,10 @@ void InterpreterCoreEventGarbageCollector::Add(
   if (var->IsType<phi::DenseTensor>()) {
     Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder(), event, ctx);
-  } else if (var->IsType<
-                 operators::reader::
-                     OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
+  } else if (
+      var->IsType<
+          operators::reader::
+              OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {  // NOLINT
     // TODO(xiongkun03) in old executor, this type of variable is not support
     // eager deletion. so we just leave it here ?
  } else if (var->IsType<phi::SelectedRows>()) {
diff --git a/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc b/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc
index 4bc8b298012ab..e7efc1f10c324 100644
--- a/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc
+++ b/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc
@@ -34,9 +34,10 @@ void InterpreterCoreFastGarbageCollector::Add(Variable* var) {
   if (var->IsType<phi::DenseTensor>()) {
     Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder());
-  } else if (var->IsType<
-                 operators::reader::
-                     OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
+  } else if (
+      var->IsType<
+          operators::reader::
+              OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {  // NOLINT
     // TODO(xiongkun03) in old executor, this type of variable is not support
     // eager deletion. so we just leave it here ?
   } else if (var->IsType<phi::SelectedRows>()) {
diff --git a/paddle/fluid/framework/new_executor/interpreter/static_build.cc b/paddle/fluid/framework/new_executor/interpreter/static_build.cc
index 69b4920050925..cf33fd1975a2a 100644
--- a/paddle/fluid/framework/new_executor/interpreter/static_build.cc
+++ b/paddle/fluid/framework/new_executor/interpreter/static_build.cc
@@ -197,10 +197,8 @@ phi::TensorBase* GetTensorFormVar(framework::Variable* var) {
     return var->template GetMutable<framework::LoDTensorArray>();
   } else if (var->template IsType<framework::Strings>()) {
    return var->template GetMutable<framework::Strings>();
-  } else if (var->template IsType<paddle::framework::RawTensor>()) {
-    return var->template GetMutable<paddle::framework::RawTensor>();
-  } else if (!var->IsInitialized()) {
-    // The following is for RAW type of var
+  } else if (var->template IsType<paddle::framework::RawTensor>() ||
+             !var->IsInitialized()) {
     return var->template GetMutable<paddle::framework::RawTensor>();
   } else {
     PADDLE_THROW(platform::errors::Unimplemented(
diff --git a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
index 64809b546f202..1ea18f0069aee 100644
--- a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
+++ b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
@@ -758,9 +758,10 @@ void NewIRInterpreter::RecordStreamForGC(InstructionBase* instr) {
   if (var->IsType<phi::DenseTensor>()) {
     TensorRecordStream(*(var->GetMutable<phi::DenseTensor>()));
-  } else if (var->IsType<
-                 operators::reader::
-                     OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
+  } else if (
+      var->IsType<
+          operators::reader::
+              OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {  // NOLINT
     // do nothing
   } else if (var->IsType<phi::SelectedRows>()) {
     TensorRecordStream(
diff --git a/paddle/fluid/framework/new_executor/program_interpreter.cc b/paddle/fluid/framework/new_executor/program_interpreter.cc
index a29e45515d894..e3478cee3ad02 100644
--- a/paddle/fluid/framework/new_executor/program_interpreter.cc
+++ b/paddle/fluid/framework/new_executor/program_interpreter.cc
@@ -1283,9 +1283,10 @@ void ProgramInterpreter::RecordStreamForGC(const Instruction& instr) {
   if (var->IsType<phi::DenseTensor>()) {
     TensorRecordStream(*(var->GetMutable<phi::DenseTensor>()));
-  } else if (var->IsType<
-                 operators::reader::
-                     OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
+  } else if (
+      var->IsType<
+          operators::reader::
+              OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {  // NOLINT
     // do nothing
   } else if (var->IsType<phi::SelectedRows>()) {
     TensorRecordStream(
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 9b9979bc70f4c..7a3271a48debc 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -2777,8 +2777,6 @@ void OperatorWithKernel::ParseInputDataType(
     const phi::DenseTensor* t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     } else if (var->IsType<phi::SelectedRows>()) {
       t = &(var->Get<phi::SelectedRows>().value());
     } else if (var->IsType<phi::SparseCooTensor>()) {
@@ -3221,11 +3219,8 @@ void OperatorWithKernel::BuildPhiKernelContext(
         } else if (var->template IsType<framework::Strings>()) {
           tensor_out = var->template GetMutable<framework::Strings>();
           phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
-        } else if (var->template IsType<paddle::framework::RawTensor>()) {
-          tensor_out = var->template GetMutable<paddle::framework::RawTensor>();
-          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
-        } else if (!var->IsInitialized()) {
-          // The following is for RAW type of var
+        } else if (var->template IsType<paddle::framework::RawTensor>() ||
+                   !var->IsInitialized()) {
           tensor_out = var->template GetMutable<paddle::framework::RawTensor>();
           phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
         } else {
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index 24832d2060911..637a2454dd189 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -693,7 +693,7 @@ ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,
 
   // broadcast parameters from the 0th device to others:
   auto need_broadcast = [&]() -> bool {
-    if (member_->build_strategy_.num_trainers_ > 1) {
+    if (member_->build_strategy_.num_trainers_ > 1) {  // NOLINT
       // 1. num_tariners would be grater than 1 for nccl distributed training.
       return true;
     } else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
@@ -936,11 +936,9 @@ void ParallelExecutor::BCastParamsToDevices(
       auto share_memory = [&] { t->ShareDataWith(main_tensor); };
 
       // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
-      if (member_->build_strategy_.async_mode_) {
-        share_memory();
-      } else if (member_->use_all_reduce_ ||
-                 member_->IsUseCUDA(member_->use_device_) ||
-                 var == "@LR_DECAY_COUNTER@") {
+      if (member_->use_all_reduce_ ||
+          member_->IsUseCUDA(member_->use_device_) ||
+          var == "@LR_DECAY_COUNTER@") {
         copy_memory();
       } else {
         share_memory();
diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index 6fe75d1a90dab..90612e5692595 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -78,7 +78,8 @@ void TensorCopyImpl(const TENSOR& src,
   auto size = src.numel() * phi::SizeOf(src.dtype());
 #endif
 
-  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
+  if (platform::is_cpu_place(src_place) &&
+      platform::is_cpu_place(dst_place)) {  // NOLINT
     memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
   }
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
@@ -327,7 +328,8 @@ void TensorCopySync(const phi::DenseTensor& src,
     return;
   }
   auto size = src.numel() * phi::SizeOf(src.dtype());
-  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
+  if (platform::is_cpu_place(src_place) &&
+      platform::is_cpu_place(dst_place)) {  // NOLINT
     memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
   }
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc
index b0130e055c075..836ba0fb762b3 100644
--- a/paddle/fluid/framework/var_desc.cc
+++ b/paddle/fluid/framework/var_desc.cc
@@ -386,11 +386,8 @@ struct SetVarAttrDescVisitor {
   template <typename T>
   void operator()(T &&v) {
     using U = std::decay_t<T>;
-    if (std::is_same<U, int>::value) {
-      set_attr_value(v);
-    } else if (std::is_same<U, std::string>::value) {
-      set_attr_value(v);
-    } else if (std::is_same<U, std::vector<int>>::value) {
+    if (std::is_same<U, int>::value || std::is_same<U, std::string>::value ||
+        std::is_same<U, std::vector<int>>::value) {
       set_attr_value(v);
     } else {
       PADDLE_THROW(platform::errors::Unavailable(
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 43f9682882b1a..8cb15be537157 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1990,7 +1990,7 @@ std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
       static_cast<void *>(scope), this->GetDeviceContexts()));
   res->input_or_output_ = true;
   res->SetName(name);
-  if (platform::is_cpu_place(place_)) {
+  if (platform::is_cpu_place(place_)) {  // NOLINT
     res->SetPlace(PaddlePlace::kCPU);
   } else if (platform::is_ipu_place(place_)) {
     // Currently, IPUPlace's tensor copy between cpu and ipu has been set in
@@ -2041,7 +2041,7 @@ std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
       static_cast<void *>(scope), this->GetDeviceContexts()));
   res->input_or_output_ = false;
   res->SetName(name);
-  if (platform::is_cpu_place(place_)) {
+  if (platform::is_cpu_place(place_)) {  // NOLINT
     res->SetPlace(PaddlePlace::kCPU);
   } else if (platform::is_ipu_place(place_)) {
     // Currently, IPUPlace's tensor copy between cpu and ipu has been set in
diff --git a/paddle/fluid/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc
index 656d6273afb3f..cf253d6c4ebdc 100644
--- a/paddle/fluid/memory/memcpy.cc
+++ b/paddle/fluid/memory/memcpy.cc
@@ -743,7 +743,7 @@ void Copy(phi::Place dst_place,
   VLOG(4) << "memory::Copy " << num << " Bytes from " << src_place << " to "
           << dst_place;
   if (src_place.GetType() == phi::AllocationType::CPU &&
-      dst_place.GetType() == phi::AllocationType::CPU) {
+      dst_place.GetType() == phi::AllocationType::CPU) {  // NOLINT
     std::memcpy(dst, src, num);
   }
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index 4f1c7ab3857d7..1d45cee715409 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -386,8 +386,6 @@ phi::KernelKey BatchNormGradOp::GetExpectedKernelType(
     const phi::DenseTensor *t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
@@ -530,8 +528,6 @@ phi::KernelKey BatchNormDoubleGradOp::GetExpectedKernelType(
     const phi::DenseTensor *t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc
index 493351654d5eb..2e70168876162 100644
--- a/paddle/fluid/operators/data_norm_op.cc
+++ b/paddle/fluid/operators/data_norm_op.cc
@@ -495,8 +495,6 @@ class DataNormGradOp : public framework::OperatorWithKernel {
     const phi::DenseTensor *t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(platform::errors::InvalidArgument(
diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc
index 432713c60d969..ba2fd381eae3c 100644
--- a/paddle/fluid/operators/detection/multiclass_nms_op.cc
+++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc
@@ -101,11 +101,7 @@ class MultiClassNMSOp : public framework::OperatorWithKernel {
     }
     // Here the box_dims[0] is not the real dimension of output.
     // It will be rewritten in the computing kernel.
-    if (score_size == 3) {
-      ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
-    } else {
-      ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
-    }
+    ctx->SetOutputDim("Out", {-1, box_dims[2] + 2});
     if (!ctx->IsRuntime()) {
       ctx->SetLoDLevel("Out", std::max(ctx->GetLoDLevel("BBoxes"), 1));
     }
@@ -587,11 +583,8 @@ class MultiClassNMS2Op : public MultiClassNMSOp {
 
     auto score_dims = ctx->GetInputDim("Scores");
     auto score_size = score_dims.size();
-    if (score_size == 3) {
-      ctx->SetOutputDim("Index", {-1, 1});
-    } else {
-      ctx->SetOutputDim("Index", {-1, 1});
-    }
+
+    ctx->SetOutputDim("Index", {-1, 1});
     if (!ctx->IsRuntime()) {
       ctx->SetLoDLevel("Index", std::max(ctx->GetLoDLevel("BBoxes"), 1));
     }
diff --git a/paddle/fluid/operators/fused/fused_bn_activation_op.cc b/paddle/fluid/operators/fused/fused_bn_activation_op.cc
index 88b11f1ef39c5..ca59a466a5c2b 100644
--- a/paddle/fluid/operators/fused/fused_bn_activation_op.cc
+++ b/paddle/fluid/operators/fused/fused_bn_activation_op.cc
@@ -303,8 +303,6 @@ phi::KernelKey FusedBatchNormActGradOp::GetExpectedKernelType(
     const phi::DenseTensor *t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
diff --git a/paddle/fluid/operators/fused/fused_bn_add_activation_op.cc b/paddle/fluid/operators/fused/fused_bn_add_activation_op.cc
index a33a91b082e5c..ed416d4ad13d1 100644
--- a/paddle/fluid/operators/fused/fused_bn_add_activation_op.cc
+++ b/paddle/fluid/operators/fused/fused_bn_add_activation_op.cc
@@ -267,8 +267,6 @@ phi::KernelKey FusedBatchNormAddActGradOp::GetExpectedKernelType(
     const phi::DenseTensor *t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
index 05d1e64f92ae7..5ec5e8081bb6f 100644
--- a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
@@ -248,7 +248,7 @@ class FusionGRUMKLDNNKernel : public framework::OpKernel<T> {
     const bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
 
     // BF16 does not support force output
-    if (!is_bf16 && force_fp32_output) {
+    if (!is_bf16 && force_fp32_output) {  // NOLINT
       RunKernel<float>(ctx);
     } else {
       RunKernel<T>(ctx);
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
index d973c5e89a626..4972db5804322 100644
--- a/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
@@ -329,7 +329,7 @@ class FusionLSTMMKLDNNKernel : public framework::OpKernel<T> {
     const bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
 
     // BF16 does not support force output
-    if (!is_bf16 && force_fp32_output) {
+    if (!is_bf16 && force_fp32_output) {  // NOLINT
       RunKernel<float>(ctx);
     } else {
       RunKernel<T>(ctx);
diff --git a/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc
index 90ecbe4506d98..1c8e0a1b56a97 100644
--- a/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc
@@ -688,7 +688,7 @@ class MultiGRUMKLDNNKernel : public framework::OpKernel<T> {
     const bool force_fp32_output =
        ctx.HasAttr("force_fp32_output") && ctx.Attr<bool>("force_fp32_output");
 
-    if (force_fp32_output) {
+    if (force_fp32_output) {  // NOLINT
       RunKernel<float>(ctx);
     } else {
       RunKernel<T>(ctx);
     }
@@ -706,7 +706,7 @@ class MultiGRUMKLDNNKernel : public framework::OpKernel<T> {
     auto gru_out_L2R = handler.executeSingleGru(input_mem, layer, L2R);
     handler.reorderInputL2RtoR2L(input_mem, layer);
     auto gru_out_R2L = handler.executeSingleGru(input_mem, layer, R2L);
-    if (layer < layers - 1)
+    if (layer < layers - 1)  // NOLINT
       handler.template reorderOutputR2LtoL2R<T>(gru_out_R2L, layer);
     else
       handler.template reorderOutputR2LtoL2R<T_out>(gru_out_R2L, layer);
diff --git a/paddle/fluid/operators/inplace_abn_op.cc b/paddle/fluid/operators/inplace_abn_op.cc
index eee0f1f304bc3..a53a9867b9903 100644
--- a/paddle/fluid/operators/inplace_abn_op.cc
+++ b/paddle/fluid/operators/inplace_abn_op.cc
@@ -284,8 +284,6 @@ class InplaceABNGradOp : public framework::OperatorWithKernel {
     const phi::DenseTensor* t = nullptr;
     if (var->IsType<phi::DenseTensor>()) {
       t = &var->Get<phi::DenseTensor>();
-    } else if (var->IsType<phi::DenseTensor>()) {
-      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
index a7f6bc512ffce..692b7f0721ceb 100644
--- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
@@ -359,7 +359,7 @@ class FCMKLDNNKernel : public framework::OpKernel<T_in> {
     bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
 
     IF_CHANGE_FC_TW_TYPENAME((std::is_same<T_in, uint8_t>::value), ([&] {
-      if (force_fp32_output) {
+      if (force_fp32_output) {  // NOLINT
         this->RunKernel<float>(ctx);
       } else if (phi::funcs::is_int8<T_in>()) {
         if (fuse_relu) {
diff --git a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
index b7a33edb82a00..3c53b05152b7e 100644
--- a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
@@ -105,8 +105,6 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
         InferShapeSqueezeOp(ctx, x_dims, out_dims);
         break;
       case ReshapeKernelOpName::flatten:
-        InferShapeFlattenOp(ctx, x_dims, out_dims);
-        break;
       case ReshapeKernelOpName::flatten2:
         InferShapeFlattenOp(ctx, x_dims, out_dims);
         break;
diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc
index 2e24caa91c6bb..b73ffe4319be7 100644
--- a/paddle/fluid/operators/reader/buffered_reader.cc
+++ b/paddle/fluid/operators/reader/buffered_reader.cc
@@ -213,10 +213,8 @@ void BufferedReader::ReadAsync(size_t i) {
       auto cpu_ptr = cpu[i].data();
       auto gpu_ptr = gpu_ptrs[i];
       auto size = cpu[i].numel() * phi::SizeOf(cpu[i].dtype());
-      if (platform::is_cuda_pinned_place(cpu_place)) {
-        memory::Copy(
-            place_, gpu_ptr, cpu_place, cpu_ptr, size, stream_.get());
-      } else if ((platform::is_gpu_place(cpu_place))) {
+      if (platform::is_cuda_pinned_place(cpu_place) ||
+          platform::is_gpu_place(cpu_place)) {
         memory::Copy(
             place_, gpu_ptr, cpu_place, cpu_ptr, size, stream_.get());
       } else {
diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc
index 5cf9fba9f2681..ebb4cd7cf132d 100644
--- a/paddle/fluid/operators/sum_op.cc
+++ b/paddle/fluid/operators/sum_op.cc
@@ -76,7 +76,7 @@ class SumOp : public framework::OperatorWithKernel {
     // NOTE(jiahongyu): Below codes originally enclosed by PADDLE_WITH_DNNL
     if (!((data_type == framework::proto::VarType::FP32 ||
            data_type == framework::proto::VarType::BF16) &&
-          ctx.OutputVar("Out")->IsType<phi::DenseTensor>())) {
+          ctx.OutputVar("Out")->IsType<phi::DenseTensor>())) {  // NOLINT
       this->SetDnnFallback(true);
     } else if (!std::all_of(x_vars.begin(),
                             x_vars.end(),
diff --git a/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc
index a3997ee97db6a..437523e41bf3e 100644
--- a/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc
+++ b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc
@@ -196,9 +196,8 @@ void BuildValue(pir::Value value,
                            variable_list);
   }
   // Only support DenseTensor or Vector
-  if (!value.type()) {
-    var->GetMutable<phi::DenseTensor>();
-  } else if (value.type().isa<paddle::dialect::DenseTensorType>()) {
+  if (!value.type() ||
+      value.type().isa<paddle::dialect::DenseTensorType>()) {
     var->GetMutable<phi::DenseTensor>();
   } else if (value.type().isa<paddle::dialect::SelectedRowsType>()) {
     var->GetMutable<phi::SelectedRows>();
diff --git a/paddle/fluid/platform/place.cc b/paddle/fluid/platform/place.cc
index b8452a594e358..d38d0418e4639 100644
--- a/paddle/fluid/platform/place.cc
+++ b/paddle/fluid/platform/place.cc
@@ -62,11 +62,7 @@ bool is_same_place(const Place &p1, const Place &p2) {
   if (places_are_same_class(p1, p2)) {
     if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
       return true;
-    } else if (is_xpu_place(p1)) {
-      return p1 == p2;
-    } else if (is_ipu_place(p1)) {
-      return p1 == p2;
-    } else if (is_custom_place(p1)) {
+    } else if (is_xpu_place(p1) || is_ipu_place(p1) || is_custom_place(p1)) {
       return p1 == p2;
     } else {
       return p1 == p2;
diff --git a/paddle/fluid/prim/api/manual_prim/static_prim_api.cc b/paddle/fluid/prim/api/manual_prim/static_prim_api.cc
index c907be2d10256..c45a473b4a8d3 100644
--- a/paddle/fluid/prim/api/manual_prim/static_prim_api.cc
+++ b/paddle/fluid/prim/api/manual_prim/static_prim_api.cc
@@ -50,8 +50,6 @@ Tensor full(const IntArray& shape,
   op->SetAttr("shape", shape.GetData());
   switch (dtype) {
     case phi::DataType::FLOAT16:
-      op->SetAttr("str_value", std::to_string(value.to<float>()));
-      break;
     case phi::DataType::BFLOAT16:
       op->SetAttr("str_value", std::to_string(value.to<float>()));
       break;
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 59ef86423788a..e72f5dc77f99c 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1617,7 +1617,8 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
                py::isinstance<py::float_>(value_obj_tmp) ||
                py::isinstance<py::int_>(value_obj_tmp) ||
                PyComplex_Check(value_obj)) {
-      if (self->tensor.dtype() == phi::DataType::FLOAT32) {
+      if (self->tensor.dtype() == phi::DataType::FLOAT32 ||
+          self->tensor.dtype() == phi::DataType::FLOAT16) {
         attrs["values"] = std::vector<paddle::experimental::Scalar>{
             value_obj_tmp.cast<float>()};
       } else if (self->tensor.dtype() == phi::DataType::FLOAT64) {
@@ -1632,9 +1633,6 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
       } else if (self->tensor.dtype() == phi::DataType::BOOL) {
         attrs["values"] = std::vector<paddle::experimental::Scalar>{
             value_obj_tmp.cast<bool>()};
-      } else if (self->tensor.dtype() == phi::DataType::FLOAT16) {
-        attrs["values"] = std::vector<paddle::experimental::Scalar>{
-            value_obj_tmp.cast<float>()};
       } else if (self->tensor.dtype() == phi::DataType::COMPLEX64) {
         attrs["values"] = std::vector<paddle::experimental::Scalar>{
             value_obj_tmp.cast<std::complex<float>>()};
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 59ecee2c5d668..517c210830022 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -92,13 +92,11 @@ Tensor's type.
 PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   EAGER_TRY
-  if (!self->tensor.defined()) {
+  if (!self->tensor.defined() || self->tensor.is_dense_tensor()) {
     // be same to old dygraph
     return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
   }
-  if (self->tensor.is_dense_tensor()) {
-    return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
-  } else if (self->tensor.is_selected_rows()) {
+  if (self->tensor.is_selected_rows()) {
     return ToPyObject(paddle::framework::proto::VarType::SELECTED_ROWS);
   } else if (egr::IsVariableCompatTensor(self->tensor)) {
     return ToPyObject(static_cast<paddle::framework::proto::VarType::Type>(
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 0432ca88d6ada..87660d9fd88ca 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -173,13 +173,11 @@ bool PyObject_CheckIRVectorOfOpResult(PyObject* obj) {
   }
 }
 
 bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos) {
-  if (obj == Py_None) {
+  if (obj == Py_None || obj == Py_False) {
     return false;  // To be compatible with QA integration testing. Some
                    // test cases pass in None.
   } else if (obj == Py_True) {
     return true;
-  } else if (obj == Py_False) {
-    return false;
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "argument (position %d) must be "
@@ -1125,9 +1123,8 @@ static paddle::Tensor& GetTensorFromPyObject(const std::string& op_type,
     return emptytensor;
   }
 
-  if (PyObject_TypeCheck(obj, p_tensor_type)) {
-    return reinterpret_cast<TensorObject*>(obj)->tensor;
-  } else if (PyObject_TypeCheck(obj, p_string_tensor_type)) {
+  if (PyObject_TypeCheck(obj, p_tensor_type) ||
+      PyObject_TypeCheck(obj, p_string_tensor_type)) {
     return reinterpret_cast<TensorObject*>(obj)->tensor;
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc
index 4d180aa61f9ed..508f525ab7758 100644
--- a/paddle/fluid/pybind/inference_api.cc
+++ b/paddle/fluid/pybind/inference_api.cc
@@ -239,7 +239,7 @@ void PaddleInferTensorCreate(paddle_infer::Tensor &tensor,  // NOLINT
 
 paddle_infer::PlaceType ToPaddleInferPlace(
     phi::AllocationType allocation_type) {
-  if (allocation_type == phi::AllocationType::CPU) {
+  if (allocation_type == phi::AllocationType::CPU) {  // NOLINT
     return paddle_infer::PlaceType::kCPU;
   } else if (allocation_type == phi::AllocationType::GPU) {
     return paddle_infer::PlaceType::kGPU;
diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index 29c4c2fd0a7c5..4b956e97ad7b5 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -119,13 +119,11 @@ bool PyObject_CheckString(PyObject* obj) { return PyUnicode_Check(obj); }
 
 bool CastPyArg2Boolean(PyObject* obj,
                        const std::string& op_type,
                        ssize_t arg_pos) {
-  if (obj == Py_None) {
+  if (obj == Py_None || obj == Py_False) {
     return false;  // To be compatible with QA integration testing. Some
                    // test case pass in None.
   } else if (obj == Py_True) {
     return true;
-  } else if (obj == Py_False) {
-    return false;
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
diff --git a/paddle/phi/core/compat/convert_utils.cc b/paddle/phi/core/compat/convert_utils.cc
index d82b37328850f..d4c5de0dbe6dc 100644
--- a/paddle/phi/core/compat/convert_utils.cc
+++ b/paddle/phi/core/compat/convert_utils.cc
@@ -67,7 +67,7 @@ phi::Place TransToPhiPlace(const Backend& backend, bool set_device_id) {
           set_device_id ? phi::backends::gpu::GetCurrentDeviceId() : 0);
 #endif
 #ifdef PADDLE_WITH_DNNL
-    case phi::Backend::ONEDNN:
+    case phi::Backend::ONEDNN:  // NOLINT
       return phi::CPUPlace();
 #endif
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
diff --git a/paddle/phi/core/kernel_factory.cc b/paddle/phi/core/kernel_factory.cc
index d58decadfadca..f9c1dca46b2fb 100644
--- a/paddle/phi/core/kernel_factory.cc
+++ b/paddle/phi/core/kernel_factory.cc
@@ -63,9 +63,8 @@ KernelFactory& KernelFactory::Instance() {
 
 bool KernelFactory::HasCompatiblePhiKernel(const std::string& op_type) const {
   if (deprecated_op_names.find(op_type) == deprecated_op_names.end()) {
-    if (phi::OpUtilsMap::Instance().Contains(op_type)) {
-      return true;
-    } else if (kernels_.find(op_type) != kernels_.end()) {
+    if (phi::OpUtilsMap::Instance().Contains(op_type) ||
+        (kernels_.find(op_type) != kernels_.end())) {
       return true;
     }
   }
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index aa1b6526cd5f8..e0df80157013e 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -1130,7 +1130,7 @@ void ExpandInferMeta(const MetaTensor& x,
       std::max(static_cast<size_t>(x_dims.size()), expand_shape.size());
   std::vector<int64_t> out_shape(out_rank);
   for (int i = 0; i < static_cast<int>(expand_shape.size()); ++i) {
-    if (x_dims[i] == -1) {
+    if (x_dims[i] == -1) {  // NOLINT
       out_shape[i] = -1;
     } else if (expand_shape[i] == -1) {
       if (static_cast<int>(x_dims.size()) > i) {
diff --git a/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc b/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
index 5ccb5ad8c43b4..d8383b45beb79 100644
--- a/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
@@ -63,10 +63,8 @@ void DiagonalGradKernel(const Context& dev_ctx,
     idx_dim.erase(idx_dim.begin() + std::min(axis1_, axis2_));
 
     bool flag = false;
-    if (offset_ == 0 && axis1_dim == axis2_dim) {
-      idx_dim.push_back(axis1_dim);
-      flag = true;
-    } else if (offset_ > 0 && (axis1_dim + offset_) == axis2_dim) {
+    if ((offset_ == 0 && axis1_dim == axis2_dim) ||
+        (offset_ > 0 && (axis1_dim + offset_) == axis2_dim)) {
       idx_dim.push_back(axis1_dim);
       flag = true;
     } else if (offset_ < 0 && (axis1_dim + offset_) == axis2_dim) {
diff --git a/paddle/phi/kernels/cpu/generate_proposals_kernel.cc b/paddle/phi/kernels/cpu/generate_proposals_kernel.cc
index 2e468ef2d07ff..e9764035613ed 100644
--- a/paddle/phi/kernels/cpu/generate_proposals_kernel.cc
+++ b/paddle/phi/kernels/cpu/generate_proposals_kernel.cc
@@ -52,13 +52,7 @@ void ClipTiledBoxes(const phi::CPUContext& ctx,
   T im_h = is_scale ? round(im_info_data[0] / im_info_data[2])
                     : im_info_data[0];
   for (int64_t i = 0; i < input_boxes.numel(); ++i) {
-    if (i % 4 == 0) {
-      out_data[i] =
-          std::max(std::min(input_boxes_data[i], im_w - offset), zero);
-    } else if (i % 4 == 1) {
-      out_data[i] =
-          std::max(std::min(input_boxes_data[i], im_h - offset), zero);
-    } else if (i % 4 == 2) {
+    if ((i % 4 == 0) || (i % 4 == 2)) {
       out_data[i] =
           std::max(std::min(input_boxes_data[i], im_w - offset), zero);
     } else {
diff --git a/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc b/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc
index 0ca3be62a3971..fac19f142dffc 100644
--- a/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc
@@ -256,7 +256,7 @@ void CalculateEGrad(const T* out_grad_data,
     for (int64_t j = 0; j < bcast.out_len; j++) {
       int64_t x_add = bcast.use_bcast ? bcast.l_offset[j] : j;
       int64_t e_add = bcast.use_bcast ? bcast.r_offset[j] : j;
-      if (message_op == "ADD") {
+      if (message_op == "ADD") {  // NOLINT
 #ifdef PADDLE_WITH_MKLML
 #pragma omp atomic
 #endif
@@ -283,7 +283,7 @@ void CalculateEGrad(const T* out_grad_data,
     for (int64_t j = 0; j < bcast.out_len; j++) {
       int64_t x_add = bcast.use_bcast ? bcast.l_offset[j] : j;
       int64_t e_add = bcast.use_bcast ? bcast.r_offset[j] : j;
-      if (message_op == "ADD") {
+      if (message_op == "ADD") {  // NOLINT
 #ifdef PADDLE_WITH_MKLML
 #pragma omp atomic
 #endif
diff --git a/paddle/phi/kernels/funcs/vol2col.cc b/paddle/phi/kernels/funcs/vol2col.cc
index 0f411b8894ce9..e505fcb3de337 100644
--- a/paddle/phi/kernels/funcs/vol2col.cc
+++ b/paddle/phi/kernels/funcs/vol2col.cc
@@ -66,7 +66,7 @@ class Vol2ColFunctor<phi::CPUContext, T> {
     // changed
    bool paddings_size_is_6 = (paddings.size() == 6);
-    int pad_d_forth = paddings_size_is_6 ? paddings[0] : paddings[0];
+    int pad_d_forth = paddings[0];
     int pad_d_back = paddings_size_is_6 ? paddings[1] : paddings[0];
     int pad_h_up = paddings_size_is_6 ? paddings[2] : paddings[1];
     int pad_h_down = paddings_size_is_6 ? paddings[3] : paddings[1];
@@ -191,7 +191,7 @@ class Col2VolFunctor<phi::CPUContext, T> {
         input_channels * filter_depth * filter_height * filter_width;
 
     bool paddings_size_is_6 = (paddings.size() == 6);
-    int pad_d_forth = paddings_size_is_6 ? paddings[0] : paddings[0];
+    int pad_d_forth = paddings[0];
     int pad_d_back = paddings_size_is_6 ? paddings[1] : paddings[0];
     int pad_h_up = paddings_size_is_6 ? paddings[2] : paddings[1];
     int pad_h_down = paddings_size_is_6 ? paddings[3] : paddings[1];

From 1a7de36c95d4cf60eeed60a3cb9901b00acc379b Mon Sep 17 00:00:00 2001
From: enkilee
Date: Wed, 20 Sep 2023 04:32:00 +0000
Subject: [PATCH 2/3] fix

---
 paddle/fluid/operators/detection/multiclass_nms_op.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc
index ba2fd381eae3c..d783d62592b37 100644
--- a/paddle/fluid/operators/detection/multiclass_nms_op.cc
+++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc
@@ -582,7 +582,6 @@ class MultiClassNMS2Op : public MultiClassNMSOp {
     MultiClassNMSOp::InferShape(ctx);
 
     auto score_dims = ctx->GetInputDim("Scores");
-    auto score_size = score_dims.size();
 
     ctx->SetOutputDim("Index", {-1, 1});
     if (!ctx->IsRuntime()) {

From 66ace72003cea5680b44c9618a0764dc6b685bc6 Mon Sep 17 00:00:00 2001
From: enkilee
Date: Wed, 20 Sep 2023 04:33:19 +0000
Subject: [PATCH 3/3] fix

---
 paddle/fluid/operators/detection/multiclass_nms_op.cc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc
index d783d62592b37..8519752bc1049 100644
--- a/paddle/fluid/operators/detection/multiclass_nms_op.cc
+++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc
@@ -580,9 +580,6 @@ class MultiClassNMS2Op : public MultiClassNMSOp {
   void InferShape(framework::InferShapeContext* ctx) const override {
     MultiClassNMSOp::InferShape(ctx);
-
-    auto score_dims = ctx->GetInputDim("Scores");
-
     ctx->SetOutputDim("Index", {-1, 1});
     if (!ctx->IsRuntime()) {
       ctx->SetLoDLevel("Index", std::max(ctx->GetLoDLevel("BBoxes"), 1));