diff --git a/.clang-tidy b/.clang-tidy
index a4871eddb7fb3..c9e147ed803a3 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -181,7 +181,7 @@ modernize-redundant-void-arg,
 -modernize-unary-static-assert,
 -modernize-use-bool-literals,
 modernize-use-emplace,
--modernize-use-equals-default,
+modernize-use-equals-default,
 -modernize-use-equals-delete,
 -modernize-use-noexcept,
 modernize-use-nullptr,
diff --git a/paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc b/paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc
index 95e9a8d03213e..36b6322836b20 100644
--- a/paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc
+++ b/paddle/fluid/distributed/auto_parallel/spmd_rules/dist_tensor_spec.cc
@@ -34,7 +34,7 @@ DistTensorSpec::DistTensorSpec(const DistTensorSpec& spec) {
   dist_attr_.copy_from(spec.dist_attr());
 }
 
-DistTensorSpec::~DistTensorSpec() {}
+DistTensorSpec::~DistTensorSpec() = default;
 
 DistTensorSpec::DistTensorSpec(const Tensor& tensor) {
   shape_ = tensor.shape();
diff --git a/paddle/fluid/distributed/collective/process_group_nccl.cc b/paddle/fluid/distributed/collective/process_group_nccl.cc
index ff580d6baa95c..c2d6533804a22 100644
--- a/paddle/fluid/distributed/collective/process_group_nccl.cc
+++ b/paddle/fluid/distributed/collective/process_group_nccl.cc
@@ -47,7 +47,7 @@ ProcessGroupNCCL::NCCLTask::NCCLTask(const Place& place,
       comm_event_(place),
       task_place_(place) {}
 
-ProcessGroupNCCL::NCCLTask::~NCCLTask() {}
+ProcessGroupNCCL::NCCLTask::~NCCLTask() = default;
 
 bool ProcessGroupNCCL::NCCLTask::IsCompleted() { return comm_event_.Query(); }
diff --git a/paddle/fluid/distributed/fleet_executor/fleet_executor.cc b/paddle/fluid/distributed/fleet_executor/fleet_executor.cc
index 712ae8cf4435e..f2a9f9cc6bfd8 100644
--- a/paddle/fluid/distributed/fleet_executor/fleet_executor.cc
+++ b/paddle/fluid/distributed/fleet_executor/fleet_executor.cc
@@ -48,7 +48,7 @@ FleetExecutor::FleetExecutor(const FleetExecutorDesc& exe_desc)
   InitMessageBus();
 }
 
-FleetExecutor::~FleetExecutor() {
+FleetExecutor::~FleetExecutor() {  // NOLINT
   for (const auto& carrier_id : carrier_ids_) {
     GlobalMap::Get(carrier_id)->Release();
   }
diff --git a/paddle/fluid/distributed/fleet_executor/interceptor.cc b/paddle/fluid/distributed/fleet_executor/interceptor.cc
index 7898f3a6892b5..350d064b847ad 100644
--- a/paddle/fluid/distributed/fleet_executor/interceptor.cc
+++ b/paddle/fluid/distributed/fleet_executor/interceptor.cc
@@ -24,7 +24,7 @@ namespace distributed {
 Interceptor::Interceptor(int64_t interceptor_id, TaskNode* node)
     : interceptor_id_(interceptor_id), node_(node) {}
 
-Interceptor::~Interceptor() {
+Interceptor::~Interceptor() {  // NOLINT
   // FIXME(wangxi): throw in stop function
   // std::lock_guard lock(mutex_);
   // PADDLE_ENFORCE_EQ(messages_.empty(), true,
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.cc b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
index 1b24a07628eb9..a800ce98915a3 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.cc
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -38,7 +38,7 @@ inline double GetCurrentUS() {
   return 1e+6 * time.tv_sec + time.tv_usec;
 }
 
-Communicator::Communicator() {}
+Communicator::Communicator() = default;
 
 void Communicator::InitGFlag(const std::string &gflags) {
   VLOG(3) << "Init With Gflags:" << gflags;
diff --git a/paddle/fluid/distributed/ps/table/common_graph_table.cc b/paddle/fluid/distributed/ps/table/common_graph_table.cc
index 8310b253c9a00..316b2295c3389 100644
--- a/paddle/fluid/distributed/ps/table/common_graph_table.cc
+++ b/paddle/fluid/distributed/ps/table/common_graph_table.cc
@@ -1205,7 +1205,7 @@ Node *GraphShard::find_node(uint64_t id) {
   return iter == node_location.end() ? nullptr : bucket[iter->second];
 }
 
-GraphTable::~GraphTable() {
+GraphTable::~GraphTable() {  // NOLINT
 #ifdef PADDLE_WITH_GPU_GRAPH
   clear_graph();
 #endif
diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc
index 45d29c6c2ea9b..05257d0845591 100644
--- a/paddle/fluid/framework/data_feed.cc
+++ b/paddle/fluid/framework/data_feed.cc
@@ -1999,7 +1999,7 @@ void PaddleBoxDataFeed::PutToFeedVec(const std::vector& ins_vec) {
 #endif
 }
 
-SlotRecordInMemoryDataFeed::~SlotRecordInMemoryDataFeed() {
+SlotRecordInMemoryDataFeed::~SlotRecordInMemoryDataFeed() {  // NOLINT
 #if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
   stop_token_.store(true);
   for (auto& thread : pack_threads_) {
diff --git a/paddle/fluid/framework/data_feed.h b/paddle/fluid/framework/data_feed.h
index 1057640842c2c..dd17c9d4d0bab 100644
--- a/paddle/fluid/framework/data_feed.h
+++ b/paddle/fluid/framework/data_feed.h
@@ -1821,7 +1821,7 @@ class MultiSlotInMemoryDataFeed : public InMemoryDataFeed {
 class SlotRecordInMemoryDataFeed : public InMemoryDataFeed {
  public:
-  SlotRecordInMemoryDataFeed() {}
+  SlotRecordInMemoryDataFeed() = default;
   virtual ~SlotRecordInMemoryDataFeed();
   void Init(const DataFeedDesc& data_feed_desc) override;
   void LoadIntoMemory() override;
diff --git a/paddle/fluid/framework/details/fetch_async_op_handle.cc b/paddle/fluid/framework/details/fetch_async_op_handle.cc
index 00b70d889b1a1..89617f0adf9fd 100644
--- a/paddle/fluid/framework/details/fetch_async_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_async_op_handle.cc
@@ -37,7 +37,7 @@ FetchAsyncOpHandle::FetchAsyncOpHandle(ir::Node *node,
       local_exec_scopes_(local_exec_scopes),
       return_merged_(return_merged) {}
 
-FetchAsyncOpHandle::~FetchAsyncOpHandle() {}
+FetchAsyncOpHandle::~FetchAsyncOpHandle() = default;
 
 void FetchAsyncOpHandle::RecordWaitEventOnCtx(
     platform::DeviceContext *waited_ctx) {
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index a36b63da9b8b6..2a504b2a0fc2b 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -35,7 +35,7 @@ FetchOpHandle::FetchOpHandle(ir::Node *node,
       local_exec_scopes_(local_exec_scopes),
       return_merged_(return_merged) {}
 
-FetchOpHandle::~FetchOpHandle() {}
+FetchOpHandle::~FetchOpHandle() = default;
 
 void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
   PADDLE_THROW(platform::errors::PermissionDenied(
diff --git a/paddle/fluid/framework/details/nan_inf_utils_detail.cc b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
index 80c029a5fd976..54cc804955337 100644
--- a/paddle/fluid/framework/details/nan_inf_utils_detail.cc
+++ b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
@@ -27,7 +27,7 @@ namespace paddle {
 namespace framework {
 namespace details {
 struct DebugTools {
-  DebugTools() {}
+  DebugTools() = default;
   std::string path = "";
   int stack_limit = 1;
 };
diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc
index 6a5f0a38204e5..36015052062ea 100644
--- a/paddle/fluid/framework/details/op_handle_base.cc
+++ b/paddle/fluid/framework/details/op_handle_base.cc
@@ -30,7 +30,7 @@ std::string OpHandleBase::DebugString() const {
   return ss.str();
 }
 
-OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {
+OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {  // NOLINT
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   for (auto &ev : events_) {
     if (ev.second) {
diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
index 9dac1a7203f8d..98f2c01100d0d 100644
--- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
+++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -40,7 +40,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(ir::Node *node,
   this->SetDeviceContext(place_, dev_ctx);
 }
 
-ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {}
+ScaleLossGradOpHandle::~ScaleLossGradOpHandle() = default;
 
 struct ScaleLossGradFunctor {
   float coeff_;
diff --git a/paddle/fluid/framework/details/ssa_graph_executor.cc b/paddle/fluid/framework/details/ssa_graph_executor.cc
index 2723a46dcfae3..6ade32097bd82 100644
--- a/paddle/fluid/framework/details/ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/ssa_graph_executor.cc
@@ -19,7 +19,7 @@ namespace paddle {
 namespace framework {
 namespace details {
-SSAGraphExecutor::~SSAGraphExecutor() {}
+SSAGraphExecutor::~SSAGraphExecutor() = default;
 
 void ClearFetchOp(ir::Graph* graph, std::vector* fetch_ops) {
   if (fetch_ops->empty()) return;
diff --git a/paddle/fluid/framework/details/var_handle.cc b/paddle/fluid/framework/details/var_handle.cc
index 95d62e66415e7..b91606ac43669 100644
--- a/paddle/fluid/framework/details/var_handle.cc
+++ b/paddle/fluid/framework/details/var_handle.cc
@@ -18,7 +18,7 @@ namespace paddle {
 namespace framework {
 namespace details {
-VarHandleBase::~VarHandleBase() {}
+VarHandleBase::~VarHandleBase() = default;
 
 VarHandle::~VarHandle() { VLOG(4) << "deleting var handle " << DebugString(); }
diff --git a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc
index 3bd1ad609f3ac..18c7dcc196b5a 100644
--- a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc
+++ b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc
@@ -24,7 +24,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
 
-AdaptivePool2dConvertGlobalPass::AdaptivePool2dConvertGlobalPass() {
+AdaptivePool2dConvertGlobalPass::AdaptivePool2dConvertGlobalPass() {  // NOLINT
   AddOpCompat(OpCompat("pool2d"))
       .AddInput("X")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.h b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.h
index 4a1405004e247..50ee52fde1171 100644
--- a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.h
+++ b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.h
@@ -32,7 +32,7 @@ class Graph;
 class AdaptivePool2dConvertGlobalPass : public FusePassBase {
  public:
   AdaptivePool2dConvertGlobalPass();
-  virtual ~AdaptivePool2dConvertGlobalPass() {}
+  virtual ~AdaptivePool2dConvertGlobalPass() = default;
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
diff --git a/paddle/fluid/framework/ir/constant_folding_pass.cc b/paddle/fluid/framework/ir/constant_folding_pass.cc
index 41c5eac81a150..8f227929b395b 100644
--- a/paddle/fluid/framework/ir/constant_folding_pass.cc
+++ b/paddle/fluid/framework/ir/constant_folding_pass.cc
@@ -51,7 +51,7 @@ struct ConstantFolding : public PatternBase {
 };
 }  // namespace patterns
 
-ConstantFoldingPass::ConstantFoldingPass() {}
+ConstantFoldingPass::ConstantFoldingPass() = default;
 
 void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
   PADDLE_ENFORCE_NOT_NULL(
diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
index de1374073c389..256de39c24cd0 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -756,7 +756,7 @@ void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_conv_bn_count);
 }
 
-ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
+ConvTransposeBNFusePass::ConvTransposeBNFusePass() {  // NOLINT
   AddOpCompat(OpCompat("conv2d_transpose"))
       .AddInput("Input")
       .IsTensor()
@@ -800,7 +800,8 @@ ConvTransposeBNFusePass::ConvTransposeBNFusePass() {
       .End();
 }
 
-ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
+ConvTransposeEltwiseAddBNFusePass::
+    ConvTransposeEltwiseAddBNFusePass() {  // NOLINT
   AddOpCompat(OpCompat("conv2d_transpose"))
       .AddInput("Input")
       .IsTensor()
@@ -844,7 +845,7 @@ ConvTransposeEltwiseAddBNFusePass::ConvTransposeEltwiseAddBNFusePass() {
       .End();
 }
 
-DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() {
+DepthwiseConvBNFusePass::DepthwiseConvBNFusePass() {  // NOLINT
   AddOpCompat(OpCompat("depthwise_conv2d"))
       .AddInput("Input")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/cost_model.cc b/paddle/fluid/framework/ir/cost_model.cc
index 9ca3190fd092f..0f75697caefcf 100644
--- a/paddle/fluid/framework/ir/cost_model.cc
+++ b/paddle/fluid/framework/ir/cost_model.cc
@@ -30,7 +30,7 @@ using platform::MemEvent;
 
 const double CostData::NOT_MEASURED = -1;
 
-CostData::~CostData() {
+CostData::~CostData() {  // NOLINT
   // TODO(zhhsplendid): when we save a copy of program/graph, we should delete
   // here.
 }
diff --git a/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
index fc4ca24b2ae63..20e693681e7bd 100644
--- a/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
@@ -56,7 +56,7 @@ static const std::initializer_list rnn_variable_names{
 
 class ComputePropagateScalesMkldnnPassTest : public testing::Test {
  public:
-  ComputePropagateScalesMkldnnPassTest() {
+  ComputePropagateScalesMkldnnPassTest() {  // NOLINT
     pass.reset(new ComputePropagateScalesMkldnnPass());
   }
diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
index 3160b45f5c534..fca71d0bd6900 100644
--- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
@@ -32,7 +32,7 @@ class Graph;
   PADDLE_ENFORCE_NOT_NULL(                   \
       id, platform::errors::InvalidArgument("Subgraph has no node %s.", #id));
 
-DepthwiseConvMKLDNNPass::DepthwiseConvMKLDNNPass() {
+DepthwiseConvMKLDNNPass::DepthwiseConvMKLDNNPass() {  // NOLINT
   AddOpCompat(OpCompat("depthwise_conv2d"))
       .AddInput("Input")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h
index 06ce5a41b6c42..4ca34bc505f4c 100644
--- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h
+++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h
@@ -25,7 +25,7 @@ class Graph;
 class DepthwiseConvMKLDNNPass : public FusePassBase {
  public:
   DepthwiseConvMKLDNNPass();
-  virtual ~DepthwiseConvMKLDNNPass() {}
+  virtual ~DepthwiseConvMKLDNNPass() = default;
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
diff --git a/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.cc
index 9160d88824552..a219e47072782 100644
--- a/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.cc
@@ -23,7 +23,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
 
-Int8ScaleCalculationMkldnnPass::Int8ScaleCalculationMkldnnPass() {
+Int8ScaleCalculationMkldnnPass::Int8ScaleCalculationMkldnnPass() {  // NOLINT
   AddOpCompat(OpCompat("conv2d"))
       .AddInput("Input")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.h b/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.h
index 06b66798f40ca..37bbcaee93378 100644
--- a/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.h
+++ b/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class Int8ScaleCalculationMkldnnPass : public FusePassBase {
  public:
   Int8ScaleCalculationMkldnnPass();
-  virtual ~Int8ScaleCalculationMkldnnPass() {}
+  virtual ~Int8ScaleCalculationMkldnnPass() = default;
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
diff --git a/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.cc
index 093001675ec7f..11eba402b55d4 100644
--- a/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.cc
@@ -76,7 +76,7 @@ void QuantizeConvInput(Scope* scope,
 }  // namespace
 
-ParamsQuantizationMkldnnPass::ParamsQuantizationMkldnnPass() {
+ParamsQuantizationMkldnnPass::ParamsQuantizationMkldnnPass() {  // NOLINT
   AddOpCompat(OpCompat("fused_conv2d"))
       .AddInput("Input")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.h b/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.h
index 89b2a52f2f554..e681d9701b8d8 100644
--- a/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.h
+++ b/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class ParamsQuantizationMkldnnPass : public FusePassBase {
  public:
   ParamsQuantizationMkldnnPass();
-  virtual ~ParamsQuantizationMkldnnPass() {}
+  virtual ~ParamsQuantizationMkldnnPass() = default;
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
diff --git a/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass_tester.cc
index f15317477f193..58e2a74ce1d40 100755
--- a/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/params_quantization_mkldnn_pass_tester.cc
@@ -65,7 +65,7 @@ struct TestScope {
 };
 
 struct ProgramStrategy {
-  virtual ~ProgramStrategy() {}
+  virtual ~ProgramStrategy() = default;
 
   std::unique_ptr CreateGraph() {
     CreateProgram();
diff --git a/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
index ebd3d8821fa66..09bebfaec99c3 100644
--- a/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
@@ -170,7 +170,8 @@ void FuseQuantTranspose2DequantOneDNNPass::ApplyImpl(Graph *graph) const {
   FuseTranspose2Dequantize(graph, "transpose2");
 }
 
-FuseQuantTranspose2DequantOneDNNPass::FuseQuantTranspose2DequantOneDNNPass() {
+FuseQuantTranspose2DequantOneDNNPass::
+    FuseQuantTranspose2DequantOneDNNPass() {  // NOLINT
   AddOpCompat(OpCompat("transpose2"))
       .AddInput("X")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.h b/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.h
index 1357279577a99..8dd2eb2e90b5a 100644
--- a/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.h
+++ b/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.h
@@ -23,7 +23,7 @@ namespace ir {
 
 class FuseQuantTranspose2DequantOneDNNPass : public FusePassBase {
  public:
-  virtual ~FuseQuantTranspose2DequantOneDNNPass() {}
+  virtual ~FuseQuantTranspose2DequantOneDNNPass() = default;
   FuseQuantTranspose2DequantOneDNNPass();
 
  protected:
diff --git a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc
index 455e0621adb0f..1e5a1c8265f15 100644
--- a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc
@@ -31,7 +31,7 @@ namespace ir {
   GET_IR_NODE(reshape2_op);      \
   GET_IR_NODE(reshape2_out);
 
-ShuffleChannelMKLDNNDetectPass::ShuffleChannelMKLDNNDetectPass() {
+ShuffleChannelMKLDNNDetectPass::ShuffleChannelMKLDNNDetectPass() {  // NOLINT
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.h b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.h
index 231b63c3b6a00..98b8ea463bdb5 100644
--- a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.h
+++ b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class ShuffleChannelMKLDNNDetectPass : public FusePassBase {
  public:
   ShuffleChannelMKLDNNDetectPass();
-  virtual ~ShuffleChannelMKLDNNDetectPass() {}
+  virtual ~ShuffleChannelMKLDNNDetectPass() = default;
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
diff --git a/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc b/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
index a48720f2bbb94..764c1a62faf74 100644
--- a/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
@@ -37,7 +37,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
 class Node;
 
-ReverseRollFusePass::ReverseRollFusePass() {
+ReverseRollFusePass::ReverseRollFusePass() {  // NOLINT
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/reverse_roll_fuse_pass.h b/paddle/fluid/framework/ir/reverse_roll_fuse_pass.h
index f7c8229311ed9..1b4801385c820 100644
--- a/paddle/fluid/framework/ir/reverse_roll_fuse_pass.h
+++ b/paddle/fluid/framework/ir/reverse_roll_fuse_pass.h
@@ -54,7 +54,7 @@ namespace ir {
 class ReverseRollFusePass : public FusePassBase {
  public:
   ReverseRollFusePass();
-  virtual ~ReverseRollFusePass() {}
+  virtual ~ReverseRollFusePass() = default;
 
  protected:
   void ApplyImpl(ir::Graph *graph) const override;
diff --git a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
index fb98c3b1216c6..1157fdd91a07c 100644
--- a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
+++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
@@ -31,7 +31,7 @@ namespace ir {
   GET_IR_NODE(reshape2_op);      \
   GET_IR_NODE(reshape2_out);
 
-ShuffleChannelDetectPass::ShuffleChannelDetectPass() {
+ShuffleChannelDetectPass::ShuffleChannelDetectPass() {  // NOLINT
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
       .IsTensor()
diff --git a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.h b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.h
index 4576cfd865bb3..67b0236165415 100644
--- a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.h
+++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.h
@@ -27,7 +27,7 @@ class Graph;
 class ShuffleChannelDetectPass : public FusePassBase {
  public:
   ShuffleChannelDetectPass();
-  virtual ~ShuffleChannelDetectPass() {}
+  virtual ~ShuffleChannelDetectPass() = default;
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
diff --git a/paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc b/paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc
index 6904e5604fb5c..177949ff9a41f 100644
--- a/paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc
@@ -67,7 +67,7 @@ SigmoidElementmulFusePattern::SigmoidElementmulFusePattern(
 }
 
 }  // namespace patterns
 
-SigmoidElementmulFusePass::SigmoidElementmulFusePass() {}
+SigmoidElementmulFusePass::SigmoidElementmulFusePass() = default;
 
 void SigmoidElementmulFusePass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
diff --git a/paddle/fluid/framework/ir/trt_map_ops_to_matrix_multiply_pass.cc b/paddle/fluid/framework/ir/trt_map_ops_to_matrix_multiply_pass.cc
index d81074112fedd..75094d6b4f3a9 100644
--- a/paddle/fluid/framework/ir/trt_map_ops_to_matrix_multiply_pass.cc
+++ b/paddle/fluid/framework/ir/trt_map_ops_to_matrix_multiply_pass.cc
@@ -28,7 +28,7 @@ namespace ir {
 
 class Node;
 
-TrtMapOpsToMatrixMultiplyPass::TrtMapOpsToMatrixMultiplyPass() {}
+TrtMapOpsToMatrixMultiplyPass::TrtMapOpsToMatrixMultiplyPass() = default;
 
 void TrtMapOpsToMatrixMultiplyPass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
diff --git a/paddle/fluid/framework/ir/yolo_box_fuse_pass.cc b/paddle/fluid/framework/ir/yolo_box_fuse_pass.cc
index 6a30ea2408f9a..7e2ba4dcabee2 100644
--- a/paddle/fluid/framework/ir/yolo_box_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/yolo_box_fuse_pass.cc
@@ -149,7 +149,7 @@ struct YoloBoxPattern : public PatternBase {
 };
 }  // namespace patterns
 
-YoloBoxFusePass::YoloBoxFusePass() {}
+YoloBoxFusePass::YoloBoxFusePass() = default;
 
 void YoloBoxFusePass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(
diff --git a/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc b/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc
index fc39bcaa5de36..08eea9f67d031 100644
--- a/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc
+++ b/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc
@@ -27,7 +27,7 @@ InterpreterCoreNoEventGarbageCollector::
 }
 
 InterpreterCoreNoEventGarbageCollector::
-    ~InterpreterCoreNoEventGarbageCollector() {
+    ~InterpreterCoreNoEventGarbageCollector() {  // NOLINT
   queue_.reset(nullptr);
 }
diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.cc b/paddle/fluid/framework/new_executor/new_executor_defs.cc
index 4950430867c7c..007cd16e7a607 100644
--- a/paddle/fluid/framework/new_executor/new_executor_defs.cc
+++ b/paddle/fluid/framework/new_executor/new_executor_defs.cc
@@ -37,7 +37,7 @@ VariableScope::VariableScope(Scope* scope) {
         "You have passed a nullptr to construct VariableScope."));
 }
 
-VariableScope::~VariableScope() {}
+VariableScope::~VariableScope() = default;
 
 Scope* VariableScope::GetMutableScope() const { return scope_; }
diff --git a/paddle/fluid/framework/phi_utils.cc b/paddle/fluid/framework/phi_utils.cc
index 7ac4a4bf27e81..98e13b6e092fb 100644
--- a/paddle/fluid/framework/phi_utils.cc
+++ b/paddle/fluid/framework/phi_utils.cc
@@ -40,7 +40,7 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
         platform::errors::InvalidArgument("Op proto cannot be nullptr."));
   }
 
-  ~KernelArgsNameMakerByOpProto() override {}
+  ~KernelArgsNameMakerByOpProto() override = default;
 
   const paddle::small_vector& GetInputArgsNames() override;
   const paddle::small_vector& GetOutputArgsNames() override;
diff --git a/paddle/fluid/framework/program_utils.cc b/paddle/fluid/framework/program_utils.cc
index 197c74ccac3d9..2d8a35ca00a76 100644
--- a/paddle/fluid/framework/program_utils.cc
+++ b/paddle/fluid/framework/program_utils.cc
@@ -187,7 +187,7 @@ void ProgramProcessor::AddDepToBlockOp(const BlockDesc &block) {
   }
 }
 
-ProgramProcessor::ProgramProcessor() {}
+ProgramProcessor::ProgramProcessor() = default;
 
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc
index 499884208bebd..c085bd265b808 100644
--- a/paddle/fluid/framework/reader.cc
+++ b/paddle/fluid/framework/reader.cc
@@ -72,7 +72,7 @@ void ReaderBase::Start() {
   }
 }
 
-ReaderBase::~ReaderBase() {}
+ReaderBase::~ReaderBase() = default;
 
 DecoratedReader::~DecoratedReader() {
   VLOG(1) << "~DecoratedReader";
diff --git a/paddle/fluid/framework/var_type_traits.cc b/paddle/fluid/framework/var_type_traits.cc
index d73c9b7d95957..c1f192673a702 100644
--- a/paddle/fluid/framework/var_type_traits.cc
+++ b/paddle/fluid/framework/var_type_traits.cc
@@ -114,7 +114,7 @@ struct VarIdToTypeIndexMapHolder {
   }
 
  private:
-  VarIdToTypeIndexMapHolder() {
+  VarIdToTypeIndexMapHolder() {  // NOLINT
    VarIdToTypeIndexMapInitializer::Init(&id_to_type_map_, &type_to_id_map_);
  }
diff --git a/paddle/fluid/imperative/amp_auto_cast.cc b/paddle/fluid/imperative/amp_auto_cast.cc
index 11493b8514579..bf6c32be2f372 100644
--- a/paddle/fluid/imperative/amp_auto_cast.cc
+++ b/paddle/fluid/imperative/amp_auto_cast.cc
@@ -131,7 +131,9 @@ AutoCastGuard::AutoCastGuard(std::shared_ptr tracer, AmpLevel level)
   }
 }
 
-AutoCastGuard::~AutoCastGuard() { tracer_->SetAmpLevel(pre_amp_level_); }
+AutoCastGuard::~AutoCastGuard() {  // NOLINT
+  tracer_->SetAmpLevel(pre_amp_level_);
+}
 
 AmpOperators::AmpOperators()
     : allow_ops_(new std::unordered_set()),
@@ -163,7 +165,7 @@ AmpOperators::AmpOperators()
       << unsupported_bf16_ops_->size();
 }
 
-AmpOperators::~AmpOperators() {}
+AmpOperators::~AmpOperators() = default;
 
 AmpOperators& AmpOperators::Instance() {
   static AmpOperators instance;
diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc
index afa653dec9069..2736b7bf520a3 100644
--- a/paddle/fluid/inference/analysis/analyzer.cc
+++ b/paddle/fluid/inference/analysis/analyzer.cc
@@ -23,7 +23,7 @@ namespace paddle {
 namespace inference {
 namespace analysis {
 
-Analyzer::Analyzer() {}
+Analyzer::Analyzer() = default;
 
 void Analyzer::Run(Argument *argument) { RunAnalysis(argument); }
diff --git a/paddle/fluid/inference/analysis/passes/passes.cc b/paddle/fluid/inference/analysis/passes/passes.cc
index 26e0f34c0ec08..4cb157e730dcb 100644
--- a/paddle/fluid/inference/analysis/passes/passes.cc
+++ b/paddle/fluid/inference/analysis/passes/passes.cc
@@ -27,7 +27,7 @@ namespace paddle {
 namespace inference {
 namespace analysis {
-PassRegistry::PassRegistry() {
+PassRegistry::PassRegistry() {  // NOLINT
   // Register manually to avoid the trivial `USE_OP` like macro for easier use
   // and link.
   passes_.emplace("ir_analysis_pass",
diff --git a/paddle/fluid/inference/api/resource_manager.cc b/paddle/fluid/inference/api/resource_manager.cc
index 3f06ee5722af9..495f1894ea2c6 100644
--- a/paddle/fluid/inference/api/resource_manager.cc
+++ b/paddle/fluid/inference/api/resource_manager.cc
@@ -50,7 +50,7 @@ class EigenGpuStreamDevice : public Eigen::StreamInterface {
   EigenGpuStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
     Eigen::initializeDeviceProp();
   }
-  ~EigenGpuStreamDevice() override {}
+  ~EigenGpuStreamDevice() override = default;
 
   void Reinitialize(gpuStream_t cuda_stream,
                     phi::Allocator* allocator,
diff --git a/paddle/fluid/inference/tensorrt/convert/activation_op.cc b/paddle/fluid/inference/tensorrt/convert/activation_op.cc
index af88b353eb002..43ef00ef1dfdc 100644
--- a/paddle/fluid/inference/tensorrt/convert/activation_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -29,7 +29,7 @@ namespace tensorrt {
 
 class ActivationOpConverter : public OpConverter {
  public:
-  ActivationOpConverter() {}
+  ActivationOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
index 81e175fac5ea2..419383ff0a334 100644
--- a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
@@ -21,7 +21,7 @@ namespace tensorrt {
 
 class ElementwiseTensorOpConverter : public OpConverter {
  public:
-  ElementwiseTensorOpConverter() {}
+  ElementwiseTensorOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
@@ -325,7 +325,7 @@ class ElementwiseTensorModOpConverter : public ElementwiseTensorOpConverter {
 // https://github.com/PaddlePaddle/Paddle/blob/release/2.4/python/paddle/tensor/math.py#L420
 class PowOpConverter : public OpConverter {
  public:
-  PowOpConverter() {}
+  PowOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
diff --git a/paddle/fluid/inference/tensorrt/convert/equal_op.cc b/paddle/fluid/inference/tensorrt/convert/equal_op.cc
index cde8bea9c4579..98db107752817 100644
--- a/paddle/fluid/inference/tensorrt/convert/equal_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/equal_op.cc
@@ -21,7 +21,7 @@ namespace tensorrt {
 
 class EqualOpConverter : public OpConverter {
  public:
-  EqualOpConverter() {}
+  EqualOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
@@ -74,7 +74,7 @@ class EqualOpConverter : public OpConverter {
 
 class NotEqualOpConverter : public OpConverter {
  public:
-  NotEqualOpConverter() {}
+  NotEqualOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
diff --git a/paddle/fluid/inference/tensorrt/convert/square_op.cc b/paddle/fluid/inference/tensorrt/convert/square_op.cc
index fdccc5a23eb74..a59ec9d242de4 100644
--- a/paddle/fluid/inference/tensorrt/convert/square_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/square_op.cc
@@ -20,7 +20,7 @@ namespace tensorrt {
 
 class SquareOpConverter : public OpConverter {
  public:
-  SquareOpConverter() {}
+  SquareOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
diff --git a/paddle/fluid/inference/tensorrt/convert/top_k_op.cc b/paddle/fluid/inference/tensorrt/convert/top_k_op.cc
index 4fcf6c4374617..1b9209ebd8755 100644
--- a/paddle/fluid/inference/tensorrt/convert/top_k_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/top_k_op.cc
@@ -29,7 +29,7 @@ namespace tensorrt {
 
 class TopKOpConverter : public OpConverter {
  public:
-  TopKOpConverter() {}
+  TopKOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
diff --git a/paddle/fluid/inference/tensorrt/convert/unary_op.cc b/paddle/fluid/inference/tensorrt/convert/unary_op.cc
index a313fa1f71126..14881d1206357 100644
--- a/paddle/fluid/inference/tensorrt/convert/unary_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/unary_op.cc
@@ -29,7 +29,7 @@ namespace tensorrt {
 
 class UnaryOpConverter : public OpConverter {
  public:
-  UnaryOpConverter() {}
+  UnaryOpConverter() = default;
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope,
                   bool test_mode) override {
diff --git a/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc b/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
index 4dfbf8e754e45..98acd9342fc90 100644
--- a/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
+++ b/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
@@ -23,7 +23,7 @@ namespace tensorrt {
 
 class ExprWrapper {
  public:
-  ExprWrapper() {}
+  ExprWrapper() = default;
   ExprWrapper(const nvinfer1::IDimensionExpr* expr,
               nvinfer1::IExprBuilder* expr_builder) {
     this->expr = expr;
diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc
index cbd21097ee740..40b9590c519d8 100644
--- a/paddle/fluid/inference/tensorrt/op_teller.cc
+++ b/paddle/fluid/inference/tensorrt/op_teller.cc
@@ -35,7 +35,7 @@ namespace tensorrt {
 
 // Just tell by the op_types.
 struct SimpleOpTypeSetTeller : public Teller {
-  SimpleOpTypeSetTeller() {
+  SimpleOpTypeSetTeller() {  // NOLINT
 #if IS_TRT_VERSION_GE(7130)
     // use TensorRT plugin
     teller_set.insert("group_norm");
@@ -3083,7 +3083,7 @@ struct SimpleOpTypeSetTeller : public Teller {
 
 struct GenericPluginTeller : public Teller {
  public:
-  GenericPluginTeller() {}
+  GenericPluginTeller() = default;
   bool operator()(const framework::OpDesc& desc,
                   bool use_no_calib_int8 = false,
                   bool with_dynamic_shape = false) override {
@@ -3125,7 +3125,7 @@ struct GenericPluginTeller : public Teller {
 
 struct CustomPluginTeller : public Teller {
  public:
-  CustomPluginTeller() {}
+  CustomPluginTeller() = default;
   bool operator()(const framework::OpDesc& desc,
                   bool use_no_calib_int8 = false,
                   bool with_dynamic_shape = false) override {
@@ -3178,7 +3178,7 @@ bool OpTeller::Tell(const framework::ir::Node* node,
   return false;
 }
 
-OpTeller::OpTeller() {
+OpTeller::OpTeller() {  // NOLINT
   tellers_.emplace_back(new tensorrt::SimpleOpTypeSetTeller);
   tellers_.emplace_back(new tensorrt::GenericPluginTeller);
   tellers_.emplace_back(new tensorrt::CustomPluginTeller);
diff --git a/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_plugin.cu
index c5082e9f851dc..c0d0d8b90feae 100644
--- a/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_plugin.cu
@@ -405,7 +405,7 @@ char const* EmbLayerNormPlugin::getPluginNamespace() const noexcept {
   return mNamespace.c_str();
 }
 
-EmbLayerNormPluginCreator::EmbLayerNormPluginCreator() {}
+EmbLayerNormPluginCreator::EmbLayerNormPluginCreator() = default;
 
 char const* EmbLayerNormPluginCreator::getPluginName() const noexcept {
   return EMB_LAYER_NORM_NAME;
diff --git a/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.cu
index e9ad763b43c8b..500e18e741707 100644
--- a/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.cu
@@ -771,7 +771,7 @@ char const* EmbLayerNormVarSeqlenPluginBase::getPluginNamespace()
 }
 
 EmbLayerNormVarSeqlenPluginBaseCreator::
-    EmbLayerNormVarSeqlenPluginBaseCreator() {}
+    EmbLayerNormVarSeqlenPluginBaseCreator() = default;
 
 char const* EmbLayerNormVarSeqlenPluginBaseCreator::getPluginName()
     const noexcept {
diff --git a/paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.cu
index e3a8bf2270e73..83208887f417c 100644
--- a/paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.cu
@@ -405,7 +405,7 @@ void RoiAlignPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
 
 void RoiAlignPluginDynamic::destroy() TRT_NOEXCEPT {}
 
-RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() {}
+RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() = default;
 
 void RoiAlignPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
     TRT_NOEXCEPT {
diff --git a/paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.cu
index e77f12769c0f3..fde877fb46e85 100644
--- a/paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.cu
@@ -36,7 +36,7 @@ StackPluginDynamic::StackPluginDynamic(void const* serial_data,
   DeserializeValue(&serial_data, &serial_length, &with_fp16_);
 }
 
-StackPluginDynamic::~StackPluginDynamic() {}
+StackPluginDynamic::~StackPluginDynamic() = default;
 
 nvinfer1::IPluginV2DynamicExt* StackPluginDynamic::clone() const TRT_NOEXCEPT {
   return new StackPluginDynamic(axis_, num_stack_, with_fp16_);
@@ -230,7 +230,7 @@ int StackPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
   return cudaGetLastError() != cudaSuccess;
 }
 
-StackPluginDynamicCreator::StackPluginDynamicCreator() {}
+StackPluginDynamicCreator::StackPluginDynamicCreator() = default;
 
 const char* StackPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
   return "stack_plugin";
diff --git a/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
index 4cd7b65e2756b..a8bf130978dfd 100644
--- a/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
@@ -437,7 +437,7 @@ nvinfer1::IPluginV2Ext* YoloBoxPlugin::clone() const TRT_NOEXCEPT {
       input_w_);
 }
 
-YoloBoxPluginCreator::YoloBoxPluginCreator() {}
+YoloBoxPluginCreator::YoloBoxPluginCreator() = default;
 
 void YoloBoxPluginCreator::setPluginNamespace(const char* lib_namespace)
     TRT_NOEXCEPT {
diff --git a/paddle/fluid/ir_adaptor/translator/attribute_translator.cc b/paddle/fluid/ir_adaptor/translator/attribute_translator.cc
index 49845754b5027..ada0d80688ac2 100644
--- a/paddle/fluid/ir_adaptor/translator/attribute_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/attribute_translator.cc
@@ -34,7 +34,7 @@ class AttributeVisitor {
  public:
   ir::IrContext* ctx;
   AttributeVisitor() { ctx = ir::IrContext::Instance(); }
-  ~AttributeVisitor() {}
+  ~AttributeVisitor() = default;
 
  public:
   virtual ir::Attribute operator()(int i) {
diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index b2a886e4aee4f..1d8866c43ed23 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -127,7 +127,7 @@ class CUDAGraphAllocator
       : underlying_allocator_(allocator) {}
 
  public:
-  ~CUDAGraphAllocator() override {}
+  ~CUDAGraphAllocator() override = default;
 
   static std::shared_ptr Create(
       const std::shared_ptr& allocator) {
@@ -1272,7 +1272,7 @@ AllocatorFacadePrivate::AllocatorMap
     AllocatorFacadePrivate::system_allocators_;
 
 AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
 // delete m_ may cause core dump when the destructor of python in conflict with
 // cpp.
-AllocatorFacade::~AllocatorFacade() {}
+AllocatorFacade::~AllocatorFacade() = default;
 
 AllocatorFacade& AllocatorFacade::Instance() {
   static AllocatorFacade* instance = new AllocatorFacade;
diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op.cc b/paddle/fluid/operators/collective/c_comm_init_all_op.cc
index d125354353d91..2dc9af0139546 100644
--- a/paddle/fluid/operators/collective/c_comm_init_all_op.cc
+++ b/paddle/fluid/operators/collective/c_comm_init_all_op.cc
@@ -38,7 +38,7 @@ namespace operators {
 
 class CCommInitAllInferShape : public framework::InferShapeBase {
  public:
-  ~CCommInitAllInferShape() override {}
+  ~CCommInitAllInferShape() override = default;
   void operator()(framework::InferShapeContext* ctx) const override{};
 };
diff --git a/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc b/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc
index cf209fab7c842..39d22fcd5f50d 100644
--- a/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc
+++ b/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc
@@ -38,7 +38,7 @@ namespace operators {
 
 class CCommInitMultiTrainerInferShape : public framework::InferShapeBase {
  public:
-  ~CCommInitMultiTrainerInferShape() override {}
+  ~CCommInitMultiTrainerInferShape() override = default;
   void operator()(framework::InferShapeContext* ctx) const override{};
 };
diff --git a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu
index dee676a7640f4..f4a9f0a77a53b 100644
--- a/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu
+++ b/paddle/fluid/operators/fused/fused_fc_elementwise_layernorm_op.cu
@@ -44,7 +44,7 @@ static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
 
 template <typename T>
 struct PairForLayerNorm {
-  __device__ __forceinline__ PairForLayerNorm() {}
+  __device__ __forceinline__ PairForLayerNorm() = default;
   __device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
       : first_(first), second_(second) {}
diff --git a/paddle/fluid/operators/math/beam_search.cc b/paddle/fluid/operators/math/beam_search.cc
index 2b6599e4f39ff..1e3f4f390f652 100644
--- a/paddle/fluid/operators/math/beam_search.cc
+++ b/paddle/fluid/operators/math/beam_search.cc
@@ -113,7 +113,7 @@ class BeamSearchFunctor {
    * The basic items help to sort.
    */
   struct Item {
-    Item() {}
+    Item() = default;
    Item(size_t offset, size_t id, float score)
        : offset(offset), id(id), score(score) {}
    // offset in the higher lod level.
diff --git a/paddle/fluid/operators/math/beam_search.cu b/paddle/fluid/operators/math/beam_search.cu
index b9f5624db7317..bd8e905389e81 100644
--- a/paddle/fluid/operators/math/beam_search.cu
+++ b/paddle/fluid/operators/math/beam_search.cu
@@ -21,7 +21,7 @@ namespace operators {
 namespace math {
 
 struct Triple {
-  __device__ __forceinline__ Triple() {}
+  __device__ __forceinline__ Triple() = default;
   __device__ __forceinline__ Triple(int o, int i, float s)
       : offset(o), id(i), score(s) {}
diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc
index a9d31ac783780..fb6931836d764 100644
--- a/paddle/fluid/operators/math/sampler.cc
+++ b/paddle/fluid/operators/math/sampler.cc
@@ -22,7 +22,7 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-Sampler::~Sampler() {}
+Sampler::~Sampler() = default;
 
 UniformSampler::UniformSampler(int64_t range, unsigned int seed)
     : Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
diff --git a/paddle/fluid/operators/reader/py_reader.cc b/paddle/fluid/operators/reader/py_reader.cc
index 89a5c256add4f..2db8ac6b1bcb9 100644
--- a/paddle/fluid/operators/reader/py_reader.cc
+++ b/paddle/fluid/operators/reader/py_reader.cc
@@ -36,7 +36,9 @@ void PyReader::ReadNext(paddle::framework::LoDTensorArray* out) {
   if (!success) out->clear();
 }
 
-PyReader::~PyReader() { queue_->Close(); }
+PyReader::~PyReader() {  // NOLINT
+  queue_->Close();
+}
 
 void PyReader::Shutdown() { queue_->Close(); }
diff --git a/paddle/fluid/platform/profiler/cuda_tracer.cc b/paddle/fluid/platform/profiler/cuda_tracer.cc
index 5f64f114daec0..a462521db5144 100644
--- a/paddle/fluid/platform/profiler/cuda_tracer.cc
+++ b/paddle/fluid/platform/profiler/cuda_tracer.cc
@@ -47,7 +47,7 @@ std::unordered_map CreateThreadIdMapping() {
   }
 }  // namespace details
 
-CudaTracer::CudaTracer() {}
+CudaTracer::CudaTracer() = default;
 
 void CudaTracer::PrepareTracing() {
   PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc b/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc
index 740f27f2922a3..7ea473dfdc150 100644
--- a/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc
+++ b/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc
@@ -32,7 +32,7 @@ CustomTracer::CustomTracer(const std::string& dev_type) : dev_type_(dev_type) {
 #endif
 }
 
-CustomTracer::~CustomTracer() {
+CustomTracer::~CustomTracer() {  // NOLINT
 #ifdef PADDLE_WITH_CUSTOM_DEVICE
   phi::DeviceManager::ProfilerFinalize(dev_type_, &collector_, context_);
 #endif
diff --git a/paddle/ir/core/ir_context.cc b/paddle/ir/core/ir_context.cc
index acef7c4c02654..5c609f183c40d 100644
--- a/paddle/ir/core/ir_context.cc
+++ b/paddle/ir/core/ir_context.cc
@@ -29,7 +29,7 @@ namespace ir {
 // AbstractType, TypeStorage, AbstractAttribute, AttributeStorage, Dialect.
 class IrContextImpl {
  public:
-  IrContextImpl() {}
+  IrContextImpl() = default;
 
   ~IrContextImpl() {
     std::lock_guard guard(destructor_lock_);
diff --git a/paddle/ir/core/storage_manager.cc b/paddle/ir/core/storage_manager.cc
index 7bdc131bbc387..e1f6c3afe8631 100644
--- a/paddle/ir/core/storage_manager.cc
+++ b/paddle/ir/core/storage_manager.cc
@@ -66,7 +66,7 @@ struct ParametricStorageManager {
   std::function destroy_;
 };
 
-StorageManager::StorageManager() {}
+StorageManager::StorageManager() = default;
 
 StorageManager::~StorageManager() = default;
diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h
index 84540605c817e..afb17f62035d1 100644
--- a/paddle/phi/api/include/tensor.h
+++ b/paddle/phi/api/include/tensor.h
@@ -53,7 +53,7 @@ using IntArray = experimental::IntArray;
 class AbstractAutogradMeta {
  public:
   // No AbstractAutogradMeta should be created
-  virtual ~AbstractAutogradMeta() {}
+  virtual ~AbstractAutogradMeta() = default;
 };
 
 /**
diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index 305f50ead0d36..9924c95cd0759 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -403,12 +403,7 @@ void Tensor::reset() {
 
 /* Part 6: Operator overloading */
 
-Tensor &Tensor::operator=(const Tensor &x) & {
-  impl_ = x.impl_;
-  autograd_meta_ = x.autograd_meta_;
-  name_ = x.name_;
-  return *this;
-}
+Tensor &Tensor::operator=(const Tensor &x) & = default;
 
 Tensor &Tensor::operator=(Tensor &&x) & {
   impl_ = std::move(x.impl_);
diff --git a/paddle/phi/backends/dynload/dynamic_loader.cc b/paddle/phi/backends/dynload/dynamic_loader.cc
index 899853dcbf155..e266fb0357884 100644
--- a/paddle/phi/backends/dynload/dynamic_loader.cc
+++ b/paddle/phi/backends/dynload/dynamic_loader.cc
@@ -109,7 +109,7 @@ namespace phi {
 namespace dynload {
 
 struct PathNode {
-  PathNode() {}
+  PathNode() = default;
   std::string path = "";
 };
diff --git a/paddle/phi/backends/gpu/gpu_context.cc b/paddle/phi/backends/gpu/gpu_context.cc
index ac7de9d33a7aa..33076ab8e6544 100644
--- a/paddle/phi/backends/gpu/gpu_context.cc
+++ b/paddle/phi/backends/gpu/gpu_context.cc
@@ -66,7 +66,7 @@ class EigenGpuStreamDevice : public Eigen::StreamInterface {
   EigenGpuStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
     Eigen::initializeDeviceProp();
   }
-  ~EigenGpuStreamDevice() override {}
+  ~EigenGpuStreamDevice() override = default;
 
   void Reinitialize(gpuStream_t cuda_stream,
                     Allocator* allocator,
diff --git a/paddle/phi/backends/onednn/onednn_context.cc b/paddle/phi/backends/onednn/onednn_context.cc
index 5fc81d30b65de..19836949e6f8a 100644
--- a/paddle/phi/backends/onednn/onednn_context.cc
+++ b/paddle/phi/backends/onednn/onednn_context.cc
@@ -95,7 +95,7 @@ struct OneDNNContext::Impl {
     p_mutex_.reset(new std::mutex());
   }
 
-  ~Impl() {}
+  ~Impl() = default;
 
   void ResetBlobMap(void* ptr) {
     VLOG(4) << OneDNNContext::tls().get_curr_exec() << " " << ptr;
diff --git a/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc b/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
index 916f25d1b50cf..e951bc07ba871 100644
--- a/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
+++ b/paddle/phi/kernels/cpu/multiclass_nms3_kernel.cc
@@ -27,7 +27,7 @@ template
 class Point_ {
  public:
   // default constructor
-  Point_() {}
+  Point_() = default;
   Point_(T _x, T _y) {}
   Point_(const Point_& pt UNUSED) {}
diff --git a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc
index 9be2d7e3731dc..82415b88b662f 100644
--- a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc
@@ -53,7 +53,7 @@ void CreateLstmGrad(phi::funcs::LstmMetaGrad* lstm_grad) {
 
 template <typename T>
 struct GradCell {
-  virtual ~GradCell() {}
+  virtual ~GradCell() = default;
   virtual void operator()(const CPUContext& dev_ctx UNUSED,
                           DenseTensor* gate_tensor UNUSED,
                           DenseTensor* state_tensor UNUSED,
@@ -355,7 +355,7 @@ struct LSTMGradCell : GradCell {
 
 template <typename T, typename GradCellType>
 struct GradLayer {
   explicit GradLayer(const GradCellType& cell) : cell_(cell) {}
-  virtual ~GradLayer() {}
+  virtual ~GradLayer() = default;
   void run_rnn_grad_function(
       const CPUContext& dev_ctx,
       const DenseTensor* input,
@@ -690,7 +690,7 @@ struct SingleGradLayer : GradLayer {
   // explicit SingleGradLayer(GradCellType& cell) : cell_(cell) {}
   explicit SingleGradLayer(const GradCellType& cell) : GradLayer(cell) {}
-  ~SingleGradLayer() override {}
+  ~SingleGradLayer() override = default;
   void operator()(const CPUContext& dev_ctx,
                   const DenseTensor* input,
                   const DenseTensor* output,
@@ -802,7 +802,7 @@ template
 struct BidirGradLayer : GradLayer {
   explicit BidirGradLayer(const GradCellType& cell) : GradLayer(cell) {}
-  ~BidirGradLayer() override {}
+  ~BidirGradLayer() override = default;
   void operator()(const CPUContext& dev_ctx,
                   const DenseTensor* input,
                   const DenseTensor* output,
diff --git a/paddle/phi/kernels/cpu/rnn_kernel.cc b/paddle/phi/kernels/cpu/rnn_kernel.cc
index e899ee4e4260d..b2e7dd19fafd8 100644
--- a/paddle/phi/kernels/cpu/rnn_kernel.cc
+++ b/paddle/phi/kernels/cpu/rnn_kernel.cc
@@ -34,7 +34,7 @@ namespace phi {
 
 template <typename T>
 struct Cell {
-  virtual ~Cell() {}
+  virtual ~Cell() = default;
   virtual void operator()(const CPUContext* dev_ctx UNUSED,
                           DenseTensor* input UNUSED,
                           const DenseTensor* weight_hh UNUSED,
@@ -208,7 +208,7 @@ struct LSTMCell : Cell {
 
 template <typename T, typename CellType>
 struct Layer {
   explicit Layer(const CellType& cell) : cell_(cell) {}
-  virtual ~Layer() {}
+  virtual ~Layer() = default;
   void preprocess(const CPUContext& dev_ctx,
                   const DenseTensor& input,
                   const DenseTensor& weight,
diff --git a/paddle/phi/kernels/funcs/concat_and_split_functor.cu b/paddle/phi/kernels/funcs/concat_and_split_functor.cu
index d2e01503d43b2..975b1361423ab 100644
--- a/paddle/phi/kernels/funcs/concat_and_split_functor.cu
+++ b/paddle/phi/kernels/funcs/concat_and_split_functor.cu
@@ -65,7 +65,7 @@ struct PointerWrapper {
   const void* ins_addr[Size];
   __device__ inline const void* operator[](int i) const { return ins_addr[i]; }
 
-  PointerWrapper() {}
+  PointerWrapper() = default;
   PointerWrapper(const phi::GPUContext& ctx,
                  const std::vector& ins,
                  const T** pre_alloced_host_ptr) {
@@ -84,7 +84,7 @@ template
 struct PADDLE_ALIGN(256) AlignedPointerWrapper : public PointerWrapper {
  public:
-  AlignedPointerWrapper() {}
+  AlignedPointerWrapper() = default;
   AlignedPointerWrapper(const phi::GPUContext& ctx,
                         const std::vector& ins,
                         const T** pre_alloced_host_ptr) {
@@ -98,7 +98,7 @@ struct PointerToPointer {
   void** ins_addr{nullptr};
   __device__ inline const void* operator[](int i) const { return ins_addr[i]; }
 
-  PointerToPointer() {}
+  PointerToPointer() = default;
   PointerToPointer(const phi::GPUContext& ctx,
                    const std::vector& ins,
                    const T** pre_alloced_host_ptr,
@@ -186,9 +186,7 @@ struct PointerToPointerAndCol {
 
 template <int MovSize>
 struct alignas(MovSize) Packed {
-  __device__ Packed() {
-    // do nothing
-  }
+  __device__ Packed() = default;
   union {
     char buf[MovSize];
   };
@@ -621,7 +619,7 @@ struct PointerAndColArray
  public:
   funcs::ValueArray val_array;
 
-  PointerAndColArray() {}
+  PointerAndColArray() = default;
   PointerAndColArray(const phi::GPUContext& ctx,
                      const int out_col_num,
                      IndexT* out_cols,
diff --git a/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu b/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
index 983fdb2656465..4feadcf899a44 100644
--- a/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
@@ -32,7 +32,7 @@ namespace phi {
 
 template
 struct MaskedSelectGradFunctor {
-  HOSTDEVICE MaskedSelectGradFunctor() {}
+  HOSTDEVICE MaskedSelectGradFunctor() = default;
 
   HOSTDEVICE inline void operator()(OutT* out,
                                     const MT* mask,
diff --git a/paddle/phi/kernels/gpu/masked_select_kernel.cu b/paddle/phi/kernels/gpu/masked_select_kernel.cu
index 89cb714d78db9..cd92d7f03e7df 100644
--- a/paddle/phi/kernels/gpu/masked_select_kernel.cu
+++ b/paddle/phi/kernels/gpu/masked_select_kernel.cu
@@ -30,7 +30,7 @@ namespace phi {
 
 template
 struct MaskedSelectFunctor {
-  HOSTDEVICE MaskedSelectFunctor() {}
+  HOSTDEVICE MaskedSelectFunctor() = default;
 
   HOSTDEVICE inline void operator()(OutT* out,
                                     const MT* mask,
diff --git a/paddle/phi/kernels/gpu/p_norm_kernel.cu b/paddle/phi/kernels/gpu/p_norm_kernel.cu
index fb869a00d9c50..556a6308ff47a 100644
--- a/paddle/phi/kernels/gpu/p_norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/p_norm_kernel.cu
@@ -63,7 +63,7 @@ __device__ __forceinline__ double inline_pow(double base, double exponent) {
 
 template <typename T>
 struct NonzeroFunctor {
-  HOSTDEVICE explicit inline NonzeroFunctor() {}
+  HOSTDEVICE explicit inline NonzeroFunctor() = default;
   HOSTDEVICE inline T operator()(const T x) const {
     return static_cast(static_cast(x) != 0);
   }
@@ -71,7 +71,7 @@ struct NonzeroFunctor {
 
 template <typename T>
 struct AbsFunctor {
-  HOSTDEVICE explicit inline AbsFunctor() {}
+  HOSTDEVICE explicit inline AbsFunctor() = default;
   HOSTDEVICE inline T operator()(const T x) const {
     return static_cast(inline_abs(x));
   }
diff --git a/paddle/phi/kernels/gpu/rms_norm_kernel.cu b/paddle/phi/kernels/gpu/rms_norm_kernel.cu
index ccbb1f2f4baa6..37b937ead81af 100644
--- a/paddle/phi/kernels/gpu/rms_norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/rms_norm_kernel.cu
@@ -177,9 +177,7 @@ typename std::enable_if::value == false, bool>::type CanPackAs(
 
 template <typename T, int N>
 struct alignas(sizeof(T) * N) Pack {
-  __device__ Pack() {
-    // do nothing
-  }
+  __device__ Pack() = default;
   T elem[N];
 };
diff --git a/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc b/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
index 0d88d44defffc..e8cc2ab121e23 100644
--- a/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
+++ b/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
@@ -375,7 +375,7 @@ class CudnnBNAddReluTester {
     SetUp();
   }
 
-  ~CudnnBNAddReluTester() {}
+  ~CudnnBNAddReluTester() = default;
 
   void CheckForward(float diff, bool is_relative_atol = false) {
     LOG(INFO) << "[CheckForward, diff=" << diff
diff --git a/test/cpp/fluid/fused/cudnn_norm_conv_test.cc b/test/cpp/fluid/fused/cudnn_norm_conv_test.cc
index 4f7555aed8282..16ea8f5ade084 100644
--- a/test/cpp/fluid/fused/cudnn_norm_conv_test.cc
+++ b/test/cpp/fluid/fused/cudnn_norm_conv_test.cc
@@ -235,7 +235,7 @@ class CudnnNormConvolutionTester {
     SetUp();
   }
 
-  ~CudnnNormConvolutionTester() {}
+  ~CudnnNormConvolutionTester() = default;
 
   void CheckForward(float diff, bool is_relative_atol = false) {
     phi::GPUContext *ctx = static_cast(
diff --git a/test/cpp/fluid/fused/fused_dropout_act_bias_test.cu b/test/cpp/fluid/fused/fused_dropout_act_bias_test.cu
index 29fb7701d0c6a..db2289d74282c 100644
--- a/test/cpp/fluid/fused/fused_dropout_act_bias_test.cu
+++ b/test/cpp/fluid/fused/fused_dropout_act_bias_test.cu
@@ -89,7 +89,7 @@ struct TestFusedDropoutActBias {
     ctx = reinterpret_cast(devicectx);
   }
 
-  ~TestFusedDropoutActBias() {}
+  ~TestFusedDropoutActBias() = default;
 
   void SetUp() {
     const int n = rows * cols;
diff --git a/test/cpp/fluid/fused/fused_layernorm_residual_dropout_bias_test.cu b/test/cpp/fluid/fused/fused_layernorm_residual_dropout_bias_test.cu
index 60cb67c837a12..3374bf83e2f6c 100644
--- a/test/cpp/fluid/fused/fused_layernorm_residual_dropout_bias_test.cu
+++ b/test/cpp/fluid/fused/fused_layernorm_residual_dropout_bias_test.cu
@@ -95,7 +95,7 @@ struct TestFusedLayernormResidualDropoutBias {
     ctx = reinterpret_cast(devicectx);
   }
 
-  ~TestFusedLayernormResidualDropoutBias() {}
+  ~TestFusedLayernormResidualDropoutBias() = default;
 
   void SetUp() {
     using U = LayerNormParamType;
diff --git a/test/cpp/inference/api/analyzer_dam_tester.cc b/test/cpp/inference/api/analyzer_dam_tester.cc
index 46ac10d51c7d1..b6242510d446f 100644
--- a/test/cpp/inference/api/analyzer_dam_tester.cc
+++ b/test/cpp/inference/api/analyzer_dam_tester.cc
@@ -35,7 +35,7 @@ struct DataRecord {
   size_t batch_size{1};
   size_t num_samples;  // total number of samples
 
-  DataRecord() {
+  DataRecord() {  // NOLINT
     turns = new std::vector<std::vector<int64_t>>[FLAGS_max_turn_num];  // turns data : FLAGS_max_turn_num
     turns_mask = new std::vector