diff --git a/paddle/fluid/distributed/collective/reducer.cc b/paddle/fluid/distributed/collective/reducer.cc
index 1438bf1f161ec8..b1e81130728e45 100644
--- a/paddle/fluid/distributed/collective/reducer.cc
+++ b/paddle/fluid/distributed/collective/reducer.cc
@@ -21,8 +21,7 @@
 PD_DECLARE_bool(use_stream_safe_cuda_allocator);
 COMMON_DECLARE_string(allocator_strategy);
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 static bool IsStreamSafeAllocator() {
   return (FLAGS_allocator_strategy == "auto_growth" &&
@@ -1367,5 +1366,4 @@ std::ostream &operator<<(std::ostream &out, const EagerGroup &group) {
   return out;
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
diff --git a/paddle/fluid/distributed/fleet_executor/message_service.cc b/paddle/fluid/distributed/fleet_executor/message_service.cc
index d99ca36ee4a482..5862a48182f7bf 100644
--- a/paddle/fluid/distributed/fleet_executor/message_service.cc
+++ b/paddle/fluid/distributed/fleet_executor/message_service.cc
@@ -18,8 +18,7 @@
 #include "paddle/fluid/distributed/fleet_executor/global.h"
 #include "paddle/fluid/distributed/fleet_executor/message_bus.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 void MessageServiceImpl::ReceiveInterceptorMessage(
     google::protobuf::RpcController* control_base,
@@ -46,6 +45,5 @@ void MessageServiceImpl::IncreaseBarrierCount(
   response->set_rst(true);
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
 #endif
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.cc b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
index 66a5fb1dd5a07a..e57ec2ef187c80 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.cc
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -25,8 +25,7 @@ limitations under the License. */
 #define LEARNING_RATE_DECAY_COUNTER "@LR_DECAY_COUNTER@"
 #define STEP_COUNTER "@PS_STEP_COUNTER@"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 using phi::SelectedRows;
 
@@ -1590,5 +1589,4 @@ void FLCommunicator::StartCoordinator(
       new std::thread(&FLCommunicator::SendThreadAsync, this));
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
diff --git a/paddle/fluid/framework/fleet/metrics.cc b/paddle/fluid/framework/fleet/metrics.cc
index 82bbd16b483ae2..5bb768529bc493 100644
--- a/paddle/fluid/framework/fleet/metrics.cc
+++ b/paddle/fluid/framework/fleet/metrics.cc
@@ -21,8 +21,7 @@
 #include "paddle/fluid/framework/lod_tensor.h"
 
 #if defined(PADDLE_WITH_PSLIB) || defined(PADDLE_WITH_PSCORE)
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 
 std::shared_ptr<Metric> Metric::s_instance_ = nullptr;
 
@@ -393,6 +392,5 @@ BasicAucCalculator::WuaucRocData BasicAucCalculator::computeSingleUserAuc(
   return {tp, fp, auc};
 }
 
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
 #endif
diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
index 08d93630eadcdc..4e68d98be13ab5 100644
--- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -20,9 +20,7 @@
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 static int BuildFusion(Graph* graph,
                        const std::string& name_scope,
@@ -288,9 +286,7 @@ void EmbeddingFCLSTMFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(fusion_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(embedding_fc_lstm_fuse_pass,
               paddle::framework::ir::EmbeddingFCLSTMFusePass);
diff --git a/paddle/fluid/framework/ir/fused_continuous_same_ops_pass.cc b/paddle/fluid/framework/ir/fused_continuous_same_ops_pass.cc
index 62b043bf3294b0..ec67c675076a77 100644
--- a/paddle/fluid/framework/ir/fused_continuous_same_ops_pass.cc
+++ b/paddle/fluid/framework/ir/fused_continuous_same_ops_pass.cc
@@ -21,19 +21,16 @@
 namespace phi {
 class DenseTensor;
-}  // namespace phi
+}  // namespace phi
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Scope;
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
-namespace patterns {
+}  // namespace paddle::framework::ir
+namespace paddle::framework::ir::patterns {
 
 struct ContinuousSameOpsPattern : public PatternBase {
   ContinuousSameOpsPattern(PDPattern* pattern,
@@ -85,7 +82,8 @@ ContinuousSameOpsPattern::ContinuousSameOpsPattern(
       .LinksTo({second_out_var_node});
 }
 
-}  // namespace patterns
+}  // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {
 
 /* Fused continuous same ops into one.
@@ -221,9 +219,7 @@ void FusedContinuousSameOpsPass::ApplyImpl(ir::Graph* graph) const {
   LOG(INFO) << "Total delete op counts: " << total_delete_op_count;
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(fused_continuous_same_ops_pass,
               paddle::framework::ir::FusedContinuousSameOpsPass);
diff --git a/paddle/fluid/framework/ir/fused_feedforward_pass.cc b/paddle/fluid/framework/ir/fused_feedforward_pass.cc
index 0ae23adfc425c8..4f1c51b96b1c42 100644
--- a/paddle/fluid/framework/ir/fused_feedforward_pass.cc
+++ b/paddle/fluid/framework/ir/fused_feedforward_pass.cc
@@ -19,9 +19,7 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void FusedFeedForwardPass::ApplyImpl(ir::Graph *graph) const {
   FusePassBase::Init(scope_name, graph);
@@ -752,9 +750,7 @@ ir::Graph *FusedFeedForwardPass::FusedFeedForwardBwd(
   return graph;
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(fused_feedforward_pass,
               paddle::framework::ir::FusedFeedForwardPass);
diff --git a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc
index 24e5712462b113..9ebe1fdcce309d 100644
--- a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc
+++ b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc
@@ -19,10 +19,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/ir/fusion_group/operation.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace fusion_group {
+namespace paddle::framework::ir::fusion_group {
 
 template <typename T>
 static T StringTo(const std::string& str) {
@@ -219,7 +216,4 @@ std::string OperationExpression::GetExpression(
   return ret.str();
 }
 
-}  // namespace fusion_group
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir::fusion_group
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc
index 69d304d0a7c2b0..501ed3fec11c46 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc
@@ -23,9 +23,7 @@
 #include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
 #include "paddle/fluid/framework/ir/pass.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Graph;
 
@@ -362,9 +360,7 @@ void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const {
   }
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(reference_count_pass, paddle::framework::ir::ReferenceCountPass)
     .RequirePassAttr(paddle::framework::ir::kMemOptVarInfoMapList)
diff --git a/paddle/fluid/framework/ir/onednn/cpu_bfloat16_placement_pass.cc b/paddle/fluid/framework/ir/onednn/cpu_bfloat16_placement_pass.cc
index a07887dafb2767..9adc07d9c14639 100644
--- a/paddle/fluid/framework/ir/onednn/cpu_bfloat16_placement_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/cpu_bfloat16_placement_pass.cc
@@ -21,9 +21,7 @@ limitations under the License. */
*/ #include "paddle/fluid/platform/onednn_helper.h" #include "paddle/utils/string/pretty_log.h" -namespace paddle { -namespace framework { -namespace ir { +namespace paddle::framework::ir { using string::PrettyLogDetail; @@ -111,9 +109,7 @@ int CPUBfloat16PlacementPass::RemoveUnsupportedOperators( return detected_operators; } -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir REGISTER_PASS(cpu_bfloat16_placement_pass, paddle::framework::ir::CPUBfloat16PlacementPass) diff --git a/paddle/fluid/framework/ir/onednn/fc_onednn_pass.cc b/paddle/fluid/framework/ir/onednn/fc_onednn_pass.cc index 082579428a01a4..62a0f3d20f19d6 100644 --- a/paddle/fluid/framework/ir/onednn/fc_onednn_pass.cc +++ b/paddle/fluid/framework/ir/onednn/fc_onednn_pass.cc @@ -17,15 +17,11 @@ #include "paddle/phi/core/enforce.h" #include "paddle/utils/string/pretty_log.h" -namespace paddle { -namespace framework { +namespace paddle::framework { class OpDesc; -} // namespace framework -} // namespace paddle +} // namespace paddle::framework -namespace paddle { -namespace framework { -namespace ir { +namespace paddle::framework::ir { class Graph; @@ -85,8 +81,6 @@ void FCMKLDNNPass::ApplyImpl(ir::Graph* graph) const { } } -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir REGISTER_PASS(fc_onednn_pass, paddle::framework::ir::FCMKLDNNPass); diff --git a/paddle/fluid/framework/ir/preln_layernorm_x_fuse_pass.cc b/paddle/fluid/framework/ir/preln_layernorm_x_fuse_pass.cc index 5f4c59333f2602..08a576c64fa565 100644 --- a/paddle/fluid/framework/ir/preln_layernorm_x_fuse_pass.cc +++ b/paddle/fluid/framework/ir/preln_layernorm_x_fuse_pass.cc @@ -19,18 +19,11 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/ir/graph_pattern_detector.h" #include "paddle/fluid/framework/op_version_registry.h" -namespace paddle { -namespace framework { -namespace ir { +namespace paddle::framework::ir { class Node; -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir -namespace paddle { -namespace framework { -namespace ir { -namespace patterns { +namespace paddle::framework::ir::patterns { struct PrelnLayerNormX : public PatternBase { PrelnLayerNormX(PDPattern *pattern, const std::string &name_scope) @@ -86,7 +79,8 @@ void PrelnLayerNormX::operator()(PDNode *x, .LinksTo({layer_norm_out_var}); } -} // namespace patterns +} // namespace paddle::framework::ir::patterns +namespace paddle::framework::ir { int PrelnLayerNormXFusePass::ApplyLayerNormShiftPattern( ir::Graph *graph) const { @@ -253,9 +247,7 @@ void PrelnLayerNormXFusePass::ApplyImpl(ir::Graph *graph) const { AddStatis(found_subgraph_count); } -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir REGISTER_PASS(preln_layernorm_x_fuse_pass, paddle::framework::ir::PrelnLayerNormXFusePass); diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc index 40519a58d006f9..72e95c5f441500 100644 --- a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc +++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc @@ -16,19 +16,13 @@ #include -namespace paddle { -namespace framework { -namespace ir { +namespace paddle::framework::ir { class Node; -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir #define MAX_CONCAT_INPUTS 200 -namespace paddle { -namespace framework { -namespace ir { +namespace paddle::framework::ir { PDNode* BuildSeqPoolConcatPattern(PDPattern* pattern, const std::string& name_scope, @@ -217,9 +211,7 @@ void SeqPoolConcatFusePass::ApplyImpl(ir::Graph* graph) const { AddStatis(fusion_count); } -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir REGISTER_PASS(seqpool_concat_fuse_pass, paddle::framework::ir::SeqPoolConcatFusePass); diff --git a/paddle/fluid/framework/ir/sync_batch_norm_pass.cc b/paddle/fluid/framework/ir/sync_batch_norm_pass.cc index 828418597e623a..5f5a032f6c5c82 100644 --- a/paddle/fluid/framework/ir/sync_batch_norm_pass.cc +++ b/paddle/fluid/framework/ir/sync_batch_norm_pass.cc @@ -15,9 +15,7 @@ limitations under the License. 
*/ #include "glog/logging.h" #include "paddle/fluid/framework/ir/pass.h" -namespace paddle { -namespace framework { -namespace ir { +namespace paddle::framework::ir { class Graph; @@ -43,8 +41,6 @@ class SyncBatchNormPass : public Pass { } } }; -} // namespace ir -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::ir REGISTER_PASS(sync_batch_norm_pass, paddle::framework::ir::SyncBatchNormPass); diff --git a/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc b/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc index e9df08d4698e28..9264e5fc92f7fd 100644 --- a/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc +++ b/paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.cc @@ -14,8 +14,7 @@ #include "paddle/fluid/framework/new_executor/garbage_collector/fast_garbage_collector.h" -namespace paddle { -namespace framework { +namespace paddle::framework { void InterpreterCoreFastGarbageCollector::Add(Variable* var, const Instruction&) { @@ -102,5 +101,4 @@ void InterpreterCoreFastGarbageCollector::Add(Garbage garbage) { } } -} // namespace framework -} // namespace paddle +} // namespace paddle::framework diff --git a/paddle/fluid/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc index ca1e1b5296d7bc..d4a993b53d9b6a 100644 --- a/paddle/fluid/framework/op_registry.cc +++ b/paddle/fluid/framework/op_registry.cc @@ -17,8 +17,7 @@ limitations under the License. */ #include "glog/logging.h" -namespace paddle { -namespace framework { +namespace paddle::framework { std::unique_ptr OpRegistry::CreateOp( const std::string& type, @@ -140,5 +139,4 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return op; } -} // namespace framework -} // namespace paddle +} // namespace paddle::framework diff --git a/paddle/fluid/imperative/heter_ccl_context.cc b/paddle/fluid/imperative/heter_ccl_context.cc index 37929dc6e9c8f6..d63f416f1db8f6 100644 --- a/paddle/fluid/imperative/heter_ccl_context.cc +++ b/paddle/fluid/imperative/heter_ccl_context.cc @@ -27,14 +27,11 @@ #include "paddle/utils/string/split.h" #include "paddle/utils/string/string_helper.h" -namespace paddle { -namespace framework { +namespace paddle::framework { class Variable; -} // namespace framework -} // namespace paddle +} // namespace paddle::framework -namespace paddle { -namespace imperative { +namespace paddle::imperative { HeterParallelContext::HeterParallelContext(const ParallelStrategy &strategy, const int &device_id) @@ -196,5 +193,4 @@ void HeterParallelContext::SynchronizeCompute() { node_parallel_ctx_->SynchronizeCompute(); } -} // namespace imperative -} // namespace paddle +} // namespace paddle::imperative diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc index e2a2917e52803f..cbf22de94a7d78 100644 --- a/paddle/fluid/imperative/reducer.cc +++ b/paddle/fluid/imperative/reducer.cc @@ -26,8 +26,7 @@ #endif #include "paddle/phi/core/dense_tensor.h" #include "paddle/utils/string/string_helper.h" -namespace paddle { -namespace imperative { +namespace paddle::imperative { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \ defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_GLOO) || \ @@ -1160,5 +1159,4 @@ std::vector> AssignGroupBySize( } #endif -} // namespace imperative -} // namespace paddle +} // namespace paddle::imperative diff --git a/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc 
b/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc index d7061133070094..a3c531cb114767 100644 --- a/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc +++ b/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc @@ -22,9 +22,7 @@ #include "paddle/fluid/inference/io.h" #include "paddle/phi/common/backend.h" -namespace paddle { -namespace inference { -namespace analysis { +namespace paddle::inference::analysis { ConvertToMixedPrecisionPass::ConvertToMixedPrecisionPass( const std::string& model_file, @@ -242,6 +240,4 @@ void ConvertToMixedPrecision( pass.Run(); } -} // namespace analysis -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::analysis diff --git a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_roformer_op.cc b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_roformer_op.cc index e65dbd4832ff55..f5f48c76e8cbb9 100644 --- a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_roformer_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_roformer_op.cc @@ -15,9 +15,7 @@ limitations under the License. */ #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/plugin/multihead_matmul_roformer_plugin.h" -namespace paddle { -namespace inference { -namespace tensorrt { +namespace paddle::inference::tensorrt { class MultiheadMatMulRoformerOpConverter : public OpConverter { public: @@ -245,9 +243,7 @@ class MultiheadMatMulRoformerOpConverter : public OpConverter { } }; -} // namespace tensorrt -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::tensorrt REGISTER_TRT_OP_CONVERTER(multihead_matmul_roformer, MultiheadMatMulRoformerOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/preln_layernorm_shift_partition_op.cc b/paddle/fluid/inference/tensorrt/convert/preln_layernorm_shift_partition_op.cc index 6017edecaaa58a..3a981d6194c098 100644 --- a/paddle/fluid/inference/tensorrt/convert/preln_layernorm_shift_partition_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/preln_layernorm_shift_partition_op.cc @@ -15,9 +15,7 @@ limitations under the License. */ #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/plugin/prelnlayernorm_shift_partition_op.h" -namespace paddle { -namespace inference { -namespace tensorrt { +namespace paddle::inference::tensorrt { class PrelnLayerNormShiftPartitionOpConverter : public OpConverter { public: @@ -83,9 +81,7 @@ class PrelnLayerNormShiftPartitionOpConverter : public OpConverter { } }; -} // namespace tensorrt -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::tensorrt REGISTER_TRT_OP_CONVERTER(preln_layernorm_shift_partition, PrelnLayerNormShiftPartitionOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc b/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc index 3ed413a913fd07..8ae9aa4915e1b1 100644 --- a/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc @@ -14,9 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h" -namespace paddle { -namespace inference { -namespace tensorrt { +namespace paddle::inference::tensorrt { class YoloBoxOpConverter : public OpConverter { public: @@ -80,8 +78,6 @@ class YoloBoxOpConverter : public OpConverter { } }; -} // namespace tensorrt -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::tensorrt REGISTER_TRT_OP_CONVERTER(yolo_box, YoloBoxOpConverter); diff --git a/paddle/fluid/inference/utils/io_utils.cc b/paddle/fluid/inference/utils/io_utils.cc index db007d269b8975..d60e9ab56fd6ef 100644 --- a/paddle/fluid/inference/utils/io_utils.cc +++ b/paddle/fluid/inference/utils/io_utils.cc @@ -23,8 +23,7 @@ #include "paddle/fluid/inference/analysis/helper.h" #include "paddle/fluid/inference/utils/shape_range_info.pb.h" -namespace paddle { -namespace inference { +namespace paddle::inference { // ========================================================= // Item | Type | Bytes @@ -344,5 +343,4 @@ void UpdateShapeRangeInfo( inference::SerializeShapeRangeInfo(path, shape_range_infos); } -} // namespace inference -} // namespace paddle +} // namespace paddle::inference diff --git a/paddle/fluid/jit/engine/predictor_engine.cc b/paddle/fluid/jit/engine/predictor_engine.cc index a753adc51a5408..c96922c7d2fbc6 100644 --- a/paddle/fluid/jit/engine/predictor_engine.cc +++ b/paddle/fluid/jit/engine/predictor_engine.cc @@ -19,8 +19,7 @@ #include "paddle/fluid/jit/function_utils.h" #include "paddle/fluid/platform/device_context.h" -namespace paddle { -namespace jit { +namespace paddle::jit { PredictorEngine::PredictorEngine( const std::shared_ptr &info, @@ -84,5 +83,4 @@ std::vector PredictorEngine::operator()( return utils::ToDenseTensors(this->operator()(utils::ToTensors(inputs))); } -} // namespace jit -} // namespace paddle +} // namespace paddle::jit diff --git a/paddle/fluid/jit/serializer.cc b/paddle/fluid/jit/serializer.cc index 24bcf03a764385..8a81b3c729871c 100644 --- a/paddle/fluid/jit/serializer.cc +++ b/paddle/fluid/jit/serializer.cc @@ -29,8 +29,7 @@ COMMON_DECLARE_string(jit_engine_type); -namespace paddle { -namespace jit { +namespace paddle::jit { using FunctionInfoMap = std::unordered_map>; @@ -138,5 +137,4 @@ Layer Load(const std::string& file_path, const phi::Place& place) { return deserializer(file_path, place); } -} // namespace jit -} // namespace paddle +} // namespace paddle::jit diff --git a/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc b/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc index 52399df8ce5ff3..37dc216002dcd0 100644 --- a/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc +++ b/paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.cc @@ -18,9 +18,7 @@ #include "paddle/fluid/memory/allocation/aligned_allocator.h" -namespace paddle { -namespace memory { -namespace allocation { +namespace paddle::memory::allocation { bool NeedSplit(size_t block_size, size_t alignment, size_t alloc_size) { return block_size > (alloc_size * 2) || (block_size - alloc_size) > alignment; @@ -252,6 +250,4 @@ phi::Allocation *VirtualMemoryAutoGrowthBestFitAllocator::AllocFromFreeBlocks( return nullptr; } -} // namespace allocation -} // namespace memory -} // namespace paddle +} // namespace paddle::memory::allocation diff --git a/paddle/fluid/operators/collective/alltoall_op.cc 
b/paddle/fluid/operators/collective/alltoall_op.cc index dd7b6f166d7aed..879f1c082372db 100644 --- a/paddle/fluid/operators/collective/alltoall_op.cc +++ b/paddle/fluid/operators/collective/alltoall_op.cc @@ -14,8 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/collective/alltoall_op.h" -namespace paddle { -namespace operators { +namespace paddle::operators { class AllToAllBaseOp : public framework::OperatorWithKernel { public: @@ -61,8 +60,7 @@ Scatter tensors from all participators to all participators. } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/collective/c_split_op.cc b/paddle/fluid/operators/collective/c_split_op.cc index b0c998ff79710e..a0244e045379a0 100644 --- a/paddle/fluid/operators/collective/c_split_op.cc +++ b/paddle/fluid/operators/collective/c_split_op.cc @@ -14,8 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/collective/c_split_op.h" -namespace paddle { -namespace operators { +namespace paddle::operators { class CSplitOp : public framework::OperatorWithKernel { public: @@ -109,8 +108,7 @@ Split the tensor evenly according to its rank. } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/fused/fused_adam_op.cc b/paddle/fluid/operators/fused/fused_adam_op.cc index d786dbd7c2728f..9fccf1cb810316 100644 --- a/paddle/fluid/operators/fused/fused_adam_op.cc +++ b/paddle/fluid/operators/fused/fused_adam_op.cc @@ -17,8 +17,7 @@ #include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/infermeta/multiary.h" -namespace paddle { -namespace operators { +namespace paddle::operators { using Tensor = phi::DenseTensor; @@ -155,8 +154,7 @@ param\_out & = param - learning\_rate * (\frac{moment\_1}{\sqrt{moment\_2} + \ep } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; DECLARE_INFER_SHAPE_FUNCTOR(fused_adam, diff --git a/paddle/fluid/operators/fused/fused_matmul_op.cc b/paddle/fluid/operators/fused/fused_matmul_op.cc index 93d79d677f8a5a..0e9c34ba8aeed8 100644 --- a/paddle/fluid/operators/fused/fused_matmul_op.cc +++ b/paddle/fluid/operators/fused/fused_matmul_op.cc @@ -20,8 +20,7 @@ #include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/infermeta/binary.h" -namespace paddle { -namespace operators { +namespace paddle::operators { static std::vector GetInputShape(phi::DDim dim, std::vector shape, @@ -237,8 +236,7 @@ class FusedMatmulOpMaker : public framework::OpProtoAndCheckerMaker { } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; DECLARE_INFER_SHAPE_FUNCTOR(fused_matmul, diff --git a/paddle/fluid/operators/pull_gpups_sparse_op.cc b/paddle/fluid/operators/pull_gpups_sparse_op.cc index 1ec27120cc51fd..a283d326723ea8 100644 --- a/paddle/fluid/operators/pull_gpups_sparse_op.cc +++ b/paddle/fluid/operators/pull_gpups_sparse_op.cc @@ -14,8 +14,7 @@ #include "paddle/fluid/operators/pull_gpups_sparse_op.h" -namespace paddle { -namespace operators { +namespace paddle::operators { class PullGpuPSSparseOp : public framework::OperatorWithKernel { public: @@ -136,8 +135,7 @@ class PushGpuPSSparseOp : public framework::OperatorWithKernel { ctx.GetPlace()); } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = 
paddle::operators; REGISTER_OPERATOR(pull_gpups_sparse, diff --git a/paddle/fluid/operators/py_func_op.cc b/paddle/fluid/operators/py_func_op.cc index 140f415e39d40c..fcd788c37e6603 100644 --- a/paddle/fluid/operators/py_func_op.cc +++ b/paddle/fluid/operators/py_func_op.cc @@ -24,8 +24,7 @@ #include "paddle/fluid/framework/op_registry.h" -namespace paddle { -namespace operators { +namespace paddle::operators { namespace py = ::pybind11; @@ -348,8 +347,7 @@ class PyFuncOp : public framework::OperatorBase { } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/rrelu_op.cc b/paddle/fluid/operators/rrelu_op.cc index 3111ad4e5015d8..1f84851221ebc6 100644 --- a/paddle/fluid/operators/rrelu_op.cc +++ b/paddle/fluid/operators/rrelu_op.cc @@ -19,8 +19,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/infermeta/unary.h" -namespace paddle { -namespace operators { +namespace paddle::operators { class RReluOp : public framework::OperatorWithKernel { public: @@ -110,8 +109,7 @@ class RReluGradOpMaker : public framework::SingleGradOpMaker { } }; -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; DECLARE_INFER_SHAPE_FUNCTOR(rrelu, diff --git a/paddle/fluid/pir/dialect/operator/ir/manual_onednn_op.cc b/paddle/fluid/pir/dialect/operator/ir/manual_onednn_op.cc index 7ee35911589cd4..4dee247fd55415 100644 --- a/paddle/fluid/pir/dialect/operator/ir/manual_onednn_op.cc +++ b/paddle/fluid/pir/dialect/operator/ir/manual_onednn_op.cc @@ -43,9 +43,7 @@ paddle::onednn::dialect::ExpandOp #include "paddle/pir/include/core/ir_context.h" #include "paddle/pir/include/core/op_base.h" -namespace paddle { -namespace onednn { -namespace dialect { +namespace paddle::onednn::dialect { const char* ExpandOp::attributes_name[1] = {"mkldnn_data_type"}; // NOLINT @@ -358,9 +356,7 @@ bool ExpandOp::InferSymbolicShape( return true; } -} // namespace dialect -} // namespace onednn -} // namespace paddle +} // namespace paddle::onednn::dialect IR_DEFINE_EXPLICIT_TYPE_ID(paddle::onednn::dialect::ExpandOp) #endif diff --git a/paddle/fluid/platform/device_event_base.cc b/paddle/fluid/platform/device_event_base.cc index 6079691fe873cb..429cc6657885cb 100644 --- a/paddle/fluid/platform/device_event_base.cc +++ b/paddle/fluid/platform/device_event_base.cc @@ -17,8 +17,7 @@ #include "paddle/fluid/platform/device_event_cpu.h" #include "paddle/fluid/platform/event.h" -namespace paddle { -namespace platform { +namespace paddle::platform { EventCreateFunction DeviceEvent::event_creator_[MaxDeviceTypes]; // NOLINT EventRecordFunction DeviceEvent::event_recorder_[MaxDeviceTypes]; // NOLINT @@ -130,8 +129,7 @@ void EventResetCPU(const DeviceEvent* event) { wrapper->status_ = EventStatus::INITIALIZED; } -} // namespace platform -} // namespace paddle +} // namespace paddle::platform using ::paddle::platform::kCPU; REGISTER_EVENT_CREATE_FUNCTION(kCPU, paddle::platform::DeviceEventCreateCPU) diff --git a/paddle/fluid/platform/stream_callback_manager.cc b/paddle/fluid/platform/stream_callback_manager.cc index 6719a1b6e97bcf..fd4996e3dd122a 100644 --- a/paddle/fluid/platform/stream_callback_manager.cc +++ b/paddle/fluid/platform/stream_callback_manager.cc @@ -16,8 +16,7 @@ #include "paddle/fluid/platform/device/device_wrapper.h" -namespace paddle { -namespace platform { +namespace paddle::platform { #ifdef PADDLE_WITH_HIP static 
void StreamCallbackFunc(gpuStream_t stream, @@ -88,5 +87,4 @@ template class StreamCallbackManager; #ifdef PADDLE_WITH_HIP template struct StreamCallbackManager; #endif -} // namespace platform -} // namespace paddle +} // namespace paddle::platform diff --git a/paddle/fluid/pybind/bind_cost_model.cc b/paddle/fluid/pybind/bind_cost_model.cc index 5f99fad6476668..3e13784b74cc47 100644 --- a/paddle/fluid/pybind/bind_cost_model.cc +++ b/paddle/fluid/pybind/bind_cost_model.cc @@ -24,8 +24,7 @@ using paddle::framework::CostData; using paddle::framework::CostModel; using paddle::framework::ProgramDesc; -namespace paddle { -namespace pybind { +namespace paddle::pybind { void BindCostModel(py::module* m) { py::class_(*m, "CostData") @@ -56,5 +55,4 @@ void BindCostModel(py::module* m) { }); } -} // namespace pybind -} // namespace paddle +} // namespace paddle::pybind diff --git a/paddle/fluid/pybind/pir.cc b/paddle/fluid/pybind/pir.cc index 29c5c764c97534..e49415bac891a8 100644 --- a/paddle/fluid/pybind/pir.cc +++ b/paddle/fluid/pybind/pir.cc @@ -121,8 +121,7 @@ using pybind11::return_value_policy; COMMON_DECLARE_bool(print_ir); COMMON_DECLARE_bool(pir_apply_shape_optimization_pass); -namespace paddle { -namespace pybind { +namespace paddle::pybind { PyTypeObject *g_ir_value_pytype = nullptr; @@ -2498,5 +2497,4 @@ void BindPir(pybind11::module *module) { BindIrParser(&ir_module); } -} // namespace pybind -} // namespace paddle +} // namespace paddle::pybind diff --git a/paddle/phi/api/lib/tensor_copy.cc b/paddle/phi/api/lib/tensor_copy.cc index 4e951570089954..22ecb16f6b2eb6 100644 --- a/paddle/phi/api/lib/tensor_copy.cc +++ b/paddle/phi/api/lib/tensor_copy.cc @@ -29,8 +29,7 @@ limitations under the License. */ #include "paddle/phi/core/distributed/auto_parallel/reshard/reshard_utils.h" #include "paddle/phi/infermeta/spmd_rules/rules.h" #endif -namespace paddle { -namespace experimental { +namespace paddle::experimental { void copy(const Tensor& src, const Place& place, bool blocking, Tensor* dst) { auto kernel_key_set = ParseKernelKeyByInputArgs(src); @@ -94,5 +93,4 @@ void copy(const Tensor& src, const Place& place, bool blocking, Tensor* dst) { VLOG(6) << "copy finished. 
"; } -} // namespace experimental -} // namespace paddle +} // namespace paddle::experimental diff --git a/paddle/phi/backends/onednn/axpy_handler.cc b/paddle/phi/backends/onednn/axpy_handler.cc index 0713ebd1adba79..d7756f69e8f715 100644 --- a/paddle/phi/backends/onednn/axpy_handler.cc +++ b/paddle/phi/backends/onednn/axpy_handler.cc @@ -21,8 +21,7 @@ #include "paddle/phi/backends/onednn/onednn_helper.h" -namespace phi { -namespace funcs { +namespace phi::funcs { template class AXPYHandler { @@ -146,5 +145,4 @@ void OneDNNAXPYHandler::operator()(const T *x, T *y) { template class OneDNNAXPYHandler; template class OneDNNAXPYHandler; -} // namespace funcs -} // namespace phi +} // namespace phi::funcs diff --git a/paddle/phi/core/distributed/store/tcp_utils.cc b/paddle/phi/core/distributed/store/tcp_utils.cc index 64c5424928b9ff..f6f871952c36c8 100644 --- a/paddle/phi/core/distributed/store/tcp_utils.cc +++ b/paddle/phi/core/distributed/store/tcp_utils.cc @@ -20,9 +20,7 @@ #include "glog/logging.h" -namespace phi { -namespace distributed { -namespace tcputils { +namespace phi::distributed::tcputils { std::error_code socket_error() { #ifdef _WIN32 @@ -223,6 +221,4 @@ std::string receive_string(SocketType socket) { return std::string(v.data(), v.size()); } -} // namespace tcputils -} // namespace distributed -} // namespace phi +} // namespace phi::distributed::tcputils diff --git a/paddle/phi/kernels/funcs/cross_entropy.cc b/paddle/phi/kernels/funcs/cross_entropy.cc index cf53e9ea65efcc..3b68238b7b1798 100644 --- a/paddle/phi/kernels/funcs/cross_entropy.cc +++ b/paddle/phi/kernels/funcs/cross_entropy.cc @@ -17,8 +17,7 @@ limitations under the License. */ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/utils/data_type.h" -namespace phi { -namespace funcs { +namespace phi::funcs { using Tensor = phi::DenseTensor; template ::operator()( template class CrossEntropyFunctor; template class CrossEntropyFunctor; -} // namespace funcs -} // namespace phi +} // namespace phi::funcs diff --git a/paddle/phi/kernels/funcs/eigen/scale.cc b/paddle/phi/kernels/funcs/eigen/scale.cc index b3e5246a572269..9eb1d11527c4a0 100644 --- a/paddle/phi/kernels/funcs/eigen/scale.cc +++ b/paddle/phi/kernels/funcs/eigen/scale.cc @@ -16,8 +16,7 @@ limitations under the License. */ #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" -namespace phi { -namespace funcs { +namespace phi::funcs { template struct EigenScale { @@ -52,5 +51,4 @@ template struct EigenScale; template struct EigenScale>; template struct EigenScale>; -} // namespace funcs -} // namespace phi +} // namespace phi::funcs diff --git a/paddle/phi/kernels/funcs/fc_functor.cc b/paddle/phi/kernels/funcs/fc_functor.cc index 5344a23a64ed3c..88613a29b5353d 100644 --- a/paddle/phi/kernels/funcs/fc_functor.cc +++ b/paddle/phi/kernels/funcs/fc_functor.cc @@ -18,8 +18,7 @@ limitations under the License. 
*/ #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/jit/kernels.h" -namespace phi { -namespace funcs { +namespace phi::funcs { template void FCFunctor::operator()(const DeviceContext& context, @@ -100,5 +99,4 @@ void FCFunctor::operator()(const DeviceContext& context, template class FCFunctor; template class FCFunctor; -} // namespace funcs -} // namespace phi +} // namespace phi::funcs diff --git a/paddle/phi/kernels/funcs/jit/gen/sgd.cc b/paddle/phi/kernels/funcs/jit/gen/sgd.cc index 930e7794cf94ff..29035e27c7e35f 100644 --- a/paddle/phi/kernels/funcs/jit/gen/sgd.cc +++ b/paddle/phi/kernels/funcs/jit/gen/sgd.cc @@ -19,9 +19,7 @@ #include "paddle/phi/backends/cpu/cpu_info.h" #include "paddle/phi/kernels/funcs/jit/registry.h" -namespace phi { -namespace jit { -namespace gen { +namespace phi::jit::gen { void SgdJitCode::mainCode(int num_regs) { constexpr size_t block_size = sizeof(float) * YMM_FLOAT_BLOCK; @@ -141,9 +139,7 @@ class SgdCreator : public JitCodeCreator { } }; -} // namespace gen -} // namespace jit -} // namespace phi +} // namespace phi::jit::gen namespace gen = phi::jit::gen; diff --git a/paddle/phi/kernels/funcs/matrix_solve.cc b/paddle/phi/kernels/funcs/matrix_solve.cc index 31baedb3c314d0..75cb14763e71c8 100644 --- a/paddle/phi/kernels/funcs/matrix_solve.cc +++ b/paddle/phi/kernels/funcs/matrix_solve.cc @@ -14,8 +14,7 @@ limitations under the License. */ #include "paddle/phi/kernels/funcs/matrix_solve.h" -namespace phi { -namespace funcs { +namespace phi::funcs { template void MatrixSolveFunctor::operator()(const Context& dev_ctx, @@ -28,5 +27,4 @@ void MatrixSolveFunctor::operator()(const Context& dev_ctx, template class MatrixSolveFunctor; template class MatrixSolveFunctor; -} // namespace funcs -} // namespace phi +} // namespace phi::funcs diff --git a/paddle/phi/kernels/funcs/tensor_formatter.cc b/paddle/phi/kernels/funcs/tensor_formatter.cc index 7c4cd28fe20c74..d846daae701c4b 100644 --- a/paddle/phi/kernels/funcs/tensor_formatter.cc +++ b/paddle/phi/kernels/funcs/tensor_formatter.cc @@ -20,8 +20,7 @@ #include "paddle/phi/common/place.h" #include "paddle/phi/core/tensor_utils.h" -namespace paddle { -namespace funcs { +namespace paddle::funcs { void TensorFormatter::SetPrintTensorType(bool print_tensor_type) { print_tensor_type_ = print_tensor_type; @@ -162,5 +161,4 @@ template void TensorFormatter::FormatData( template void TensorFormatter::FormatData( const phi::DenseTensor& print_tensor, std::stringstream& log_stream); -} // namespace funcs -} // namespace paddle +} // namespace paddle::funcs diff --git a/paddle/phi/kernels/fusion/cpu/fusion_squared_mat_sub_kernel.cc b/paddle/phi/kernels/fusion/cpu/fusion_squared_mat_sub_kernel.cc index 4f443645774688..1a5fd750b0ea84 100644 --- a/paddle/phi/kernels/fusion/cpu/fusion_squared_mat_sub_kernel.cc +++ b/paddle/phi/kernels/fusion/cpu/fusion_squared_mat_sub_kernel.cc @@ -19,8 +19,7 @@ #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/kernels/funcs/jit/kernels.h" -namespace phi { -namespace fusion { +namespace phi::fusion { template void FusionSquaredMatSubKernel(const Context& dev_ctx, @@ -78,8 +77,7 @@ void FusionSquaredMatSubKernel(const Context& dev_ctx, vscal(&scalar_t, o_data, o_data, o_numel); } -} // namespace fusion -} // namespace phi +} // namespace phi::fusion PD_REGISTER_KERNEL(fusion_squared_mat_sub, CPU, diff --git a/paddle/phi/kernels/fusion/onednn/fused_matmul_kernel.cc b/paddle/phi/kernels/fusion/onednn/fused_matmul_kernel.cc index 
34c23e6fc288bf..2e0ec565e7d65d 100644 --- a/paddle/phi/kernels/fusion/onednn/fused_matmul_kernel.cc +++ b/paddle/phi/kernels/fusion/onednn/fused_matmul_kernel.cc @@ -20,8 +20,7 @@ using dnnl::memory; -namespace phi { -namespace fusion { +namespace phi::fusion { template class FusedMatmulOneDNNHandler @@ -599,8 +598,7 @@ void FusedMatmulKernel(const Context &dev_ctx, } } -} // namespace fusion -} // namespace phi +} // namespace phi::fusion PD_REGISTER_KERNEL(fused_matmul, OneDNN, diff --git a/paddle/phi/kernels/selected_rows/activation_kernel.cc b/paddle/phi/kernels/selected_rows/activation_kernel.cc index 4a27d0763a235c..7a72cf72246d9b 100644 --- a/paddle/phi/kernels/selected_rows/activation_kernel.cc +++ b/paddle/phi/kernels/selected_rows/activation_kernel.cc @@ -19,8 +19,7 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/activation_kernel.h" -namespace phi { -namespace sr { +namespace phi::sr { template void SquareKernel(const Context& dev_ctx, @@ -40,8 +39,7 @@ void SqrtKernel(const Context& dev_ctx, phi::SqrtKernel(dev_ctx, x.value(), out->mutable_value()); } -} // namespace sr -} // namespace phi +} // namespace phi::sr PD_REGISTER_KERNEL( square_sr, CPU, ALL_LAYOUT, phi::sr::SquareKernel, float, double) {}
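
Note: every hunk above applies the same mechanical change, collapsing pre-C++17 nested namespace blocks into C++17 nested namespace definitions. A minimal before/after sketch (the namespace names mirror the diff; the snippet itself is illustrative, not a file from the patch):

    // Before: one declaration and one closing brace per nesting level.
    namespace paddle {
    namespace framework {
    namespace ir {
    class Node;
    }  // namespace ir
    }  // namespace framework
    }  // namespace paddle

    // After (C++17): a single declaration and a single closing brace.
    namespace paddle::framework::ir {
    class Node;
    }  // namespace paddle::framework::ir

One subtlety shows up in fused_continuous_same_ops_pass.cc and preln_layernorm_x_fuse_pass.cc: a "namespace patterns {" that used to sit textually inside the open "namespace paddle::framework::ir {" block cannot itself be written in the collapsed style without qualification, so the patch closes the outer namespace, opens "namespace paddle::framework::ir::patterns {" for the pattern structs, and then reopens "namespace paddle::framework::ir {" for the pass implementation.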