diff --git a/paddle/fluid/distributed/auto_parallel/dist_attr.cc b/paddle/fluid/distributed/auto_parallel/dist_attr.cc
index 805641cf01837..26d438c23349d 100644
--- a/paddle/fluid/distributed/auto_parallel/dist_attr.cc
+++ b/paddle/fluid/distributed/auto_parallel/dist_attr.cc
@@ -23,9 +23,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/var_desc.h"
 #include "paddle/phi/core/distributed/auto_parallel/proto_helper.h"
 
-namespace paddle {
-namespace distributed {
-namespace auto_parallel {
+namespace paddle::distributed::auto_parallel {
 
 using phi::distributed::auto_parallel::str_join;
 
@@ -487,6 +485,4 @@ bool operator==(const OperatorDistAttr& lhs, const OperatorDistAttr& rhs) {
   return true;
 }
 
-}  // namespace auto_parallel
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed::auto_parallel
diff --git a/paddle/fluid/distributed/fleet_executor/dist_model.cc b/paddle/fluid/distributed/fleet_executor/dist_model.cc
index 4c19069b33705..ee7540d05d5ef 100644
--- a/paddle/fluid/distributed/fleet_executor/dist_model.cc
+++ b/paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -28,8 +28,7 @@
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/tensor.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 namespace {
 bool IsPersistable(const framework::VarDesc *var) {
@@ -705,5 +704,4 @@ bool DistModel::Run(const std::vector &input_data,
   return true;
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
index 2d72a08a13f30..67ad3b729b9f3 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -24,15 +24,12 @@ static const int max_port = 65535;
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Scope;
 class Variable;
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 PD_DEFINE_int32(pserver_push_dense_merge_limit,
                 12,
@@ -2066,5 +2063,4 @@ void BrpcPsClient::PushDenseRawGradient(std::shared_ptr &task,
   }
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
diff --git a/paddle/fluid/framework/details/multi_devices_helper.cc b/paddle/fluid/framework/details/multi_devices_helper.cc
index d2379c2c49a19..097bc0dd59127 100644
--- a/paddle/fluid/framework/details/multi_devices_helper.cc
+++ b/paddle/fluid/framework/details/multi_devices_helper.cc
@@ -18,9 +18,7 @@
 #include "paddle/fluid/framework/details/share_tensor_buffer_op_handle.h"
 #include "paddle/fluid/framework/ir/graph_helper.h"
 
-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {
 
 static constexpr size_t kUndefinedDevIdx = -1UL;
 
@@ -300,6 +298,4 @@ bool HasKeepLastReadOp(const ir::Graph &graph) {
   return HasDropLastReadOpImpl(graph, false);
 }
 
-}  // namespace details
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::details
diff --git a/paddle/fluid/framework/dist_multi_trainer.cc b/paddle/fluid/framework/dist_multi_trainer.cc
index 119b6e569cef3..2e4def46d0e61 100644
--- a/paddle/fluid/framework/dist_multi_trainer.cc
+++ b/paddle/fluid/framework/dist_multi_trainer.cc
@@ -22,8 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/device_worker_factory.h"
 #include "paddle/fluid/framework/trainer.h"
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 
 void DistMultiTrainer::Initialize(const TrainerDesc &trainer_desc,
                                   Dataset *dataset) {
@@ -232,5 +231,4 @@ void DistMultiTrainer::MergeToRootScope(phi::DenseTensor *root_tensor,
     root_data[i] += data[i];
   }
 }
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
index 4a0dd02db0f24..f002d88aa58a1 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
@@ -19,9 +19,7 @@
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 #endif
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 #define GET_IR_NODE(node__) GET_IR_NODE_FROM_SUBGRAPH(node__, node__, pattern);
 #define GET_NODES          \
@@ -171,9 +169,7 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_conv_eltwise_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(conv_elementwise_add_fuse_pass,
               paddle::framework::ir::ConvElementwiseAddFusePass);
diff --git a/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc b/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc
index 98f55003b266b..23247ae6114ac 100644
--- a/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc
@@ -16,9 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void AddVarToScope(Scope* param_scope,
                    const std::string& name,
@@ -147,9 +145,7 @@ TEST(DenseMultiHeadMatmulToSparsePass, basic) {
                                            num_fused_nodes_after));
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 USE_PASS(multihead_matmul_fuse_pass);
 USE_PASS(multihead_matmul_fuse_pass_v2);
diff --git a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc
index 7ac5b1690ea1c..be2ec2284fb13 100644
--- a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass_tester.cc
@@ -18,9 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 TEST(EmbeddingElewiseLayernormFusePass, basic) {
   // inputs                           operator              output
@@ -100,8 +98,6 @@ TEST(EmbeddingElewiseLayernormFusePass, pass_op_version_check) {
           .IsPassCompatible("embedding_eltwise_layernorm_fuse_pass"));
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 USE_PASS(embedding_eltwise_layernorm_fuse_pass);
diff --git a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
index 048b33a649f94..7e6c6d145de87 100644
--- a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
@@ -19,19 +19,13 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 #include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void FuseBatchNormActPass::ApplyImpl(ir::Graph *graph) const {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -351,8 +345,6 @@ std::vector FuseBatchNormActPass::ReplaceNode(
   return new_list;
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(fuse_bn_act_pass, paddle::framework::ir::FuseBatchNormActPass);
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector_tester.cc b/paddle/fluid/framework/ir/graph_pattern_detector_tester.cc
index c775677c51688..640b2bb361afa 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector_tester.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector_tester.cc
@@ -16,9 +16,7 @@
 
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Node;
 
@@ -207,6 +205,4 @@ TEST(GraphPatternDetector, IntermediateCheck) {
   ASSERT_EQ(count, 1);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
diff --git a/paddle/fluid/framework/ir/layernorm_shift_partition_fuse_pass.cc b/paddle/fluid/framework/ir/layernorm_shift_partition_fuse_pass.cc
index dbe990f636372..d613b01271b80 100644
--- a/paddle/fluid/framework/ir/layernorm_shift_partition_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/layernorm_shift_partition_fuse_pass.cc
@@ -23,9 +23,7 @@
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Node;
 
@@ -256,9 +254,7 @@ void LayerNormShiftPartitionFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(layernorm_shift_partition_fuse_pass,
               paddle::framework::ir::LayerNormShiftPartitionFusePass);
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc
index e89c98f15f6f8..e6ec82abf3563 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc
@@ -20,9 +20,7 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Graph;
 
@@ -301,9 +299,7 @@ void BufferSharedInplaceOpPass::ApplyImpl(ProgramDesc *main_program,
   block->Flush();
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(buffer_shared_inplace_pass,
               paddle::framework::ir::BufferSharedInplaceOpPass)
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/pylayer_op_eager_deletion_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/pylayer_op_eager_deletion_pass.cc
index 5ec567f81a44a..c8978ef0056e5 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/pylayer_op_eager_deletion_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/pylayer_op_eager_deletion_pass.cc
@@ -17,9 +17,7 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/operators/controlflow/op_variant.h"
 #include "paddle/fluid/operators/controlflow/pylayer_op_helper.h"
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 using OpVariant = operators::OpVariant;
 class PyLayerOpEagerDeletionPass : public Pass {
  protected:
@@ -94,9 +92,7 @@ class PyLayerOpEagerDeletionPass : public Pass {
   }
 };
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(pylayer_op_eager_deletion_pass,
               paddle::framework::ir::PyLayerOpEagerDeletionPass);
diff --git a/paddle/fluid/framework/ir/node.cc b/paddle/fluid/framework/ir/node.cc
index 5516a2d799ad1..be1bb0865cba5 100644
--- a/paddle/fluid/framework/ir/node.cc
+++ b/paddle/fluid/framework/ir/node.cc
@@ -14,9 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/ir/node.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 // msvc15 don't support constexpr in correct way.
 // static constexpr member implies inline since CXX17 and may cause multiple
 // definition.
@@ -39,6 +37,4 @@ std::unique_ptr<Node> CreateNodeForTest(OpDesc *op_desc) {
   return std::unique_ptr<Node>(new Node(op_desc));
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
diff --git a/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass.cc b/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass.cc
index 91f878a16abd0..e501cff846551 100644
--- a/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass.cc
@@ -23,9 +23,7 @@
 #include "paddle/phi/core/enforce.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 using string::PrettyLogDetail;
 
@@ -635,9 +633,7 @@ void CPUQuantizeSquashPass::ApplyImpl(ir::Graph* graph) const {
   QuantizeBf16Conv(graph);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(cpu_quantize_squash_pass,
               paddle::framework::ir::CPUQuantizeSquashPass);
diff --git a/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass_tester.cc
index fc57bdb6b52ef..3bee2cc94146f 100644
--- a/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass_tester.cc
+++ b/paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass_tester.cc
@@ -18,9 +18,7 @@
 #include "paddle/fluid/framework/naive_executor.h"
 #include "paddle/phi/common/place.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void SetOp(ProgramDesc* prog,
            const std::string& type,
@@ -1179,8 +1177,6 @@ TEST(CpuQuantizeSquashPass, squash_all_u8_input_to_concat2) {
       BuildU8U8ConcatProgramDesc(1.2f, 1.2f), expected_operators, remove_nodes);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 USE_PASS(cpu_quantize_squash_pass);
diff --git a/paddle/fluid/framework/ir/onednn/matmul_transpose_reshape_onednn_fuse_pass.cc b/paddle/fluid/framework/ir/onednn/matmul_transpose_reshape_onednn_fuse_pass.cc
index 0b742d763bebc..85e098e7bfe0d 100644
--- a/paddle/fluid/framework/ir/onednn/matmul_transpose_reshape_onednn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/matmul_transpose_reshape_onednn_fuse_pass.cc
@@ -18,9 +18,7 @@
 #include "paddle/phi/core/enforce.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 using string::PrettyLogDetail;
 
@@ -202,9 +200,7 @@ MatmulTransposeReshapeMKLDNNPass::MatmulTransposeReshapeMKLDNNPass() {
       .End();
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(matmul_transpose_reshape_onednn_fuse_pass,
               paddle::framework::ir::MatmulTransposeReshapeMKLDNNPass);
diff --git a/paddle/fluid/framework/ir/onednn/quant_dequant_onednn_pass.cc b/paddle/fluid/framework/ir/onednn/quant_dequant_onednn_pass.cc
index 6ffd3963504f2..afb9585d968bb 100644
--- a/paddle/fluid/framework/ir/onednn/quant_dequant_onednn_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/quant_dequant_onednn_pass.cc
@@ -20,9 +20,7 @@
 #include "paddle/fluid/framework/ir/onednn/onednn_pass_util.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void QuantDequantMkldnnPass::MarkSkipQuantizedOps(
     ir::Graph* graph, const std::unordered_set<std::string>& skip_ops) const {
@@ -758,9 +756,7 @@ void QuantDequantMkldnnPass::ApplyImpl(ir::Graph* graph) const {
       graph, "has_quant_info", "var_quant_scales", var_quant_scales);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(quant_dequant_onednn_pass,
               paddle::framework::ir::QuantDequantMkldnnPass);
diff --git a/paddle/fluid/framework/ir/pass.cc b/paddle/fluid/framework/ir/pass.cc
index 0b3ebd324dc7a..ac8f8be58a605 100644
--- a/paddle/fluid/framework/ir/pass.cc
+++ b/paddle/fluid/framework/ir/pass.cc
@@ -20,21 +20,17 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_proto_maker.h"
 #include "paddle/fluid/framework/program_utils.h"
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Scope;
-namespace ir {
+}  // namespace paddle::framework
+namespace paddle::framework::ir {
 class Graph;
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 #ifdef PADDLE_WITH_DNNL
 #include "paddle/fluid/platform/onednn_helper.h"
 #endif
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 static const char kParamScopeAttr[] = "__param_scope__";  // NOLINT
 
@@ -299,6 +295,4 @@ PassRegistry &PassRegistry::Instance() {
   static PassRegistry g_pass_info_map;
   return g_pass_info_map;
 }
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
diff --git a/paddle/fluid/framework/no_need_buffer_vars_inference.cc b/paddle/fluid/framework/no_need_buffer_vars_inference.cc
index 794548aa6b44b..cf40833dfdfb9 100644
--- a/paddle/fluid/framework/no_need_buffer_vars_inference.cc
+++ b/paddle/fluid/framework/no_need_buffer_vars_inference.cc
@@ -19,8 +19,7 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/imperative/saved_variable_wrapper_list.h"
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 
 const Attribute &InferNoNeedBufferVarsContext::GetAttr(
     const std::string &name) const {
@@ -66,5 +65,4 @@ bool DyGraphInferNoNeedBufferVarsContext::HasOutput(
   return false;
 }
 
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
diff --git a/paddle/fluid/framework/op_version_registry.cc b/paddle/fluid/framework/op_version_registry.cc
index 198394619e00d..a2edad1eb386f 100644
--- a/paddle/fluid/framework/op_version_registry.cc
+++ b/paddle/fluid/framework/op_version_registry.cc
@@ -14,9 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace compatible {
+namespace paddle::framework::compatible {
 
 OpVersionDesc&& OpVersionDesc::NewInput(const std::string& name,
                                         const std::string& remark) {
@@ -102,6 +100,4 @@ PassVersionCheckerRegistrar& PassVersionCheckerRegistrar::GetInstance() {
 // Provide a fake registration item for pybind testing.
#include "paddle/fluid/framework/op_version_registry.inl" -} // namespace compatible -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::compatible diff --git a/paddle/fluid/framework/program_converter.cc b/paddle/fluid/framework/program_converter.cc index 2645b3cf826ab..a3b094879957b 100644 --- a/paddle/fluid/framework/program_converter.cc +++ b/paddle/fluid/framework/program_converter.cc @@ -25,8 +25,7 @@ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/version.h" -namespace paddle { -namespace framework { +namespace paddle::framework { using paddle::experimental::ExtractPlainVector; using paddle::experimental::WrapAsScalars; @@ -80,7 +79,8 @@ std::pair> DetectLegacyOps( return std::make_pair(is_legacy_program, legacy_op_map); } -namespace no_scalar { +} // namespace paddle::framework +namespace paddle::framework::no_scalar { void ConvertSetValueOp(OpDesc* op) { std::vector values = PADDLE_GET_CONST( std::vector, op->GetAttr("values", false)); @@ -189,9 +189,9 @@ void ConvertProgram(ProgramDesc* program) { } } } -} // namespace no_scalar +} // namespace paddle::framework::no_scalar -namespace scalar { +namespace paddle::framework::scalar { void ConvertSetValueOp(OpDesc* op) { std::vector values; @@ -319,6 +319,4 @@ void ConvertProgram(ProgramDesc* program) { } } } -} // namespace scalar -} // namespace framework -} // namespace paddle +} // namespace paddle::framework::scalar diff --git a/paddle/fluid/imperative/all_reduce.cc b/paddle/fluid/imperative/all_reduce.cc index f86bce962e021..445f5d92ae5ab 100644 --- a/paddle/fluid/imperative/all_reduce.cc +++ b/paddle/fluid/imperative/all_reduce.cc @@ -34,8 +34,7 @@ #include "paddle/fluid/platform/device_context.h" #include "paddle/utils/string/string_helper.h" -namespace paddle { -namespace imperative { +namespace paddle::imperative { static const platform::Place &GetVarPlace(const framework::Variable &src) { if (src.IsType()) { @@ -272,7 +271,6 @@ void AllReduce(const framework::Variable &src, AllReduce(src, dst, strategy, 0, true); } -} // namespace imperative -} // namespace paddle +} // namespace paddle::imperative #endif diff --git a/paddle/fluid/imperative/var_helper.cc b/paddle/fluid/imperative/var_helper.cc index 9561962935ffe..7da3f352bdffc 100644 --- a/paddle/fluid/imperative/var_helper.cc +++ b/paddle/fluid/imperative/var_helper.cc @@ -27,8 +27,7 @@ #include "paddle/fluid/imperative/layer.h" #include "paddle/fluid/platform/place.h" #include "paddle/phi/core/selected_rows.h" -namespace paddle { -namespace imperative { +namespace paddle::imperative { /* GetVariableWrapper */ template <> @@ -303,5 +302,4 @@ template void SetCachedValue( std::shared_ptr var, const phi::KernelKey &key, std::shared_ptr res); -} // namespace imperative -} // namespace paddle +} // namespace paddle::imperative diff --git a/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc index 217d52e0dad1c..e77dcfee4e2c1 100644 --- a/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc +++ b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc @@ -16,9 +16,7 @@ #include "paddle/fluid/inference/analysis/argument.h" -namespace paddle { -namespace inference { -namespace analysis { +namespace paddle::inference::analysis { void AdjustCudnnWorkSpacePass::RunImpl(Argument* argument) { if (!argument->use_gpu()) return; @@ -40,6 +38,4 @@ std::string 
AdjustCudnnWorkSpacePass::repr() const { return "adjust-cudnn-work-space-pass"; } -} // namespace analysis -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::analysis diff --git a/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc b/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc index c0df0703d1fd3..b0c88649753da 100644 --- a/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc @@ -18,9 +18,7 @@ namespace nvinfer1 { class IScaleLayer; // NOLINT } // namespace nvinfer1 -namespace paddle { -namespace inference { -namespace tensorrt { +namespace paddle::inference::tensorrt { class BatchNormOpConverter : public OpConverter { public: @@ -179,8 +177,6 @@ class BatchNormOpConverter : public OpConverter { } }; -} // namespace tensorrt -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::tensorrt REGISTER_TRT_OP_CONVERTER(batch_norm, BatchNormOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/layernorm_shift_partition_op.cc b/paddle/fluid/inference/tensorrt/convert/layernorm_shift_partition_op.cc index 4f4b09b6173a2..3d3c79e1185c8 100644 --- a/paddle/fluid/inference/tensorrt/convert/layernorm_shift_partition_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/layernorm_shift_partition_op.cc @@ -15,9 +15,7 @@ limitations under the License. */ #include "paddle/fluid/inference/tensorrt/plugin/layernorm_shift_partition_op.h" #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" -namespace paddle { -namespace inference { -namespace tensorrt { +namespace paddle::inference::tensorrt { class LayerNormShiftPartitionOpConverter : public OpConverter { public: @@ -101,9 +99,7 @@ class LayerNormShiftPartitionOpConverter : public OpConverter { } }; -} // namespace tensorrt -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::tensorrt REGISTER_TRT_OP_CONVERTER(layernorm_shift_partition, LayerNormShiftPartitionOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/slice_op.cc b/paddle/fluid/inference/tensorrt/convert/slice_op.cc index 4dbeff4761401..564c0a441bcfb 100644 --- a/paddle/fluid/inference/tensorrt/convert/slice_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/slice_op.cc @@ -11,9 +11,7 @@ limitations under the License. 
*/ #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" -namespace paddle { -namespace inference { -namespace tensorrt { +namespace paddle::inference::tensorrt { class SliceOpConverter : public OpConverter { public: @@ -223,8 +221,6 @@ class SliceOpConverter : public OpConverter { } }; -} // namespace tensorrt -} // namespace inference -} // namespace paddle +} // namespace paddle::inference::tensorrt REGISTER_TRT_OP_CONVERTER(slice, SliceOpConverter); diff --git a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc index 0506e80738662..024391739ad70 100644 --- a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc +++ b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc @@ -43,9 +43,7 @@ COMMON_DECLARE_uint64(initial_gpu_memory_in_mb); COMMON_DECLARE_uint64(reallocate_gpu_memory_in_mb); PD_DECLARE_bool(benchmark); -namespace paddle { -namespace memory { -namespace legacy { +namespace paddle::memory::legacy { template void *Alloc(const Place &place, size_t size); @@ -623,9 +621,9 @@ size_t Usage::operator()(const platform::CUDAPinnedPlace &cuda_pinned) const { "'CUDAPinnedPlace' is not supported in CPU only device.")); #endif } -} // namespace legacy +} // namespace paddle::memory::legacy -namespace allocation { +namespace paddle::memory::allocation { phi::Allocation *NaiveBestFitAllocator::AllocateImpl(size_t size) { void *ptr = paddle::platform::VisitPlace(place_, legacy::AllocVisitor(size)); @@ -644,6 +642,4 @@ uint64_t NaiveBestFitAllocator::ReleaseImpl(const platform::Place &place) { return paddle::platform::VisitPlace(place, legacy::ReleaseVisitor()); } -} // namespace allocation -} // namespace memory -} // namespace paddle +} // namespace paddle::memory::allocation diff --git a/paddle/fluid/memory/malloc.cc b/paddle/fluid/memory/malloc.cc index 7dae4436854ef..4c3ee5ed6fe27 100644 --- a/paddle/fluid/memory/malloc.cc +++ b/paddle/fluid/memory/malloc.cc @@ -18,8 +18,7 @@ limitations under the License. */ #include "paddle/fluid/platform/place.h" #include "paddle/phi/core/stream.h" -namespace paddle { -namespace memory { +namespace paddle::memory { std::shared_ptr AllocShared(const platform::Place& place, size_t size) { @@ -85,5 +84,4 @@ void RecordStream(std::shared_ptr allocation, stream); } #endif -} // namespace memory -} // namespace paddle +} // namespace paddle::memory diff --git a/paddle/fluid/operators/collective/c_allreduce_min_op.cc b/paddle/fluid/operators/collective/c_allreduce_min_op.cc index 10dc4b9506b2c..efc9af8aa2a2e 100644 --- a/paddle/fluid/operators/collective/c_allreduce_min_op.cc +++ b/paddle/fluid/operators/collective/c_allreduce_min_op.cc @@ -14,19 +14,16 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/collective/c_allreduce_op.h" -namespace paddle { -namespace framework { +namespace paddle::framework { class OpDesc; template class EmptyGradOpMaker; -} // namespace framework -namespace imperative { +} // namespace paddle::framework +namespace paddle::imperative { class OpBase; -} // namespace imperative -} // namespace paddle +} // namespace paddle::imperative -namespace paddle { -namespace operators { +namespace paddle::operators { class CAllReduceMinOpMaker : public CAllReduceOpMaker { protected: @@ -37,8 +34,7 @@ DECLARE_INPLACE_OP_INFERER(AllreduceMinInplaceInferer, {"X", "Out"}); DEFINE_C_ALLREDUCE_CPU_KERNEL(CAllReduceMin, kRedMin) -} // namespace operators -} // namespace paddle +} // namespace paddle::operators namespace ops = paddle::operators; diff --git a/paddle/fluid/platform/cpu_helper.cc b/paddle/fluid/platform/cpu_helper.cc index 95d770f1b8006..37ca90bee793f 100644 --- a/paddle/fluid/platform/cpu_helper.cc +++ b/paddle/fluid/platform/cpu_helper.cc @@ -26,8 +26,7 @@ limitations under the License. */ #include #endif -namespace paddle { -namespace platform { +namespace paddle::platform { void SetNumThreads(int num_threads) { #ifdef PADDLE_USE_OPENBLAS @@ -57,5 +56,4 @@ void SetNumThreads(int num_threads) { #endif } -} // namespace platform -} // namespace paddle +} // namespace paddle::platform diff --git a/paddle/fluid/platform/dynload/cuda_driver.cc b/paddle/fluid/platform/dynload/cuda_driver.cc index c0c752cab5fc5..c9d330974ac10 100644 --- a/paddle/fluid/platform/dynload/cuda_driver.cc +++ b/paddle/fluid/platform/dynload/cuda_driver.cc @@ -16,9 +16,7 @@ limitations under the License. */ #include "paddle/phi/backends/dynload/cuda_driver.h" -namespace paddle { -namespace platform { -namespace dynload { +namespace paddle::platform::dynload { #define DEFINE_WRAP(__name) DynLoad__##__name __name @@ -30,6 +28,4 @@ CUDA_ROUTINE_EACH(DEFINE_WRAP); bool HasCUDADriver() { return phi::dynload::HasCUDADriver(); } -} // namespace dynload -} // namespace platform -} // namespace paddle +} // namespace paddle::platform::dynload diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index ddd0cf8a4164e..7d1cf87424014 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -53,8 +53,7 @@ thread_local std::shared_ptr> phi::ProfilerHelper::g_mem_event_list; std::mutex phi::ProfilerHelper::g_all_mem_event_lists_mutex; #endif -namespace paddle { -namespace platform { +namespace paddle::platform { MemEventRecorder MemEventRecorder::recorder; @@ -961,5 +960,4 @@ static void DockHostEventRecorderDevicePart( EmulateCorrelation(thr_events); } -} // namespace platform -} // namespace paddle +} // namespace paddle::platform diff --git a/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc b/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc index 6e3966d96d4bc..8f19567e50fd7 100644 --- a/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc +++ b/paddle/fluid/platform/profiler/custom_device/custom_tracer.cc @@ -23,8 +23,7 @@ #include "paddle/phi/backends/device_manager.h" #endif -namespace paddle { -namespace platform { +namespace paddle::platform { CustomTracer::CustomTracer(const std::string& dev_type) : dev_type_(dev_type), context_(nullptr) { @@ -116,8 +115,7 @@ void CustomTracer::CollectTraceData(TraceEventCollector* collector) { collector_.ClearAll(); } -} // namespace platform -} // namespace paddle +} // namespace paddle::platform #ifdef PADDLE_WITH_CUSTOM_DEVICE void 
profiler_add_runtime_trace_event(C_Profiler prof, void* event) { diff --git a/paddle/fluid/pybind/compatible.cc b/paddle/fluid/pybind/compatible.cc index 25bf9c7bd05c4..f2bfce6a61829 100644 --- a/paddle/fluid/pybind/compatible.cc +++ b/paddle/fluid/pybind/compatible.cc @@ -33,8 +33,7 @@ using paddle::framework::compatible::OpUpdateType; using paddle::framework::compatible::OpVersion; using paddle::framework::compatible::OpVersionDesc; -namespace paddle { -namespace pybind { +namespace paddle::pybind { namespace { using paddle::framework::compatible::PassVersionCheckerRegistrar; @@ -150,5 +149,4 @@ void BindCompatible(py::module *m) { BindOpVersion(m); } -} // namespace pybind -} // namespace paddle +} // namespace paddle::pybind diff --git a/paddle/phi/backends/cpu/cpu_info.cc b/paddle/phi/backends/cpu/cpu_info.cc index 2c78e9f706a3b..3c492aa97cd06 100644 --- a/paddle/phi/backends/cpu/cpu_info.cc +++ b/paddle/phi/backends/cpu/cpu_info.cc @@ -48,9 +48,7 @@ PHI_DEFINE_EXPORTED_bool(use_pinned_memory, // NOLINT true, "If set, allocate cpu pinned memory."); -namespace phi { -namespace backends { -namespace cpu { +namespace phi::backends::cpu { size_t CpuTotalPhysicalMemory() { #ifdef __APPLE__ @@ -199,6 +197,4 @@ bool MayIUse(const cpu_isa_t cpu_isa) { } #endif -} // namespace cpu -} // namespace backends -} // namespace phi +} // namespace phi::backends::cpu diff --git a/paddle/phi/backends/dynload/mklml.cc b/paddle/phi/backends/dynload/mklml.cc index e5b490e519d12..79479a89046c3 100644 --- a/paddle/phi/backends/dynload/mklml.cc +++ b/paddle/phi/backends/dynload/mklml.cc @@ -14,8 +14,7 @@ limitations under the License. */ #include "paddle/phi/backends/dynload/mklml.h" -namespace phi { -namespace dynload { +namespace phi::dynload { std::once_flag mklml_dso_flag; void* mklml_dso_handle = nullptr; @@ -29,5 +28,4 @@ DEFINE_WRAP(mkl_scsrmm); DEFINE_WRAP(mkl_dcsrmm); #endif -} // namespace dynload -} // namespace phi +} // namespace phi::dynload diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/s_to_s_reshard_function.cc b/paddle/phi/core/distributed/auto_parallel/reshard/s_to_s_reshard_function.cc index f4b05c8c13064..4e93f74912bad 100644 --- a/paddle/phi/core/distributed/auto_parallel/reshard/s_to_s_reshard_function.cc +++ b/paddle/phi/core/distributed/auto_parallel/reshard/s_to_s_reshard_function.cc @@ -25,8 +25,7 @@ #include "paddle/phi/kernels/reshape_kernel.h" #include "paddle/phi/kernels/transpose_kernel.h" -namespace phi { -namespace distributed { +namespace phi::distributed { bool SToSReshardFunction::IsSuitable(const DistTensor& in, const TensorDistAttr& out_dist_attr) { @@ -191,5 +190,4 @@ void SToSReshardFunctionCrossMesh::Eval(DeviceContext* dev_ctx, } } -} // namespace distributed -} // namespace phi +} // namespace phi::distributed diff --git a/paddle/phi/core/distributed/store/socket.cpp b/paddle/phi/core/distributed/store/socket.cpp index 8b260e9da202b..2a8dd1da914ec 100644 --- a/paddle/phi/core/distributed/store/socket.cpp +++ b/paddle/phi/core/distributed/store/socket.cpp @@ -24,8 +24,7 @@ #include #include -namespace phi { -namespace distributed { +namespace phi::distributed { #ifdef _WIN32 static int _get_sockname_of_win(int sock, char* out, int out_len) { @@ -76,5 +75,4 @@ std::string GetSockName(int fd) { return std::string(out.data()); } -} // namespace distributed -} // namespace phi +} // namespace phi::distributed diff --git a/paddle/phi/infermeta/spmd_rules/fused_rope.cc b/paddle/phi/infermeta/spmd_rules/fused_rope.cc index 1db67a896bab7..f3aa720d61ece 
100644 --- a/paddle/phi/infermeta/spmd_rules/fused_rope.cc +++ b/paddle/phi/infermeta/spmd_rules/fused_rope.cc @@ -21,8 +21,7 @@ limitations under the License. */ #include "paddle/phi/core/distributed/auto_parallel/utils.h" #include "paddle/phi/infermeta/spmd_rules/utils.h" -namespace phi { -namespace distributed { +namespace phi::distributed { using auto_parallel::str_join; const int kNumHeadsDimIndex = 2; @@ -571,5 +570,4 @@ SpmdInfo FusedRopeGradInferSpmd(const DistMetaTensor& sin, return {dist_attrs, spmd_info.second}; } -} // namespace distributed -} // namespace phi +} // namespace phi::distributed diff --git a/paddle/phi/kernels/funcs/jit/gen/act.cc b/paddle/phi/kernels/funcs/jit/gen/act.cc index e1877c46c81b0..cd240028aec3c 100644 --- a/paddle/phi/kernels/funcs/jit/gen/act.cc +++ b/paddle/phi/kernels/funcs/jit/gen/act.cc @@ -18,9 +18,7 @@ #include "paddle/phi/backends/cpu/cpu_info.h" #include "paddle/phi/kernels/funcs/jit/registry.h" -namespace phi { -namespace jit { -namespace gen { +namespace phi::jit::gen { const float ALIGN32_BEG exp_float_consts[] ALIGN32_END = { // NOLINT REPEAT_8TIMES(1.f), @@ -149,9 +147,7 @@ size_t VTanhCreator::CodeSize(const int& d) const { #undef DECLARE_ACT_CREATOR -} // namespace gen -} // namespace jit -} // namespace phi +} // namespace phi::jit::gen namespace gen = phi::jit::gen; diff --git a/paddle/phi/kernels/funcs/jit/helper.cc b/paddle/phi/kernels/funcs/jit/helper.cc index 5ab391678bd90..58f9a245ab190 100644 --- a/paddle/phi/kernels/funcs/jit/helper.cc +++ b/paddle/phi/kernels/funcs/jit/helper.cc @@ -18,8 +18,7 @@ #include "paddle/phi/core/enforce.h" -namespace phi { -namespace jit { +namespace phi::jit { std::map>& GetFuncCacheMap() { static thread_local std::map> g_func_cache_map; @@ -149,5 +148,4 @@ typename std::enable_if::value>::type pack_weights( "Only supports pack weights with float type.")); } -} // namespace jit -} // namespace phi +} // namespace phi::jit diff --git a/paddle/phi/kernels/selected_rows/uniform_kernel.cc b/paddle/phi/kernels/selected_rows/uniform_kernel.cc index 0af5d8788c71f..4b6ea429782b2 100644 --- a/paddle/phi/kernels/selected_rows/uniform_kernel.cc +++ b/paddle/phi/kernels/selected_rows/uniform_kernel.cc @@ -20,8 +20,7 @@ limitations under the License. */ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/uniform_kernel.h" -namespace phi { -namespace sr { +namespace phi::sr { template void UniformRawKernel(const Context& dev_ctx, @@ -58,8 +57,7 @@ void UniformKernel(const Context& dev_ctx, dev_ctx, shape, dtype, min, max, seed, out->mutable_value()); } -} // namespace sr -} // namespace phi +} // namespace phi::sr PD_REGISTER_KERNEL(uniform_raw_sr, CPU, diff --git a/paddle/phi/kernels/sparse/cpu/conv_kernel.cc b/paddle/phi/kernels/sparse/cpu/conv_kernel.cc index 52695d8b4a3ff..2534b04dba743 100644 --- a/paddle/phi/kernels/sparse/cpu/conv_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/conv_kernel.cc @@ -19,8 +19,7 @@ limitations under the License. 
*/ #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/sparse/cpu/conv.h" -namespace phi { -namespace sparse { +namespace phi::sparse { /** * x: (N, D, H, W, C) @@ -206,8 +205,7 @@ void Conv3dCooKernel(const Context& dev_ctx, counter); })); } -} // namespace sparse -} // namespace phi +} // namespace phi::sparse PD_REGISTER_KERNEL( conv3d_coo, CPU, ALL_LAYOUT, phi::sparse::Conv3dCooKernel, float, double) { diff --git a/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc b/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc index 177b89be10a86..f82756edd081b 100644 --- a/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc @@ -22,8 +22,7 @@ #include "paddle/phi/kernels/sparse/empty_kernel.h" #include "paddle/phi/kernels/sparse/impl/unary_grad_kernel_impl.h" -namespace phi { -namespace sparse { +namespace phi::sparse { template void SumCooGradCPUKernel(const Context& dev_ctx, @@ -194,8 +193,7 @@ void SumCooGradKernel(const Context& dev_ctx, })); } -} // namespace sparse -} // namespace phi +} // namespace phi::sparse PD_REGISTER_KERNEL(sum_coo_grad, CPU, diff --git a/paddle/phi/kernels/sparse/cpu/unary_kernel.cc b/paddle/phi/kernels/sparse/cpu/unary_kernel.cc index 4e4394af45a01..50aba3051778c 100644 --- a/paddle/phi/kernels/sparse/cpu/unary_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/unary_kernel.cc @@ -21,8 +21,7 @@ #include "paddle/phi/kernels/sparse/impl/unary_grad_kernel_impl.h" #include "paddle/phi/kernels/sparse/impl/unary_kernel_impl.h" -namespace phi { -namespace sparse { +namespace phi::sparse { template void DivScalarCooKernel(const Context& dev_ctx, @@ -56,8 +55,7 @@ void DivScalarCsrKernel(const Context& dev_ctx, dev, eigen_out, eigen_x, static_cast(scalar)); } -} // namespace sparse -} // namespace phi +} // namespace phi::sparse #define PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(name, prefix) \ PD_REGISTER_KERNEL(name##_coo, \ diff --git a/paddle/pir/src/core/utils.cc b/paddle/pir/src/core/utils.cc index c95c7c814743d..984e626172bf9 100644 --- a/paddle/pir/src/core/utils.cc +++ b/paddle/pir/src/core/utils.cc @@ -14,8 +14,7 @@ #include "paddle/pir/include/core/utils.h" -namespace pir { -namespace detail { +namespace pir::detail { std::size_t hash_combine(std::size_t lhs, std::size_t rhs) { lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2); @@ -66,5 +65,4 @@ void PrintHeader(const std::string &header, std::ostream &os) { os << "===" << std::string(line_len, '-') << "===\n"; } -} // namespace detail -} // namespace pir +} // namespace pir::detail