diff --git a/paddle/fluid/distributed/fleet_executor/task_loop.cc b/paddle/fluid/distributed/fleet_executor/task_loop.cc
index 270bce7786038..44e853a0d9684 100644
--- a/paddle/fluid/distributed/fleet_executor/task_loop.cc
+++ b/paddle/fluid/distributed/fleet_executor/task_loop.cc
@@ -17,8 +17,7 @@
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"

-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {

 thread_local TaskLoop* TaskLoop::thread_local_loop_ = nullptr;

@@ -81,5 +80,4 @@ void TaskLoop::AbortNotInLoopThread() {
                           std::this_thread::get_id()));
 }

-} // namespace distributed
-} // namespace paddle
+} // namespace paddle::distributed
diff --git a/paddle/fluid/distributed/ps/table/graph/graph_node.cc b/paddle/fluid/distributed/ps/table/graph/graph_node.cc
index 31c098c49fba2..fa8fa61a23eab 100644
--- a/paddle/fluid/distributed/ps/table/graph/graph_node.cc
+++ b/paddle/fluid/distributed/ps/table/graph/graph_node.cc
@@ -15,8 +15,7 @@
 #include "paddle/fluid/distributed/ps/table/graph/graph_node.h"
 #include

-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {

 GraphNode::~GraphNode() {
   if (sampler != nullptr) {
@@ -122,5 +121,4 @@ void FeatureNode::recover_from_buffer(char* buffer) {
     feature.push_back(str); // NOLINT
   }
 }
-} // namespace distributed
-} // namespace paddle
+} // namespace paddle::distributed
diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc
index 4c78b12fd4ac4..5e4edb1ca2870 100644
--- a/paddle/fluid/framework/details/all_reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -24,9 +24,7 @@
 COMMON_DECLARE_bool(sync_nccl_allreduce);
 #endif

-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {

 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
 AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
@@ -335,6 +333,4 @@ void AllReduceOpHandle::SyncNCCLAllReduce() {
 #endif

 std::string AllReduceOpHandle::Name() const { return "all_reduce"; }
-} // namespace details
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::details
diff --git a/paddle/fluid/framework/details/eager_deletion_op_handle.cc b/paddle/fluid/framework/details/eager_deletion_op_handle.cc
index 4dbff851f00e2..b8db1e321257b 100644
--- a/paddle/fluid/framework/details/eager_deletion_op_handle.cc
+++ b/paddle/fluid/framework/details/eager_deletion_op_handle.cc
@@ -21,15 +21,11 @@
 #endif
 #include

-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Variable;
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework

-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {

 EagerDeletionOpHandle::EagerDeletionOpHandle(
     ir::Node *node,
@@ -213,6 +209,4 @@ std::vector EagerDeletionOpHandle::VarsToDelete() const {
   return var_names;
 }

-} // namespace details
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::details
diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc
index fe43126ca8abe..05e1693eb650e 100644
--- a/paddle/fluid/framework/details/reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/reduce_op_handle.cc
@@ -26,9 +26,7 @@ PADDLE_DEFINE_EXPORTED_bool(
     false,
     "Whether to make the result of computation deterministic in CPU side.");

-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {

 std::once_flag CollectiveContext::init_flag_;
 std::unique_ptr CollectiveContext::context_;
@@ -318,6 +316,4 @@ std::vector ReduceOpHandle::GetInputValues(
 }

 std::string ReduceOpHandle::Name() const { return "reduce"; }
-} // namespace details
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::details
diff --git a/paddle/fluid/framework/ir/delete_cast_op_pass_test.cc b/paddle/fluid/framework/ir/delete_cast_op_pass_test.cc
index 17f0c642a60d1..c5480db1ca466 100644
--- a/paddle/fluid/framework/ir/delete_cast_op_pass_test.cc
+++ b/paddle/fluid/framework/ir/delete_cast_op_pass_test.cc
@@ -16,9 +16,7 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 void AddVarToScope(Scope* param_scope,
                    const std::string& name,
@@ -315,8 +313,6 @@ TEST(ApplyCastPass, basic) {
                         cast_num_in_graph));
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 USE_PASS(delete_cast_op_pass);
diff --git a/paddle/fluid/framework/ir/fusion_group/code_generator.cc b/paddle/fluid/framework/ir/fusion_group/code_generator.cc
index 2e5c2b5be4ac3..defc320495064 100644
--- a/paddle/fluid/framework/ir/fusion_group/code_generator.cc
+++ b/paddle/fluid/framework/ir/fusion_group/code_generator.cc
@@ -17,10 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/fusion_group/code_generator_helper.h"
 #include "paddle/fluid/framework/ir/fusion_group/cuda_resources.h"

-namespace paddle {
-namespace framework {
-namespace ir {
-namespace fusion_group {
+namespace paddle::framework::ir::fusion_group {

 std::string ExtractDataType(const std::vector& nodes) {
   std::string dtype_str = "";
@@ -373,7 +370,4 @@ std::unordered_map CodeGenerator::EncodeVarNodes(
   return var_ids;
 }

-} // namespace fusion_group
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir::fusion_group
diff --git a/paddle/fluid/framework/ir/merge_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/merge_layernorm_fuse_pass.cc
index 2e6aaa37808ae..1fbe22ff33021 100644
--- a/paddle/fluid/framework/ir/merge_layernorm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/merge_layernorm_fuse_pass.cc
@@ -39,9 +39,7 @@
   GET_IR_NODE(layernorm_40_in_bias);  \
   GET_IR_NODE(layernorm_40_in_scale); \
   GET_IR_NODE(layernorm_40_out);

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 MergeLayernormFusePass::MergeLayernormFusePass() {
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
@@ -176,9 +174,7 @@ void MergeLayernormFusePass::ApplyImpl(ir::Graph* graph) const {
   gpd(graph, handler);
   AddStatis(fusion_count);
 }
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 REGISTER_PASS(merge_layernorm_fuse_pass,
               paddle::framework::ir::MergeLayernormFusePass);
 REGISTER_PASS_CAPABILITY(merge_layernorm_fuse_pass)
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/add_reader_dependency_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/add_reader_dependency_pass.cc
index b907869b4a38e..e0b96b69116a4 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/add_reader_dependency_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/add_reader_dependency_pass.cc
@@ -16,9 +16,7 @@
 #include "paddle/fluid/framework/ir/pass.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 class Graph;

@@ -106,9 +104,7 @@ void AddReaderDependencyPass::ApplyImpl(Graph *graph) const {
   }
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(add_reader_dependency_pass,
               paddle::framework::ir::AddReaderDependencyPass);
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/set_reader_device_info_utils.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/set_reader_device_info_utils.cc
index f4f0e393c2499..72e8baaba5017 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/set_reader_device_info_utils.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/set_reader_device_info_utils.cc
@@ -17,9 +17,7 @@
 #include "paddle/fluid/framework/details/computation_op_handle.h"
 #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 static std::unordered_set ReaderOpSet() {
   return {"create_py_reader"};
@@ -78,6 +76,4 @@ void SetReaderOpDeviceInfo(Graph *graph, size_t dev_cnt, size_t dev_idx) {
   VLOG(10) << "Found op number " << found_op_num;
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
diff --git a/paddle/fluid/framework/ir/onednn/compute_propagate_scales_onednn_pass.cc b/paddle/fluid/framework/ir/onednn/compute_propagate_scales_onednn_pass.cc
index 1c733636ca7b0..cf17f00fa4080 100644
--- a/paddle/fluid/framework/ir/onednn/compute_propagate_scales_onednn_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/compute_propagate_scales_onednn_pass.cc
@@ -21,9 +21,7 @@
 #include "paddle/fluid/framework/ir/graph_helper.h"
 #include "paddle/fluid/framework/op_version_registry.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 void ComputePropagateScalesMkldnnPass::GetTensorFromVector(
     const std::vector& data_v, phi::DenseTensor* tensor) const {
@@ -516,9 +514,7 @@ void ComputePropagateScalesMkldnnPass::ApplyImpl(ir::Graph* graph) const {
       graph, "has_quant_info", "var_quant_scales", var_quant_scales);
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(compute_propagate_scales_onednn_pass,
               paddle::framework::ir::ComputePropagateScalesMkldnnPass);
diff --git a/paddle/fluid/framework/ir/onednn/conv_elementwise_add_onednn_fuse_pass.cc b/paddle/fluid/framework/ir/onednn/conv_elementwise_add_onednn_fuse_pass.cc
index 7733730f7d605..14857f3c550d8 100644
--- a/paddle/fluid/framework/ir/onednn/conv_elementwise_add_onednn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/conv_elementwise_add_onednn_fuse_pass.cc
@@ -19,9 +19,7 @@
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/utils/string/pretty_log.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 ResidualConnectionMKLDNNFusePass::ResidualConnectionMKLDNNFusePass() {
   AddOpCompat(OpCompat("conv2d"))
@@ -305,9 +303,7 @@ void ResidualConnectionMKLDNNFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(graph_with_stats.second);
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(conv_elementwise_add_onednn_fuse_pass,
               paddle::framework::ir::ResidualConnectionMKLDNNFusePass);
diff --git a/paddle/fluid/framework/ir/onednn/operator_reshape2_onednn_fuse_pass.cc b/paddle/fluid/framework/ir/onednn/operator_reshape2_onednn_fuse_pass.cc
index a21ddd579be3c..f937a1c681b17 100644
--- a/paddle/fluid/framework/ir/onednn/operator_reshape2_onednn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/operator_reshape2_onednn_fuse_pass.cc
@@ -19,9 +19,7 @@
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/utils/string/pretty_log.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 using string::PrettyLogDetail;

@@ -132,9 +130,7 @@ void FuseOperatorReshape2OneDNNPass::FuseReshape2(Graph *graph,
                  op_type);
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(operator_reshape2_onednn_fuse_pass,
               paddle::framework::ir::FuseOperatorReshape2OneDNNPass);
diff --git a/paddle/fluid/framework/ir/onednn/squeeze2_transpose2_onednn_fuse_pass.cc b/paddle/fluid/framework/ir/onednn/squeeze2_transpose2_onednn_fuse_pass.cc
index 4af9c6a770436..7ac8edbb6005c 100644
--- a/paddle/fluid/framework/ir/onednn/squeeze2_transpose2_onednn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/onednn/squeeze2_transpose2_onednn_fuse_pass.cc
@@ -17,9 +17,7 @@
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/utils/string/pretty_log.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 using string::PrettyLogDetail;

@@ -77,9 +75,7 @@ void FuseSqueeze2Transpose2OneDNNPass::ApplyImpl(Graph *graph) const {
   }
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(squeeze2_transpose2_onednn_fuse_pass,
               paddle::framework::ir::FuseSqueeze2Transpose2OneDNNPass);
diff --git a/paddle/fluid/framework/ir/placement_pass_base.cc b/paddle/fluid/framework/ir/placement_pass_base.cc
index ccf2bf22ab57b..718e15b01fd72 100644
--- a/paddle/fluid/framework/ir/placement_pass_base.cc
+++ b/paddle/fluid/framework/ir/placement_pass_base.cc
@@ -18,9 +18,7 @@ limitations under the License. */

 #include "paddle/fluid/framework/operator.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 void PlacementPassBase::ApplyImpl(ir::Graph* graph) const {
   VLOG(3) << "Applies " << GetPlacementName() << " placement strategy.";
@@ -43,6 +41,4 @@ void PlacementPassBase::ApplyImpl(ir::Graph* graph) const {
   }
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
diff --git a/paddle/fluid/framework/ir/preln_elementwise_groupnorm_act_pass.cc b/paddle/fluid/framework/ir/preln_elementwise_groupnorm_act_pass.cc
index 7cbb5c169f63c..3917423754ba4 100644
--- a/paddle/fluid/framework/ir/preln_elementwise_groupnorm_act_pass.cc
+++ b/paddle/fluid/framework/ir/preln_elementwise_groupnorm_act_pass.cc
@@ -18,18 +18,11 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/op_version_registry.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {

 struct PrelnGroupNormAct : public PatternBase {
   PrelnGroupNormAct(PDPattern *pattern, const std::string &name_scope)
@@ -92,7 +85,8 @@ void PrelnGroupNormAct::operator()(PDNode *x, PDNode *y, bool with_act) {
   }
 }

-} // namespace patterns
+} // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {

 int PrelnGroupNormActFusePass::ApplyAddGNPattern(ir::Graph *graph,
                                                  bool with_act) const {
@@ -203,9 +197,7 @@ void PrelnGroupNormActFusePass::ApplyImpl(ir::Graph *graph) const {
   AddStatis(found_subgraph_count);
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(preln_elementwise_groupnorm_act_pass,
               paddle::framework::ir::PrelnGroupNormActFusePass);
diff --git a/paddle/fluid/framework/ir/trt_qk_multihead_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/trt_qk_multihead_matmul_fuse_pass.cc
index d68694106b5c7..c6a22c143fb66 100644
--- a/paddle/fluid/framework/ir/trt_qk_multihead_matmul_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/trt_qk_multihead_matmul_fuse_pass.cc
@@ -22,10 +22,7 @@
 #endif
 #include "paddle/phi/kernels/funcs/blas/blas.h"

-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {

 //   input_qk  input_v
 //    |q  |k     v
@@ -249,7 +246,8 @@ PDNode* TrtQKMultiHeadMatmulPattern::operator()() {
   return reshape2_qkv_out_var;
 }

-} // namespace patterns
+} // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {

 int TrtQkMultiHeadMatmulFusePass::BuildQkFusion(Graph* graph,
                                                 const std::string& name_scope,
@@ -575,9 +573,7 @@ void TrtQkMultiHeadMatmulFusePass::ApplyImpl(Graph* graph) const {
   AddStatis(fusion_count);
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(trt_qk_multihead_matmul_fuse_pass,
               paddle::framework::ir::TrtQkMultiHeadMatmulFusePass);
diff --git a/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
index 0708218dbd07c..e90cadc782a61 100644
--- a/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
@@ -22,18 +22,11 @@ limitations under the License. */
 #include "paddle/fluid/inference/tensorrt/helper.h"
 #endif

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {

 struct TrtSkipLayerNorm : public PatternBase {
   TrtSkipLayerNorm(PDPattern *pattern, const std::string &name_scope)
@@ -102,7 +95,8 @@ PDNode *TrtSkipLayerNorm::operator()(PDNode *x, PDNode *y) {
   return layer_norm_out_var;
 }

-} // namespace patterns
+} // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {

 void TrtSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
   PADDLE_ENFORCE_NOT_NULL(
@@ -271,9 +265,7 @@ void TrtSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
   AddStatis(found_subgraph_count);
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(trt_skip_layernorm_fuse_pass,
               paddle::framework::ir::TrtSkipLayerNormFusePass);
diff --git a/paddle/fluid/framework/ir/trt_support_nhwc_pass.cc b/paddle/fluid/framework/ir/trt_support_nhwc_pass.cc
index d9907555a17b5..6b49a99c02364 100644
--- a/paddle/fluid/framework/ir/trt_support_nhwc_pass.cc
+++ b/paddle/fluid/framework/ir/trt_support_nhwc_pass.cc
@@ -26,9 +26,7 @@
 #include "paddle/fluid/framework/ir/node.h"
 #include "paddle/phi/common/data_type.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

 namespace {

@@ -383,8 +381,6 @@ void TrtSupportNHWCPass::ApplyImpl(Graph *graph) const {
   AddStatis(transposed_ops.size());
 }

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

 REGISTER_PASS(trt_support_nhwc_pass, paddle::framework::ir::TrtSupportNHWCPass);
diff --git a/paddle/fluid/framework/new_executor/instruction/instruction_util.cc b/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
index 609fd78106747..e7a05d75f6e99 100644
--- a/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
+++ b/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
@@ -42,8 +42,7 @@
 COMMON_DECLARE_bool(dynamic_static_unified_comm);
 #endif

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

 std::vector GetValueIds(pir::Value value,
                         const ValueExecutionInfo& value_exec_info) {
@@ -407,5 +406,4 @@ bool GetCondData(const phi::DenseTensor& cond) {
   return cpu_cond->data()[0];
 }

-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc
index 416d46c01e1f2..d5fe408d53401 100644
--- a/paddle/fluid/framework/new_executor/interpretercore.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore.cc
@@ -35,8 +35,7 @@ PADDLE_DEFINE_EXPORTED_bool(new_executor_use_local_scope,
                             "Use local_scope in new executor(especially used "
                             "in UT), can turn off for better performance");

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

 InterpreterCore::InterpreterCore(const platform::Place& place,
                                  const BlockDesc& block,
@@ -170,5 +169,4 @@ Variable* InterpreterCore::DebugVar(const std::string& name) const {
   return impl_->DebugVar(name);
 }

-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc
index 9f4f46c60cea4..2a39e664276ed 100644
--- a/paddle/fluid/imperative/prepared_operator.cc
+++ b/paddle/fluid/imperative/prepared_operator.cc
@@ -39,8 +39,7 @@ COMMON_DECLARE_bool(check_nan_inf);
 PD_DECLARE_bool(benchmark);
 COMMON_DECLARE_bool(run_kp_kernel);

-namespace paddle {
-namespace imperative {
+namespace paddle::imperative {

 static const phi::Kernel empty_kernel;
 static const framework::RuntimeContext empty_ctx({}, {});
@@ -752,5 +751,4 @@ void PreparedOp::Run(const NameVarMap& ins,
   }
 }

-} // namespace imperative
-} // namespace paddle
+} // namespace paddle::imperative
diff --git a/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc b/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc
index aaf9439d2b9ed..e8b8c27a24e58 100644
--- a/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc
+++ b/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc
@@ -20,9 +20,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_helper.h"
 #include "paddle/fluid/framework/scope.h"

-namespace paddle {
-namespace inference {
-namespace analysis {
+namespace paddle::inference::analysis {

 void SaveOptimizedModelPass::SaveOptimizedModel(Argument* argument) {
   std::string model_opt_cache_dir = argument->optimized_model_save_path();
@@ -137,6 +135,4 @@ std::string SaveOptimizedModelPass::repr() const {
   return "save_optimized_model_pass";
 }

-} // namespace analysis
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::analysis
diff --git a/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc
index 37a53d31f47b5..547ec74c19fa6 100644
--- a/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc
@@ -14,9 +14,7 @@ limitations under the License. */

 #include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

 template
 void ConvertConv3d(TensorRTEngine* engine,
@@ -192,9 +190,7 @@ class Deconv3dOpConverter : public OpConverter {
   }
 };

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

 REGISTER_TRT_OP_CONVERTER(conv3d, Conv3dOpConverter);
 REGISTER_TRT_OP_CONVERTER(conv3d_transpose, Deconv3dOpConverter);
diff --git a/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc b/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc
index d3fda4cb24e28..f505c36b2ed5c 100644
--- a/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc
@@ -14,9 +14,7 @@ limitations under the License. */

 #include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

 // LeakyRelu converter from fluid to tensorRT
 class LeakyReluOpConverter : public OpConverter {
@@ -121,8 +119,6 @@ class LeakyReluOpConverter : public OpConverter {
   }
 };

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

 REGISTER_TRT_OP_CONVERTER(leaky_relu, LeakyReluOpConverter);
diff --git a/paddle/fluid/inference/tensorrt/convert/matrix_multiply_op.cc b/paddle/fluid/inference/tensorrt/convert/matrix_multiply_op.cc
index 16d6f3f20750c..fd72f8b78f9af 100644
--- a/paddle/fluid/inference/tensorrt/convert/matrix_multiply_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/matrix_multiply_op.cc
@@ -12,9 +12,7 @@ limitations under the License. */
 #include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
 #include "paddle/phi/common/data_type.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

 /*
  * After trt_map_ops_to_matrix_multiply_pass(mul, matmul, matmul_v2 ->
@@ -266,8 +264,6 @@ class MatrixMultiplyOpConverter : public OpConverter {
   }
 };

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

 REGISTER_TRT_OP_CONVERTER(matrix_multiply, MatrixMultiplyOpConverter);
diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc
index 107217477d14f..f2d00ab4b4667 100644
--- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc
@@ -13,9 +13,7 @@ limitations under the License. */

 #include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

 class MultiClassNMS3OpConverter : public OpConverter {
  public:
@@ -170,8 +168,6 @@ class MultiClassNMS3OpConverter : public OpConverter {
   }
 };

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

 REGISTER_TRT_OP_CONVERTER(multiclass_nms3, MultiClassNMS3OpConverter);
diff --git a/paddle/fluid/inference/tensorrt/convert/transformer_input_convert_op.cc b/paddle/fluid/inference/tensorrt/convert/transformer_input_convert_op.cc
index 1dca9bb818c38..f7fda67a3643f 100644
--- a/paddle/fluid/inference/tensorrt/convert/transformer_input_convert_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/transformer_input_convert_op.cc
@@ -15,9 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
 #include "paddle/fluid/inference/tensorrt/plugin/transformer_input_output_convert_plugin.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

 /*
  * Convert Transformer Input(pos_id, max_seqlen).
@@ -58,8 +56,6 @@ class TransformerInputConvert : public OpConverter {
   }
 };

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

 REGISTER_TRT_OP_CONVERTER(transformer_input_convert, TransformerInputConvert);
diff --git a/paddle/fluid/inference/tensorrt/test_dynamic_engine.cc b/paddle/fluid/inference/tensorrt/test_dynamic_engine.cc
index d87c9af8cfa67..ae12901e7da90 100644
--- a/paddle/fluid/inference/tensorrt/test_dynamic_engine.cc
+++ b/paddle/fluid/inference/tensorrt/test_dynamic_engine.cc
@@ -29,9 +29,7 @@ limitations under the License. */
 #include "paddle/phi/common/float16.h"

 using float16 = phi::dtype::float16;
-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

 class TensorRTDynamicShapeValueEngineTest : public ::testing::Test {
  public:
@@ -1049,6 +1047,4 @@ TEST_F(TensorRTDynamicShapeGNTest, test_trt_dynamic_shape_groupnorm) {
 }
 */
 #endif
-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt
diff --git a/paddle/fluid/memory/allocation/cpu_allocator.cc b/paddle/fluid/memory/allocation/cpu_allocator.cc
index 398c015627860..426eeeae70e55 100644
--- a/paddle/fluid/memory/allocation/cpu_allocator.cc
+++ b/paddle/fluid/memory/allocation/cpu_allocator.cc
@@ -19,9 +19,7 @@
 #include "paddle/fluid/memory/stats.h"
 #include "paddle/fluid/platform/enforce.h"

-namespace paddle {
-namespace memory {
-namespace allocation {
+namespace paddle::memory::allocation {

 bool CPUAllocator::IsAllocThreadSafe() const { return true; }

@@ -52,6 +50,4 @@ phi::Allocation *CPUAllocator::AllocateImpl(size_t size) {
   HOST_MEMORY_STAT_UPDATE(Reserved, 0, size);
   return new Allocation(p, size, platform::CPUPlace());
 }
-} // namespace allocation
-} // namespace memory
-} // namespace paddle
+} // namespace paddle::memory::allocation
diff --git a/paddle/fluid/memory/allocation/memory_block_desc.cc b/paddle/fluid/memory/allocation/memory_block_desc.cc
index d20d56a6d05e8..1d1f3c2396921 100644
--- a/paddle/fluid/memory/allocation/memory_block_desc.cc
+++ b/paddle/fluid/memory/allocation/memory_block_desc.cc
@@ -17,9 +17,7 @@ limitations under the License. */

 #include "paddle/fluid/memory/allocation/memory_block.h"

-namespace paddle {
-namespace memory {
-namespace detail {
+namespace paddle::memory::detail {

 MemoryBlock::Desc::Desc(MemoryBlock::Type t,
                         size_t i,
@@ -74,6 +72,4 @@ bool MemoryBlock::Desc::CheckGuards() const {
   return guard_begin == hash(*this, 1) && guard_end == hash(*this, 2);
 }

-} // namespace detail
-} // namespace memory
-} // namespace paddle
+} // namespace paddle::memory::detail
diff --git a/paddle/fluid/operators/collective/c_allreduce_avg_op.cc b/paddle/fluid/operators/collective/c_allreduce_avg_op.cc
index 963ea26321bdb..13d07557f1e7c 100644
--- a/paddle/fluid/operators/collective/c_allreduce_avg_op.cc
+++ b/paddle/fluid/operators/collective/c_allreduce_avg_op.cc
@@ -14,17 +14,14 @@ limitations under the License. */

 #include "paddle/fluid/operators/collective/c_allreduce_op.h"

-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class OpDesc;
-} // namespace framework
-namespace imperative {
+} // namespace paddle::framework
+namespace paddle::imperative {
 class OpBase;
-} // namespace imperative
-} // namespace paddle
+} // namespace paddle::imperative

-namespace paddle {
-namespace operators {
+namespace paddle::operators {

 class CAllReduceAvgOpMaker : public CAllReduceOpMaker {
  protected:
@@ -33,8 +30,7 @@ class CAllReduceAvgOpMaker : public CAllReduceOpMaker {

 DECLARE_INPLACE_OP_INFERER(AllreduceAvgInplaceInferer, {"X", "Out"});

-} // namespace operators
-} // namespace paddle
+} // namespace paddle::operators

 namespace ops = paddle::operators;
diff --git a/paddle/fluid/operators/collective/partial_send_op.cc b/paddle/fluid/operators/collective/partial_send_op.cc
index cf2a0ece1a7ab..961b8c4cf1382 100644
--- a/paddle/fluid/operators/collective/partial_send_op.cc
+++ b/paddle/fluid/operators/collective/partial_send_op.cc
@@ -14,8 +14,7 @@ limitations under the License. */

 #include "paddle/fluid/operators/collective/partial_send_op.h"

-namespace paddle {
-namespace operators {
+namespace paddle::operators {

 class PartialSendOp : public framework::OperatorWithKernel {
  public:
@@ -84,8 +83,7 @@ Reference: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/p2p.h
   }
 };

-} // namespace operators
-} // namespace paddle
+} // namespace paddle::operators

 namespace ops = paddle::operators;
diff --git a/paddle/fluid/pir/dialect/distributed/ir/dist_tools.cc b/paddle/fluid/pir/dialect/distributed/ir/dist_tools.cc
index 505b178a452b0..f0d4fcee3b19d 100644
--- a/paddle/fluid/pir/dialect/distributed/ir/dist_tools.cc
+++ b/paddle/fluid/pir/dialect/distributed/ir/dist_tools.cc
@@ -17,8 +17,7 @@
 #include "paddle/common/enforce.h"
 #include "paddle/pir/include/core/operation.h"

-namespace paddle {
-namespace dialect {
+namespace paddle::dialect {

 bool AllInputAreDist(const std::vector& inputs) {
   for (auto value : inputs) {
@@ -225,5 +224,4 @@ void CopyLeafOpToMesh(pir::Value value, ProcessMeshAttribute mesh_attr) {
     }
   }
 }
-} // namespace dialect
-} // namespace paddle
+} // namespace paddle::dialect
diff --git a/paddle/fluid/pir/dialect/operator/ir/op_attribute.cc b/paddle/fluid/pir/dialect/operator/ir/op_attribute.cc
index 8a843a8881734..4eb8190eaa111 100644
--- a/paddle/fluid/pir/dialect/operator/ir/op_attribute.cc
+++ b/paddle/fluid/pir/dialect/operator/ir/op_attribute.cc
@@ -16,8 +16,7 @@
 #include "paddle/common/enforce.h"
 #include "paddle/common/errors.h"

-namespace paddle {
-namespace dialect {
+namespace paddle::dialect {
 const phi::IntArray &IntArrayAttribute::data() const {
   return storage()->GetAsKey();
 }
@@ -130,8 +129,7 @@ DataLayoutAttribute DataLayoutAttribute::Parse(
       parser.ctx, StringToDataLayoutMap().at(datalayout_token_val));
 }

-} // namespace dialect
-} // namespace paddle
+} // namespace paddle::dialect

 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::IntArrayAttribute)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ScalarAttribute)
diff --git a/paddle/fluid/pir/dialect/operator/transforms/param_to_variable.cc b/paddle/fluid/pir/dialect/operator/transforms/param_to_variable.cc
index 1d93e27c59b0b..78cb8e6460769 100644
--- a/paddle/fluid/pir/dialect/operator/transforms/param_to_variable.cc
+++ b/paddle/fluid/pir/dialect/operator/transforms/param_to_variable.cc
@@ -21,8 +21,7 @@
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/core/dense_tensor.h"

-namespace paddle {
-namespace dialect {
+namespace paddle::dialect {
 std::shared_ptr ParameterConvertInterface::ParameterToVariable(
     pir::Parameter *parameter) {
   if (parameter->type().isa()) {
@@ -79,7 +78,6 @@ std::unique_ptr ParameterConvertInterface::VariableToParameter(
   }
 }

-} // namespace dialect
-} // namespace paddle
+} // namespace paddle::dialect

 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ParameterConvertInterface)
diff --git a/paddle/fluid/platform/device/gpu/gpu_info.cc b/paddle/fluid/platform/device/gpu/gpu_info.cc
index 358d52d03d31b..d7b164862cd7e 100644
--- a/paddle/fluid/platform/device/gpu/gpu_info.cc
+++ b/paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -68,8 +68,7 @@ PADDLE_DEFINE_EXPORTED_uint64(cuda_memory_async_pool_realease_threshold,
                               "Amount of reserved memory in bytes to hold onto "
                               "before trying to release memory back to the OS");

-namespace paddle {
-namespace platform {
+namespace paddle::platform {

 void GpuMemoryUsage(size_t *available, size_t *total) {
   size_t actual_available, actual_total;
@@ -719,5 +718,4 @@ void GpuMemsetAsync(void *dst, int value, size_t count, gpuStream_t stream) {
   phi::backends::gpu::GpuMemsetAsync(dst, value, count, stream);
 }

-} // namespace platform
-} // namespace paddle
+} // namespace paddle::platform
diff --git a/paddle/fluid/platform/dynload/nccl.cc b/paddle/fluid/platform/dynload/nccl.cc
index 7b0ea3bb7f3c1..ee270918b59c7 100644
--- a/paddle/fluid/platform/dynload/nccl.cc
+++ b/paddle/fluid/platform/dynload/nccl.cc
@@ -14,9 +14,7 @@ limitations under the License. */

 #include "paddle/fluid/platform/dynload/nccl.h"

-namespace paddle {
-namespace platform {
-namespace dynload {
+namespace paddle::platform::dynload {

 #define DEFINE_WRAP(__name) DynLoad__##__name __name

@@ -38,6 +36,4 @@ NCCL_RAND_ROUTINE_EACH_AFTER_2703(DEFINE_WRAP)
 NCCL_RAND_ROUTINE_EACH_AFTER_21100(DEFINE_WRAP)
 #endif

-} // namespace dynload
-} // namespace platform
-} // namespace paddle
+} // namespace paddle::platform::dynload
diff --git a/paddle/fluid/prim/api/manual_prim/utils/static_utils.cc b/paddle/fluid/prim/api/manual_prim/utils/static_utils.cc
index 2f76e8bbd966f..43ab21ccd3e06 100644
--- a/paddle/fluid/prim/api/manual_prim/utils/static_utils.cc
+++ b/paddle/fluid/prim/api/manual_prim/utils/static_utils.cc
@@ -23,8 +23,7 @@
 #include "paddle/fluid/prim/utils/static/static_global_utils.h"
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/utils/data_type.h"
-namespace paddle {
-namespace prim {
+namespace paddle::prim {
 using Tensor = paddle::Tensor;
 template <>
 TEST_API Tensor empty(const paddle::experimental::IntArray& shape,
@@ -69,5 +68,4 @@ void by_pass(const paddle::Tensor& x, paddle::Tensor* real_out) {
   set_output(out, real_out);
 }

-} // namespace prim
-} // namespace paddle
+} // namespace paddle::prim
diff --git a/paddle/fluid/prim/utils/static/static_global_utils.cc b/paddle/fluid/prim/utils/static/static_global_utils.cc
index 3d1aa2158048d..71179429dc997 100644
--- a/paddle/fluid/prim/utils/static/static_global_utils.cc
+++ b/paddle/fluid/prim/utils/static/static_global_utils.cc
@@ -14,12 +14,10 @@

 #include "paddle/fluid/prim/utils/static/static_global_utils.h"

-namespace paddle {
-namespace prim {
+namespace paddle::prim {

 StaticCompositeContext* StaticCompositeContext::static_composite_context_ =
     new StaticCompositeContext();
 thread_local bool StaticCompositeContext::enable_bwd_prim_ = false;
 thread_local bool StaticCompositeContext::enable_fwd_prim_ = false;
 thread_local bool StaticCompositeContext::enable_eager_prim_ = false;
-} // namespace prim
-} // namespace paddle
+} // namespace paddle::prim
diff --git a/paddle/fluid/pybind/graph.cc b/paddle/fluid/pybind/graph.cc
index 6acba237ba928..4e5329bbf2bfc 100644
--- a/paddle/fluid/pybind/graph.cc
+++ b/paddle/fluid/pybind/graph.cc
@@ -47,8 +47,7 @@ using paddle::framework::ir::NodeComp;
 using paddle::framework::ir::TopologySortOperations;
 using pybind11::return_value_policy;

-namespace paddle {
-namespace pybind {
+namespace paddle::pybind {
 void BindGraph(py::module *m) {
   m->def("graph_safe_remove_nodes",
         [](Graph *graph, const std::unordered_set &nodes) {
@@ -408,5 +407,4 @@ void BindPass(py::module *m) {
       });
 }

-} // namespace pybind
-} // namespace paddle
+} // namespace paddle::pybind
diff --git a/paddle/phi/api/lib/context_pool.cc b/paddle/phi/api/lib/context_pool.cc
index ee1e21a58e2f1..e2eb1af09d8a5 100644
--- a/paddle/phi/api/lib/context_pool.cc
+++ b/paddle/phi/api/lib/context_pool.cc
@@ -23,8 +23,7 @@ limitations under the License. */
 #include "paddle/phi/core/cuda_stream.h"
 #endif

-namespace paddle {
-namespace experimental {
+namespace paddle::experimental {

 void DeviceContextPool::SyncDeviceContext(const Place& place) {
   if (!phi::DeviceContextPool::IsInitialized()) {
@@ -64,8 +63,7 @@ phi::DeviceContext* DeviceContextPool::GetMutable(const Place& place) {
   return const_cast(Get(place)); // NOLINT
 }

-} // namespace experimental
-} // namespace paddle
+} // namespace paddle::experimental

 namespace paddle {
diff --git a/paddle/phi/backends/dynload/cublas.cc b/paddle/phi/backends/dynload/cublas.cc
index 2fe9ae774bf7a..b870a90cb091c 100644
--- a/paddle/phi/backends/dynload/cublas.cc
+++ b/paddle/phi/backends/dynload/cublas.cc
@@ -14,8 +14,7 @@ limitations under the License. */

 #include "paddle/phi/backends/dynload/cublas.h"

-namespace phi {
-namespace dynload {
+namespace phi::dynload {
 std::once_flag cublas_dso_flag;
 void *cublas_dso_handle = nullptr;

@@ -34,5 +33,4 @@ CUBLAS_BLAS_ROUTINE_EACH_R3(DEFINE_WRAP);
 #ifdef CUBLAS_BLAS_ROUTINE_EACH_R4
 CUBLAS_BLAS_ROUTINE_EACH_R4(DEFINE_WRAP);
 #endif
-} // namespace dynload
-} // namespace phi
+} // namespace phi::dynload
diff --git a/paddle/phi/backends/dynload/cusparse.cc b/paddle/phi/backends/dynload/cusparse.cc
index ce8f87dc3cdfa..9d89b746df5b7 100644
--- a/paddle/phi/backends/dynload/cusparse.cc
+++ b/paddle/phi/backends/dynload/cusparse.cc
@@ -14,8 +14,7 @@ limitations under the License. */

 #include "paddle/phi/backends/dynload/cusparse.h"

-namespace phi {
-namespace dynload {
+namespace phi::dynload {
 std::once_flag cusparse_dso_flag;
 void *cusparse_dso_handle;

@@ -34,5 +33,4 @@ CUSPARSE_ROUTINE_EACH_R2(DEFINE_WRAP);
 CUSPARSE_ROUTINE_EACH_R3(DEFINE_WRAP);
 #endif

-} // namespace dynload
-} // namespace phi
+} // namespace phi::dynload
diff --git a/paddle/phi/core/distributed/auto_parallel/dist_attr.cc b/paddle/phi/core/distributed/auto_parallel/dist_attr.cc
index 62fbd97c46ab2..98dfa339589a5 100644
--- a/paddle/phi/core/distributed/auto_parallel/dist_attr.cc
+++ b/paddle/phi/core/distributed/auto_parallel/dist_attr.cc
@@ -21,8 +21,7 @@ limitations under the License. */
 #include "glog/logging.h"
 #include "paddle/phi/core/distributed/auto_parallel/proto_helper.h"

-namespace phi {
-namespace distributed {
+namespace phi::distributed {

 using phi::distributed::auto_parallel::str_join;
 using phi::distributed::auto_parallel::TensorDistAttrProto;
@@ -450,5 +449,4 @@ bool TensorDistAttr::is_partial(int64_t mesh_axis) const {

 void TensorDistAttr::set_skip_check_mesh(bool skip) { skip_check_mesh_ = skip; }

-} // namespace distributed
-} // namespace phi
+} // namespace phi::distributed
diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/x_to_r_reshard_function.cc b/paddle/phi/core/distributed/auto_parallel/reshard/x_to_r_reshard_function.cc
index bd415480d64e9..947a4b77f6961 100644
--- a/paddle/phi/core/distributed/auto_parallel/reshard/x_to_r_reshard_function.cc
+++ b/paddle/phi/core/distributed/auto_parallel/reshard/x_to_r_reshard_function.cc
@@ -25,8 +25,7 @@
 #include "paddle/phi/kernels/p_recv_kernel.h"
 #include "paddle/phi/kernels/p_send_kernel.h"

-namespace phi {
-namespace distributed {
+namespace phi::distributed {

 bool XToRShrinkReshardFunction::IsSuitable(
     const DistTensor& in, const TensorDistAttr& out_dist_attr) {
@@ -130,5 +129,4 @@ void XToRShrinkReshardFunction::Eval(phi::DeviceContext* dev_ctx,
   }
 }

-} // namespace distributed
-} // namespace phi
+} // namespace phi::distributed
diff --git a/paddle/phi/kernels/funcs/concat_and_split_functor.cc b/paddle/phi/kernels/funcs/concat_and_split_functor.cc
index fd49748666a6e..c42bbbd3a5318 100644
--- a/paddle/phi/kernels/funcs/concat_and_split_functor.cc
+++ b/paddle/phi/kernels/funcs/concat_and_split_functor.cc
@@ -14,8 +14,7 @@ limitations under the License. */

 #include "paddle/phi/kernels/funcs/concat_and_split_functor.h"

-namespace phi {
-namespace funcs {
+namespace phi::funcs {

 /*
  * All tensors' dimension should be the same and the values of
@@ -132,5 +131,4 @@ struct SplitFunctor {

 FOR_ALL_TYPES(DEFINE_FUNCTOR);

-} // namespace funcs
-} // namespace phi
+} // namespace phi::funcs
diff --git a/paddle/phi/kernels/funcs/matrix_inverse.cc b/paddle/phi/kernels/funcs/matrix_inverse.cc
index c316970e6a560..940e0ea3146ec 100644
--- a/paddle/phi/kernels/funcs/matrix_inverse.cc
+++ b/paddle/phi/kernels/funcs/matrix_inverse.cc
@@ -16,8 +16,7 @@ limitations under the License. */

 #include "paddle/phi/kernels/funcs/blas/blas.h"

-namespace phi {
-namespace funcs {
+namespace phi::funcs {

 template
 void MatrixInverseFunctor::operator()(const Context& dev_ctx,
@@ -29,5 +28,4 @@ void MatrixInverseFunctor::operator()(const Context& dev_ctx,

 template class MatrixInverseFunctor;
 template class MatrixInverseFunctor;

-} // namespace funcs
-} // namespace phi
+} // namespace phi::funcs
diff --git a/paddle/phi/kernels/funcs/sequence_pooling.cc b/paddle/phi/kernels/funcs/sequence_pooling.cc
index f4ee9c323366e..1fdaadfea01a1 100644
--- a/paddle/phi/kernels/funcs/sequence_pooling.cc
+++ b/paddle/phi/kernels/funcs/sequence_pooling.cc
@@ -21,8 +21,7 @@ limitations under the License. */
 #include "paddle/phi/kernels/funcs/jit/kernels.h"
 #include "paddle/phi/kernels/funcs/math_function.h"

-namespace phi {
-namespace funcs {
+namespace phi::funcs {

 template ;
 template class SequencePoolGradFunctor;
 template class SequencePoolGradFunctor;

-} // namespace funcs
-} // namespace phi
+} // namespace phi::funcs
diff --git a/paddle/phi/kernels/fusion/cpu/fusion_seqconv_eltadd_relu_kernel.cc b/paddle/phi/kernels/fusion/cpu/fusion_seqconv_eltadd_relu_kernel.cc
index 4ff18849316d8..456d3370990cb 100644
--- a/paddle/phi/kernels/fusion/cpu/fusion_seqconv_eltadd_relu_kernel.cc
+++ b/paddle/phi/kernels/fusion/cpu/fusion_seqconv_eltadd_relu_kernel.cc
@@ -23,8 +23,7 @@
 #include "paddle/phi/kernels/funcs/blas/blas.h"
 #include "paddle/phi/kernels/funcs/fc_functor.h"

-namespace phi {
-namespace fusion {
+namespace phi::fusion {

 template
 void FusionSeqConvEltAddReluKernel(const Context& dev_ctx,
@@ -148,8 +147,7 @@ void FusionSeqConvEltAddReluKernel(const Context& dev_ctx,
                  true);
 }

-} // namespace fusion
-} // namespace phi
+} // namespace phi::fusion

 PD_REGISTER_KERNEL(fusion_seqconv_eltadd_relu,
                    CPU,