【Hackathon 6th Fundable Projects 2 No.29】 Fix modernize-concat-nested-namespaces-part-12 #64767

Open · wants to merge 7 commits into base: develop
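For context: `modernize-concat-nested-namespaces` is the clang-tidy check that rewrites pre-C++17 nested namespace blocks into a single C++17 nested namespace definition. Every hunk below is an instance of the same mechanical transform; a minimal sketch, using an illustrative forward declaration like those in several of the files touched here:

```cpp
// Pre-C++17: each namespace is opened and closed on its own line.
namespace paddle {
namespace framework {
namespace ir {
class Node;
}  // namespace ir
}  // namespace framework
}  // namespace paddle

// C++17 nested namespace definition: identical semantics, fewer lines.
namespace paddle::framework::ir {
class Node;  // redeclares the same entity; the two forms are equivalent
}  // namespace paddle::framework::ir
```

The fix is typically applied mechanically, e.g. with something like `clang-tidy --checks='-*,modernize-concat-nested-namespaces' --fix`, which also rewrites the closing-brace comments to the concatenated name, as seen throughout this diff.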
8 changes: 2 additions & 6 deletions paddle/fluid/distributed/auto_parallel/dist_attr.cc
@@ -23,9 +23,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/var_desc.h"
 #include "paddle/phi/core/distributed/auto_parallel/proto_helper.h"
 
-namespace paddle {
-namespace distributed {
-namespace auto_parallel {
+namespace paddle::distributed::auto_parallel {
 
 using phi::distributed::auto_parallel::str_join;
 
@@ -487,6 +485,4 @@ bool operator==(const OperatorDistAttr& lhs, const OperatorDistAttr& rhs) {
   return true;
 }
 
-} // namespace auto_parallel
-} // namespace distributed
-} // namespace paddle
+} // namespace paddle::distributed::auto_parallel
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -28,8 +28,7 @@
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/tensor.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 namespace {
 bool IsPersistable(const framework::VarDesc *var) {
@@ -705,5 +704,4 @@ bool DistModel::Run(const std::vector<DistModelTensor> &input_data,
   return true;
 }
 
-} // namespace distributed
-} // namespace paddle
+} // namespace paddle::distributed
12 changes: 4 additions & 8 deletions paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -24,15 +24,12 @@
 
 static const int max_port = 65535;
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Scope;
 class Variable;
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 PD_DEFINE_int32(pserver_push_dense_merge_limit,
                 12,
@@ -2066,5 +2063,4 @@ void BrpcPsClient::PushDenseRawGradient(std::shared_ptr<DenseAsyncTask> &task,
   }
 }
 
-} // namespace distributed
-} // namespace paddle
+} // namespace paddle::distributed
8 changes: 2 additions & 6 deletions paddle/fluid/framework/details/multi_devices_helper.cc
@@ -18,9 +18,7 @@
 #include "paddle/fluid/framework/details/share_tensor_buffer_op_handle.h"
 #include "paddle/fluid/framework/ir/graph_helper.h"
 
-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {
 
 static constexpr size_t kUndefinedDevIdx = -1UL;
 
@@ -300,6 +298,4 @@ bool HasKeepLastReadOp(const ir::Graph &graph) {
   return HasDropLastReadOpImpl(graph, false);
 }
 
-} // namespace details
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::details
6 changes: 2 additions & 4 deletions paddle/fluid/framework/dist_multi_trainer.cc
@@ -22,8 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/device_worker_factory.h"
 #include "paddle/fluid/framework/trainer.h"
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 
 void DistMultiTrainer::Initialize(const TrainerDesc &trainer_desc,
                                   Dataset *dataset) {
@@ -232,5 +231,4 @@ void DistMultiTrainer::MergeToRootScope(phi::DenseTensor *root_tensor,
     root_data[i] += data[i];
   }
 }
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
@@ -19,9 +19,7 @@
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 #endif
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 #define GET_IR_NODE(node__) GET_IR_NODE_FROM_SUBGRAPH(node__, node__, pattern);
 #define GET_NODES \
@@ -171,9 +169,7 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_conv_eltwise_count);
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(conv_elementwise_add_fuse_pass,
               paddle::framework::ir::ConvElementwiseAddFusePass);
@@ -16,9 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void AddVarToScope(Scope* param_scope,
                    const std::string& name,
@@ -147,9 +145,7 @@ TEST(DenseMultiHeadMatmulToSparsePass, basic) {
                 num_fused_nodes_after));
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 USE_PASS(multihead_matmul_fuse_pass);
 USE_PASS(multihead_matmul_fuse_pass_v2);
@@ -18,9 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 TEST(EmbeddingElewiseLayernormFusePass, basic) {
   // inputs operator output
@@ -100,8 +98,6 @@ TEST(EmbeddingElewiseLayernormFusePass, pass_op_version_check) {
       .IsPassCompatible("embedding_eltwise_layernorm_fuse_pass"));
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 USE_PASS(embedding_eltwise_layernorm_fuse_pass);
16 changes: 4 additions & 12 deletions paddle/fluid/framework/ir/fuse_bn_act_pass.cc
@@ -19,19 +19,13 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 #include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void FuseBatchNormActPass::ApplyImpl(ir::Graph *graph) const {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -351,8 +345,6 @@ std::vector<Node *> FuseBatchNormActPass::ReplaceNode(
   return new_list;
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(fuse_bn_act_pass, paddle::framework::ir::FuseBatchNormActPass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/graph_pattern_detector_tester.cc
@@ -16,9 +16,7 @@
 
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Node;
 
@@ -207,6 +205,4 @@ TEST(GraphPatternDetector, IntermediateCheck) {
   ASSERT_EQ(count, 1);
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
@@ -23,9 +23,7 @@
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Node;
 
@@ -256,9 +254,7 @@ void LayerNormShiftPartitionFusePass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_count);
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(layernorm_shift_partition_fuse_pass,
               paddle::framework::ir::LayerNormShiftPartitionFusePass);
@@ -20,9 +20,7 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/platform/enforce.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Graph;
 
@@ -301,9 +299,7 @@ void BufferSharedInplaceOpPass::ApplyImpl(ProgramDesc *main_program,
   block->Flush();
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(buffer_shared_inplace_pass,
               paddle::framework::ir::BufferSharedInplaceOpPass)
@@ -17,9 +17,7 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/operators/controlflow/op_variant.h"
 #include "paddle/fluid/operators/controlflow/pylayer_op_helper.h"
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 using OpVariant = operators::OpVariant;
 class PyLayerOpEagerDeletionPass : public Pass {
  protected:
@@ -94,9 +92,7 @@ class PyLayerOpEagerDeletionPass : public Pass {
   }
 };
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(pylayer_op_eager_deletion_pass,
               paddle::framework::ir::PyLayerOpEagerDeletionPass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/node.cc
@@ -14,9 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/ir/node.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 // msvc15 don't support constexpr in correct way.
 // static constexpr member implies inline since CXX17 and may cause multiple
 // definition.
@@ -39,6 +37,4 @@ std::unique_ptr<Node> CreateNodeForTest(OpDesc *op_desc) {
   return std::unique_ptr<Node>(new Node(op_desc));
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/onednn/cpu_quantize_squash_pass.cc
@@ -23,9 +23,7 @@
 #include "paddle/phi/core/enforce.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 using string::PrettyLogDetail;
 
@@ -635,9 +633,7 @@ void CPUQuantizeSquashPass::ApplyImpl(ir::Graph* graph) const {
   QuantizeBf16Conv(graph);
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(cpu_quantize_squash_pass,
               paddle::framework::ir::CPUQuantizeSquashPass);
@@ -18,9 +18,7 @@
 #include "paddle/fluid/framework/naive_executor.h"
 #include "paddle/phi/common/place.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void SetOp(ProgramDesc* prog,
            const std::string& type,
@@ -1179,8 +1177,6 @@ TEST(CpuQuantizeSquashPass, squash_all_u8_input_to_concat2) {
       BuildU8U8ConcatProgramDesc(1.2f, 1.2f), expected_operators, remove_nodes);
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 USE_PASS(cpu_quantize_squash_pass);
@@ -18,9 +18,7 @@
 #include "paddle/phi/core/enforce.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 using string::PrettyLogDetail;
 
@@ -202,9 +200,7 @@ MatmulTransposeReshapeMKLDNNPass::MatmulTransposeReshapeMKLDNNPass() {
       .End();
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(matmul_transpose_reshape_onednn_fuse_pass,
               paddle::framework::ir::MatmulTransposeReshapeMKLDNNPass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/onednn/quant_dequant_onednn_pass.cc
@@ -20,9 +20,7 @@
 #include "paddle/fluid/framework/ir/onednn/onednn_pass_util.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void QuantDequantMkldnnPass::MarkSkipQuantizedOps(
     ir::Graph* graph, const std::unordered_set<std::string>& skip_ops) const {
@@ -758,9 +756,7 @@ void QuantDequantMkldnnPass::ApplyImpl(ir::Graph* graph) const {
       graph, "has_quant_info", "var_quant_scales", var_quant_scales);
 }
 
-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir
 
 REGISTER_PASS(quant_dequant_onednn_pass,
               paddle::framework::ir::QuantDequantMkldnnPass);