
【Hackathon 6th Fundable Projects 2 No.29】 Fix modernize-concat-nested-namespaces-part-23 #64777

Open · wants to merge 6 commits into base: develop
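This PR applies clang-tidy's modernize-concat-nested-namespaces fix: each chain of single-line nested namespace blocks is collapsed into a C++17 nested namespace definition. A minimal sketch of the transformation, using a hypothetical `Touch` function rather than actual Paddle code:

```cpp
// Pre-C++17 style removed by this PR: each namespace is opened and
// closed on its own line.
//
//   namespace paddle {
//   namespace framework {
//   void Touch();
//   }  // namespace framework
//   }  // namespace paddle
//
// C++17 nested namespace definition added by this PR: one opener,
// one closer, identical semantics.
#include <iostream>

namespace paddle::framework {
void Touch() { std::cout << "paddle::framework::Touch\n"; }
}  // namespace paddle::framework

int main() {
  paddle::framework::Touch();  // fully qualified call works unchanged
  return 0;
}
```

The two forms are interchangeable under `-std=c++17` or later, which is why each diff below is a purely mechanical rewrite.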
6 changes: 2 additions & 4 deletions paddle/fluid/framework/block_desc.cc
@@ -19,8 +19,7 @@ limitations under the License. */
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

VarDesc *BlockDesc::Var(const std::string &name) {
auto it = vars_.find(name);
@@ -385,5 +384,4 @@ bool BlockDesc::NeedUpdate(bool include_subs) {
return need;
}

-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
8 changes: 2 additions & 6 deletions paddle/fluid/framework/details/async_ssa_graph_executor.cc
@@ -20,9 +20,7 @@
#include "paddle/fluid/distributed/ps/service/communicator/communicator.h"
#endif

-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {

inline void InitVarsInScope(const std::vector<VarInfo> &var_infos,
Scope *scope,
@@ -203,6 +201,4 @@ FetchResultType AsyncSSAGraphExecutor::Run(
return ret;
}

-} // namespace details
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::details
6 changes: 2 additions & 4 deletions paddle/fluid/framework/io/crypto/cipher_utils.cc
@@ -20,8 +20,7 @@

#include "paddle/fluid/platform/enforce.h"

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

std::string CipherUtils::GenKey(int length) {
CryptoPP::AutoSeededRandomPool prng;
@@ -115,5 +114,4 @@ bool CipherUtils::GetValue<bool>(

const int CipherUtils::AES_DEFAULT_IV_SIZE = 128;
const int CipherUtils::AES_DEFAULT_TAG_SIZE = 128;
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
@@ -15,9 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/ir/pass_tester_helper.h"
#include "paddle/fluid/framework/op_version_registry.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

void AddVarToScope(Scope* param_scope,
const std::string& name,
@@ -145,9 +143,7 @@ TEST(MultiHeadMatmulFusePass, pass_op_version_check) {
.IsPassCompatible("multihead_matmul_fuse_pass_v2"));
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

USE_PASS(multihead_matmul_fuse_pass);
USE_PASS(multihead_matmul_fuse_pass_v2);
@@ -17,9 +17,7 @@
#include "paddle/fluid/framework/ir/onednn/depthwise_conv_onednn_pass.h"
#include "paddle/fluid/framework/op_version_registry.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

void SetOp(ProgramDesc* prog,
const std::string& type,
@@ -154,8 +152,6 @@ TEST(DepthwiseConvMKLDNNPass, basic) {
EXPECT_EQ(after.onednn_conv_nodes, before.onednn_conv_nodes + 1);
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

USE_PASS(depthwise_conv_onednn_pass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/pass_test.cc
@@ -18,9 +18,7 @@ limitations under the License. */

#include "gtest/gtest.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
class Graph;
class Node;

@@ -279,9 +277,7 @@ TEST(PassTest, TestPassRegistrarDeconstructor) {
pass_registrary->~PassRegistrar();
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

REGISTER_PASS(test_pass, paddle::framework::ir::TestPass)
.RequirePassAttr("test_pass_attr")
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/quant_linear_fuse_pass.cc
@@ -39,9 +39,7 @@ void ConvertTensorType(phi::DenseTensor* tensor) {
}
} // namespace

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

#define GET_IR_NODE(node__) GET_IR_NODE_FROM_SUBGRAPH(node__, node__, pattern);
#define GET_NODES \
@@ -318,9 +316,7 @@ int QuantLinearFusePass::ApplyQuantLinearFusePattern(Graph* graph,
return found_count;
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

REGISTER_PASS(quant_linear_fuse_pass,
paddle::framework::ir::QuantLinearFusePass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/relu6_fuse_pass.cc
@@ -19,9 +19,7 @@

#include "paddle/fluid/framework/op_version_registry.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

void Relu6FusePass::ApplyImpl(ir::Graph* graph) const {
// This pass is now used for xpu, because xpu can fuse conv + bias + relu6
@@ -130,8 +128,6 @@ void Relu6FusePass::ApplyImpl(ir::Graph* graph) const {
gpd(graph, handler);
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

REGISTER_PASS(relu6_fuse_pass, paddle::framework::ir::Relu6FusePass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/relu6_fuse_pass_test.cc
@@ -18,9 +18,7 @@
#include "paddle/fluid/framework/ir/pass_tester_helper.h"
#include "paddle/fluid/platform/enforce.h"

-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {

template <typename T = float>
void AddVarToScope(Scope* param_scope,
@@ -63,8 +61,6 @@ TEST(Relu6FusePass, basic) {
"clip should be mapped to relu6 after pass."));
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

USE_PASS(relu6_fuse_pass);
12 changes: 4 additions & 8 deletions paddle/fluid/framework/ir/sigmoid_elementmul_fuse_pass.cc
@@ -23,10 +23,7 @@
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/enforce.h"

-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {

struct SigmoidElementmulFusePattern : public PatternBase {
SigmoidElementmulFusePattern(PDPattern* pattern,
@@ -65,7 +62,8 @@ SigmoidElementmulFusePattern::SigmoidElementmulFusePattern(
elemul_op->LinksFrom({sigmoid_x, sigmoid_out}).LinksTo({elemul_out});
}

-} // namespace patterns
+} // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {

SigmoidElementmulFusePass::SigmoidElementmulFusePass() = default;

@@ -114,9 +112,7 @@ void SigmoidElementmulFusePass::ApplyImpl(ir::Graph* graph) const {
AddStatis(found_subgraph_count);
}

-} // namespace ir
-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework::ir

REGISTER_PASS(sigmoid_elementmul_fuse_pass,
paddle::framework::ir::SigmoidElementmulFusePass);
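Note that sigmoid_elementmul_fuse_pass.cc is the one non-trivial case in this batch: code follows the closed patterns namespace but still belongs to ir, and a C++17 concatenated namespace cannot close only its innermost component. The rewrite therefore closes the full paddle::framework::ir::patterns chain and reopens paddle::framework::ir, as the diff above shows. A minimal sketch of that shape, with hypothetical stand-ins for the pattern and pass code:

```cpp
// Closing the whole concatenated chain and reopening the outer scope is
// the only way to "pop" one level with C++17 nested namespace syntax.
namespace paddle::framework::ir::patterns {
struct Pattern {};  // hypothetical stand-in for SigmoidElementmulFusePattern
}  // namespace paddle::framework::ir::patterns

namespace paddle::framework::ir {
// Hypothetical stand-in for the pass code; patterns:: resolves because
// we are back inside paddle::framework::ir.
inline patterns::Pattern MakePattern() { return {}; }
}  // namespace paddle::framework::ir

int main() {
  auto p = paddle::framework::ir::MakePattern();
  (void)p;
  return 0;
}
```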
6 changes: 2 additions & 4 deletions paddle/fluid/framework/new_executor/workqueue/workqueue.cc
@@ -11,8 +11,7 @@
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

void WorkQueueOptions::Validate() const {
PADDLE_ENFORCE_GT(name.size(),
@@ -251,5 +250,4 @@ std::unique_ptr<WorkQueueGroup> CreateWorkQueueGroup(
return ptr;
}

-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
6 changes: 2 additions & 4 deletions paddle/fluid/framework/pull_dense_worker.cc
@@ -19,8 +19,7 @@ namespace phi {
class DenseTensor;
} // namespace phi

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

class Scope;
class Variable;
@@ -270,5 +269,4 @@ void PullDenseWorker::MergeDenseParam() {
}
}

-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
6 changes: 2 additions & 4 deletions paddle/fluid/framework/unused_var_check.cc
@@ -29,8 +29,7 @@ PADDLE_DEFINE_EXPORTED_bool(
"Checking whether operator contains unused inputs, "
"especially for grad operator. It should be in unittest.");

-namespace paddle {
-namespace framework {
+namespace paddle::framework {

std::unordered_set<std::string> *GetThreadLocalUsedVarNameSet() {
thread_local std::unordered_set<std::string> used_var_name_set;
@@ -133,5 +132,4 @@ void CheckUnusedVar(const OperatorBase &op, const Scope &scope) {
}
}

-} // namespace framework
-} // namespace paddle
+} // namespace paddle::framework
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -23,9 +23,7 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

class ActivationOpConverter : public OpConverter {
public:
@@ -187,9 +185,7 @@ class ThresholdedReluOpConverter : public ActivationOpConverter {
};
#endif

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(relu, ReluOpConverter);
REGISTER_TRT_OP_CONVERTER(sigmoid, SigmoidOpConverter);
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/assign_op.cc
@@ -14,9 +14,7 @@ limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

class AssignOpConverter : public OpConverter {
public:
@@ -32,8 +30,6 @@ class AssignOpConverter : public OpConverter {
}
};

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(assign, AssignOpConverter);
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/bmm_op.cc
@@ -14,9 +14,7 @@ limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

class BMMOpConverter : public OpConverter {
public:
@@ -42,8 +40,6 @@ class BMMOpConverter : public OpConverter {
}
};

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(bmm, BMMOpConverter);
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/einsum_op.cc
@@ -11,9 +11,7 @@ limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

/*
* Einsum Op
@@ -46,8 +44,6 @@
}
};

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(einsum, EinsumOpConverter);
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
@@ -15,9 +15,7 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

class ElementwiseTensorOpConverter : public OpConverter {
public:
@@ -354,9 +352,7 @@ class PowOpConverter : public OpConverter {
}
};

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(elementwise_add_weight,
ElementwiseTensorAddOpConverter);
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc
@@ -14,9 +14,7 @@ limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

-namespace paddle {
-namespace inference {
-namespace tensorrt {
+namespace paddle::inference::tensorrt {

/*
* HardSigmoidOp, IActivationLayer in TRT. This Layer doesn't has weights.
@@ -49,8 +47,6 @@ class HardSigmoidOpConverter : public OpConverter {
}
};

-} // namespace tensorrt
-} // namespace inference
-} // namespace paddle
+} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(hard_sigmoid, HardSigmoidOpConverter);