【Hackathon 6th Fundable Projects 2 No.29】 Fix modernize-concat-nested-namespaces-part-14 (#64769)

* part 14

* format

* format
walkalone20 authored Jun 4, 2024
1 parent 0f7ed5e commit 4000a48
Showing 50 changed files with 118 additions and 282 deletions.
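
For context: this PR series mechanically applies clang-tidy's modernize-concat-nested-namespaces check, rewriting pre-C++17 nested namespace declarations into the concatenated form that C++17 introduced. A minimal sketch of the transformation, using a hypothetical Example function not taken from this commit:

// Before: C++11 style, one declaration and one closing brace per level.
namespace paddle {
namespace framework {
void Example() {}  // hypothetical placeholder for any namespace-scope code
}  // namespace framework
}  // namespace paddle

// After: C++17 concatenated form, one declaration and one closing brace.
namespace paddle::framework {
void Example() {}  // hypothetical placeholder for any namespace-scope code
}  // namespace paddle::framework

Both forms declare the same entity, paddle::framework::Example, so the rewrite is purely syntactic. Fixes like these can typically be generated with clang-tidy -checks='-*,modernize-concat-nested-namespaces' -fix, assuming a compile database for the target files.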
6 changes: 2 additions & 4 deletions paddle/common/flags_native.cc
@@ -25,8 +25,7 @@
 #include <string>
 #include <vector>
 
-namespace paddle {
-namespace flags {
+namespace paddle::flags {
 
 std::stringstream& ErrorStream() {
   static std::stringstream err_ss;
@@ -554,5 +553,4 @@ INSTANTIATE_GET_FROM_ENV(std::string);
 
 #undef INSTANTIATE_GET_FROM_ENV
 
-}  // namespace flags
-}  // namespace paddle
+}  // namespace paddle::flags
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/collective/process_group_gloo.cc
@@ -32,8 +32,7 @@
 #include "paddle/phi/api/lib/data_transform.h"
 #include "paddle/phi/core/distributed/comm_context_manager.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 #ifdef _WIN32
 #define GENERATE_FUNC(type, func, ...) \
@@ -727,5 +726,4 @@ phi::distributed::GlooCommContext* ProcessGroupGloo::GetCommContext() {
   return comm_context;
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
@@ -17,8 +17,7 @@
 #include "paddle/fluid/distributed/fleet_executor/task_node.h"
 #include "paddle/fluid/framework/operator.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 AmplifierInterceptor::AmplifierInterceptor(int64_t interceptor_id,
                                            TaskNode* node)
@@ -56,5 +55,4 @@ void AmplifierInterceptor::ReplyCompletedToUpStream() {
 
 REGISTER_INTERCEPTOR(Amplifier, AmplifierInterceptor);
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
@@ -19,8 +19,7 @@
 #include <unordered_map>
 
 #include "paddle/phi/core/generator.h"
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 void RandomSampler::build(GraphEdgeBlob *edges) { this->edges = edges; }
 
@@ -164,5 +163,4 @@ int WeightedSampler::sample(
   subtract_count_map[this]++;
   return return_idx;
 }
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/test/ctr_dymf_accessor_test.cc
@@ -22,8 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/distributed/ps/table/sparse_sgd_rule.h"
 #include "paddle/fluid/distributed/the_one_ps.pb.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 REGISTER_PSCORE_CLASS(SparseValueSGDRule, SparseAdaGradSGDRule);
 REGISTER_PSCORE_CLASS(SparseValueSGDRule, StdAdaGradSGDRule);
 REGISTER_PSCORE_CLASS(SparseValueSGDRule, SparseAdamSGDRule);
@@ -171,5 +170,4 @@ TEST(downpour_feature_value_accessor_test, test_string_related) {
   ASSERT_NE(acc->ParseFromString(str, value), 0);
   // make sure init_zero=true
 }
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
8 changes: 2 additions & 6 deletions paddle/fluid/framework/details/fused_broadcast_op_handle.cc
@@ -17,9 +17,7 @@
 #include "paddle/fluid/framework/details/container_cast.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
 
-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {
 
 void FusedBroadcastOpHandle::RunImpl() {
   platform::RecordEvent record_event(
@@ -58,6 +56,4 @@ void FusedBroadcastOpHandle::RunImpl() {
 
 std::string FusedBroadcastOpHandle::Name() const { return "fused_broadcast"; }
 
-}  // namespace details
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::details
17 changes: 6 additions & 11 deletions paddle/fluid/framework/executor_cache.cc
@@ -32,16 +32,11 @@ DECLARE_FILE_SYMBOLS(print_statistics);
 COMMON_DECLARE_bool(pir_apply_inplace_pass);
 COMMON_DECLARE_bool(print_ir);
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class ProgramDesc;
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
 
-namespace paddle {
-namespace framework {
-
-namespace details {
+namespace paddle::framework::details {
 
 static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
   framework::ExecutionStrategy execution_strategy;
@@ -208,7 +203,8 @@ std::set<std::string> ParseSafeEagerDeletionSkipVarsSet(
   VLOG(1) << "Found skip_eager_delete_vars: " << skip_eager_delete_vars.size();
   return skip_eager_delete_vars;
 }
-}  // namespace details
+}  // namespace paddle::framework::details
+namespace paddle::framework {
 
 // C++11 removes the need for manual locking. Concurrent execution shall wait if
 // a static local variable is already being initialized.
@@ -588,5 +584,4 @@ std::unique_ptr<::pir::Program> ConstructBackwardIrProgram(
   return res;
 }
 
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
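
Note the pattern in executor_cache.cc above: a concatenated namespace declaration cannot close one level and continue in the parent, so a file that defines symbols in both paddle::framework::details and paddle::framework is rewritten into two sequential blocks rather than one nested one. A minimal sketch of the resulting layout, with hypothetical function names not taken from this commit:

namespace paddle::framework::details {
void DetailsHelper() {}  // hypothetical: code scoped to ...::details
}  // namespace paddle::framework::details

namespace paddle::framework {
void FrameworkApi() {}  // hypothetical: code scoped to the parent namespace
}  // namespace paddle::framework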
6 changes: 2 additions & 4 deletions paddle/fluid/framework/io/save_load_tensor.cc
@@ -19,8 +19,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/phi/common/port.h"
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 
 void SaveTensor(const phi::DenseTensor& x,
                 const std::string& file_path,
@@ -54,5 +53,4 @@ void LoadTensor(const std::string& file_path, phi::DenseTensor* out) {
 
   framework::DeserializeFromStream(fin, out);
 }
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/delete_quant_dequant_op_pass.cc
@@ -20,9 +20,7 @@ namespace phi {
 class DenseTensor;
 }  // namespace phi
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 #define GET_IR_NODE(node__) GET_IR_NODE_FROM_SUBGRAPH(node__, node__, pattern);
 #define GET_NODES \
@@ -107,9 +105,7 @@ void DeleteQuantDequantOpPass::ApplyImpl(ir::Graph* graph) const {
   AddStatis(found_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(delete_quant_dequant_op_pass,
               paddle::framework::ir::DeleteQuantDequantOpPass);
@@ -18,10 +18,7 @@
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {
 PDNode *patterns::DenseMultiheadMatmul::operator()() {
   auto *multihead_matmul = pattern->NewNode(multihead_matmul_repr())
                                ->assert_is_op("multihead_matmul");
@@ -61,7 +58,8 @@ PDNode *patterns::DenseMultiheadMatmul::operator()() {
 
   return multihead_matmul_out;
 }
-}  // namespace patterns
+}  // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {
 DenseMultiheadMatmulToSparsePass::DenseMultiheadMatmulToSparsePass() {
   AddOpCompat(OpCompat("multihead_matmul"))
       .AddInput("Input")
@@ -170,9 +168,7 @@ void DenseMultiheadMatmulToSparsePass::ApplyImpl(Graph *graph) const {
   AddStatis(found_multihead_matmul_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(dense_multihead_matmul_to_sparse_pass,
               paddle::framework::ir::DenseMultiheadMatmulToSparsePass);
20 changes: 6 additions & 14 deletions paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.cc
@@ -18,18 +18,11 @@
 
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {
 
 static PDNode* create_emb_vars(PDPattern* pattern,
                                const std::string& name,
@@ -139,7 +132,8 @@ void SkipLayerNorm::operator()() {
       .LinksTo({layer_norm_out, layer_norm_mean_var, layer_norm_variance_var});
 }
 
-}  // namespace patterns
+}  // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {
 
 int EmbeddingEltwiseLayerNormFusePass::BuildFusion(
     Graph* graph, const std::string& name_scope
@@ -474,9 +468,7 @@ void EmbeddingEltwiseLayerNormFusePass::ApplyImpl(Graph* graph) const {
   AddStatis(fusion_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(embedding_eltwise_layernorm_fuse_pass,
               paddle::framework::ir::EmbeddingEltwiseLayerNormFusePass);
11 changes: 2 additions & 9 deletions paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.cc
@@ -14,11 +14,7 @@
 
 #include "paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
-
-namespace fc_lstm_test {
+namespace paddle::framework::ir::fc_lstm_test {
 
 TEST(FcLstmFusePass, basic) {
   std::unique_ptr<ir::Graph> graph = PrepareGraph();
@@ -50,9 +46,6 @@ TEST(FcLstmFusePass, basic) {
                         "The number of fusion_gru nodes does "
                         "not meet expectations after fuse"));
 }
-}  // namespace fc_lstm_test
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir::fc_lstm_test
 
 USE_PASS(fc_lstm_fuse_pass);
@@ -23,15 +23,11 @@ namespace phi {
 class DenseTensor;
 }  // namespace phi
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Scope;
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Node;
 
@@ -309,9 +305,7 @@ void ConvAffineChannelFusePass::FuseConvAffineChannel(
   AddStatis(found_conv_ac_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(conv_affine_channel_onednn_fuse_pass,
               paddle::framework::ir::ConvAffineChannelFusePass);
@@ -18,9 +18,7 @@
 #include "paddle/fluid/imperative/type_defs.h"
 #include "paddle/phi/common/place.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 namespace {
 struct Data {
   Data() = default;
@@ -380,8 +378,6 @@ TEST_F(ParamsQuantizationMkldnnPassTestFixture, conv_with_bias_2g2o2i1h1ws) {
 }
 
 }  // namespace
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 USE_PASS(params_quantization_onednn_pass);
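
One nuance visible in the tester above: the unnamed namespace remains its own nested block, since the C++17 concatenated syntax has no way to spell an anonymous level. A minimal sketch with hypothetical contents:

namespace paddle::framework::ir {
namespace {  // unnamed namespace: must stay a separate block
constexpr int kLocalValue = 1;  // hypothetical internal-linkage helper
}  // namespace
}  // namespace paddle::framework::ir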
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/quantize_helper.cc
@@ -14,9 +14,7 @@
 
 #include "paddle/fluid/framework/ir/quantize_helper.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void SaveQuantInfoInTheGraph(
     ir::Graph* graph,
@@ -74,6 +72,4 @@ std::vector<float> GetScaleVecValueForNode(
   return var_quant_scales->at(node->Name());
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
20 changes: 6 additions & 14 deletions paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc
@@ -19,18 +19,11 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {
 
 struct SkipLayerNorm : public PatternBase {
   SkipLayerNorm(PDPattern *pattern, const std::string &name_scope)
@@ -99,7 +92,8 @@ PDNode *SkipLayerNorm::operator()(PDNode *x, PDNode *y) {
   return layer_norm_out_var;
 }
 
-}  // namespace patterns
+}  // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {
 
 void SkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
   PADDLE_ENFORCE_NOT_NULL(
@@ -194,9 +188,7 @@ void SkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
   AddStatis(found_subgraph_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(skip_layernorm_fuse_pass,
               paddle::framework::ir::SkipLayerNormFusePass);
