Commit bff869b

part 24

walkalone20 committed May 30, 2024
1 parent a338cea commit bff869b
Showing 50 changed files with 116 additions and 284 deletions.
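
Every hunk in this commit makes the same mechanical change: namespace blocks that were opened and closed one level at a time are collapsed into a single C++17 nested namespace definition. A minimal before/after sketch (the Touch function is a hypothetical placeholder, and this assumes the build already compiles in C++17 mode or later):

// Pre-C++17: each namespace level needs its own opener and closer.
namespace paddle {
namespace framework {
void Touch();  // hypothetical declaration
}  // namespace framework
}  // namespace paddle

// C++17 nested namespace definition: one opener, one closing brace.
namespace paddle::framework {
void Touch();  // hypothetical declaration
}  // namespace paddle::framework

Both spellings declare exactly the same entity, paddle::framework::Touch; only brace and closing-comment boilerplate disappears, which is why the commit removes far more lines than it adds (116 additions against 284 deletions).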
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/fleet_executor/task_loop.cc
@@ -17,8 +17,7 @@
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"
 
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 thread_local TaskLoop* TaskLoop::thread_local_loop_ = nullptr;
 
@@ -81,5 +80,4 @@ void TaskLoop::AbortNotInLoopThread() {
     std::this_thread::get_id()));
 }
 
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/ps/table/graph/graph_node.cc
@@ -15,8 +15,7 @@
 #include "paddle/fluid/distributed/ps/table/graph/graph_node.h"
 
 #include <cstring>
-namespace paddle {
-namespace distributed {
+namespace paddle::distributed {
 
 GraphNode::~GraphNode() {
   if (sampler != nullptr) {
@@ -122,5 +121,4 @@ void FeatureNode::recover_from_buffer(char* buffer) {
     feature.push_back(str);  // NOLINT
   }
 }
-}  // namespace distributed
-}  // namespace paddle
+}  // namespace paddle::distributed
8 changes: 2 additions & 6 deletions paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -24,9 +24,7 @@
 COMMON_DECLARE_bool(sync_nccl_allreduce);
 #endif
 
-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {
 
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
 AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
@@ -335,6 +333,4 @@ void AllReduceOpHandle::SyncNCCLAllReduce() {
 #endif
 
 std::string AllReduceOpHandle::Name() const { return "all_reduce"; }
-}  // namespace details
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::details
14 changes: 4 additions & 10 deletions paddle/fluid/framework/details/eager_deletion_op_handle.cc
@@ -21,15 +21,11 @@
 #endif
 #include <algorithm>
 
-namespace paddle {
-namespace framework {
+namespace paddle::framework {
 class Variable;
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework
 
-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {
 
 EagerDeletionOpHandle::EagerDeletionOpHandle(
     ir::Node *node,
@@ -213,6 +209,4 @@ std::vector<std::string> EagerDeletionOpHandle::VarsToDelete() const {
   return var_names;
 }
 
-}  // namespace details
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::details
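
The first hunk above also shows the conversion applied to a forward-declaration block: class Variable; now sits inside one merged paddle::framework definition that is closed again before paddle::framework::details opens. A minimal self-contained sketch of that shape, with hypothetical names rather than Paddle's:

// Forward-declare a type from the outer namespace, C++17 style.
namespace app::core {
class Buffer;  // hypothetical forward declaration; defined elsewhere
}  // namespace app::core

namespace app::core::detail {
// A pointer parameter only needs the forward declaration above;
// unqualified Buffer resolves to app::core::Buffer here.
void Release(Buffer* buf);  // hypothetical helper
}  // namespace app::core::detail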
8 changes: 2 additions & 6 deletions paddle/fluid/framework/details/reduce_op_handle.cc
@@ -26,9 +26,7 @@ PADDLE_DEFINE_EXPORTED_bool(
     false,
     "Whether to make the result of computation deterministic in CPU side.");
 
-namespace paddle {
-namespace framework {
-namespace details {
+namespace paddle::framework::details {
 
 std::once_flag CollectiveContext::init_flag_;
 std::unique_ptr<CollectiveContext> CollectiveContext::context_;
@@ -318,6 +316,4 @@ std::vector<const T *> ReduceOpHandle::GetInputValues(
 }
 
 std::string ReduceOpHandle::Name() const { return "reduce"; }
-}  // namespace details
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::details
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/delete_cast_op_pass_test.cc
@@ -16,9 +16,7 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/framework/ir/pass_tester_helper.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void AddVarToScope(Scope* param_scope,
                    const std::string& name,
@@ -315,8 +313,6 @@ TEST(ApplyCastPass, basic) {
                         cast_num_in_graph));
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 USE_PASS(delete_cast_op_pass);
10 changes: 2 additions & 8 deletions paddle/fluid/framework/ir/fusion_group/code_generator.cc
@@ -17,10 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/fusion_group/code_generator_helper.h"
 #include "paddle/fluid/framework/ir/fusion_group/cuda_resources.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace fusion_group {
+namespace paddle::framework::ir::fusion_group {
 
 std::string ExtractDataType(const std::vector<Node*>& nodes) {
   std::string dtype_str = "";
@@ -373,7 +370,4 @@ std::unordered_map<Node*, int> CodeGenerator::EncodeVarNodes(
   return var_ids;
 }
 
-}  // namespace fusion_group
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir::fusion_group
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/merge_layernorm_fuse_pass.cc
@@ -39,9 +39,7 @@
   GET_IR_NODE(layernorm_40_in_bias);  \
   GET_IR_NODE(layernorm_40_in_scale); \
   GET_IR_NODE(layernorm_40_out);
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 MergeLayernormFusePass::MergeLayernormFusePass() {
   AddOpCompat(OpCompat("reshape2"))
       .AddInput("X")
@@ -176,9 +174,7 @@ void MergeLayernormFusePass::ApplyImpl(ir::Graph* graph) const {
   gpd(graph, handler);
   AddStatis(fusion_count);
 }
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 REGISTER_PASS(merge_layernorm_fuse_pass,
               paddle::framework::ir::MergeLayernormFusePass);
 REGISTER_PASS_CAPABILITY(merge_layernorm_fuse_pass)
@@ -16,9 +16,7 @@
 
 #include "paddle/fluid/framework/ir/pass.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 class Graph;
 
@@ -106,9 +104,7 @@ void AddReaderDependencyPass::ApplyImpl(Graph *graph) const {
   }
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(add_reader_dependency_pass,
               paddle::framework::ir::AddReaderDependencyPass);
@@ -17,9 +17,7 @@
 #include "paddle/fluid/framework/details/computation_op_handle.h"
 #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 static std::unordered_set<std::string> ReaderOpSet() {
   return {"create_py_reader"};
@@ -78,6 +76,4 @@ void SetReaderOpDeviceInfo(Graph *graph, size_t dev_cnt, size_t dev_idx) {
   VLOG(10) << "Found op number " << found_op_num;
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
@@ -21,9 +21,7 @@
 #include "paddle/fluid/framework/ir/graph_helper.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void ComputePropagateScalesMkldnnPass::GetTensorFromVector(
     const std::vector<float>& data_v, phi::DenseTensor* tensor) const {
@@ -516,9 +514,7 @@ void ComputePropagateScalesMkldnnPass::ApplyImpl(ir::Graph* graph) const {
       graph, "has_quant_info", "var_quant_scales", var_quant_scales);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(compute_propagate_scales_onednn_pass,
               paddle::framework::ir::ComputePropagateScalesMkldnnPass);
@@ -19,9 +19,7 @@
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 ResidualConnectionMKLDNNFusePass::ResidualConnectionMKLDNNFusePass() {
   AddOpCompat(OpCompat("conv2d"))
@@ -305,9 +303,7 @@ void ResidualConnectionMKLDNNFusePass::ApplyImpl(ir::Graph* graph) const {
 
   AddStatis(graph_with_stats.second);
 }
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(conv_elementwise_add_onednn_fuse_pass,
               paddle::framework::ir::ResidualConnectionMKLDNNFusePass);
@@ -19,9 +19,7 @@
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 using string::PrettyLogDetail;
 
@@ -132,9 +130,7 @@ void FuseOperatorReshape2OneDNNPass::FuseReshape2(Graph *graph,
                op_type);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(operator_reshape2_onednn_fuse_pass,
               paddle::framework::ir::FuseOperatorReshape2OneDNNPass);
@@ -17,9 +17,7 @@
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/utils/string/pretty_log.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 using string::PrettyLogDetail;
 
@@ -77,9 +75,7 @@ void FuseSqueeze2Transpose2OneDNNPass::ApplyImpl(Graph *graph) const {
   }
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(squeeze2_transpose2_onednn_fuse_pass,
               paddle::framework::ir::FuseSqueeze2Transpose2OneDNNPass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/placement_pass_base.cc
@@ -18,9 +18,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/operator.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 
 void PlacementPassBase::ApplyImpl(ir::Graph* graph) const {
   VLOG(3) << "Applies " << GetPlacementName() << " placement strategy.";
@@ -43,6 +41,4 @@ void PlacementPassBase::ApplyImpl(ir::Graph* graph) const {
   }
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
20 changes: 6 additions & 14 deletions paddle/fluid/framework/ir/preln_elementwise_groupnorm_act_pass.cc
@@ -18,18 +18,11 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
+namespace paddle::framework::ir {
 class Node;
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {
 
 struct PrelnGroupNormAct : public PatternBase {
   PrelnGroupNormAct(PDPattern *pattern, const std::string &name_scope)
@@ -92,7 +85,8 @@ void PrelnGroupNormAct::operator()(PDNode *x, PDNode *y, bool with_act) {
   }
 }
 
-}  // namespace patterns
+}  // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {
 
 int PrelnGroupNormActFusePass::ApplyAddGNPattern(ir::Graph *graph,
                                                  bool with_act) const {
@@ -203,9 +197,7 @@ void PrelnGroupNormActFusePass::ApplyImpl(ir::Graph *graph) const {
   AddStatis(found_subgraph_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(preln_elementwise_groupnorm_act_pass,
               paddle::framework::ir::PrelnGroupNormActFusePass);
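
The +namespace paddle::framework::ir { line in the middle hunk is one of the commit's few additions. The old code closed only the innermost patterns namespace and kept going inside the still-open ir block; with the merged C++17 form, the whole paddle::framework::ir::patterns definition must be closed and the enclosing namespace reopened explicitly. A condensed sketch of the resulting file layout (bodies elided):

namespace paddle::framework::ir::patterns {
struct PrelnGroupNormAct : public PatternBase { /* pattern members, elided */ };
}  // namespace paddle::framework::ir::patterns

namespace paddle::framework::ir {
// The pass implementation names the pattern as patterns::PrelnGroupNormAct.
void PrelnGroupNormActFusePass::ApplyImpl(ir::Graph *graph) const { /* elided */ }
}  // namespace paddle::framework::ir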
12 changes: 4 additions & 8 deletions paddle/fluid/framework/ir/trt_qk_multihead_matmul_fuse_pass.cc
@@ -22,10 +22,7 @@
 #endif
 #include "paddle/phi/kernels/funcs/blas/blas.h"
 
-namespace paddle {
-namespace framework {
-namespace ir {
-namespace patterns {
+namespace paddle::framework::ir::patterns {
 
 //   input_qk    input_v
 //     |q    |k     v
@@ -249,7 +246,8 @@ PDNode* TrtQKMultiHeadMatmulPattern::operator()() {
   return reshape2_qkv_out_var;
 }
 
-}  // namespace patterns
+}  // namespace paddle::framework::ir::patterns
+namespace paddle::framework::ir {
 
 int TrtQkMultiHeadMatmulFusePass::BuildQkFusion(Graph* graph,
                                                 const std::string& name_scope,
@@ -575,9 +573,7 @@ void TrtQkMultiHeadMatmulFusePass::ApplyImpl(Graph* graph) const {
   AddStatis(fusion_count);
 }
 
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
+}  // namespace paddle::framework::ir
 
 REGISTER_PASS(trt_qk_multihead_matmul_fuse_pass,
               paddle::framework::ir::TrtQkMultiHeadMatmulFusePass);