Skip to content

Commit

Permalink
part 10
Browse files Browse the repository at this point in the history
  • Loading branch information
walkalone20 committed May 30, 2024
1 parent a338cea commit 1abf589
Show file tree
Hide file tree
Showing 50 changed files with 119 additions and 265 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,7 @@
#include "paddle/fluid/distributed/collective/process_group_custom.h"
#endif

namespace phi {
namespace detail {
namespace phi::detail {

// FIXME(paddle-dev): Since the singleton of ProcessGroup in fluid is used in
// SyncBN, the fluid symbol will be dependent on external hardware access.
Expand Down Expand Up @@ -63,5 +62,4 @@ ccl::CCLComm GetCCLComm(const Place& place, int global_gid) {
}
}

} // namespace detail
} // namespace phi
} // namespace phi::detail
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/fleet_executor/sink_interceptor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,7 @@

#include "paddle/fluid/distributed/fleet_executor/task_node.h"

namespace paddle {
namespace distributed {
namespace paddle::distributed {

SinkInterceptor::SinkInterceptor(int64_t interceptor_id, TaskNode* node)
: Interceptor(interceptor_id, node),
Expand Down Expand Up @@ -64,5 +63,4 @@ void SinkInterceptor::Run(const InterceptorMessage& msg) {
}

REGISTER_INTERCEPTOR(Sink, SinkInterceptor);
} // namespace distributed
} // namespace paddle
} // namespace paddle::distributed
6 changes: 2 additions & 4 deletions paddle/fluid/distributed/ps/service/ps_client.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,7 @@
#include "paddle/fluid/framework/fleet/gloo_wrapper.h"
#endif

namespace paddle {
namespace distributed {
namespace paddle::distributed {
REGISTER_PSCORE_CLASS(PSClient, BrpcPsClient);
REGISTER_PSCORE_CLASS(PSClient, PsLocalClient);
REGISTER_PSCORE_CLASS(PSClient, GraphBrpcClient);
Expand Down Expand Up @@ -109,5 +108,4 @@ PSClient *PSClientFactory::Create(const PSParameter &ps_config) {
VLOG(3) << "Create PSClient[" << service_param.client_class() << "] success";
return client;
}
} // namespace distributed
} // namespace paddle
} // namespace paddle::distributed
6 changes: 2 additions & 4 deletions paddle/fluid/eager/api/utils/hook_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,7 @@
#include "paddle/fluid/eager/utils.h"
#include "paddle/phi/core/dense_tensor.h"

namespace egr {
namespace egr_utils_api {
namespace egr::egr_utils_api {

int64_t RegisterGradientHookForTensor(
const paddle::Tensor& tensor,
Expand Down Expand Up @@ -96,5 +95,4 @@ void RetainGradForTensor(const paddle::Tensor& tensor) {
// Registers a callback that the controller invokes once the backward pass
// has fully completed. Forwards the hook to the global Controller singleton.
void RegisterBackwardFinalHook(const std::function<void()>& hook) {
  auto& controller = Controller::Instance();
  controller.RegisterBackwardFinalHook(hook);
}
} // namespace egr_utils_api
} // namespace egr
} // namespace egr::egr_utils_api
6 changes: 2 additions & 4 deletions paddle/fluid/framework/data_layout_transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,7 @@
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace framework {
namespace paddle::framework {

std::vector<int> GetAxis(const DataLayout& from, const DataLayout& to) {
PADDLE_ENFORCE_NE(
Expand Down Expand Up @@ -102,5 +101,4 @@ void TransDataLayout(DataLayout from_layout,
out->set_layout(to_layout);
}

} // namespace framework
} // namespace paddle
} // namespace paddle::framework
16 changes: 5 additions & 11 deletions paddle/fluid/framework/details/variable_visitor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,17 +19,13 @@

namespace phi {
class DenseTensor;
} // namespace phi
} // namespace phi

namespace paddle {
namespace framework {
namespace paddle::framework {
class Variable;
} // namespace framework
} // namespace paddle
} // namespace paddle::framework

namespace paddle {
namespace framework {
namespace details {
namespace paddle::framework::details {
template <typename Func>
static void VisitVariable(Variable* var, Func* func) {
if (var->IsType<phi::DenseTensor>()) {
Expand Down Expand Up @@ -181,6 +177,4 @@ void VariableVisitor::EnforceShapeAndDTypeEQ(const Variable& var1,
VisitVariable(var2, &visitor);
}

} // namespace details
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::details
8 changes: 3 additions & 5 deletions paddle/fluid/framework/device_worker.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,9 @@ limitations under the License. */
#include "paddle/fluid/framework/convert_utils.h"
namespace phi {
class DenseTensor;
} // namespace phi
} // namespace phi

namespace paddle {
namespace framework {
namespace paddle::framework {

class Scope;

Expand Down Expand Up @@ -496,5 +495,4 @@ void DeviceWorker::DumpField(const Scope& scope,
writer_.Flush();
}

} // namespace framework
} // namespace paddle
} // namespace paddle::framework
6 changes: 2 additions & 4 deletions paddle/fluid/framework/heter_section_worker.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,7 @@ limitations under the License. */
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/lodtensor_printer.h"

namespace paddle {
namespace framework {
namespace paddle::framework {

void SetMicroId(paddle::framework::Scope* scope,
platform::DeviceContext* dev_ctx,
Expand Down Expand Up @@ -554,6 +553,5 @@ void HeterSectionWorker::TrainFilesWithProfiler() {
}
}

} // namespace framework
} // namespace paddle
} // namespace paddle::framework
#endif
12 changes: 4 additions & 8 deletions paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,10 @@

#include "paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

namespace fc_gru_test {
} // namespace paddle::framework::ir
namespace paddle::framework::ir::fc_gru_test {
TEST(FcGruFusePass, basic) {
std::unique_ptr<ir::Graph> graph = PrepareGraph();
auto pass = PassRegistry::Instance().Get("fc_gru_fuse_pass");
Expand Down Expand Up @@ -50,9 +49,6 @@ TEST(FcGruFusePass, basic) {
"expectations after fuse"));
}

} // namespace fc_gru_test
} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir::fc_gru_test

USE_PASS(fc_gru_fuse_pass);
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,7 @@
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

// op -> variables which can be deleted after op runs
using OpToVarNameSetMap = std::unordered_map<details::ComputationOpHandle *,
Expand Down Expand Up @@ -303,9 +301,7 @@ void EagerDeletionPass::ApplyImpl(ir::Graph *graph) const {
while_op_eager_deletion_pass->Apply(graph);
}

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(eager_deletion_pass, paddle::framework::ir::EagerDeletionPass)
.RequirePassAttr(paddle::framework::ir::kMemOptVarInfoMapList)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,7 @@
#include "paddle/fluid/operators/controlflow/op_variant.h"
#include "paddle/fluid/operators/controlflow/while_op_helper.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {
using OpVariant = operators::OpVariant;

class WhileOpEagerDeletionPass : public ir::Pass {
Expand Down Expand Up @@ -105,9 +103,7 @@ class WhileOpEagerDeletionPass : public ir::Pass {
}
};

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(while_op_eager_deletion_pass,
paddle::framework::ir::WhileOpEagerDeletionPass);
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,7 @@ limitations under the License. */
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/framework/op_version_registry.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

class Graph;

Expand Down Expand Up @@ -101,9 +99,7 @@ void DepthwiseConvMKLDNNPass::ApplyImpl(ir::Graph* graph) const {
AddStatis(found_depthwise_conv_onednn_count);
}

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(depthwise_conv_onednn_pass,
paddle::framework::ir::DepthwiseConvMKLDNNPass);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,7 @@
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/utils/string/pretty_log.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

using string::PrettyLogDetail;

Expand Down Expand Up @@ -284,9 +282,7 @@ MatmulActivationMkldnnFusePass::MatmulActivationMkldnnFusePass() {
.End();
}

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(matmul_activation_onednn_fuse_pass,
paddle::framework::ir::MatmulActivationMkldnnFusePass);
Expand Down
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,7 @@
GET_IR_NODE(reshape2_50_op); \
GET_IR_NODE(reshape2_50_out);

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {
class Node;
ReverseRollFusePass::ReverseRollFusePass() { // NOLINT
AddOpCompat(OpCompat("reshape2"))
Expand Down Expand Up @@ -189,9 +187,7 @@ void ReverseRollFusePass::ApplyImpl(ir::Graph* graph) const {
fuse_count += ApplyPattern(graph, false);
AddStatis(fuse_count);
}
} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(reverse_roll_fuse_pass,
paddle::framework::ir::ReverseRollFusePass);
Expand Down
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,7 @@
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/framework/op_version_registry.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

struct FuseExpr {};

Expand Down Expand Up @@ -340,9 +338,7 @@ void SeqConcatFcFusePass::ApplyImpl(ir::Graph* graph) const {
AddStatis(fuse_count);
}

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(seq_concat_fc_fuse_pass,
paddle::framework::ir::SeqConcatFcFusePass);
Expand Down
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/silu_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,7 @@
#include <string>
#include "paddle/fluid/framework/op_version_registry.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

void SiluFusePass::ApplyImpl(ir::Graph* graph) const {
// This pass is used for cutlass, because cutlass can fuse conv + bias + silu
Expand Down Expand Up @@ -79,8 +77,6 @@ void SiluFusePass::ApplyImpl(ir::Graph* graph) const {
gpd(graph, handler);
}

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(silu_fuse_pass, paddle::framework::ir::SiluFusePass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/transfer_layout_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,7 @@
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/dense_tensor.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {
namespace {

void InsertLayoutTransOp(ir::Graph *graph,
Expand Down Expand Up @@ -317,8 +315,6 @@ void TransferLayoutPass::ApplyImpl(ir::Graph *graph) const {
AddStatis(static_cast<int>(valid_ops.size()));
}

} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(transfer_layout_pass, paddle::framework::ir::TransferLayoutPass);
8 changes: 2 additions & 6 deletions paddle/fluid/framework/ir/trt_remove_amp_strategy_op_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,7 @@
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/phi/common/data_type.h"

namespace paddle {
namespace framework {
namespace ir {
namespace paddle::framework::ir {

namespace {
template <typename InType, typename OutType>
Expand Down Expand Up @@ -150,9 +148,7 @@ void TrtRemoveAMPStrategyOpPass::ApplyImpl(Graph *graph) const {
}
}
}
} // namespace ir
} // namespace framework
} // namespace paddle
} // namespace paddle::framework::ir

REGISTER_PASS(trt_remove_amp_strategy_op_pass,
paddle::framework::ir::TrtRemoveAMPStrategyOpPass);
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,7 @@
#include "paddle/fluid/framework/new_executor/instruction/builtin_combine_instruction.h"
#include "paddle/fluid/framework/new_executor/new_executor_defs.h"

namespace paddle {
namespace framework {
namespace paddle::framework {

BuiltinCombineInstruction::BuiltinCombineInstruction(
size_t id,
Expand All @@ -33,5 +32,4 @@ BuiltinCombineInstruction::BuiltinCombineInstruction(

void BuiltinCombineInstruction::Run() {}

} // namespace framework
} // namespace paddle
} // namespace paddle::framework
Loading

0 comments on commit 1abf589

Please sign in to comment.