From 15105838177fe61c1cb9a86c36370c7dddd250ee Mon Sep 17 00:00:00 2001 From: 0x45f Date: Wed, 22 Jun 2022 06:28:43 +0000 Subject: [PATCH 1/6] Split layer_utils.cc and polish interface BaseFunction --- paddle/fluid/jit/CMakeLists.txt | 13 ++- paddle/fluid/jit/base_function.cc | 139 ---------------------------- paddle/fluid/jit/base_function.h | 67 +------------- paddle/fluid/jit/exector_function.h | 36 ++++--- paddle/fluid/jit/function_schema.cc | 85 +++++++++++++++++ paddle/fluid/jit/function_schema.h | 83 +++++++++++++++++ paddle/fluid/jit/layer.cc | 22 ++--- paddle/fluid/jit/layer.h | 29 +++--- paddle/fluid/jit/layer_utils.cc | 92 ++++++++++++++++++ paddle/fluid/jit/layer_utils.h | 49 ++++++++++ paddle/fluid/jit/pe_function.h | 44 ++++++--- paddle/fluid/jit/serializer.cc | 30 +++--- paddle/fluid/jit/serializer.h | 8 +- 13 files changed, 419 insertions(+), 278 deletions(-) delete mode 100644 paddle/fluid/jit/base_function.cc create mode 100644 paddle/fluid/jit/function_schema.cc create mode 100644 paddle/fluid/jit/function_schema.h create mode 100644 paddle/fluid/jit/layer_utils.cc create mode 100644 paddle/fluid/jit/layer_utils.h diff --git a/paddle/fluid/jit/CMakeLists.txt b/paddle/fluid/jit/CMakeLists.txt index dabaabff8cbea..4feebf29f9a0b 100644 --- a/paddle/fluid/jit/CMakeLists.txt +++ b/paddle/fluid/jit/CMakeLists.txt @@ -3,15 +3,17 @@ cc_library( SRCS serializer.cc DEPS lod_tensor device_context) +cc_library( + jit_layer_utils + SRCS layer_utils.cc + DEPS scope proto_desc) + cc_library( jit_layer SRCS layer.cc DEPS executor parallel_executor executor_cache) -cc_library( - jit_base_function - SRCS base_function.cc - DEPS scope proto_desc) +cc_library(jit_function_schema SRCS function_schema.cc) if(WITH_TESTING AND NOT WIN32) add_custom_target( @@ -32,7 +34,8 @@ if(WITH_TESTING AND NOT WIN32) scale_op jit_serializer jit_layer - jit_base_function) + jit_layer_utils + jit_function_schema) cc_test( layer_test SRCS layer_test.cc diff --git 
a/paddle/fluid/jit/base_function.cc b/paddle/fluid/jit/base_function.cc deleted file mode 100644 index 93521173d73b6..0000000000000 --- a/paddle/fluid/jit/base_function.cc +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/jit/base_function.h" - -namespace paddle { -namespace jit { - -Argument::Argument(const std::string &name, bool is_out) - : name_(name), is_output_(is_out) {} - -const std::string &Argument::Name() const { return name_; } - -std::vector FunctionSchema::GetInputArgNames() { - std::vector input_arg_names; - for (auto &arg : input_args) { - input_arg_names.emplace_back(arg.Name()); - } - return input_arg_names; -} - -std::vector FunctionSchema::GetOutputArgNames() { - std::vector output_arg_names; - for (auto &arg : output_args) { - output_arg_names.emplace_back(arg.Name()); - } - return output_arg_names; -} - -void FunctionSchema::AddInputArg(std::string name) { - input_args.emplace_back(name, false); -} - -void FunctionSchema::AddOutputArg(std::string name) { - output_args.emplace_back(name, true); -} - -BaseFunction::BaseFunction(const framework::ProgramDesc &program_desc, - const std::vector ¶m_names, - const VariableNameMap ¶ms_dict, - const phi::Place &place) - : program_desc_(program_desc), place_(place) { - // Parse FunctionSchema - for (auto &in_name : program_desc_.GetFeedTargetNames()) { - 
schema_.AddInputArg(in_name); - } - for (auto &out_name : program_desc_.GetFetchTargetNames()) { - schema_.AddOutputArg(out_name); - } - // share params into scope - ShareParamsIntoScope(param_names, params_dict); - VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); - // remove feed fetch op - RemoveFeedFetch(); -} - -void BaseFunction::FetchOutput(std::vector *outs) { - for (auto &out_name : schema_.GetOutputArgNames()) { - VLOG(3) << "fetch out: " << out_name; - auto *var = scope_.FindVar(out_name); - VLOG(3) << "after scope_.FindVar(out_name);"; - auto &src_tensor = var->Get(); - VLOG(3) << "var->Get();"; - Variable v; - auto *p = v.GetMutable(); - *p = src_tensor; - outs->emplace_back(v); - } -} - -void BaseFunction::ShareInputsIntoScope(const std::vector &vars) { - VLOG(3) << "vars size: " << vars.size(); - std::vector ordered_input_names = schema_.GetInputArgNames(); - PADDLE_ENFORCE_EQ( - vars.size(), - ordered_input_names.size(), - platform::errors::InvalidArgument( - "vars.size() should be equal to ordered_input_names.size().")); - - for (size_t i = 0; i < vars.size(); i++) { - VLOG(3) << "share into scope: " << ordered_input_names[i]; - auto &dense_tensor = vars[i].Get(); - auto *var = scope_.Var(ordered_input_names[i]); - auto *dst_tensor = var->GetMutable(); - *dst_tensor = dense_tensor; - } -} - -void BaseFunction::ShareParamsIntoScope( - const std::vector ¶m_names, - const VariableNameMap ¶ms_dict) { - VLOG(3) << "param_names size: " << param_names.size(); - for (size_t i = 0; i < param_names.size(); ++i) { - std::string name = param_names[i]; - Variable val = params_dict.find(name)->second; - auto &dense_tensor = val.Get(); - VLOG(3) << "share into scope: " << name; - auto *var = scope_.Var(name); - auto *dst_tensor = var->GetMutable(); - *dst_tensor = dense_tensor; - } -} - -void BaseFunction::RemoveFeedFetch() { - for (size_t i = 0; i < program_desc_.Size(); ++i) { - auto *block = program_desc_.MutableBlock(i); - const auto &all_ops = 
block->AllOps(); - size_t op_size = all_ops.size(); - VLOG(3) << "op_size: " << op_size; - for (int i = op_size - 1; i >= 0; i--) { - auto op = all_ops[i]; - if (op->Type() == "feed") { - VLOG(3) << "remove op type: " << op->Type() << ", index: " << i - << ", var name: " << op->Input("X")[0]; - block->RemoveVar(op->Input("X")[0]); - block->RemoveOp(i, i + 1); - } else if (op->Type() == "fetch") { - VLOG(3) << "remove op type: " << op->Type() << ", index: " << i - << ", var name: " << op->Output("Out")[0]; - block->RemoveVar(op->Output("Out")[0]); - block->RemoveOp(i, i + 1); - } - } - } -} - -} // namespace jit -} // namespace paddle diff --git a/paddle/fluid/jit/base_function.h b/paddle/fluid/jit/base_function.h index 3f23ebcd97a82..00abd2dfd256a 100644 --- a/paddle/fluid/jit/base_function.h +++ b/paddle/fluid/jit/base_function.h @@ -17,77 +17,20 @@ #include #include -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/phi/core/dense_tensor.h" -#include "paddle/phi/core/enforce.h" +#include "paddle/fluid/framework/variable.h" + +#include "paddle/phi/common/place.h" namespace paddle { namespace jit { using Variable = paddle::framework::Variable; -using VariableNameMap = std::map; -using DenseTensor = phi::DenseTensor; - -class Argument { - public: - explicit Argument(const std::string &name, bool is_out = false); - - const std::string &Name() const; - - private: - std::string name_; - // paddle::optional default_val_; - bool is_output_; -}; - -class FunctionSchema { - public: - FunctionSchema() = default; - - std::vector GetInputArgNames(); - - std::vector GetOutputArgNames(); - - void AddInputArg(std::string name); - - void AddOutputArg(std::string name); - - private: - // input_args and output_args are ordered - std::vector input_args; - std::vector output_args; -}; - -// TODO(dev): make it as abstract class class BaseFunction { public: - BaseFunction(const framework::ProgramDesc &program_desc, - const 
std::vector ¶m_names, - const VariableNameMap ¶ms_dict, - const phi::Place &place); - - virtual ~BaseFunction() {} - virtual std::vector operator()( const std::vector &inputs) = 0; - - protected: - void FetchOutput(std::vector *outs); - - void ShareInputsIntoScope(const std::vector &vars); - - void ShareParamsIntoScope(const std::vector ¶m_names, - const VariableNameMap ¶ms_dict); - - void RemoveFeedFetch(); - - protected: - framework::ProgramDesc program_desc_; - FunctionSchema schema_; - // global_scope place params - framework::Scope scope_; - phi::Place place_; + virtual ~BaseFunction() {} + // virtual void SetPalce(const phi::Place &place); }; } // namespace jit diff --git a/paddle/fluid/jit/exector_function.h b/paddle/fluid/jit/exector_function.h index 29f8e6cdab35b..7d26d8d6f5de2 100644 --- a/paddle/fluid/jit/exector_function.h +++ b/paddle/fluid/jit/exector_function.h @@ -14,40 +14,52 @@ #pragma once +#include +#include +#include + #include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/layer_utils.h" + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/variable.h" namespace paddle { namespace jit { class ExectorFunction : public BaseFunction { public: - ExectorFunction(const framework::ProgramDesc &program_desc, - const std::vector param_names, + ExectorFunction(const std::shared_ptr &info, const VariableNameMap ¶ms_dict, const phi::Place &place) - : BaseFunction(program_desc, param_names, params_dict, place), - inner_exe_(place_) {} + : info_(info), place_(place), inner_exe_(place_) { + ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_); + VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); + } - ~ExectorFunction() {} + ~ExectorFunction() noexcept {} std::vector operator()(const std::vector &inputs) { - // share input into scope - 
ShareInputsIntoScope(inputs); - // run program - inner_exe_.Run(program_desc_, + ShareInputsIntoScope(info_->GetInputArgNames(), inputs, &scope_); + inner_exe_.Run(info_->GetProgramDesc(), &scope_, /*blockID=*/0, false, true, - schema_.GetOutputArgNames()); + info_->GetOutputArgNames()); VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); - // fetch outputs std::vector res; - FetchOutput(&res); + FetchVarsByNames(info_->GetOutputArgNames(), scope_, &res); return res; } private: + std::shared_ptr info_; + framework::Scope scope_; + phi::Place place_; framework::Executor inner_exe_; }; diff --git a/paddle/fluid/jit/function_schema.cc b/paddle/fluid/jit/function_schema.cc new file mode 100644 index 0000000000000..5f938ce942a0f --- /dev/null +++ b/paddle/fluid/jit/function_schema.cc @@ -0,0 +1,85 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/jit/function_schema.h" + +namespace paddle { +namespace jit { + +Argument::Argument(const std::string& name, bool is_out) + : name_(name), is_output_(is_out) {} + +const std::string& Argument::Name() const { return name_; } + +const std::vector FunctionSchema::GetInputArgNames() const { + std::vector input_arg_names; + for (auto& arg : input_args) { + input_arg_names.emplace_back(arg.Name()); + } + return input_arg_names; +} + +const std::vector FunctionSchema::GetOutputArgNames() const { + std::vector output_arg_names; + for (auto& arg : output_args) { + output_arg_names.emplace_back(arg.Name()); + } + return output_arg_names; +} + +void FunctionSchema::AddInputArg(const std::string& name) { + input_args.emplace_back(name, false); +} + +void FunctionSchema::AddOutputArg(const std::string& name) { + output_args.emplace_back(name, true); +} + +FunctionInfo::FunctionInfo(const std::string& func_name, + const std::vector& param_names, + const framework::ProgramDesc& program_desc) + : func_name_(func_name), + param_names_(param_names), + program_desc_(program_desc) { + // Parse FunctionSchema + for (auto& in_name : program_desc_.GetFeedTargetNames()) { + schema_.AddInputArg(in_name); + } + for (auto& out_name : program_desc_.GetFetchTargetNames()) { + schema_.AddOutputArg(out_name); + } + // remove feed fetch op + RemoveFeedFetch(&program_desc_); +} + +const std::string& FunctionInfo::GetFunctionName() const { return func_name_; } + +const framework::ProgramDesc& FunctionInfo::GetProgramDesc() const { + return program_desc_; +} + +const std::vector& FunctionInfo::GetParamNames() const { + return param_names_; +} + +const std::vector FunctionInfo::GetInputArgNames() const { + return schema_.GetInputArgNames(); +} + +const std::vector FunctionInfo::GetOutputArgNames() const { + return schema_.GetOutputArgNames(); +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/function_schema.h b/paddle/fluid/jit/function_schema.h 
new file mode 100644 index 0000000000000..83b156f07b5f0 --- /dev/null +++ b/paddle/fluid/jit/function_schema.h @@ -0,0 +1,83 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/jit/layer_utils.h" + +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/phi/core/enforce.h" + +namespace paddle { +namespace jit { + +class Argument { + public: + explicit Argument(const std::string& name, bool is_out = false); + + const std::string& Name() const; + + private: + std::string name_; + // paddle::optional default_val_; + bool is_output_; +}; + +class FunctionSchema { + public: + FunctionSchema() = default; + + const std::vector GetInputArgNames() const; + + const std::vector GetOutputArgNames() const; + + void AddInputArg(const std::string& name); + + void AddOutputArg(const std::string& name); + + private: + // input_args and output_args are ordered + std::vector input_args; + std::vector output_args; +}; + +class FunctionInfo { + public: + FunctionInfo(const std::string& func_name, + const std::vector& param_names, + const framework::ProgramDesc& program_desc); + + const std::string& GetFunctionName() const; + + const framework::ProgramDesc& GetProgramDesc() const; + + const std::vector& GetParamNames() const; + + const std::vector GetInputArgNames() const; + + const std::vector GetOutputArgNames() const; + + 
private: + std::string func_name_; + std::vector param_names_; + framework::ProgramDesc program_desc_; + FunctionSchema schema_; +}; + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc index 1b4345f55c46c..5e281d9c10be5 100644 --- a/paddle/fluid/jit/layer.cc +++ b/paddle/fluid/jit/layer.cc @@ -19,25 +19,23 @@ namespace jit { // TODO(dev): Make vector, num_slot as in argument // Layer(const std::shared_ptr& type) : obj_(type, /*num_slot*/ 0U) // {} -Layer::Layer( - const std::vector& func_names, - const std::vector& program_descs, - const std::vector>& param_names_for_each_program, - const VariableNameMap& params_dict, - const phi::Place& place) { - VLOG(3) << "program size: " << program_descs.size(); +Layer::Layer(const std::vector>& infos, + const VariableNameMap& params_dict, + const phi::Place& place) + : params_dict_(params_dict) { + VLOG(3) << "infos size: " << infos.size(); // Layer manage the life time of all parameter. - for (size_t i = 0; i < func_names.size(); ++i) { + for (size_t i = 0; i < infos.size(); ++i) { // TODO(dev): choose exector or pe by flag - function_dict[func_names[i]] = std::make_shared( - program_descs[i], param_names_for_each_program[i], params_dict, place); + function_dict_[infos[i]->GetFunctionName()] = + std::make_shared(infos[i], params_dict, place); } } std::shared_ptr Layer::GetFunction( const std::string& name) const { - VLOG(3) << "funcs_ size: " << function_dict.size(); - return function_dict.at(name); + VLOG(3) << "funcs_ size: " << function_dict_.size(); + return function_dict_.at(name); } std::vector Layer::forward(const std::vector& inputs) { diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h index aac7de851525f..3752cd6a58c2a 100644 --- a/paddle/fluid/jit/layer.h +++ b/paddle/fluid/jit/layer.h @@ -22,14 +22,19 @@ #include "paddle/fluid/jit/base_function.h" #include "paddle/fluid/jit/compilation_unit.h" #include "paddle/fluid/jit/exector_function.h" 
-#include "paddle/fluid/jit/object.h" +#include "paddle/fluid/jit/function_schema.h" #include "paddle/fluid/jit/pe_function.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/phi/common/place.h" + namespace paddle { namespace jit { using Variable = paddle::framework::Variable; using VariableNameMap = std::map; -using DenseTensor = phi::DenseTensor; class Layer { public: @@ -37,23 +42,23 @@ class Layer { // Layer(const std::shared_ptr& type) : obj_(type, /*num_slot*/ 0U) // {} // TODO(dev): consider make `func_name, program_desc, param_nams` as a class - Layer( - const std::vector& func_names, - const std::vector& program_descs, - const std::vector>& param_names_for_each_program, - const VariableNameMap& params_dict, - const phi::Place& place); + Layer(const std::vector>& infos, + const VariableNameMap& params_dict, + const phi::Place& place); std::shared_ptr GetFunction(const std::string& name) const; + Variable GetAttribute(const std::string& name) const; + std::vector forward(const std::vector& inputs); + void to(const phi::Place& place); + private: // internal::Object obj_; - // std::vector all_program_desc_; - // std::vector> param_name_for_each_program_; - // std::vector all_param_; - std::map> function_dict; + VariableNameMap params_dict_; + VariableNameMap attrs_dict_; + std::map> function_dict_; }; } // namespace jit diff --git a/paddle/fluid/jit/layer_utils.cc b/paddle/fluid/jit/layer_utils.cc new file mode 100644 index 0000000000000..2601854d1bcc3 --- /dev/null +++ b/paddle/fluid/jit/layer_utils.cc @@ -0,0 +1,92 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/jit/layer_utils.h" + +namespace paddle { +namespace jit { + +void FetchVarsByNames(const std::vector &names, + const framework::Scope &scope, + std::vector *outs) { + for (auto &out_name : names) { + VLOG(3) << "fetch out: " << out_name; + auto *var = scope.FindVar(out_name); + auto &src_tensor = var->Get(); + Variable v; + auto *p = v.GetMutable(); + *p = src_tensor; + outs->emplace_back(v); + } +} + +void ShareInputsIntoScope(const std::vector &ordered_input_names, + const std::vector &vars, + framework::Scope *scope) { + VLOG(3) << "vars size: " << vars.size(); + PADDLE_ENFORCE_EQ( + vars.size(), + ordered_input_names.size(), + platform::errors::InvalidArgument( + "vars.size() should be equal to ordered_input_names.size().")); + + for (size_t i = 0; i < vars.size(); i++) { + VLOG(3) << "share into scope: " << ordered_input_names[i]; + auto &dense_tensor = vars[i].Get(); + auto *var = scope->Var(ordered_input_names[i]); + auto *dst_tensor = var->GetMutable(); + *dst_tensor = dense_tensor; + } +} + +void ShareParamsIntoScope(const std::vector ¶m_names, + const VariableNameMap ¶ms_dict, + framework::Scope *scope) { + VLOG(3) << "param_names size: " << param_names.size(); + for (size_t i = 0; i < param_names.size(); ++i) { + std::string name = param_names[i]; + auto ¶m = params_dict.find(name)->second; + auto &dense_tensor = param.Get(); + VLOG(3) << "share into scope: " << name; + auto *var = scope->Var(name); + auto *dst_tensor = var->GetMutable(); + *dst_tensor = dense_tensor; + } +} + +void 
RemoveFeedFetch(framework::ProgramDesc *program_desc) { + for (size_t i = 0; i < program_desc->Size(); ++i) { + auto *block = program_desc->MutableBlock(i); + const auto &all_ops = block->AllOps(); + size_t op_size = all_ops.size(); + VLOG(3) << "op_size: " << op_size; + for (int i = op_size - 1; i >= 0; i--) { + auto op = all_ops[i]; + if (op->Type() == "feed") { + VLOG(3) << "remove op type: " << op->Type() << ", index: " << i + << ", var name: " << op->Input("X")[0]; + block->RemoveVar(op->Input("X")[0]); + block->RemoveOp(i, i + 1); + } else if (op->Type() == "fetch") { + VLOG(3) << "remove op type: " << op->Type() << ", index: " << i + << ", var name: " << op->Output("Out")[0]; + block->RemoveVar(op->Output("Out")[0]); + block->RemoveOp(i, i + 1); + } + } + } +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/layer_utils.h b/paddle/fluid/jit/layer_utils.h new file mode 100644 index 0000000000000..fd7713cc6a147 --- /dev/null +++ b/paddle/fluid/jit/layer_utils.h @@ -0,0 +1,49 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include + +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/phi/core/dense_tensor.h" +#include "paddle/phi/core/enforce.h" + +namespace paddle { +namespace jit { + +using Variable = paddle::framework::Variable; +using VariableNameMap = std::map; +using DenseTensor = phi::DenseTensor; + +void FetchVarsByNames(const std::vector &names, + const framework::Scope &scope, + std::vector *outs); + +void ShareInputsIntoScope(const std::vector &ordered_input_names, + const std::vector &vars, + framework::Scope *scope); + +void ShareParamsIntoScope(const std::vector ¶m_names, + const VariableNameMap ¶ms_dict, + framework::Scope *scope); + +void RemoveFeedFetch(framework::ProgramDesc *program_desc); + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/pe_function.h b/paddle/fluid/jit/pe_function.h index f4378da556658..7c8e493e6ff89 100644 --- a/paddle/fluid/jit/pe_function.h +++ b/paddle/fluid/jit/pe_function.h @@ -15,42 +15,55 @@ #pragma once #include +#include #include +#include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/layer_utils.h" + #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/executor_cache.h" -#include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/variable.h" namespace paddle { namespace jit { class PEFunction : public BaseFunction { public: - PEFunction(const framework::ProgramDesc &program_desc, - const std::vector param_names, + PEFunction(const std::shared_ptr &info, const VariableNameMap ¶ms_dict, const phi::Place &place) - : BaseFunction(program_desc, param_names, params_dict, place) {} + : info_(info), place_(place) { + 
ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_); + VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); + } - ~PEFunction() {} + ~PEFunction() noexcept {} std::vector operator()(const std::vector &inputs) { // bool is_test = true; std::string prog_string; std::hash string_hash; - program_desc_.Proto()->SerializePartialToString(&prog_string); + auto &program_desc = info_->GetProgramDesc(); + const_cast(&program_desc) + ->Proto() + ->SerializePartialToString(&prog_string); + // program_desc.Proto()->SerializePartialToString(&prog_string); int64_t program_id = static_cast(string_hash(prog_string)); - const framework::BlockDesc &global_block = program_desc_.Block(0); + const framework::BlockDesc &global_block = program_desc.Block(0); int64_t start_op_index = 0; int64_t end_op_index = static_cast(global_block.OpSize()); - ShareInputsIntoScope(inputs); - std::vector input_var_names = schema_.GetInputArgNames(); - std::vector output_var_names = schema_.GetOutputArgNames(); + ShareInputsIntoScope(info_->GetInputArgNames(), inputs, &scope_); + std::vector input_var_names = info_->GetInputArgNames(); + std::vector output_var_names = info_->GetOutputArgNames(); std::vector dout_var_names; if (end_op_index > start_op_index) { // TODO(dev): support other devices - auto cache_info = framework::GetExecutorInfoFromCache(program_desc_, + auto cache_info = framework::GetExecutorInfoFromCache(program_desc, place_, start_op_index, end_op_index, @@ -70,7 +83,7 @@ class PEFunction : public BaseFunction { dout_var_names.begin(), dout_var_names.end()); framework::details::ParseSafeEagerDeletionSkipVars( - program_desc_, + program_desc, end_op_index, output_var_names, &skip_eager_delete_vars); @@ -79,9 +92,14 @@ class PEFunction : public BaseFunction { } VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); std::vector res; - FetchOutput(&res); + FetchVarsByNames(info_->GetOutputArgNames(), scope_, &res); return res; } + + private: + std::shared_ptr info_; + 
framework::Scope scope_; + phi::Place place_; }; } // namespace jit diff --git a/paddle/fluid/jit/serializer.cc b/paddle/fluid/jit/serializer.cc index 587774c278551..45ca42044bd91 100644 --- a/paddle/fluid/jit/serializer.cc +++ b/paddle/fluid/jit/serializer.cc @@ -19,30 +19,26 @@ namespace jit { Layer Deserializer::operator()(const std::string& dir_path) { const auto& file_name_prefixs = GetPdmodelFileNamePrefix(dir_path); - std::vector func_names; - std::vector program_descs; - std::vector> param_names_for_each_program; // set is ordered std::set param_names_set; + std::vector> infos; VariableNameMap params_dict; for (auto& it : file_name_prefixs) { - func_names.emplace_back(it.first); + auto& func_name = it.first; + auto program_desc = LoadProgram(dir_path + it.second + PDMODEL_SUFFIX); - auto program = LoadProgram(dir_path + it.second + PDMODEL_SUFFIX); - program_descs.emplace_back(program); - - // TODO(dev): load int/float params - std::vector persistable_var_names; - auto all_var_desc = program.Block(0).AllVars(); + // TODO(dev): load int/float attrs + std::vector persist_var_names; + auto all_var_desc = program_desc.Block(0).AllVars(); for (auto* desc_ptr : all_var_desc) { if (IsPersistable(desc_ptr)) { - persistable_var_names.emplace_back(desc_ptr->Name()); + persist_var_names.emplace_back(desc_ptr->Name()); } } - param_names_for_each_program.emplace_back(persistable_var_names); - param_names_set.insert(persistable_var_names.begin(), - persistable_var_names.end()); + param_names_set.insert(persist_var_names.begin(), persist_var_names.end()); + infos.emplace_back(std::make_shared( + func_name, persist_var_names, program_desc)); } auto default_place = imperative::GetCurrentTracer()->ExpectedPlace(); @@ -52,11 +48,7 @@ Layer Deserializer::operator()(const std::string& dir_path) { default_place, ¶ms_dict); - return Layer(func_names, - program_descs, - param_names_for_each_program, - params_dict, - default_place); + return Layer(infos, params_dict, 
default_place); } bool Deserializer::IsPersistable(framework::VarDesc* desc_ptr) { diff --git a/paddle/fluid/jit/serializer.h b/paddle/fluid/jit/serializer.h index 1511b6b50f5c5..2ff483a91bf7b 100644 --- a/paddle/fluid/jit/serializer.h +++ b/paddle/fluid/jit/serializer.h @@ -15,16 +15,16 @@ #pragma once #include - #include #include #include #include -#include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/imperative/tracer.h" +#include "paddle/fluid/jit/function_schema.h" #include "paddle/fluid/jit/layer.h" + +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/imperative/tracer.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/core/dense_tensor.h" From bbf99a769f92e90da1ae20e36553635dd919f755 Mon Sep 17 00:00:00 2001 From: 0x45f Date: Wed, 22 Jun 2022 09:09:04 +0000 Subject: [PATCH 2/6] Polish include --- paddle/fluid/jit/base_function.h | 4 ++-- paddle/fluid/jit/exector_function.h | 8 ++++---- paddle/fluid/jit/function_schema.h | 4 ++-- paddle/fluid/jit/layer.h | 12 +++++------- paddle/fluid/jit/layer_test.cc | 13 ++++--------- paddle/fluid/jit/pe_function.h | 8 ++++---- paddle/fluid/jit/serializer.h | 6 +++--- 7 files changed, 24 insertions(+), 31 deletions(-) diff --git a/paddle/fluid/jit/base_function.h b/paddle/fluid/jit/base_function.h index 00abd2dfd256a..ebe4314a5319e 100644 --- a/paddle/fluid/jit/base_function.h +++ b/paddle/fluid/jit/base_function.h @@ -17,10 +17,10 @@ #include #include -#include "paddle/fluid/framework/variable.h" - #include "paddle/phi/common/place.h" +#include "paddle/fluid/framework/variable.h" + namespace paddle { namespace jit { diff --git a/paddle/fluid/jit/exector_function.h b/paddle/fluid/jit/exector_function.h index 7d26d8d6f5de2..9451387d78888 100644 --- a/paddle/fluid/jit/exector_function.h +++ b/paddle/fluid/jit/exector_function.h @@ -18,15 +18,15 @@ #include #include -#include "paddle/fluid/jit/base_function.h" -#include 
"paddle/fluid/jit/function_schema.h" -#include "paddle/fluid/jit/layer_utils.h" - #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/layer_utils.h" + namespace paddle { namespace jit { diff --git a/paddle/fluid/jit/function_schema.h b/paddle/fluid/jit/function_schema.h index 83b156f07b5f0..82d2a983614a1 100644 --- a/paddle/fluid/jit/function_schema.h +++ b/paddle/fluid/jit/function_schema.h @@ -18,11 +18,11 @@ #include #include -#include "paddle/fluid/jit/layer_utils.h" - #include "paddle/fluid/framework/program_desc.h" #include "paddle/phi/core/enforce.h" +#include "paddle/fluid/jit/layer_utils.h" + namespace paddle { namespace jit { diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h index 3752cd6a58c2a..e2f629d56c850 100644 --- a/paddle/fluid/jit/layer.h +++ b/paddle/fluid/jit/layer.h @@ -18,19 +18,17 @@ #include #include -#include "paddle/fluid/jit/ast.h" -#include "paddle/fluid/jit/base_function.h" -#include "paddle/fluid/jit/compilation_unit.h" -#include "paddle/fluid/jit/exector_function.h" -#include "paddle/fluid/jit/function_schema.h" -#include "paddle/fluid/jit/pe_function.h" - #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/variable.h" #include "paddle/phi/common/place.h" +#include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/jit/exector_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/pe_function.h" + namespace paddle { namespace jit { using Variable = paddle::framework::Variable; diff --git a/paddle/fluid/jit/layer_test.cc b/paddle/fluid/jit/layer_test.cc index 6f8b7a58b8422..4088a57571ec3 100644 --- 
a/paddle/fluid/jit/layer_test.cc +++ b/paddle/fluid/jit/layer_test.cc @@ -12,26 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/jit/layer.h" - -#include -#include -#include #include -#include #include "gtest/gtest.h" + #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/imperative/tracer.h" -#include "paddle/fluid/jit/serializer.h" -#include "paddle/fluid/memory/allocation/allocator_facade.h" -#include "paddle/phi/api/include/tensor.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/fluid/jit/layer.h" +#include "paddle/fluid/jit/serializer.h" + USE_OP_ITSELF(elementwise_add); USE_OP_ITSELF(matmul_v2); USE_OP_ITSELF(relu); diff --git a/paddle/fluid/jit/pe_function.h b/paddle/fluid/jit/pe_function.h index 7c8e493e6ff89..eaac6d3525eca 100644 --- a/paddle/fluid/jit/pe_function.h +++ b/paddle/fluid/jit/pe_function.h @@ -18,16 +18,16 @@ #include #include -#include "paddle/fluid/jit/base_function.h" -#include "paddle/fluid/jit/function_schema.h" -#include "paddle/fluid/jit/layer_utils.h" - #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/executor_cache.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/layer_utils.h" + namespace paddle { namespace jit { diff --git a/paddle/fluid/jit/serializer.h b/paddle/fluid/jit/serializer.h index 2ff483a91bf7b..abf0ab3cb493a 100644 --- a/paddle/fluid/jit/serializer.h +++ b/paddle/fluid/jit/serializer.h @@ -20,14 +20,14 @@ #include #include -#include "paddle/fluid/jit/function_schema.h" -#include 
"paddle/fluid/jit/layer.h" - #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/imperative/tracer.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/core/dense_tensor.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/layer.h" + namespace paddle { namespace jit { static const char PDMODEL_SUFFIX[] = ".pdmodel"; From aabc5826f8a63e6b8c98c87449df2a9e4265dce6 Mon Sep 17 00:00:00 2001 From: 0x45f Date: Wed, 22 Jun 2022 09:25:47 +0000 Subject: [PATCH 3/6] Fix cmake for function_schema --- paddle/fluid/jit/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/jit/CMakeLists.txt b/paddle/fluid/jit/CMakeLists.txt index 4feebf29f9a0b..426655b97842f 100644 --- a/paddle/fluid/jit/CMakeLists.txt +++ b/paddle/fluid/jit/CMakeLists.txt @@ -13,7 +13,10 @@ cc_library( SRCS layer.cc DEPS executor parallel_executor executor_cache) -cc_library(jit_function_schema SRCS function_schema.cc) +cc_library( + jit_function_schema + SRCS function_schema.cc + DEPS proto_desc) if(WITH_TESTING AND NOT WIN32) add_custom_target( From b7c2f9d0406aaa9bf477f7dabc16da904d5335e3 Mon Sep 17 00:00:00 2001 From: 0x45f Date: Wed, 22 Jun 2022 09:41:51 +0000 Subject: [PATCH 4/6] Use unordered_map instead of map and rename VariableNameMap --- paddle/fluid/jit/exector_function.h | 2 +- paddle/fluid/jit/layer.cc | 2 +- paddle/fluid/jit/layer.h | 10 +++++----- paddle/fluid/jit/layer_utils.cc | 2 +- paddle/fluid/jit/layer_utils.h | 5 +++-- paddle/fluid/jit/pe_function.h | 2 +- paddle/fluid/jit/serializer.cc | 4 ++-- paddle/fluid/jit/serializer.h | 2 +- 8 files changed, 15 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/jit/exector_function.h b/paddle/fluid/jit/exector_function.h index 9451387d78888..6af1ec50239bf 100644 --- a/paddle/fluid/jit/exector_function.h +++ b/paddle/fluid/jit/exector_function.h @@ -33,7 +33,7 @@ namespace jit { class ExectorFunction : public BaseFunction { public:
ExectorFunction(const std::shared_ptr &info, - const VariableNameMap ¶ms_dict, + const Name2VariableMap ¶ms_dict, const phi::Place &place) : info_(info), place_(place), inner_exe_(place_) { ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_); diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc index 5e281d9c10be5..6df2a4cb95052 100644 --- a/paddle/fluid/jit/layer.cc +++ b/paddle/fluid/jit/layer.cc @@ -20,7 +20,7 @@ namespace jit { // Layer(const std::shared_ptr& type) : obj_(type, /*num_slot*/ 0U) // {} Layer::Layer(const std::vector>& infos, - const VariableNameMap& params_dict, + const Name2VariableMap& params_dict, const phi::Place& place) : params_dict_(params_dict) { VLOG(3) << "infos size: " << infos.size(); diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h index e2f629d56c850..89610c6630cb3 100644 --- a/paddle/fluid/jit/layer.h +++ b/paddle/fluid/jit/layer.h @@ -13,9 +13,9 @@ // limitations under the License. #pragma once -#include #include #include +#include #include #include "paddle/fluid/framework/executor.h" @@ -32,7 +32,7 @@ namespace paddle { namespace jit { using Variable = paddle::framework::Variable; -using VariableNameMap = std::map; +using Name2VariableMap = std::unordered_map; class Layer { public: @@ -41,7 +41,7 @@ class Layer { // {} // TODO(dev): consider make `func_name, program_desc, param_nams` as a class Layer(const std::vector>& infos, - const VariableNameMap& params_dict, + const Name2VariableMap& params_dict, const phi::Place& place); std::shared_ptr GetFunction(const std::string& name) const; @@ -54,8 +54,8 @@ class Layer { private: // internal::Object obj_; - VariableNameMap params_dict_; - VariableNameMap attrs_dict_; + Name2VariableMap params_dict_; + Name2VariableMap attrs_dict_; std::map> function_dict_; }; diff --git a/paddle/fluid/jit/layer_utils.cc b/paddle/fluid/jit/layer_utils.cc index 2601854d1bcc3..88ab88f8b8df5 100644 --- a/paddle/fluid/jit/layer_utils.cc +++ 
b/paddle/fluid/jit/layer_utils.cc @@ -51,7 +51,7 @@ void ShareInputsIntoScope(const std::vector &ordered_input_names, } void ShareParamsIntoScope(const std::vector ¶m_names, - const VariableNameMap ¶ms_dict, + const Name2VariableMap ¶ms_dict, framework::Scope *scope) { VLOG(3) << "param_names size: " << param_names.size(); for (size_t i = 0; i < param_names.size(); ++i) { diff --git a/paddle/fluid/jit/layer_utils.h b/paddle/fluid/jit/layer_utils.h index fd7713cc6a147..7e79c4a94db82 100644 --- a/paddle/fluid/jit/layer_utils.h +++ b/paddle/fluid/jit/layer_utils.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include "paddle/fluid/framework/program_desc.h" @@ -28,7 +29,7 @@ namespace paddle { namespace jit { using Variable = paddle::framework::Variable; -using VariableNameMap = std::map; +using Name2VariableMap = std::unordered_map; using DenseTensor = phi::DenseTensor; void FetchVarsByNames(const std::vector &names, @@ -40,7 +41,7 @@ void ShareInputsIntoScope(const std::vector &ordered_input_names, framework::Scope *scope); void ShareParamsIntoScope(const std::vector ¶m_names, - const VariableNameMap ¶ms_dict, + const Name2VariableMap ¶ms_dict, framework::Scope *scope); void RemoveFeedFetch(framework::ProgramDesc *program_desc); diff --git a/paddle/fluid/jit/pe_function.h b/paddle/fluid/jit/pe_function.h index eaac6d3525eca..bc28724438969 100644 --- a/paddle/fluid/jit/pe_function.h +++ b/paddle/fluid/jit/pe_function.h @@ -34,7 +34,7 @@ namespace jit { class PEFunction : public BaseFunction { public: PEFunction(const std::shared_ptr &info, - const VariableNameMap ¶ms_dict, + const Name2VariableMap ¶ms_dict, const phi::Place &place) : info_(info), place_(place) { ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_); diff --git a/paddle/fluid/jit/serializer.cc b/paddle/fluid/jit/serializer.cc index 45ca42044bd91..c83867dc1cbae 100644 --- a/paddle/fluid/jit/serializer.cc +++ b/paddle/fluid/jit/serializer.cc @@ -22,7 +22,7 @@ Layer 
Deserializer::operator()(const std::string& dir_path) { // set is ordered std::set param_names_set; std::vector> infos; - VariableNameMap params_dict; + Name2VariableMap params_dict; for (auto& it : file_name_prefixs) { auto& func_name = it.first; auto program_desc = LoadProgram(dir_path + it.second + PDMODEL_SUFFIX); @@ -92,7 +92,7 @@ Deserializer::GetPdmodelFileNamePrefix(const std::string& path) { void Deserializer::ReadTensorData(const std::string& file_name, const std::set& var_name, const phi::Place& place, - VariableNameMap* params_dict) const { + Name2VariableMap* params_dict) const { VLOG(3) << "ReadTensorData from: " << file_name; std::ifstream fin(file_name, std::ios::binary); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); diff --git a/paddle/fluid/jit/serializer.h b/paddle/fluid/jit/serializer.h index abf0ab3cb493a..985d086f83329 100644 --- a/paddle/fluid/jit/serializer.h +++ b/paddle/fluid/jit/serializer.h @@ -62,7 +62,7 @@ class Deserializer { void ReadTensorData(const std::string& file_name, const std::set& var_name, const phi::Place& place, - VariableNameMap* params_dict) const; + Name2VariableMap* params_dict) const; // void ReadExtraInfo(const std::string& file_name) const; // void ReadByteCode(const std::string& file_name) const; From 6938e83f2ccef6e90cddd8288c26b1ba915808e5 Mon Sep 17 00:00:00 2001 From: 0x45f Date: Wed, 22 Jun 2022 09:55:52 +0000 Subject: [PATCH 5/6] Polish Layer Constructor and remove useless include --- paddle/fluid/jit/layer.cc | 2 +- paddle/fluid/jit/serializer.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc index 6df2a4cb95052..b0d9525e1a43d 100644 --- a/paddle/fluid/jit/layer.cc +++ b/paddle/fluid/jit/layer.cc @@ -28,7 +28,7 @@ Layer::Layer(const std::vector>& infos, for (size_t i = 0; i < infos.size(); ++i) { // TODO(dev): choose exector or pe by flag function_dict_[infos[i]->GetFunctionName()] = - 
std::make_shared(infos[i], params_dict, place); + std::make_shared(infos[i], params_dict_, place); } } diff --git a/paddle/fluid/jit/serializer.h b/paddle/fluid/jit/serializer.h index 985d086f83329..1cd1ed3550c8f 100644 --- a/paddle/fluid/jit/serializer.h +++ b/paddle/fluid/jit/serializer.h @@ -15,7 +15,6 @@ #pragma once #include -#include #include #include #include From 066c63568b1bab526e183383019a815a5a636ed6 Mon Sep 17 00:00:00 2001 From: 0x45f Date: Wed, 22 Jun 2022 12:07:11 +0000 Subject: [PATCH 6/6] Refine compilation_unit --- paddle/fluid/jit/CMakeLists.txt | 12 ++++-- paddle/fluid/jit/compilation_unit.cc | 43 +++++++++++++++++++ paddle/fluid/jit/compilation_unit.h | 22 +++++++--- ...exector_function.h => executor_function.h} | 10 ++--- paddle/fluid/jit/layer.cc | 7 ++- paddle/fluid/jit/layer.h | 10 +---- 6 files changed, 79 insertions(+), 25 deletions(-) create mode 100644 paddle/fluid/jit/compilation_unit.cc rename paddle/fluid/jit/{exector_function.h => executor_function.h} (88%) diff --git a/paddle/fluid/jit/CMakeLists.txt b/paddle/fluid/jit/CMakeLists.txt index 426655b97842f..d4796026894a8 100644 --- a/paddle/fluid/jit/CMakeLists.txt +++ b/paddle/fluid/jit/CMakeLists.txt @@ -8,15 +8,20 @@ cc_library( SRCS layer_utils.cc DEPS scope proto_desc) +cc_library( + jit_compilation_unit + SRCS compilation_unit.cc + DEPS proto_desc executor parallel_executor executor_cache) + cc_library( jit_layer SRCS layer.cc - DEPS executor parallel_executor executor_cache) + DEPS jit_compilation_unit) cc_library( jit_function_schema SRCS function_schema.cc - DEPS proto_desc) + DEPS jit_layer_utils) if(WITH_TESTING AND NOT WIN32) add_custom_target( @@ -38,7 +43,8 @@ if(WITH_TESTING AND NOT WIN32) jit_serializer jit_layer jit_layer_utils - jit_function_schema) + jit_function_schema + jit_compilation_unit) cc_test( layer_test SRCS layer_test.cc diff --git a/paddle/fluid/jit/compilation_unit.cc b/paddle/fluid/jit/compilation_unit.cc new file mode 100644 index 
0000000000000..2049f8c1becf2 --- /dev/null +++ b/paddle/fluid/jit/compilation_unit.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/jit/compilation_unit.h" + +namespace paddle { +namespace jit { + +void CompilationUnit::AddExecutorFunction( + const std::string &func_name, + const std::shared_ptr &info, + const Name2VariableMap ¶ms_dict, + const phi::Place &place) { + function_dict_[func_name] = + std::make_shared(info, params_dict, place); +} + +void CompilationUnit::AddPEFunction(const std::string &func_name, + const std::shared_ptr &info, + const Name2VariableMap ¶ms_dict, + const phi::Place &place) { + function_dict_[func_name] = + std::make_shared(info, params_dict, place); +} + +std::shared_ptr CompilationUnit::GetFunction( + const std::string &name) const { + return function_dict_.at(name); +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/compilation_unit.h b/paddle/fluid/jit/compilation_unit.h index 815e9d3f4c090..4d56e9d787a91 100644 --- a/paddle/fluid/jit/compilation_unit.h +++ b/paddle/fluid/jit/compilation_unit.h @@ -14,23 +14,35 @@ #pragma once -#include #include #include -#include + +#include "paddle/fluid/jit/executor_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/pe_function.h" namespace paddle { namespace jit { -class BaseFunction; class CompilationUnit { public: 
CompilationUnit() = default; ~CompilationUnit() {} + void AddExecutorFunction(const std::string &func_name, + const std::shared_ptr &info, + const Name2VariableMap ¶ms_dict, + const phi::Place &place); + + void AddPEFunction(const std::string &func_name, + const std::shared_ptr &info, + const Name2VariableMap ¶ms_dict, + const phi::Place &place); + + std::shared_ptr GetFunction(const std::string &name) const; + private: - std::vector> functions_; - std::unordered_map functions_idx_; + std::unordered_map> function_dict_; }; } // namespace jit diff --git a/paddle/fluid/jit/exector_function.h b/paddle/fluid/jit/executor_function.h similarity index 88% rename from paddle/fluid/jit/exector_function.h rename to paddle/fluid/jit/executor_function.h index 6af1ec50239bf..1032848c3134c 100644 --- a/paddle/fluid/jit/exector_function.h +++ b/paddle/fluid/jit/executor_function.h @@ -30,17 +30,17 @@ namespace paddle { namespace jit { -class ExectorFunction : public BaseFunction { +class ExecutorFunction : public BaseFunction { public: - ExectorFunction(const std::shared_ptr &info, - const Name2VariableMap ¶ms_dict, - const phi::Place &place) + ExecutorFunction(const std::shared_ptr &info, + const Name2VariableMap ¶ms_dict, + const phi::Place &place) : info_(info), place_(place), inner_exe_(place_) { ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_); VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); } - ~ExectorFunction() noexcept {} + ~ExecutorFunction() noexcept {} std::vector operator()(const std::vector &inputs) { ShareInputsIntoScope(info_->GetInputArgNames(), inputs, &scope_); diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc index b0d9525e1a43d..2193a06a1ad42 100644 --- a/paddle/fluid/jit/layer.cc +++ b/paddle/fluid/jit/layer.cc @@ -27,15 +27,14 @@ Layer::Layer(const std::vector>& infos, // Layer manage the life time of all parameter. 
for (size_t i = 0; i < infos.size(); ++i) { // TODO(dev): choose exector or pe by flag - function_dict_[infos[i]->GetFunctionName()] = - std::make_shared(infos[i], params_dict_, place); + unit_.AddExecutorFunction( + infos[i]->GetFunctionName(), infos[i], params_dict_, place); } } std::shared_ptr Layer::GetFunction( const std::string& name) const { - VLOG(3) << "funcs_ size: " << function_dict_.size(); - return function_dict_.at(name); + return unit_.GetFunction(name); } std::vector Layer::forward(const std::vector& inputs) { diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h index 89610c6630cb3..faeb70d542058 100644 --- a/paddle/fluid/jit/layer.h +++ b/paddle/fluid/jit/layer.h @@ -18,16 +18,11 @@ #include #include -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/variable.h" #include "paddle/phi/common/place.h" -#include "paddle/fluid/jit/base_function.h" -#include "paddle/fluid/jit/exector_function.h" +#include "paddle/fluid/jit/compilation_unit.h" #include "paddle/fluid/jit/function_schema.h" -#include "paddle/fluid/jit/pe_function.h" namespace paddle { namespace jit { @@ -39,7 +34,6 @@ class Layer { // TODO(dev): Make vector, num_slot as in argument // Layer(const std::shared_ptr& type) : obj_(type, /*num_slot*/ 0U) // {} - // TODO(dev): consider make `func_name, program_desc, param_nams` as a class Layer(const std::vector>& infos, const Name2VariableMap& params_dict, const phi::Place& place); @@ -56,7 +50,7 @@ class Layer { // internal::Object obj_; Name2VariableMap params_dict_; Name2VariableMap attrs_dict_; - std::map> function_dict_; + CompilationUnit unit_; }; } // namespace jit