From 126871bcbd00269577bc5e6919981584c4b97648 Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Fri, 14 Jan 2022 07:27:20 +0000
Subject: [PATCH 01/13] Removed debug info

---
 paddle/fluid/framework/program_desc.cc | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc
index 60b93f4a71664d..4a31adcca65ec8 100644
--- a/paddle/fluid/framework/program_desc.cc
+++ b/paddle/fluid/framework/program_desc.cc
@@ -101,25 +101,20 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) {
   PADDLE_ENFORCE_EQ(desc_.ParseFromString(binary_str), true,
                     platform::errors::InvalidArgument(
                         "Failed to parse program_desc from binary string."));
-  VLOG(1) << 3333;
   InitFromProto();
 }

 void ProgramDesc::InitFromProto() {
-  VLOG(1) << 4444;
   for (auto &block_desc : *desc_.mutable_blocks()) {
     blocks_.emplace_back(new BlockDesc(this, &block_desc));
   }
-  VLOG(1) << 5555;
   for (auto &block : blocks_) {
     for (auto *op : block->AllOps()) {
       for (const auto &attr : op->Proto()->attrs()) {
         if (attr.type() == proto::AttrType::BLOCK) {
-          VLOG(1) << 6666;
           size_t blk_idx = attr.block_idx();
           op->SetBlockAttr(attr.name(), this->MutableBlock(blk_idx));
         } else if (attr.type() == proto::AttrType::BLOCKS) {
-          VLOG(1) << 7777;
           auto blks_idx = attr.blocks_idx();
           std::vector<BlockDesc *> block_descs;
           for (int blk_idx : blks_idx) {

From 211a703f1eafdd2176160cb1c20308c15a297b0e Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Mon, 24 Jan 2022 01:15:28 +0000
Subject: [PATCH 02/13] Added automatic code generation for final state Eager
 Dygraph

---
 .../eager/auto_code_generator/CMakeLists.txt  |   2 +
 .../final_state_generator/eager_gen.py        | 719 ++++++++++++++++++
 .../final_state_generator/test.py             |  46 ++
 python/paddle/utils/code_gen/api.yaml         |   1 +
 4 files changed, 768 insertions(+)
 create mode 100644 paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
 create mode 100644 paddle/fluid/eager/auto_code_generator/final_state_generator/test.py

diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
index 010c879571c74b..668e60d857b9ca 100644
--- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
@@ -1,3 +1,5 @@
+add_subdirectory(final_state_generator)
+
 set(EAGER_GENERETOR_DEPS ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} pybind proto_desc executor layer tracer engine imperative_profiler imperative_flag)

 add_executable(eager_generator eager_generator.cc)
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
new file mode 100644
index 00000000000000..a8df9cc35d4d23
--- /dev/null
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -0,0 +1,719 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
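# ---------------------------------------------------------------------------
# Illustrative input (drawn from the matmul entries used elsewhere in this
# patch series): the generator pairs a forward spec from api.yaml, e.g.
#
#   - api : matmul
#     args : (const Tensor& x, const Tensor& y, bool transpose_x = false, bool transpose_y = false)
#     output : Tensor
#     backward : matmul_grad
#
# with its gradient spec, e.g.
#
#   - grad_api : matmul_grad
#     forward : matmul (const Tensor& x, const Tensor& y, bool transpose_x, bool transpose_y) -> Tensor(out)
#     args : (const Tensor& x, const Tensor& y, const Tensor& out_grad, bool transpose_x=false, bool transpose_y=false)
#     output : Tensor(x_grad), Tensor(y_grad)
#
# and emits, per pair, a GradNode declaration/definition and the corresponding
# dygraph forward function.
# ---------------------------------------------------------------------------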
+ +import yaml +import re + + +################# +### Helpers ### +################# +def FindGradName(string): + return string + "_grad" + + +def FindForwardName(string): + if not string.endswith("_grad"): + return None + return string[:-5] + + +def IsPlainTensorType(string): + plain_tensor_types = ['Tensor&', 'Tensor', 'const Tensor&', 'const Tensor'] + if string in plain_tensor_types: + return True + return False + + +def IsVectorTensorType(string): + vector_tensor_types = ['list(Tensor)'] + if string in vector_tensor_types: + return True + return False + + +def GetSavedName(string): + return string + "_" + + +def GetConstReference(string): + ret = string + if not string.startswith("const "): + ret = "const " + string + if not string.endswith("&"): + ret += "&" + return ret + + +###################### +### File Readers ### +###################### +def ReadFwdFile(filepath): + f = open(filepath, 'r') + contents = yaml.load(f) + return contents + + +def ReadBwdFile(filepath): + f = open(filepath, 'r') + contents = yaml.load(f) + ret = {} + for content in contents: + assert 'grad_api' in content.keys() + api_name = content['grad_api'] + ret[api_name] = content + return ret + + +###################### +### Yaml Parsers ### +###################### +def ParseYamlArgs(string): + # Example: const Tensor& x, const Tensor& y, bool transpose_x, bool transpose_y + + # inputs_list = [ [arg_name, arg_type, orig_position], ...] + inputs_list = [] + # attrs_list = [ [arg_name, arg_type, default_value, orig_position], ...] + attrs_list = [] + + args = [x.strip() for x in string.strip().split(",")] + + atype = r'((const )?\S+) ' + aname = r'(\S+)' + pattern = f'{atype}{aname}' + for i in range(len(args)): + arg = args[i] + m = re.search(pattern, arg) + arg_type = m.group(1) + arg_name = m.group(3).split("=")[0] + default_value = m.group(3).split("=")[1] if len(m.group(3).split( + "=")) > 1 else None + if "Tensor" in arg_type: + assert default_value is None + inputs_list.append([arg_name, arg_type, i]) + else: + attrs_list.append([arg_name, arg_type, default_value, i]) + + return inputs_list, attrs_list + + +def ParseYamlReturns(string): + # Example: Tensor, Tensor + + # list = [ [ret_type, orig_position], ...] + returns_list = [] + + returns = [x.strip() for x in string.strip().split(",")] + for i in range(len(returns)): + ret = returns[i] + returns_list.append([ret, i]) + + return returns_list + + +def ParseYamlReturnsWithName(string): + # Example: Tensor(out), Tensor(out1) + + # list = [ [ret_name, ret_type, orig_position], ...] 
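    # Illustrative result, assuming the example input above:
    #   ParseYamlReturnsWithName("Tensor(out), Tensor(out1)")
    #   -> [["out", "Tensor", 0], ["out1", "Tensor", 1]]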
+ returns_list = [] + + returns = [x.strip() for x in string.strip().split(",")] + + atype = r'(.*?)' + aname = r'(.*?)' + pattern = f'{atype}\({aname}\)' + for i in range(len(returns)): + ret = returns[i] + m = re.search(pattern, ret) + ret_type = m.group(1) + ret_name = m.group(2) + assert "Tensor" in ret_type + returns_list.append([ret_name, ret_type, i]) + + return returns_list + + +def ParseYamlForwardFromBackward(string): + # Example: matmul (const Tensor& x, const Tensor& y, bool transpose_x, bool transpose_y) -> Tensor(out) + + fname = r'(.*?)' + wspace = r'\s*' + fargs = r'(.*?)' + frets = r'(.*)' + pattern = f'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}' + + m = re.search(pattern, string) + function_name = m.group(1) + function_args = m.group(2) + function_returns = m.group(3) + + forward_inputs_list, forward_attrs_list = ParseYamlArgs(function_args) + forward_returns_list = ParseYamlReturnsWithName(function_returns) + + return forward_inputs_list, forward_attrs_list, forward_returns_list + + +def ParseYamlForward(args_str, returns_str): + # args Example: (const Tensor& x, const Tensor& y, bool transpose_x = false, bool transpose_y = false) + # returns Example: Tensor, Tensor + + fargs = r'(.*?)' + wspace = r'\s*' + args_pattern = f'\({fargs}\)' + args_str = re.search(args_pattern, args_str).group(1) + + inputs_list, attrs_list = ParseYamlArgs(args_str) + returns_list = ParseYamlReturns(returns_str) + + return inputs_list, attrs_list, returns_list + + +def ParseYamlBackward(args_str, returns_str): + # args Example: (const Tensor& x, const Tensor& y, const Tensor& out_grad, bool transpose_x=false, bool transpose_y=false) + # returns Example: Tensor(x_grad), Tensor(y_grad) + + fargs = r'(.*?)' + wspace = r'\s*' + args_pattern = f'\({fargs}\)' + args_str = re.search(args_pattern, args_str).group(1) + + inputs_list, attrs_list = ParseYamlArgs(args_str) + returns_list = ParseYamlReturnsWithName(returns_str) + + return inputs_list, attrs_list, returns_list + + +####################### +### Preprocessing ### +####################### +def ForwardsValidationCheck(forward_inputs_list, forward_attrs_list, + forward_returns_list, orig_forward_inputs_list, + orig_forward_attrs_list, orig_forward_returns_list): + # inputs_list = [ [input_name, input_type, orig_position], ...] + # attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...] + # forward_returns_list = [ [ret_name, ret_type, orig_position] ...] + # orig_returns_list = [ [ret_type, orig_position], ...] 
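    # Illustrative: both argument sets describe the same op, parsed once from
    # the "forward" record of the backward yaml and once from api.yaml; for
    # matmul both should give
    #   [["x", "const Tensor&", 0], ["y", "const Tensor&", 1]]
    # and any type or position mismatch below means the two files diverged.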
+ for i in range(len(forward_inputs_list)): + forward_input_name = forward_inputs_list[i][0] + forward_input_type = forward_inputs_list[i][1] + forward_input_pos = forward_inputs_list[i][2] + orig_input_name = orig_forward_inputs_list[i][0] + orig_input_type = orig_forward_inputs_list[i][1] + orig_input_pos = orig_forward_inputs_list[i][2] + + assert forward_input_type == orig_input_type + assert forward_input_pos == orig_input_pos + + for i in range(len(forward_attrs_list)): + orig_attr_name = orig_forward_attrs_list[i][0] + orig_attr_type = orig_forward_attrs_list[i][1] + orig_attr_default = orig_forward_attrs_list[i][2] + orig_attr_pos = orig_forward_attrs_list[i][3] + forward_attr_name = forward_attrs_list[i][0] + forward_attr_type = forward_attrs_list[i][1] + forward_attr_default = forward_attrs_list[i][2] + forward_attr_pos = forward_attrs_list[i][3] + + assert orig_attr_type == forward_attr_type + assert orig_attr_default == forward_attr_default + assert orig_attr_pos == forward_attr_pos + + for i in range(len(forward_returns_list)): + orig_return_type = orig_forward_returns_list[i][0] + orig_return_pos = orig_forward_returns_list[i][1] + forward_return_type = forward_returns_list[i][1] + forward_return_pos = forward_returns_list[i][2] + + assert orig_return_type == forward_return_type + assert orig_return_pos == forward_return_pos + + # Check Order: Inputs, Attributes + max_input_position = -1 + for _, _, pos in forward_inputs_list: + max_input_position = max(max_input_position, pos) + + max_attr_position = -1 + for _, _, _, pos in forward_attrs_list: + assert pos > max_input_position + max_attr_position = max(max_attr_position, pos) + + +def BackwardValidationCheck(backward_fwd_input_map, backward_grad_input_map, + backward_attrs_list): + # backward_fwd_input_map = { "name" : [type, is_fwd_input, orig_position] ...} + # backward_grad_input_map = { "name" : [type, fwd_position, orig_position] ...} + # backward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...] + + # Check Order: TensorWrappers, GradTensors, Attributes + max_fwd_input_position = -1 + for _, (_, _, pos) in backward_fwd_input_map.items(): + max_fwd_input_position = max(max_fwd_input_position, pos) + + max_grad_tensor_position = -1 + for _, (_, _, pos) in backward_grad_input_map.items(): + assert pos > max_fwd_input_position + max_grad_tensor_position = max(max_grad_tensor_position, pos) + + max_attr_position = -1 + for _, _, _, pos in backward_attrs_list: + assert pos > max_grad_tensor_position + max_attr_position = max(max_attr_position, pos) + + +def DetermineForwardPositionMap(forward_inputs_list, forward_returns_list): + # inputs_list = [ [input_name, input_type, orig_position], ...] + # forward_returns_list = [ [ret_name, ret_type, orig_position] ...] 
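    # Illustrative output for matmul (assuming the spec sketched at the top of
    # this file):
    #   forward_inputs_position_map  == {"x": ["const Tensor&", 0], "y": ["const Tensor&", 1]}
    #   forward_outputs_position_map == {"out": ["Tensor", 0]}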
+ + # forward_position_map = { "name" : [type, fwd_position] ...} + forward_inputs_position_map = {} + forward_outputs_position_map = {} + for i in range(len(forward_inputs_list)): + forward_input = forward_inputs_list[i] + input_name = forward_input[0] + input_type = forward_input[1] + input_pos = forward_input[2] + + forward_inputs_position_map[input_name] = [input_type, input_pos] + + for i in range(len(forward_returns_list)): + forward_return = forward_returns_list[i] + return_name = forward_return[0] + return_type = forward_return[1] + return_pos = forward_return[2] + + forward_outputs_position_map[return_name] = [return_type, return_pos] + + return forward_inputs_position_map, forward_outputs_position_map + + +def SlotNameMatching(backward_inputs_list, backward_returns_list, + forward_inputs_position_map, forward_outputs_position_map): + + # backward_inputs_list = [ [input_name, input_type, orig_position], ...] + # backward_returns_list = [ [ret_name, ret_type, orig_position], ...] + # forward_inputs_position_map = { "name" : [type, fwd_position] } + # forward_outputs_position_map = { "name" : [type, fwd_position] } + + # backward_fwd_input_map = { "name" : [type, is_fwd_input, orig_position] ...} + # backward_grad_input_map = { "name" : [type, fwd_position, orig_position] ...} + # backward_grad_output_map = { "name" : [type, fwd_position, orig_position] ...} + + backward_fwd_input_map = {} + backward_grad_input_map = {} + backward_grad_output_map = {} + + for backward_input in backward_inputs_list: + backward_input_name = backward_input[0] + backward_input_type = backward_input[1] + backward_input_pos = backward_input[2] + + backward_fwd_name = FindForwardName(backward_input_name) + if backward_fwd_name: + # Grad Input + assert backward_fwd_name in forward_outputs_position_map.keys() + matched_forward_output_type = forward_outputs_position_map[ + backward_fwd_name][0] + matched_forward_output_pos = forward_outputs_position_map[ + backward_fwd_name][1] + + backward_grad_input_map[backward_input_name] = [ + backward_input_type, matched_forward_output_pos, + backward_input_pos + ] + else: + # TensorWrapper Input + if backward_input_name in forward_inputs_position_map.keys(): + tensor_wrapper_type = forward_inputs_position_map[ + backward_input_name][0] + backward_fwd_input_map[backward_input_name] = [ + backward_input_type, True, backward_input_pos + ] + + elif backward_input_name in forward_outputs_position_map.keys(): + tensor_wrapper_type = forward_outputs_position_map[ + backward_input_name][0] + backward_fwd_input_map[backward_input_name] = [ + backward_input_type, False, backward_input_pos + ] + else: + assert False + + for backward_output in backward_returns_list: + backward_output_name = backward_output[0] + backward_output_type = backward_output[1] + backward_output_pos = backward_output[2] + + backward_fwd_name = FindForwardName(backward_output_name) + assert backward_fwd_name is not None + assert backward_fwd_name in forward_inputs_position_map.keys() + + matched_forward_input_type = forward_inputs_position_map[ + backward_fwd_name][0] + matched_forward_input_pos = forward_inputs_position_map[ + backward_fwd_name][1] + + backward_grad_output_map[backward_output_name] = [ + backward_output_type, matched_forward_input_pos, backward_output_pos + ] + + return backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map + + +def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map, + backward_attrs_list): + # Inputs: + # fwd_api_name = "" + # 
backward_fwd_input_map = { "name" : [type, is_fwd_input, orig_position] ...} + # backward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...] + + # Determine Node Name + forward_op_name = fwd_api_name + + # SetTensorWrapper Methods & TensorWrapper Members + set_tensor_wrapper_methods_str = "" + tensor_wrapper_members_str = "" + for tname, (ttype, is_fwd_input, _) in backward_fwd_input_map.items(): + tensor_wrapper_name = GetSavedName(tname) + if IsPlainTensorType(ttype): + SET_PLAIN_TENSOR_WRAPPER_TEMPLATE = """ + void SetTensorWrapper{}(const egr::EagerTensor& {}, bool full_reserved) {{ + {} = egr::TensorWrapper({}, full_reserved); + }} +""" + set_tensor_wrapper_methods_str += SET_PLAIN_TENSOR_WRAPPER_TEMPLATE.format( + tname, tname, tensor_wrapper_name, tname) + + PLAIN_TENSOR_MEMBER_TEMPLATE = """ + egr::TensorWrapper {}; +""" + tensor_wrapper_members_str += PLAIN_TENSOR_MEMBER_TEMPLATE.format( + tensor_wrapper_name) + else: + assert IsVectorTensorType(ttype) + SET_VECTOR_TENSOR_WRAPPER_TEMPLATE = """ + void SetTensorWrapper{}(const std::vector& {}, bool full_reserved) {{ + for(const auto& eager_tensor : {}) {{ + {}.emplace_back( egr::TensorWrapper(eager_tensor, full_reserved) ); + }}; + }} +""" + set_tensor_wrapper_methods_str += SET_VECTOR_TENSOR_WRAPPER_TEMPLATE.format( + tname, tname, tname, tensor_wrapper_name) + + VECTOR_TENSOR_MEMBER_TEMPLATE = """ + std::vector {}; +""" + tensor_wrapper_members_str += VECTOR_TENSOR_MEMBER_TEMPLATE.format( + tensor_wrapper_name) + # End: SetTensorWrapper Methods & TensorWrapper Members + + # SetAttributes & Attribute Members + set_attribute_methods_str = "" + attribute_members_str = "" + for aname, atype, default_val, _ in backward_attrs_list: + saved_attr_name = GetSavedName(aname) + SET_ATTR_METHOD_TEMPLATE = """ + void SetAttribute{}({} {}) {{ + {} = {}; + }} +""" + set_attribute_methods_str += SET_ATTR_METHOD_TEMPLATE.format( + aname, GetConstReference(atype), aname, saved_attr_name, aname) + + ATTRIBUTE_MEMBER_TEMPLATE = """ + {} {}; +""" + attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format( + GetConstReference(atype), saved_attr_name) + # End: SetAttributes & Attribute Members + + NODE_DECLARATION_TEMPLATE = """ +class GradNode{} : public egr::GradNodeBase {{ + public: + GradNode{}() : egr::GradNodeBase() {{}} + GradNode{}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) : + egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}} + ~GradNode{}() override = default; + + virtual std::vector> operator()( + const std::vector>& grads) override; + + // SetTensorWrapperX, SetTensorWrapperY, ... + {} + // SetAttributes + {} + private: + // TensorWrappers + {} + + // Attributes + {} +}}; +""" + node_declaration_str = NODE_DECLARATION_TEMPLATE.format( + forward_op_name, forward_op_name, forward_op_name, forward_op_name, + set_tensor_wrapper_methods_str, set_attribute_methods_str, + tensor_wrapper_members_str, attribute_members_str) + + return node_declaration_str + + +def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map, + backward_grad_input_map, backward_grad_output_map, + backward_attrs_list): + # fwd_api_name = "" + # backward_fwd_input_map = { "name" : [type, is_fwd_input, orig_position] ...} + # backward_grad_input_map = { "name" : [type, fwd_position, orig_position] ...} + # backward_grad_output_map = { "name" : [type, fwd_position, orig_position] ...} + # backward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...] 
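    # Illustrative call assembled for matmul at this point in the series
    # (wrapper and attribute member names follow GetSavedName):
    #   matmul_grad(egr::EagerUtils::RecoverTensorWrapper(&this->x_, true),
    #               egr::EagerUtils::RecoverTensorWrapper(&this->y_, true),
    #               *grads[0].Tensor().get(),
    #               this->transpose_x_, this->transpose_y_)
    # i.e. tensor wrappers first, then grad tensors, then saved attributes,
    # each placed at its recorded argument position.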
+ + # Construct grad_api function args + # Order: TensorWrappers, GradTensors, Attributes + grad_api_args_len = len(backward_fwd_input_map.keys()) + len( + backward_grad_input_map.keys()) + len(backward_attrs_list) + grad_api_args = ["" for i in range(grad_api_args_len)] + for name, (_, is_fwd_input, + grad_api_position), in backward_fwd_input_map.items(): + tensor_wrapper_name = GetSavedName(name) + if is_fwd_input: + grad_api_args[ + grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true)" + else: + grad_api_args[ + grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false)" + + for _, (_, fwd_position, + grad_api_position) in backward_grad_input_map.items(): + grad_api_args[ + grad_api_position] = f"*grads[{fwd_position}].Tensor().get()" + + for name, _, _, grad_api_position in backward_attrs_list: + saved_attribute_name = GetSavedName(name) + grad_api_args[grad_api_position] = f"this->{saved_attribute_name}" + grad_api_args_str = ", ".join(grad_api_args) + + # Construct grad_api returns + num_outputs = len(backward_grad_output_map.keys()) + returns_list = ["" for i in range(num_outputs)] + for _, (ttype, fwd_position, + grad_api_position) in backward_grad_output_map.items(): + # Infer Grad API Return Type + if num_outputs == 1: + # Single tensor output, return as is + if IsPlainTensorType(ttype): + returns_list[0] = "{grad_api_returns}" + else: + assert IsVectorTensorType(ttype) + returns_list[0] = "grad_api_returns" + else: + # Rearrange output order accordingly + if IsPlainTensorType(ttype): + returns_list[ + fwd_position] = f"{{ grad_api_returns[{grad_api_position}] }}" + else: + assert IsVectorTensorType(ttype) + returns_list[ + fwd_position] = f"grad_api_returns[{grad_api_position}]" + returns_str = ", ".join(returns_list) + returns_str = f"{{ {returns_str} }}" + + FUNCTION_TEMPLATE = """ +std::vector> GradNode{}::operator()(const std::vector>& grads) {{ + // Call grad_api function + auto grad_api_returns = {}({}); + return {}; +}} + """ + + node_definition_str = FUNCTION_TEMPLATE.format( + fwd_api_name, bwd_api_name, grad_api_args_str, returns_str) + + return node_definition_str + + +def GenerateForwardDefinition(fwd_api_name, bwd_api_name, + forward_inputs_position_map, + forward_outputs_position_map, forward_attrs_list, + backward_fwd_input_map, backward_grad_input_map, + backward_grad_output_map, backward_attrs_list): + # fwd_api_name = "" + # forward_inputs_position_map = { "name" : [type, fwd_position] } + # forward_outputs_position_map = { "name" : [type, fwd_position] } + # forward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...] + # backward_fwd_input_map = { "name" : [type, is_fwd_input, orig_position] ...} + # backward_grad_input_map = { "name" : [type, fwd_position, orig_position] ...} + # backward_grad_output_map = { "name" : [type, fwd_position, orig_position] ...} + # backward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...] 
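    # Illustrative: for matmul the pieces assembled below amount to a
    # signature like
    #   egr::EagerTensor matmul(const egr::EagerTensor& x,
    #                           const egr::EagerTensor& y,
    #                           bool transpose_x, bool transpose_y)
    # (defaults appear only when the yaml spec carries them; the emitted body
    # is still a stub at this point in the series, and a later patch names
    # these functions "<api>_final_state_dygraph_function").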
+ + # Get Function Args + num_inputs = len(forward_attrs_list) + len(forward_inputs_position_map.keys( + )) + inputs_args_list = ["" for i in range(num_inputs)] + inputs_call_list = ["" for i in range(num_inputs)] + for name, (ttype, pos) in forward_inputs_position_map.items(): + inputs_call_list[pos] = name + if IsPlainTensorType(ttype): + inputs_args_list[pos] = f"const egr::EagerTensor& {name}" + else: + assert IsVectorTensorType(ttype) + inputs_args_list[ + pos] = f"const std::vector& {name}" + + for name, atype, default_val, pos in forward_attrs_list: + inputs_call_list[pos] = name + if default_val is not None: + inputs_args_list[pos] = f"{atype} {name} = {default_val}" + else: + inputs_args_list[pos] = f"{atype} {name}" + + inputs_args_str = ", ".join(inputs_args_list) + inputs_call_str = ", ".join(inputs_call_list) + + # Forward Full Logic + forward_call_str = f"auto api_result = {fwd_api_name}({inputs_call_str});" + + # Get return type list & outputs + num_outputs = len(forward_outputs_position_map.keys()) + returns_type_list = ["" for i in range(num_outputs)] + returns_list = ["" for i in range(num_outputs)] + for name, (rtype, pos) in forward_outputs_position_map.items(): + if num_outputs == 1: + returns_list[ + 0] = f"egr::EagerUtils::CreateEagerTensorFromTensor(api_result)" + else: + # Tuple api_result + returns_list[ + pos] = f"egr::EagerUtils::CreateEagerTensorFromTensor(api_result[{pos}])" + + if IsPlainTensorType(rtype): + returns_type_list[pos] = "egr::EagerTensor" + else: + assert IsVectorTensorType(rtype) + returns_type_list[pos] = "std::vector" + + if num_outputs == 1: + returns_str = returns_list[0] + returns_type_str = returns_type_list[0] + else: + returns_type_str = ", ".join(returns_type_list) + returns_type_str = f"std::tuple<{returns_type_str}>" + returns_str = ", ".join(returns_list) + returns_str = f"std::make_tuple({returns_str})" + + FORWARD_FUNCTION_TEMPLATE = """ + {} {} ({}) {{ + + }} +""" + + +if __name__ == "__main__": + filepath = "/workspace/PaddleRepos/Paddle4/python/paddle/utils/code_gen/api.yaml" + fwd_api_list = ReadFwdFile(filepath) + + filepath = "/workspace/PaddleRepos/Paddle4/python/paddle/utils/code_gen/grad.yaml" + grad_api_dict = ReadBwdFile(filepath) + + # Generate per Dygraph API + for fwd_api in fwd_api_list: + # We only generate Ops with grad + if 'backward' not in fwd_api.keys(): + continue + + assert 'api' in fwd_api.keys() + assert 'args' in fwd_api.keys() + assert 'output' in fwd_api.keys() + assert 'backward' in fwd_api.keys() + + fwd_api_name = fwd_api['api'] + fwd_args_str = fwd_api['args'] + fwd_returns_str = fwd_api['output'] + + bwd_api_name = fwd_api['backward'] + assert bwd_api_name in grad_api_dict.keys() + bwd_api = grad_api_dict[bwd_api_name] + + assert 'args' in bwd_api.keys() + assert 'output' in bwd_api.keys() + assert 'forward' in bwd_api.keys() + bwd_forward_str = bwd_api['forward'] + bwd_args_str = bwd_api['args'] + bwd_returns_str = bwd_api['output'] + + # Collect Forward Inputs/Outputs + forward_inputs_list, forward_attrs_list, forward_returns_list = ParseYamlForwardFromBackward( + bwd_forward_str) + print("Parsed Forward Inputs List: ", forward_inputs_list) + print("Prased Forward Attrs List: ", forward_attrs_list) + print("Parsed Forward Returns List: ", forward_returns_list) + + # Collect Original Forward Inputs/Outputs and then perform validation checks + orig_forward_inputs_list, orig_forward_attrs_list, orig_forward_returns_list = ParseYamlForward( + fwd_args_str, fwd_returns_str) + print("Parsed Original 
Forward Inputs List: ", orig_forward_inputs_list) + print("Prased Original Forward Attrs List: ", orig_forward_attrs_list) + print("Parsed Original Forward Returns List: ", + orig_forward_returns_list) + + # Forward Validation Checks + ForwardsValidationCheck(forward_inputs_list, forward_attrs_list, + forward_returns_list, orig_forward_inputs_list, + orig_forward_attrs_list, + orig_forward_returns_list) + + # Parse Backward Inputs/Outputs + backward_inputs_list, backward_attrs_list, backward_returns_list = ParseYamlBackward( + bwd_args_str, bwd_returns_str) + print("Parsed Backward Inputs List: ", backward_inputs_list) + print("Prased Backward Attrs List: ", backward_attrs_list) + print("Parsed Backward Returns List: ", backward_returns_list) + + # Determine Forward Inputs/Outputs Position + forward_inputs_position_map, forward_outputs_position_map = DetermineForwardPositionMap( + forward_inputs_list, forward_returns_list) + print("Generated Forward Input Position Map: ", + forward_inputs_position_map) + print("Generated Forward Output Position Map: ", + forward_outputs_position_map) + + # SlotName Matching + backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map = SlotNameMatching( + backward_inputs_list, backward_returns_list, + forward_inputs_position_map, forward_outputs_position_map) + print("Generated Backward Fwd Input Map: ", backward_fwd_input_map) + print("Generated Backward Grad Input Map: ", backward_grad_input_map) + print("Generated Backward Grad Output Map: ", backward_grad_output_map) + + # Backward Validation Check + BackwardValidationCheck(backward_fwd_input_map, backward_grad_input_map, + backward_attrs_list) + + # Node Declaration Generation + node_declaration_str = GenerateNodeDeclaration( + fwd_api_name, backward_fwd_input_map, backward_attrs_list) + print("Generated Node Declaration: ", node_declaration_str) + + node_definition_str = GenerateNodeDefinition( + fwd_api_name, bwd_api_name, backward_fwd_input_map, + backward_grad_input_map, backward_grad_output_map, + backward_attrs_list) + print("Generated Node Definition: ", node_definition_str) + + # Node Definition Generation + forward_definition_str = GenerateForwardDefinition( + fwd_api_name, bwd_api_name, forward_inputs_position_map, + forward_outputs_position_map, forward_attrs_list, + backward_fwd_input_map, backward_grad_input_map, + backward_grad_output_map, backward_attrs_list) + print("Generated Forward Definition: ", forward_definition_str) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/test.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/test.py new file mode 100644 index 00000000000000..622e23fccd70f8 --- /dev/null +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/test.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
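# Illustrative note: the template below mixes printf-style "%s" markers with
# str.format "{}" slots; format() fills only the "{}" slots and collapses the
# escaped "{{"/"}}" braces to literal braces, leaving every "%s" untouched.
# A minimal sketch of that behavior:
demo = "GradNode%s {{ {} }}".format("X")
assert demo == "GradNode%s { X }"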
+ +name = "A" +B = "B" +C = "C" +D = "D" +E = "E" +x = """ +class GradNode%s : public egr::GradNodeBase {{ + public: + GradNode%s() : egr::GradNodeBase() {{}} + GradNode%s(size_t bwd_in_slot_num, size_t bwd_out_slot_num) : + egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}} + ~GradNode%s() override = default; + + virtual std::vector> + operator()(const + std::vector>& grads) + override; + + // SetX, SetY, ... + {} + // SetAttrMap + {} + + private: + // TensorWrappers + {} + // Attribute Map + {} +}}; +""" + +print(x.format("A", "B", "C", "D")) diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml index 562a726aa29f27..77fa9367318576 100644 --- a/python/paddle/utils/code_gen/api.yaml +++ b/python/paddle/utils/code_gen/api.yaml @@ -111,6 +111,7 @@ func : MatmulInferMeta kernel : func : matmul + backward : matmul_grad - api : mean args : (const Tensor& x, const std::vector& axis={}, bool keep_dim=false) From c292c9ff8e460d859099183e55d035214b6bdb63 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Mon, 24 Jan 2022 11:33:16 +0000 Subject: [PATCH 03/13] Modified backward yaml --- python/paddle/utils/code_gen/backward.yaml | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 python/paddle/utils/code_gen/backward.yaml diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml deleted file mode 100644 index 70c89daebf4d26..00000000000000 --- a/python/paddle/utils/code_gen/backward.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- grad_api : matmul_grad - forward : matmul (const Tensor& x, const Tensor& y, bool transpose_x, bool transpose_y) -> Tensor(out) - args : (const Tensor& x, const Tensor& y, const Tensor& out_grad, bool transpose_x=false, bool transpose_y=false) - output : Tensor(x_grad), Tensor(y_grad) - infer_meta : - func : MatmulGradInferMeta - kernel : - func : matmul_grad - -- grad_api : scale_grad - forward : scale (const Tensor& x, const Scalar& scale, float bias, bool bias_after_scale) -> Tensor(out) - args : (const Tensor& out_grad, const Scalar& scale, float bias=0.0, bool bias_after_scale=true) - output : Tensor // dx - invoke : scale(out_grad, scale, bias, bias_after_scale) From 3723caba83e663196c2256a3c31f858af0381068 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Tue, 25 Jan 2022 06:22:53 +0000 Subject: [PATCH 04/13] Added EagerUtils helper functions for final state CodeGen --- .../eager/auto_code_generator/CMakeLists.txt | 2 +- .../final_state_generator/CMakeLists.txt | 3 +- .../final_state_generator/eager_gen.py | 46 +++++++++++++++---- paddle/fluid/eager/utils.cc | 39 ++++++++++++++++ paddle/fluid/eager/utils.h | 10 ++++ 5 files changed, 89 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt index c504a126ddecae..668e60d857b9ca 100644 --- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt +++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt @@ -1,4 +1,4 @@ -#add_subdirectory(final_state_generator) +add_subdirectory(final_state_generator) set(EAGER_GENERETOR_DEPS ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} pybind proto_desc executor layer tracer engine imperative_profiler imperative_flag) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt index 80983edac41920..56ba4acc62b531 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt 
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt @@ -9,7 +9,7 @@ set(forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.cc") set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.h") -execute_process( +add_custom_target(eager_final_state_codegen COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py" "--api_yaml_path=${api_yaml_path}" "--backward_yaml_path=${backward_yaml_path}" @@ -21,4 +21,5 @@ execute_process( COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_forwards_h_path} ${forwards_h_path} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_nodes_cc_path} ${nodes_cc_path} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_nodes_h_path} ${nodes_h_path} + VERBATIM ) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py index b74cdcf78dcb3b..5a49da852f44ca 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py @@ -15,6 +15,7 @@ import yaml import re import argparse +import os def ParseArguments(): @@ -93,8 +94,8 @@ def ReadBwdFile(filepath): contents = yaml.load(f) ret = {} for content in contents: - assert 'grad_api' in content.keys() - api_name = content['grad_api'] + assert 'backward_api' in content.keys() + api_name = content['backward_api'] ret[api_name] = content return ret @@ -435,10 +436,10 @@ def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map, aname, GetConstReference(atype), aname, saved_attr_name, aname) ATTRIBUTE_MEMBER_TEMPLATE = """ - {} {}; + {} {} = {}; """ attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format( - GetConstReference(atype), saved_attr_name) + GetConstReference(atype), saved_attr_name, default_val) # End: SetAttributes & Attribute Members NODE_DECLARATION_TEMPLATE = """ @@ -491,15 +492,15 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map, tensor_wrapper_name = GetSavedName(name) if is_fwd_input: grad_api_args[ - grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true)" + grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true) )" else: grad_api_args[ - grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false)" + grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false) )" for _, (_, fwd_position, grad_api_position) in backward_grad_input_map.items(): grad_api_args[ - grad_api_position] = f"*grads[{fwd_position}].Tensor().get()" + grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( *grads[{fwd_position}] )" for name, _, _, grad_api_position in backward_attrs_list: saved_attribute_name = GetSavedName(name) @@ -615,7 +616,7 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name, # SetAttributes set_attributes_list = [] for name, _, _, _ in backward_attrs_list: - set_attributes = " grad_node->SetAttribute{name}({name});" + set_attributes = f" grad_node->SetAttribute{name}({name});" set_attributes_list.append(set_attributes) set_attributes_str = 
"\n".join(set_attributes_list) @@ -727,7 +728,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name, inputs_args_list = ["" for i in range(num_inputs)] inputs_call_list = ["" for i in range(num_inputs)] for name, (ttype, pos) in forward_inputs_position_map.items(): - inputs_call_list[pos] = f"*{name}.Tensor().get()" + inputs_call_list[pos] = f"egr::EagerUtils::SyncToPtenTensors({name})" if IsPlainTensorType(ttype): inputs_args_list[pos] = f"const egr::EagerTensor& {name}" else: @@ -905,10 +906,17 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str): # Collect Forward Inputs/Outputs forward_inputs_list, forward_attrs_list, forward_returns_list = ParseYamlForwardFromBackward( bwd_forward_str) + print("Parsed Forward Inputs List: ", forward_inputs_list) + print("Prased Forward Attrs List: ", forward_attrs_list) + print("Parsed Forward Returns List: ", forward_returns_list) # Collect Original Forward Inputs/Outputs and then perform validation checks orig_forward_inputs_list, orig_forward_attrs_list, orig_forward_returns_list = ParseYamlForward( fwd_args_str, fwd_returns_str) + print("Parsed Original Forward Inputs List: ", orig_forward_inputs_list) + print("Prased Original Forward Attrs List: ", orig_forward_attrs_list) + print("Parsed Original Forward Returns List: ", + orig_forward_returns_list) # Forward Validation Checks ForwardsValidationCheck(forward_inputs_list, forward_attrs_list, @@ -919,15 +927,25 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str): # Parse Backward Inputs/Outputs backward_inputs_list, backward_attrs_list, backward_returns_list = ParseYamlBackward( bwd_args_str, bwd_returns_str) + print("Parsed Backward Inputs List: ", backward_inputs_list) + print("Prased Backward Attrs List: ", backward_attrs_list) + print("Parsed Backward Returns List: ", backward_returns_list) # Determine Forward Inputs/Outputs Position forward_inputs_position_map, forward_outputs_position_map = DetermineForwardPositionMap( forward_inputs_list, forward_returns_list) + print("Generated Forward Input Position Map: ", + forward_inputs_position_map) + print("Generated Forward Output Position Map: ", + forward_outputs_position_map) # SlotName Matching backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map = SlotNameMatching( backward_inputs_list, backward_returns_list, forward_inputs_position_map, forward_outputs_position_map) + print("Generated Backward Fwd Input Map: ", backward_fwd_input_map) + print("Generated Backward Grad Input Map: ", backward_grad_input_map) + print("Generated Backward Grad Output Map: ", backward_grad_output_map) # Backward Validation Check BackwardValidationCheck(backward_fwd_input_map, backward_grad_input_map, @@ -936,11 +954,13 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str): # Node Declaration Generation node_declaration_str += GenerateNodeDeclaration( fwd_api_name, backward_fwd_input_map, backward_attrs_list) + print("Generated Node Declaration: ", node_declaration_str) node_definition_str += GenerateNodeDefinition( fwd_api_name, bwd_api_name, backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map, backward_attrs_list) + print("Generated Node Definition: ", node_definition_str) # Node Definition Generation definition_declaration_pair = GenerateForwardDefinition( @@ -948,6 +968,8 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str): forward_outputs_position_map, forward_attrs_list, backward_fwd_input_map, backward_grad_input_map, 
backward_grad_output_map, backward_attrs_list) + print("Generated Forward Definition: ", forward_definition_str) + print("Generated Forward Declaration: ", forward_declaration_str) forward_definition_str += definition_declaration_pair[0] forward_declaration_str += definition_declaration_pair[1] @@ -957,6 +979,12 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str): forwards_h_path = args.forwards_h_path forwards_cc_path = args.forwards_cc_path + for path in [ + nodes_cc_path, nodes_h_path, forwards_h_path, forwards_cc_path + ]: + if os.path.exists(path): + os.remove(path) + GenerateNodeCCFile(nodes_cc_path, node_definition_str) GenerateNodeHFile(nodes_h_path, node_declaration_str) GenerateForwardCCFile(forwards_cc_path, forward_definition_str) diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc index f50458e556276b..962f866456579f 100644 --- a/paddle/fluid/eager/utils.cc +++ b/paddle/fluid/eager/utils.cc @@ -286,4 +286,43 @@ void EagerUtils::CheckAndRetainGrad( } } +paddle::experimental::Tensor EagerUtils::SyncToPtenTensors( + const egr::EagerTensor& tensor) { + const_cast(&tensor)->SyncToTensor(); + return *tensor.Tensor().get(); +} + +std::vector EagerUtils::SyncToPtenTensors( + const std::vector& tensors) { + std::vector res; + size_t num = tensors.size(); + res.reserve(num); + for (size_t i = 0; i < num; i++) { + const_cast(&(tensors[i]))->SyncToTensor(); + res.push_back(*tensors[i].Tensor().get()); + } + return res; +} + +egr::EagerTensor EagerUtils::CreateEagerTensorFromTensor( + const paddle::experimental::Tensor& tensor) { + egr::EagerTensor ret; + ret.set_tensor(std::make_shared(tensor)); + return ret; +} + +std::vector EagerUtils::CreateEagerTensorFromTensor( + const std::vector& tensors) { + std::vector res; + size_t num = tensors.size(); + res.reserve(num); + for (size_t i = 0; i < num; i++) { + egr::EagerTensor tmp; + tmp.set_tensor(std::make_shared(tensors[i])); + res.emplace_back(std::move(tmp)); + } + + return res; +} + } // namespace egr diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h index bc1acbd69d0495..62a0bbc140f8bf 100644 --- a/paddle/fluid/eager/utils.h +++ b/paddle/fluid/eager/utils.h @@ -170,6 +170,16 @@ class EagerUtils { static void CheckAndRetainGrad(const egr::EagerTensor& tensor); static void CheckAndRetainGrad(const std::vector& tensors); + + static paddle::experimental::Tensor SyncToPtenTensors( + const egr::EagerTensor& tensor); + static std::vector SyncToPtenTensors( + const std::vector& tensors); + + static egr::EagerTensor CreateEagerTensorFromTensor( + const paddle::experimental::Tensor& tensor); + static std::vector CreateEagerTensorFromTensor( + const std::vector& tensors); }; } // namespace egr From ca7435085675b6dc2142b1143711ca232e2acb5d Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Tue, 25 Jan 2022 09:13:09 +0000 Subject: [PATCH 05/13] Adjusted CMakeFiles to support compilation for final state auto generated codes --- .../eager_generated/backwards/CMakeLists.txt | 2 + .../eager_generated/forwards/CMakeLists.txt | 2 + .../eager/auto_code_generator/CMakeLists.txt | 2 +- .../final_state_generator/CMakeLists.txt | 9 ++- .../final_state_generator/eager_gen.py | 80 ++++++++++--------- .../generate_file_structures.py | 49 +++++++++++- 6 files changed, 97 insertions(+), 47 deletions(-) diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt index 1084f0ec573c66..e04d282748c0a6 100644 --- 
a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt @@ -1 +1,3 @@ cc_library(scale_node SRCS scale_node.cc DEPS global_utils pten pten_api grad_node_info) +#cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps}) +#add_dependencies(final_dygraph_node eager_final_state_codegen) diff --git a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt index ed04e0b6f5a0cc..f682c27992db15 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt @@ -1 +1,3 @@ cc_library(eager_scale SRCS scale.cc DEPS pten_api pten autograd_meta scale_node) +#cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps}) +#add_dependencies(final_dygraph_function eager_final_state_codegen) diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt index 668e60d857b9ca..c504a126ddecae 100644 --- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt +++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt @@ -1,4 +1,4 @@ -add_subdirectory(final_state_generator) +#add_subdirectory(final_state_generator) set(EAGER_GENERETOR_DEPS ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} pybind proto_desc executor layer tracer engine imperative_profiler imperative_flag) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt index 56ba4acc62b531..0a96cbc9c970ca 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt @@ -2,13 +2,14 @@ set(api_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml") set(backward_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml") set(tmp_forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.cc") set(tmp_forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.h") -set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.cc") -set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.h") +set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.cc") +set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.h") set(forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.cc") set(forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h") -set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.cc") -set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.h") +set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.cc") +set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h") +message("Final State Eager CodeGen") 
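# Note (illustrative): unlike execute_process(), which runs the generator once
# at CMake configure time, add_custom_target() defers generation to build
# time, which is why dependent libraries declare
# add_dependencies(<lib> eager_final_state_codegen) elsewhere in this series.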
add_custom_target(eager_final_state_codegen COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py" "--api_yaml_path=${api_yaml_path}" diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py index 5a49da852f44ca..4782ca6b3b0e5f 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py @@ -72,6 +72,16 @@ def GetConstReference(string): return ret +def RemoveConstAndReference(string): + ret = string + if string.startswith("const "): + ret = ret[6:] + if string.endswith("&"): + ret = ret[:-1] + + return ret + + def GetAutoGradMetaName(string): return f"{string}_autograd_meta" @@ -439,7 +449,7 @@ def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map, {} {} = {}; """ attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format( - GetConstReference(atype), saved_attr_name, default_val) + RemoveConstAndReference(atype), saved_attr_name, default_val) # End: SetAttributes & Attribute Members NODE_DECLARATION_TEMPLATE = """ @@ -490,17 +500,13 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map, for name, (_, is_fwd_input, grad_api_position), in backward_fwd_input_map.items(): tensor_wrapper_name = GetSavedName(name) - if is_fwd_input: - grad_api_args[ - grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true) )" - else: - grad_api_args[ - grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false) )" + grad_api_args[ + grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr) )" for _, (_, fwd_position, grad_api_position) in backward_grad_input_map.items(): grad_api_args[ - grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( *grads[{fwd_position}] )" + grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}] )" for name, _, _, grad_api_position in backward_attrs_list: saved_attribute_name = GetSavedName(name) @@ -508,35 +514,28 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map, grad_api_args_str = ", ".join(grad_api_args) # Construct grad_api returns - num_outputs = len(backward_grad_output_map.keys()) - returns_list = ["" for i in range(num_outputs)] + num_bwd_outputs = len(backward_grad_output_map.keys()) + returns_str = f"std::vector> returns({num_bwd_outputs});\n" for _, (ttype, fwd_position, grad_api_position) in backward_grad_output_map.items(): # Infer Grad API Return Type - if num_outputs == 1: + if num_bwd_outputs == 1: # Single tensor output, return as is if IsPlainTensorType(ttype): - returns_list[0] = "{grad_api_returns}" + returns_str += "returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };\n" else: assert IsVectorTensorType(ttype) - returns_list[0] = "grad_api_returns" + returns_str += "returns[0] = egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns);\n" else: # Rearrange output order accordingly - if IsPlainTensorType(ttype): - returns_list[ - fwd_position] = f"{{ grad_api_returns[{grad_api_position}] }}" - else: - assert IsVectorTensorType(ttype) - returns_list[ - fwd_position] = f"grad_api_returns[{grad_api_position}]" - returns_str = ", 
".join(returns_list) - returns_str = f"{{ {returns_str} }}" + returns_str += f"returns[{fwd_position}] = egr::EagerUtils::CreateEagerTensorFromTensor( grad_api_returns[{grad_api_position}] );\n" + returns_str += f"return returns;\n" FUNCTION_TEMPLATE = """ std::vector> GradNode{}::operator()(const std::vector>& grads) {{ // Call grad_api function - auto grad_api_returns = {}({}); - return {}; + auto grad_api_returns = paddle::experimental::{}({}); + {} }} """ @@ -566,12 +565,12 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name, for name, (ttype, pos) in forward_inputs_position_map.items(): input_autograd_meta_name = GetAutoGradMetaName(name) if IsPlainTensorType(ttype): - input_autograd_meta = f" egr::EagerTensor* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});" + input_autograd_meta = f" egr::AutogradMeta* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});" else: assert IsVectorTensorType(ttype) input_autograd_meta_vec_name = GetAutoGradMetaVectorName(name) - input_autograd_meta = f" std::vector {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n" - input_autograd_meta += f" std::vector* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};" + input_autograd_meta = f" std::vector {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n" + input_autograd_meta += f" std::vector* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};" inputs_autograd_meta_list.append(input_autograd_meta) compute_require_grad_args_list.append(input_autograd_meta_name) @@ -587,19 +586,19 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name, output_autograd_meta_vec_name = GetAutoGradMetaVectorName(name) if num_fwd_outputs == 1: if IsPlainTensorType(rtype): - output_autograd_meta = f" egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs);" + output_autograd_meta = f" egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs);" else: assert IsVectorTensorType(rtype) - output_autograd_meta = f" std::vector {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({outputs});\n" - output_autograd_meta += f" std::vector* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};" + output_autograd_meta = f" std::vector {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&{outputs});\n" + output_autograd_meta += f" std::vector* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};" else: # Tuple api_result if IsPlainTensorType(rtype): - outputs_autograd_meta = f" egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs[{pos}]);" + outputs_autograd_meta = f" egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);" else: assert IsVectorTensorType(rtype) - output_autograd_meta = f" std::vector {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs[{pos}]);\n" - output_autograd_meta += f" std::vector* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};" + output_autograd_meta = f" std::vector {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);\n" + output_autograd_meta += f" std::vector* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};" outputs_autograd_meta_list.append(output_autograd_meta) pass_stop_gradient_args_list.append(output_autograd_meta_name) @@ -622,8 +621,11 @@ def 
GenerateNodeCreationCodes(fwd_api_name, bwd_api_name, # SetTensorWrappers set_tensor_wrappers_list = [] - for name, (_, _, _) in backward_fwd_input_map.items(): - set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name});" + for name, (_, is_fwd_input, _) in backward_fwd_input_map.items(): + if is_fwd_input: + set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name}, true);" + else: + set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name}, false);" set_tensor_wrappers_list.append(set_tensor_wrappers) set_tensor_wrappers_str = "\n".join(set_tensor_wrappers_list) @@ -747,7 +749,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name, inputs_call_args_str = ", ".join(inputs_call_list) # Forward Full Logic - forward_call_str = f"auto api_result = {fwd_api_name}({inputs_call_args_str});" + forward_call_str = f"auto api_result = paddle::experimental::{fwd_api_name}({inputs_call_args_str});" # Get return type list & outputs num_outputs = len(forward_outputs_position_map.keys()) @@ -814,7 +816,7 @@ def GenerateNodeCCFile(filepath, node_definition_str): #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/eager/utils.h" #include "paddle/fluid/eager/api/utils/global_utils.h" -#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h" +#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h" """ file_contents += node_definition_str @@ -837,8 +839,8 @@ def GenerateNodeHFile(filepath, node_declaration_str): def GenerateForwardCCFile(filepath, forward_definition_str): file_contents = """ -#include "paddle/fluid/eager/api/generated/eager_generated/dygraph_forward_api.h" -#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h" +#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h" +#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h" #include "paddle/fluid/eager/api/utils/global_utils.h" #include "paddle/fluid/eager/legacy/op_runner.h" diff --git a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py index 56ec287561c564..fdb8529515d30c 100644 --- a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py +++ b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py @@ -15,9 +15,45 @@ import sys import os -if __name__ == "__main__": - assert len(sys.argv) == 2 - eager_dir = sys.argv[1] + +def GenerateFileStructureForFinalDygraph(eager_dir): + """ + paddle/fluid/eager + |- generated + | |- CMakeLists.txt + | | "add_subdirectory(forwards), add_subdirectory(backwards)" + | + | |- forwards + | |- "dygraph_functions.cc" + | |- "dygraph_functions.h" + | + | |- backwards + | |- "nodes.cc" + | |- "nodes.h" + """ + # Directory Generation + generated_dir = os.path.join(eager_dir, "api/generated/eager_generated") + forwards_dir = os.path.join(generated_dir, "forwards") + nodes_dir = os.path.join(generated_dir, "backwards") + dirs = [generated_dir, forwards_dir, nodes_dir] + for directory in dirs: + if not os.path.exists(directory): + os.mkdir(directory) + + # Empty files + dygraph_forward_api_h_path = os.path.join(generated_dir, + "dygraph_functions.h") + empty_files = [dygraph_forward_api_h_path] + empty_files.append(os.path.join(forwards_dir, "dygraph_functions.cc")) + empty_files.append(os.path.join(nodes_dir, "nodes.cc")) + empty_files.append(os.path.join(nodes_dir, "nodes.h")) + + for path in empty_files: + if not os.path.exists(path): + 
open(path, 'a').close() + + +def GenerateFileStructureForIntermediateDygraph(eager_dir): """ paddle/fluid/eager |- generated @@ -79,3 +115,10 @@ with open(generated_level_cmakelist_path, "w") as f: f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)") + + +if __name__ == "__main__": + assert len(sys.argv) == 2 + eager_dir = sys.argv[1] + GenerateFileStructureForIntermediateDygraph(eager_dir) + GenerateFileStructureForFinalDygraph(eager_dir) From 62b15566ee5d6fd59b1f25edd2a4575e1f3da7e8 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Wed, 26 Jan 2022 02:14:22 +0000 Subject: [PATCH 06/13] Added python-c code generation for final state Eager Dygraph --- .../eager_generated/backwards/CMakeLists.txt | 4 +- .../eager_generated/forwards/CMakeLists.txt | 4 +- .../eager/auto_code_generator/CMakeLists.txt | 2 +- .../final_state_generator/CMakeLists.txt | 10 + .../final_state_generator/eager_gen.py | 76 ++++-- .../final_state_generator/python_c_gen.py | 230 ++++++++++++++++++ paddle/fluid/eager/utils.cc | 8 +- paddle/fluid/pybind/CMakeLists.txt | 4 +- .../pybind/eager_op_function_generator.cc | 5 + paddle/fluid/pybind/op_function_common.cc | 218 ++++++++++------- paddle/fluid/pybind/op_function_common.h | 24 ++ 11 files changed, 473 insertions(+), 112 deletions(-) create mode 100644 paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt index e04d282748c0a6..8f4c2c36603260 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt @@ -1,3 +1,3 @@ cc_library(scale_node SRCS scale_node.cc DEPS global_utils pten pten_api grad_node_info) -#cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps}) -#add_dependencies(final_dygraph_node eager_final_state_codegen) +cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps}) +add_dependencies(final_dygraph_node eager_final_state_codegen) diff --git a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt index f682c27992db15..11871365265890 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt @@ -1,3 +1,3 @@ cc_library(eager_scale SRCS scale.cc DEPS pten_api pten autograd_meta scale_node) -#cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps}) -#add_dependencies(final_dygraph_function eager_final_state_codegen) +cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps}) +add_dependencies(final_dygraph_function eager_final_state_codegen) diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt index c504a126ddecae..668e60d857b9ca 100644 --- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt +++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt @@ -1,4 +1,4 @@ -#add_subdirectory(final_state_generator) +add_subdirectory(final_state_generator) set(EAGER_GENERETOR_DEPS ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} pybind proto_desc executor layer tracer engine imperative_profiler imperative_flag) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt 
b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt index 0a96cbc9c970ca..c6bca01205e19c 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt @@ -24,3 +24,13 @@ add_custom_target(eager_final_state_codegen COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_nodes_h_path} ${nodes_h_path} VERBATIM ) + +set(tmp_python_c_output_path "${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/tmp_eager_final_state_op_function_impl.h") +set(python_c_output_path "${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/eager_final_state_op_function_impl.h") +add_custom_target(eager_final_state_python_c_codegen + COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py" + "--api_yaml_path=${api_yaml_path}" + "--output_path=${tmp_python_c_output_path}" + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${tmp_python_c_output_path} ${python_c_output_path} + VERBATIM +) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py index 4782ca6b3b0e5f..4c9372a0b6c888 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py @@ -82,6 +82,14 @@ def RemoveConstAndReference(string): return ret +def GetGradNodeName(string): + return f"FinalGradNode{string}" + + +def GetForwardFunctionName(string): + return f"{string}_final_state_dygraph_function" + + def GetAutoGradMetaName(string): return f"{string}_autograd_meta" @@ -145,13 +153,13 @@ def ParseYamlArgs(string): def ParseYamlReturns(string): # Example: Tensor, Tensor - # list = [ [ret_type, orig_position], ...] + # list = [ ["", ret_type, orig_position], ...] 
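+    # Note: the empty leading element keeps the same [ret_name, ret_type,
+    # orig_position] layout as ParseYamlReturnsWithName, so downstream code can
+    # index return type and position uniformly whether or not names were given.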
    returns_list = []

    returns = [x.strip() for x in string.strip().split(",")]
    for i in range(len(returns)):
        ret = returns[i]
-        returns_list.append([ret, i])
+        returns_list.append(["", ret, i])

    return returns_list

@@ -260,8 +268,8 @@ def ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
        assert orig_attr_pos == forward_attr_pos

    for i in range(len(forward_returns_list)):
-        orig_return_type = orig_forward_returns_list[i][0]
-        orig_return_pos = orig_forward_returns_list[i][1]
+        orig_return_type = orig_forward_returns_list[i][1]
+        orig_return_pos = orig_forward_returns_list[i][2]
        forward_return_type = forward_returns_list[i][1]
        forward_return_pos = forward_returns_list[i][2]

@@ -452,13 +460,14 @@ def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map,
            RemoveConstAndReference(atype), saved_attr_name, default_val)
    # End: SetAttributes & Attribute Members

+    grad_node_name = GetGradNodeName(fwd_api_name)
    NODE_DECLARATION_TEMPLATE = """
-class GradNode{} : public egr::GradNodeBase {{
+class {} : public egr::GradNodeBase {{
 public:
-  GradNode{}() : egr::GradNodeBase() {{}}
-  GradNode{}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
+  {}() : egr::GradNodeBase() {{}}
+  {}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
      egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}}
-  ~GradNode{}() override = default;
+  ~{}() override = default;

  virtual std::vector<std::vector<egr::EagerTensor>> operator()(
      const std::vector<std::vector<egr::EagerTensor>>& grads) override;

@@ -476,7 +485,7 @@ class GradNode{} : public egr::GradNodeBase {{
 }};
 """
    node_declaration_str = NODE_DECLARATION_TEMPLATE.format(
-        forward_op_name, forward_op_name, forward_op_name, forward_op_name,
+        grad_node_name, grad_node_name, grad_node_name, grad_node_name,
        set_tensor_wrapper_methods_str, set_attribute_methods_str,
        tensor_wrapper_members_str, attribute_members_str)

@@ -503,10 +512,15 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
        grad_api_args[
            grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr) )"

-    for _, (_, fwd_position,
+    for _, (ttype, fwd_position,
            grad_api_position) in backward_grad_input_map.items():
-        grad_api_args[
-            grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}] )"
+        if IsPlainTensorType(ttype):
+            grad_api_args[
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}][0] )"
+        else:
+            assert IsVectorTensorType(ttype)
+            grad_api_args[
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}] )"

    for name, _, _, grad_api_position in backward_attrs_list:
        saved_attribute_name = GetSavedName(name)
@@ -531,8 +545,9 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
        returns_str += f"returns[{fwd_position}] = egr::EagerUtils::CreateEagerTensorFromTensor( grad_api_returns[{grad_api_position}] );\n"
    returns_str += f"return returns;\n"

+    grad_node_name = GetGradNodeName(fwd_api_name)
    FUNCTION_TEMPLATE = """
-std::vector<std::vector<egr::EagerTensor>> GradNode{}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
+std::vector<std::vector<egr::EagerTensor>> {}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
    // Call grad_api function
    auto grad_api_returns = paddle::experimental::{}({});
{}
@@ -540,7 +555,7 @@
"""

    node_definition_str = FUNCTION_TEMPLATE.format(
-        fwd_api_name, bwd_api_name, grad_api_args_str, returns_str)
+        grad_node_name, bwd_api_name, grad_api_args_str, returns_str)

    return node_definition_str

@@ -610,7 +625,8 @@
def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     # Node Construction
     num_bwd_inputs = len(backward_grad_input_map.keys())
     num_bwd_outputs = len(backward_grad_output_map.keys())
-    node_construction_str = f"        auto grad_node = std::make_shared<GradNode{fwd_api_name}>({num_bwd_inputs}, {num_bwd_outputs});"
+    grad_node_name = GetGradNodeName(fwd_api_name)
+    node_construction_str = f"        auto grad_node = std::make_shared<{grad_node_name}>({num_bwd_inputs}, {num_bwd_outputs});"

     # SetAttributes
     set_attributes_list = []
@@ -786,7 +802,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
                                       backward_grad_output_map, backward_attrs_list)

     FORWARD_FUNCTION_TEMPLATE = """
-{} {}_dygraph_function({}) {{
+{} {}({}) {{
     // Forward API Call
 {}

@@ -799,15 +815,34 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
 }}
 """
+    forward_function_name = GetForwardFunctionName(fwd_api_name)
     forward_function_str = FORWARD_FUNCTION_TEMPLATE.format(
-        returns_type_str, fwd_api_name, inputs_args_str, forward_call_str,
-        returns_str, node_creation_str)
-
-    forward_function_declaration_str = f"{returns_type_str} {fwd_api_name}_dygraph_function({inputs_args_str});"
+        returns_type_str, forward_function_name, inputs_args_str,
+        forward_call_str, returns_str, node_creation_str)
+    forward_function_declaration_str = f"{returns_type_str} {forward_function_name}({inputs_args_str});"

     return forward_function_str, forward_function_declaration_str


+def FakeMatmulGradAPI():
+    fake_matmul_grad_str = """
+namespace paddle {
+namespace experimental {
+    std::vector<std::vector<Tensor>> matmul_grad(const Tensor& x,
+                                                 const Tensor& y,
+                                                 const Tensor& out_grad,
+                                                 bool transpose_x,
+                                                 bool transpose_y) {
+        std::vector<std::vector<Tensor>> ret;
+        return ret;
+    }
+}
+}
+
+"""
+    return fake_matmul_grad_str
+
+
 def GenerateNodeCCFile(filepath, node_definition_str):
     file_contents = """
 #include "glog/logging.h"
@@ -819,6 +854,7 @@ def GenerateNodeCCFile(filepath, node_definition_str):
 #include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"

 """
+    file_contents += FakeMatmulGradAPI()
     file_contents += node_definition_str
     with open(filepath, 'a') as f:
         f.write(file_contents)
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
new file mode 100644
index 00000000000000..ea3837914d5292
--- /dev/null
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
@@ -0,0 +1,230 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
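+
+# python_c_gen.py emits the Python-C binding layer for the final-state dygraph
+# API: for each forward api entry in api.yaml that declares a 'backward'
+# counterpart, it generates a PyObject*-based wrapper that casts the positional
+# Python arguments into C++ values and dispatches to the matching
+# *_final_state_dygraph_function, plus the PyMethodDef entry registering it.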
+
+import os
+import argparse
+from eager_gen import ReadFwdFile, GetForwardFunctionName, ParseYamlForward, DetermineForwardPositionMap
+
+atype_to_parsing_function = {
+    "bool": "CastPyArg2Boolean",
+    "int": "CastPyArg2Int",
+    "long": "CastPyArg2Long",
+    "float": "CastPyArg2Float",
+    "string": "CastPyArg2String",
+    "bool[]": "CastPyArg2Booleans",
+    "int[]": "CastPyArg2Ints",
+    "long[]": "CastPyArg2Longs",
+    "float[]": "CastPyArg2Floats",
+    "double[]": "CastPyArg2Float64s",
+    "string[]": "CastPyArg2Strings"
+}
+
+atype_to_cxx_type = {
+    "bool": "bool",
+    "int": "int",
+    "long": "long",
+    "float": "float",
+    "string": "std::string",
+    "bool[]": "std::vector<bool>",
+    "int[]": "std::vector<int>",
+    "long[]": "std::vector<int64_t>",
+    "float[]": "std::vector<float>",
+    "double[]": "std::vector<double>",
+    "string[]": "std::vector<std::string>"
+}
+
+
+def ParseArguments():
+    parser = argparse.ArgumentParser(
+        description='Eager Code Generator Args Parser')
+    parser.add_argument('--api_yaml_path', type=str)
+    parser.add_argument('--output_path', type=str)
+
+    args = parser.parse_args()
+    return args
+
+
+def GetCxxType(atype):
+    if atype not in atype_to_cxx_type.keys():
+        assert False
+
+    return atype_to_cxx_type[atype]
+
+
+def FindParsingFunctionFromAttributeType(atype):
+    if atype not in atype_to_parsing_function.keys():
+        assert False
+
+    return atype_to_parsing_function[atype]
+
+
+def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map,
+                            forward_attrs_list, forward_outputs_position_map):
+    # forward_inputs_position_map = { "name" : [type, fwd_position] }
+    # forward_outputs_position_map = { "name" : [type, fwd_position] }
+    # forward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...]
+
+    # Get EagerTensor from args
+    # Get dygraph function call args
+    num_args = len(forward_inputs_position_map.keys()) + len(forward_attrs_list)
+    num_input_tensors = len(forward_inputs_position_map.keys())
+    dygraph_function_call_list = ["" for i in range(num_args)]
+    get_eager_tensor_str = ""
+    for name, (ttype, pos) in forward_inputs_position_map.items():
+        get_eager_tensor_str += f"    auto& {name} = GetEagerTensorFromArgs(\"{fwd_api_name}\", \"{name}\", args, {pos}, false);\n"
+        dygraph_function_call_list[pos] = f"{name}"
+
+    parse_attributes_str = "    paddle::framework::AttributeMap attrs;\n"
+    # Get Attributes
+    for name, atype, _, pos in forward_attrs_list:
+        parsing_function = FindParsingFunctionFromAttributeType(atype)
+        cxx_type = GetCxxType(atype)
+        key = f"{name}"
+
+        parse_attributes_str += f"    PyObject* {name}_obj = PyTuple_GET_ITEM(args, {pos});\n"
+        parse_attributes_str += f"    {cxx_type} {name} = {parsing_function}({name}_obj, \"{fwd_api_name}\", {pos});\n"
+
+        dygraph_function_call_list[pos] = f"{name}"
+    dygraph_function_call_str = ",".join(dygraph_function_call_list)
+
+    PYTHON_C_FUNCTION_TEMPLATE = """
+static PyObject * eager_final_state_api_{}(PyObject *self, PyObject *args, PyObject *kwargs)
+{{
+  PyThreadState *tstate = nullptr;
+  try
+  {{
+    // Get EagerTensors from args
+{}
+
+    // Parse Attributes
+{}
+
+    tstate = PyEval_SaveThread();
+
+    auto out = {}({});
+
+    PyEval_RestoreThread(tstate);
+    tstate = nullptr;
+    return ToPyObject(out);
+  }}
+  catch(...)
{{
+    if (tstate) {{
+      PyEval_RestoreThread(tstate);
+    }}
+    ThrowExceptionToPython(std::current_exception());
+    return nullptr;
+  }}
+}}
+
+"""
+    python_c_function_str = PYTHON_C_FUNCTION_TEMPLATE.format(
+        fwd_api_name, get_eager_tensor_str, parse_attributes_str,
+        GetForwardFunctionName(fwd_api_name), dygraph_function_call_str)
+
+    python_c_function_reg_str = f"{{\"final_state_{fwd_api_name}\", (PyCFunction)(void(*)(void))eager_final_state_api_{fwd_api_name}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {fwd_api_name} in dygraph.\"}}"
+
+    return python_c_function_str, python_c_function_reg_str
+
+
+def GeneratePythonCWrappers(python_c_function_str, python_c_function_reg_str):
+
+    PYTHON_C_WRAPPER_TEMPLATE = """
+#pragma once
+
+#include "pybind11/detail/common.h"
+#include "paddle/fluid/pybind/op_function_common.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
+#include "paddle/fluid/pybind/exception.h"
+#include <Python.h>
+
+namespace paddle {{
+namespace pybind {{
+
+{}
+
+static PyMethodDef EagerFinalStateMethods[] = {{
+    {}
+}};
+
+}} // namespace pybind
+}} // namespace paddle
+
+"""
+    python_c_str = PYTHON_C_WRAPPER_TEMPLATE.format(python_c_function_str,
+                                                    python_c_function_reg_str)
+
+    return python_c_str
+
+
+def GeneratePythonCFile(filepath, python_c_str):
+    with open(filepath, 'a') as f:
+        f.write(python_c_str)
+
+
+if __name__ == "__main__":
+    args = ParseArguments()
+
+    api_yaml_path = args.api_yaml_path
+    fwd_api_list = ReadFwdFile(api_yaml_path)
+
+    python_c_function_list = []
+    python_c_function_reg_list = []
+    for fwd_api in fwd_api_list:
+        # We only generate Ops with grad
+        if 'backward' not in fwd_api.keys():
+            continue
+
+        assert 'api' in fwd_api.keys()
+        assert 'args' in fwd_api.keys()
+        assert 'output' in fwd_api.keys()
+        assert 'backward' in fwd_api.keys()
+
+        fwd_api_name = fwd_api['api']
+        fwd_args_str = fwd_api['args']
+        fwd_returns_str = fwd_api['output']
+
+        # Collect Original Forward Inputs/Outputs and then perform validation checks
+        forward_inputs_list, forward_attrs_list, forward_returns_list = ParseYamlForward(
+            fwd_args_str, fwd_returns_str)
+        print("Parsed Original Forward Inputs List: ", forward_inputs_list)
+        print("Parsed Original Forward Attrs List: ", forward_attrs_list)
+        print("Parsed Original Forward Returns List: ", forward_returns_list)
+
+        forward_inputs_position_map, forward_outputs_position_map = DetermineForwardPositionMap(
+            forward_inputs_list, forward_returns_list)
+        print("Generated Forward Input Position Map: ",
+              forward_inputs_position_map)
+        print("Generated Forward Output Position Map: ",
+              forward_outputs_position_map)
+
+        python_c_function_str, python_c_function_reg_str = GeneratePythonCFunction(
+            fwd_api_name, forward_inputs_position_map, forward_attrs_list,
+            forward_outputs_position_map)
+        python_c_function_list.append(python_c_function_str)
+        python_c_function_reg_list.append(python_c_function_reg_str)
+        print("Generated Python-C Function: ", python_c_function_str)
+
+    python_c_functions_str = "\n".join(python_c_function_list)
+    python_c_functions_reg_str = ",\n".join(python_c_function_reg_list)
+
+    python_c_str = GeneratePythonCWrappers(python_c_functions_str,
+                                           python_c_functions_reg_str)
+    print("Generated Python-C Codes: ", python_c_str)
+
+    output_path = args.output_path
+    for path in [output_path]:
+        if os.path.exists(path):
+            os.remove(path)
+
+    GeneratePythonCFile(output_path, python_c_str)
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index
962f866456579f..349a9d18474e1b 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -288,7 +288,9 @@ void EagerUtils::CheckAndRetainGrad(

 paddle::experimental::Tensor EagerUtils::SyncToPtenTensors(
     const egr::EagerTensor& tensor) {
-  const_cast<egr::EagerTensor*>(&tensor)->SyncToTensor();
+  if (!tensor.initialized()) {
+    const_cast<egr::EagerTensor*>(&tensor)->SyncToTensor();
+  }
   return *tensor.Tensor().get();
 }

@@ -298,7 +300,9 @@ std::vector<paddle::experimental::Tensor> EagerUtils::SyncToPtenTensors(
   size_t num = tensors.size();
   res.reserve(num);
   for (size_t i = 0; i < num; i++) {
-    const_cast<egr::EagerTensor*>(&(tensors[i]))->SyncToTensor();
+    if (!tensors[i].initialized()) {
+      const_cast<egr::EagerTensor*>(&(tensors[i]))->SyncToTensor();
+    }
     res.push_back(*tensors[i].Tensor().get());
   }
   return res;
diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index 1df77c78a419bb..4feba4ab19b785 100644
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -151,7 +151,7 @@ if(WITH_PYTHON)
     set(tmp_eager_impl_file ${eager_impl_file}.tmp)

     set(OP_IMPL_DEPS op_function_generator)
-    set(EAGER_OP_IMPL_DEPS eager_op_function_generator)
+    set(EAGER_OP_IMPL_DEPS eager_op_function_generator eager_final_state_python_c_codegen)

     if(WIN32)
         if("${CMAKE_GENERATOR}" STREQUAL "Ninja")
@@ -275,7 +275,7 @@ if(WITH_PYTHON)

   if(NOT ON_INFER)
     cc_library(paddle_eager SRCS eager.cc eager_functions.cc eager_method.cc eager_properties.cc eager_utils.cc
-    DEPS eager_api autograd_meta backward grad_node_info pten op_function_common dygraph_function dygraph_node accumulation_node global_utils utils python)
+    DEPS eager_api autograd_meta backward grad_node_info pten op_function_common final_dygraph_function final_dygraph_node dygraph_function dygraph_node accumulation_node global_utils utils python)
     add_dependencies(paddle_eager eager_codegen)
     add_dependencies(paddle_eager eager_op_function_generator_cmd)
     list(APPEND PYBIND_DEPS paddle_eager)
diff --git a/paddle/fluid/pybind/eager_op_function_generator.cc b/paddle/fluid/pybind/eager_op_function_generator.cc
index 090604ab4ee1a1..34acff7efd19d7 100644
--- a/paddle/fluid/pybind/eager_op_function_generator.cc
+++ b/paddle/fluid/pybind/eager_op_function_generator.cc
@@ -393,6 +393,7 @@ int main(int argc, char* argv[]) {

   std::vector<std::string> headers{
       "\"pybind11/detail/common.h\"",
+      "\"paddle/fluid/pybind/eager_final_state_op_function_impl.h\"",
       "\"paddle/fluid/pybind/op_function_common.h\"",
       "\"paddle/fluid/eager/api/generated/fluid_generated/"
       "dygraph_forward_api.h\"",
@@ -441,6 +442,10 @@ int main(int argc, char* argv[]) {
       << "    PADDLE_THROW(platform::errors::Fatal (\"Add functions to "
          "core.eager.ops failed!\"));\n"
       << "  }\n\n"
+      << "  if (PyModule_AddFunctions(m.ptr(), EagerFinalStateMethods) < 0) {\n"
+      << "    PADDLE_THROW(platform::errors::Fatal (\"Add functions to "
+         "core.eager.ops failed!\"));\n"
+      << "  }\n\n"
       << "}\n\n"
       << "} // namespace pybind\n"
       << "} // namespace paddle\n";
diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index 3ad4994a590f72..09c3cea398b2ae 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -100,17 +100,15 @@ bool PyObject_CheckFloatOrToFloat(PyObject** obj) {

 bool PyObject_CheckString(PyObject* obj) { return PyUnicode_Check(obj); }

-void CastPyArg2AttrBoolean(PyObject* obj,
-                           paddle::framework::AttributeMap& attrs,  // NOLINT
-                           const std::string& key, const std::string& op_type,
-                           ssize_t arg_pos) {
+bool CastPyArg2Boolean(PyObject* obj, const std::string& op_type,
+
ssize_t arg_pos) { if (obj == Py_None) { - attrs[key] = false; // To be compatible with QA integration testing. Some - // test case pass in None. + return false; // To be compatible with QA integration testing. Some + // test case pass in None. } else if (obj == Py_True) { - attrs[key] = true; + return true; } else if (obj == Py_False) { - attrs[key] = false; + return false; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -118,62 +116,89 @@ void CastPyArg2AttrBoolean(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return false; +} + +void CastPyArg2AttrBoolean(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Boolean(obj, op_type, arg_pos); +} + +int CastPyArg2Int(PyObject* obj, const std::string& op_type, ssize_t arg_pos) { + if (PyObject_CheckLongOrToLong(&obj)) { + return (int)PyLong_AsLong(obj); // NOLINT + } else { + PADDLE_THROW(platform::errors::InvalidArgument( + "%s(): argument (position %d) must be " + "int, but got %s", + op_type, arg_pos + 1, + ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT + } + + return 0; } void CastPyArg2AttrInt(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, ssize_t arg_pos) { + attrs[key] = CastPyArg2Int(obj, op_type, arg_pos); +} + +int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { if (PyObject_CheckLongOrToLong(&obj)) { - attrs[key] = (int)PyLong_AsLong(obj); // NOLINT + return (int64_t)PyLong_AsLong(obj); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " - "int, but got %s", + "long, but got %s", op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return 0; } void CastPyArg2AttrLong(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, ssize_t arg_pos) { - if (PyObject_CheckLongOrToLong(&obj)) { - attrs[key] = (int64_t)PyLong_AsLong(obj); // NOLINT + attrs[key] = CastPyArg2Long(obj, op_type, arg_pos); +} + +float CastPyArg2Float(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + if (PyObject_CheckFloatOrToFloat(&obj)) { + return (float)PyFloat_AsDouble(obj); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " - "long, but got %s", + "float, but got %s", op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return 0.0; } void CastPyArg2AttrFloat(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, ssize_t arg_pos) { - if (PyObject_CheckFloatOrToFloat(&obj)) { - attrs[key] = (float)PyFloat_AsDouble(obj); // NOLINT - } else { - PADDLE_THROW(platform::errors::InvalidArgument( - "%s(): argument (position %d) must be " - "float, but got %s", - op_type, arg_pos + 1, - ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT - } + attrs[key] = CastPyArg2Float(obj, op_type, arg_pos); } -void CastPyArg2AttrString(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +std::string CastPyArg2String(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { if (PyObject_CheckString(obj)) { Py_ssize_t size; const char* data; data = 
PyUnicode_AsUTF8AndSize(obj, &size); - attrs[key] = std::string(data, (size_t)size); // NOLINT + return std::string(data, (size_t)size); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -181,16 +206,23 @@ void CastPyArg2AttrString(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return ""; } -void CastPyArg2AttrBooleans(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrString(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2String(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Booleans(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckBool(&item)) { @@ -204,11 +236,9 @@ void CastPyArg2AttrBooleans(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckBool(&item)) { @@ -222,7 +252,6 @@ void CastPyArg2AttrBooleans(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -230,16 +259,23 @@ void CastPyArg2AttrBooleans(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrInts(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrBooleans(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Booleans(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Ints(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -253,11 +289,9 @@ void CastPyArg2AttrInts(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -271,11 +305,9 @@ void CastPyArg2AttrInts(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -289,7 +321,6 @@ void CastPyArg2AttrInts(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -297,16 +328,23 @@ void CastPyArg2AttrInts(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrLongs(PyObject* 
obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrInts(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Ints(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Longs(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -320,11 +358,9 @@ void CastPyArg2AttrLongs(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -338,11 +374,9 @@ void CastPyArg2AttrLongs(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -356,7 +390,6 @@ void CastPyArg2AttrLongs(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -364,16 +397,23 @@ void CastPyArg2AttrLongs(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrFloats(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrLongs(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Longs(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Floats(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -387,11 +427,9 @@ void CastPyArg2AttrFloats(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -405,11 +443,9 @@ void CastPyArg2AttrFloats(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -423,7 +459,6 @@ void CastPyArg2AttrFloats(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -431,16 +466,24 @@ void CastPyArg2AttrFloats(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrFloat64s(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const 
std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrFloats(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Floats(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Float64s(PyObject* obj, + const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -454,11 +497,9 @@ void CastPyArg2AttrFloat64s(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -472,11 +513,9 @@ void CastPyArg2AttrFloat64s(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -490,7 +529,6 @@ void CastPyArg2AttrFloat64s(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -498,16 +536,24 @@ void CastPyArg2AttrFloat64s(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrStrings(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrFloat64s(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Float64s(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Strings(PyObject* obj, + const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckString(item)) { @@ -524,11 +570,9 @@ void CastPyArg2AttrStrings(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckString(item)) { @@ -545,7 +589,6 @@ void CastPyArg2AttrStrings(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -553,6 +596,15 @@ void CastPyArg2AttrStrings(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; +} + +void CastPyArg2AttrStrings(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Strings(obj, op_type, arg_pos); } void CastPyArg2AttrBlock(PyObject* obj, diff --git a/paddle/fluid/pybind/op_function_common.h b/paddle/fluid/pybind/op_function_common.h index 9dc3a71a6ccf94..7ead9852667252 100644 --- a/paddle/fluid/pybind/op_function_common.h +++ b/paddle/fluid/pybind/op_function_common.h @@ -43,6 +43,30 @@ bool 
PyObject_CheckFloatOrToFloat(PyObject** obj);

 bool PyObject_CheckString(PyObject* obj);

+bool CastPyArg2Boolean(PyObject* obj, const std::string& op_type,
+                       ssize_t arg_pos);
+int CastPyArg2Int(PyObject* obj, const std::string& op_type, ssize_t arg_pos);
+int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
+                       ssize_t arg_pos);
+float CastPyArg2Float(PyObject* obj, const std::string& op_type,
+                      ssize_t arg_pos);
+std::string CastPyArg2String(PyObject* obj, const std::string& op_type,
+                             ssize_t arg_pos);
+std::vector<bool> CastPyArg2Booleans(PyObject* obj, const std::string& op_type,
+                                     ssize_t arg_pos);
+std::vector<int> CastPyArg2Ints(PyObject* obj, const std::string& op_type,
+                                ssize_t arg_pos);
+std::vector<int64_t> CastPyArg2Longs(PyObject* obj, const std::string& op_type,
+                                     ssize_t arg_pos);
+std::vector<float> CastPyArg2Floats(PyObject* obj, const std::string& op_type,
+                                    ssize_t arg_pos);
+std::vector<double> CastPyArg2Float64s(PyObject* obj,
+                                       const std::string& op_type,
+                                       ssize_t arg_pos);
+std::vector<std::string> CastPyArg2Strings(PyObject* obj,
+                                           const std::string& op_type,
+                                           ssize_t arg_pos);
+
 void CastPyArg2AttrBoolean(PyObject* obj,
                            paddle::framework::AttributeMap& attrs,  // NOLINT
                            const std::string& key, const std::string& op_type,

From 64e2421f6eef1fc52d45db325f43e1966f2d19b5 Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Wed, 26 Jan 2022 11:53:15 +0000
Subject: [PATCH 07/13] Fixed minor issue

---
 .../final_state_generator/eager_gen.py        | 21 +------------------
 1 file changed, 1 insertion(+), 20 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index 4c9372a0b6c888..556e0f1b3252e0 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -824,29 +824,11 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     return forward_function_str, forward_function_declaration_str

-def FakeMatmulGradAPI():
-    fake_matmul_grad_str = """
-namespace paddle {
-namespace experimental {
-    std::vector<std::vector<Tensor>> matmul_grad(const Tensor& x,
-                                                 const Tensor& y,
-                                                 const Tensor& out_grad,
-                                                 bool transpose_x,
-                                                 bool transpose_y) {
-        std::vector<std::vector<Tensor>> ret;
-        return ret;
-    }
-}
-}
-
-"""
-    return fake_matmul_grad_str
-
-
 def GenerateNodeCCFile(filepath, node_definition_str):
     file_contents = """
 #include "glog/logging.h"
 #include "paddle/pten/api/all.h"
+#include "paddle/pten/api/backward/backward_api.h"
 #include "paddle/fluid/imperative/tracer.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/eager/utils.h"
@@ -854,7 +836,6 @@ def GenerateNodeCCFile(filepath, node_definition_str):
 #include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"

 """
-    file_contents += FakeMatmulGradAPI()
     file_contents += node_definition_str
     with open(filepath, 'a') as f:
         f.write(file_contents)

From 033482dddfa3d893fe3134eefce3369b8b680acb Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Thu, 27 Jan 2022 08:29:18 +0000
Subject: [PATCH 08/13] Fixed yaml.load() method failure

---
 .../auto_code_generator/final_state_generator/eager_gen.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index 556e0f1b3252e0..fd5c3980379459 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++
b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py @@ -103,13 +103,13 @@ def GetAutoGradMetaVectorName(string): ###################### def ReadFwdFile(filepath): f = open(filepath, 'r') - contents = yaml.load(f) + contents = yaml.load(f, Loader=yaml.FullLoader) return contents def ReadBwdFile(filepath): f = open(filepath, 'r') - contents = yaml.load(f) + contents = yaml.load(f, Loader=yaml.FullLoader) ret = {} for content in contents: assert 'backward_api' in content.keys() From fb7fcf63b25fa5b115a88ed95e29782f90dfe62c Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Sat, 29 Jan 2022 03:30:13 +0000 Subject: [PATCH 09/13] Fixed minor issues --- .../api/generated/eager_generated/backwards/CMakeLists.txt | 3 +++ .../api/generated/eager_generated/forwards/CMakeLists.txt | 3 +++ 2 files changed, 6 insertions(+) diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt index 8f4c2c36603260..e3fafb265ad988 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt @@ -1,3 +1,6 @@ cc_library(scale_node SRCS scale_node.cc DEPS global_utils pten pten_api grad_node_info) + +if(NOT ON_INFER) cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps}) add_dependencies(final_dygraph_node eager_final_state_codegen) +endif() diff --git a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt index 11871365265890..8ede139ddc0446 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt +++ b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt @@ -1,3 +1,6 @@ cc_library(eager_scale SRCS scale.cc DEPS pten_api pten autograd_meta scale_node) + +if(NOT ON_INFER) cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps}) add_dependencies(final_dygraph_function eager_final_state_codegen) +endif() From 7b5eab7f5f18159ae0b916b47eb14aabb12d6425 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Sat, 29 Jan 2022 07:33:38 +0000 Subject: [PATCH 10/13] Refactored Python-C Attributes Parsing Functions --- paddle/fluid/eager/utils.cc | 43 +++++ paddle/fluid/pybind/op_function_common.cc | 218 ++++++++++++++-------- paddle/fluid/pybind/op_function_common.h | 24 +++ 3 files changed, 202 insertions(+), 83 deletions(-) diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc index 98e6a8fc5d28e4..3093fec90efc67 100644 --- a/paddle/fluid/eager/utils.cc +++ b/paddle/fluid/eager/utils.cc @@ -286,4 +286,47 @@ void EagerUtils::CheckAndRetainGrad( } } +paddle::experimental::Tensor EagerUtils::SyncToPtenTensors( + const egr::EagerTensor& tensor) { + if (!tensor.initialized()) { + const_cast(&tensor)->SyncToTensor(); + } + return *tensor.Tensor().get(); +} + +std::vector EagerUtils::SyncToPtenTensors( + const std::vector& tensors) { + std::vector res; + size_t num = tensors.size(); + res.reserve(num); + for (size_t i = 0; i < num; i++) { + if (!tensors[i].initialized()) { + const_cast(&(tensors[i]))->SyncToTensor(); + } + res.push_back(*tensors[i].Tensor().get()); + } + return res; +} + +egr::EagerTensor EagerUtils::CreateEagerTensorFromTensor( + const paddle::experimental::Tensor& tensor) { + egr::EagerTensor ret; + ret.set_tensor(std::make_shared(tensor)); + return ret; +} + +std::vector 
EagerUtils::CreateEagerTensorFromTensor( + const std::vector& tensors) { + std::vector res; + size_t num = tensors.size(); + res.reserve(num); + for (size_t i = 0; i < num; i++) { + egr::EagerTensor tmp; + tmp.set_tensor(std::make_shared(tensors[i])); + res.emplace_back(std::move(tmp)); + } + + return res; +} + } // namespace egr diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc index 3ad4994a590f72..09c3cea398b2ae 100644 --- a/paddle/fluid/pybind/op_function_common.cc +++ b/paddle/fluid/pybind/op_function_common.cc @@ -100,17 +100,15 @@ bool PyObject_CheckFloatOrToFloat(PyObject** obj) { bool PyObject_CheckString(PyObject* obj) { return PyUnicode_Check(obj); } -void CastPyArg2AttrBoolean(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +bool CastPyArg2Boolean(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { if (obj == Py_None) { - attrs[key] = false; // To be compatible with QA integration testing. Some - // test case pass in None. + return false; // To be compatible with QA integration testing. Some + // test case pass in None. } else if (obj == Py_True) { - attrs[key] = true; + return true; } else if (obj == Py_False) { - attrs[key] = false; + return false; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -118,62 +116,89 @@ void CastPyArg2AttrBoolean(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return false; +} + +void CastPyArg2AttrBoolean(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Boolean(obj, op_type, arg_pos); +} + +int CastPyArg2Int(PyObject* obj, const std::string& op_type, ssize_t arg_pos) { + if (PyObject_CheckLongOrToLong(&obj)) { + return (int)PyLong_AsLong(obj); // NOLINT + } else { + PADDLE_THROW(platform::errors::InvalidArgument( + "%s(): argument (position %d) must be " + "int, but got %s", + op_type, arg_pos + 1, + ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT + } + + return 0; } void CastPyArg2AttrInt(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, ssize_t arg_pos) { + attrs[key] = CastPyArg2Int(obj, op_type, arg_pos); +} + +int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { if (PyObject_CheckLongOrToLong(&obj)) { - attrs[key] = (int)PyLong_AsLong(obj); // NOLINT + return (int64_t)PyLong_AsLong(obj); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " - "int, but got %s", + "long, but got %s", op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return 0; } void CastPyArg2AttrLong(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, ssize_t arg_pos) { - if (PyObject_CheckLongOrToLong(&obj)) { - attrs[key] = (int64_t)PyLong_AsLong(obj); // NOLINT + attrs[key] = CastPyArg2Long(obj, op_type, arg_pos); +} + +float CastPyArg2Float(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + if (PyObject_CheckFloatOrToFloat(&obj)) { + return (float)PyFloat_AsDouble(obj); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " - "long, but got %s", + "float, but got %s", op_type, arg_pos + 1, 
((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return 0.0; } void CastPyArg2AttrFloat(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, ssize_t arg_pos) { - if (PyObject_CheckFloatOrToFloat(&obj)) { - attrs[key] = (float)PyFloat_AsDouble(obj); // NOLINT - } else { - PADDLE_THROW(platform::errors::InvalidArgument( - "%s(): argument (position %d) must be " - "float, but got %s", - op_type, arg_pos + 1, - ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT - } + attrs[key] = CastPyArg2Float(obj, op_type, arg_pos); } -void CastPyArg2AttrString(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +std::string CastPyArg2String(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { if (PyObject_CheckString(obj)) { Py_ssize_t size; const char* data; data = PyUnicode_AsUTF8AndSize(obj, &size); - attrs[key] = std::string(data, (size_t)size); // NOLINT + return std::string(data, (size_t)size); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -181,16 +206,23 @@ void CastPyArg2AttrString(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return ""; } -void CastPyArg2AttrBooleans(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrString(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2String(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Booleans(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckBool(&item)) { @@ -204,11 +236,9 @@ void CastPyArg2AttrBooleans(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckBool(&item)) { @@ -222,7 +252,6 @@ void CastPyArg2AttrBooleans(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -230,16 +259,23 @@ void CastPyArg2AttrBooleans(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrInts(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrBooleans(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Booleans(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Ints(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -253,11 +289,9 @@ void CastPyArg2AttrInts(PyObject* obj, i)); } } - attrs[key] = value; } 
else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -271,11 +305,9 @@ void CastPyArg2AttrInts(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -289,7 +321,6 @@ void CastPyArg2AttrInts(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -297,16 +328,23 @@ void CastPyArg2AttrInts(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrLongs(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrInts(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Ints(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Longs(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -320,11 +358,9 @@ void CastPyArg2AttrLongs(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -338,11 +374,9 @@ void CastPyArg2AttrLongs(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckLongOrToLong(&item)) { @@ -356,7 +390,6 @@ void CastPyArg2AttrLongs(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -364,16 +397,23 @@ void CastPyArg2AttrLongs(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrFloats(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrLongs(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Longs(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Floats(PyObject* obj, const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -387,11 +427,9 @@ void CastPyArg2AttrFloats(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector 
value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -405,11 +443,9 @@ void CastPyArg2AttrFloats(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -423,7 +459,6 @@ void CastPyArg2AttrFloats(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -431,16 +466,24 @@ void CastPyArg2AttrFloats(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrFloat64s(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrFloats(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Floats(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Float64s(PyObject* obj, + const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -454,11 +497,9 @@ void CastPyArg2AttrFloat64s(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -472,11 +513,9 @@ void CastPyArg2AttrFloat64s(PyObject* obj, i)); } } - attrs[key] = value; } else if (PySequence_Check(obj)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PySequence_GetItem(obj, i); if (PyObject_CheckFloatOrToFloat(&item)) { @@ -490,7 +529,6 @@ void CastPyArg2AttrFloat64s(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -498,16 +536,24 @@ void CastPyArg2AttrFloat64s(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; } -void CastPyArg2AttrStrings(PyObject* obj, - paddle::framework::AttributeMap& attrs, // NOLINT - const std::string& key, const std::string& op_type, - ssize_t arg_pos) { +void CastPyArg2AttrFloat64s(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Float64s(obj, op_type, arg_pos); +} + +std::vector CastPyArg2Strings(PyObject* obj, + const std::string& op_type, + ssize_t arg_pos) { + std::vector value; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_CheckString(item)) { @@ -524,11 +570,9 @@ void CastPyArg2AttrStrings(PyObject* obj, i)); } } - attrs[key] = value; } else if (PyTuple_Check(obj)) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; - std::vector value; for (Py_ssize_t i = 0; i < len; i++) { item = 
PyTuple_GetItem(obj, i); if (PyObject_CheckString(item)) { @@ -545,7 +589,6 @@ void CastPyArg2AttrStrings(PyObject* obj, i)); } } - attrs[key] = value; } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -553,6 +596,15 @@ void CastPyArg2AttrStrings(PyObject* obj, op_type, arg_pos + 1, ((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT } + + return value; +} + +void CastPyArg2AttrStrings(PyObject* obj, + paddle::framework::AttributeMap& attrs, // NOLINT + const std::string& key, const std::string& op_type, + ssize_t arg_pos) { + attrs[key] = CastPyArg2Strings(obj, op_type, arg_pos); } void CastPyArg2AttrBlock(PyObject* obj, diff --git a/paddle/fluid/pybind/op_function_common.h b/paddle/fluid/pybind/op_function_common.h index 9dc3a71a6ccf94..7ead9852667252 100644 --- a/paddle/fluid/pybind/op_function_common.h +++ b/paddle/fluid/pybind/op_function_common.h @@ -43,6 +43,30 @@ bool PyObject_CheckFloatOrToFloat(PyObject** obj); bool PyObject_CheckString(PyObject* obj); +bool CastPyArg2Boolean(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +int CastPyArg2Int(PyObject* obj, const std::string& op_type, ssize_t arg_pos); +int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +float CastPyArg2Float(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +std::string CastPyArg2String(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +std::vector CastPyArg2Booleans(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +std::vector CastPyArg2Ints(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +std::vector CastPyArg2Longs(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +std::vector CastPyArg2Floats(PyObject* obj, const std::string& op_type, + ssize_t arg_pos); +std::vector CastPyArg2Float64s(PyObject* obj, + const std::string& op_type, + ssize_t arg_pos); +std::vector CastPyArg2Strings(PyObject* obj, + const std::string& op_type, + ssize_t arg_pos); + void CastPyArg2AttrBoolean(PyObject* obj, paddle::framework::AttributeMap& attrs, // NOLINT const std::string& key, const std::string& op_type, From 25245ef52366d64dad09f0444fe75ac0e422ebf8 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Tue, 8 Feb 2022 02:10:22 +0000 Subject: [PATCH 11/13] Fixed minor issue with Python-C AddFunctions --- .../auto_code_generator/final_state_generator/python_c_gen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py index ea3837914d5292..bfbd93839cdea4 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py @@ -215,6 +215,7 @@ def GeneratePythonCFile(filepath, python_c_str): python_c_function_reg_list.append(python_c_function_reg_str) print("Generated Python-C Function: ", python_c_function_str) + python_c_function_reg_list.append("{nullptr,nullptr,0,nullptr}") python_c_functions_str = "\n".join(python_c_function_list) python_c_functions_reg_str = ",\n".join(python_c_function_reg_list) From 6f54c3d4cb4eb1c2816e372dd47c7687686e1383 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Wed, 9 Feb 2022 11:02:36 +0000 Subject: [PATCH 12/13] Fixed issues from merge --- .../auto_code_generator/final_state_generator/python_c_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py index bfbd93839cdea4..2d79b73b263406 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py @@ -82,7 +82,7 @@ def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map, dygraph_function_call_list = ["" for i in range(num_args)] get_eager_tensor_str = "" for name, (ttype, pos) in forward_inputs_position_map.items(): - get_eager_tensor_str += f" auto& {name} = GetEagerTensorFromArgs(\"{fwd_api_name}\", \"{name}\", args, {pos}, false);\n" + get_eager_tensor_str += f" auto& {name} = GetEagerTensorPtrFromArgs(\"{fwd_api_name}\", \"{name}\", args, {pos}, false);\n" dygraph_function_call_list[pos] = f"{name}" parse_attributes_str = " paddle::framework::AttributeMap attrs;\n" From a92bfd268c97debf3ec2f210a6064b8687ea468e Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Thu, 10 Feb 2022 01:48:34 +0000 Subject: [PATCH 13/13] Fixed merge issues --- .../auto_code_generator/final_state_generator/python_c_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py index 2d79b73b263406..60b615b50dae71 100644 --- a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py +++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py @@ -82,7 +82,7 @@ def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map, dygraph_function_call_list = ["" for i in range(num_args)] get_eager_tensor_str = "" for name, (ttype, pos) in forward_inputs_position_map.items(): - get_eager_tensor_str += f" auto& {name} = GetEagerTensorPtrFromArgs(\"{fwd_api_name}\", \"{name}\", args, {pos}, false);\n" + get_eager_tensor_str += f" auto& {name} = GetTensorFromArgs(\"{fwd_api_name}\", \"{name}\", args, {pos}, false);\n" dygraph_function_call_list[pos] = f"{name}" parse_attributes_str = " paddle::framework::AttributeMap attrs;\n"
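
For orientation, here is a sketch of the wrapper this series ends up producing: hand-substituting a hypothetical yaml entry `matmul(const Tensor& x, const Tensor& y, bool transpose_x, bool transpose_y)` into PYTHON_C_FUNCTION_TEMPLATE with the patch-13 state of python_c_gen.py yields roughly the code below. The names matmul, x, y, transpose_x and transpose_y are illustrative, and the exact signatures of GetTensorFromArgs, ToPyObject and matmul_final_state_dygraph_function come from the generated headers and are not verified here.

static PyObject * eager_final_state_api_matmul(PyObject *self, PyObject *args, PyObject *kwargs)
{
  PyThreadState *tstate = nullptr;
  try
  {
    // Get EagerTensors from args
    auto& x = GetTensorFromArgs("matmul", "x", args, 0, false);
    auto& y = GetTensorFromArgs("matmul", "y", args, 1, false);

    // Parse Attributes
    paddle::framework::AttributeMap attrs;  // emitted by the generator, unused on this path
    PyObject* transpose_x_obj = PyTuple_GET_ITEM(args, 2);
    bool transpose_x = CastPyArg2Boolean(transpose_x_obj, "matmul", 2);
    PyObject* transpose_y_obj = PyTuple_GET_ITEM(args, 3);
    bool transpose_y = CastPyArg2Boolean(transpose_y_obj, "matmul", 3);

    // Release the GIL around the C++ forward call
    tstate = PyEval_SaveThread();

    auto out = matmul_final_state_dygraph_function(x,y,transpose_x,transpose_y);

    PyEval_RestoreThread(tstate);
    tstate = nullptr;
    return ToPyObject(out);
  }
  catch(...) {
    if (tstate) {
      PyEval_RestoreThread(tstate);
    }
    ThrowExceptionToPython(std::current_exception());
    return nullptr;
  }
}

The matching registration entry lands in EagerFinalStateMethods, terminated by the {nullptr,nullptr,0,nullptr} sentinel added in patch 11, which PyMethodDef tables require so that PyModule_AddFunctions can find the end of the array:

  {"final_state_matmul", (PyCFunction)(void(*)(void))eager_final_state_api_matmul, METH_VARARGS | METH_KEYWORDS, "C++ interface function for matmul in dygraph."},
  {nullptr,nullptr,0,nullptr}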