diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt
index 1084f0ec573c6..e04d282748c0a 100644
--- a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt
+++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt
@@ -1 +1,3 @@
 cc_library(scale_node SRCS scale_node.cc DEPS global_utils pten pten_api grad_node_info)
+#cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps})
+#add_dependencies(final_dygraph_node eager_final_state_codegen)
diff --git a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt
index ed04e0b6f5a0c..f682c27992db1 100644
--- a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt
+++ b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt
@@ -1 +1,3 @@
 cc_library(eager_scale SRCS scale.cc DEPS pten_api pten autograd_meta scale_node)
+#cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps})
+#add_dependencies(final_dygraph_function eager_final_state_codegen)
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt
index 56ba4acc62b53..0a96cbc9c970c 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt
@@ -2,13 +2,14 @@ set(api_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml")
 set(backward_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml")
 set(tmp_forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.cc")
 set(tmp_forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.h")
-set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.cc")
-set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.h")
+set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.cc")
+set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.h")
 set(forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.cc")
 set(forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h")
-set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.cc")
-set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.h")
+set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.cc")
+set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h")
 
+message(STATUS "Final State Eager CodeGen")
 add_custom_target(eager_final_state_codegen
     COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py" 
             "--api_yaml_path=${api_yaml_path}"
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index 0031d47a383e4..97756b6f0e146 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -15,6 +15,7 @@
 import yaml
 import re
 import argparse
+import os
 
 
 def ParseArguments():
@@ -71,6 +72,24 @@ def GetConstReference(string):
     return ret
 
 
+def RemoveConstAndReference(string):
+    ret = string
+    if string.startswith("const "):
+        ret = ret[6:]
+    if string.endswith("&"):
+        ret = ret[:-1]
+
+    return ret
+
+
+def GetGradNodeName(string):
+    return f"FinalGradNode{string}"
+
+
+def GetForwardFunctionName(string):
+    return f"{string}_final_state_dygraph_function"
+
+
 def GetAutoGradMetaName(string):
     return f"{string}_autograd_meta"
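The three helpers added above are plain string transforms; a minimal sketch of what they produce, using the hypothetical api name `matmul_v2`:

```python
# Hypothetical api name; the results follow directly from the helpers above.
RemoveConstAndReference("const paddle::experimental::Tensor&")
# -> "paddle::experimental::Tensor"
GetGradNodeName("matmul_v2")         # -> "FinalGradNodematmul_v2"
GetForwardFunctionName("matmul_v2")  # -> "matmul_v2_final_state_dygraph_function"
```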
 
@@ -84,17 +103,17 @@ def GetAutoGradMetaVectorName(string):
 ######################
 def ReadFwdFile(filepath):
     f = open(filepath, 'r')
-    contents = yaml.load(f)
+    contents = yaml.load(f, Loader=yaml.FullLoader)
     return contents
 
 
 def ReadBwdFile(filepath):
     f = open(filepath, 'r')
-    contents = yaml.load(f)
+    contents = yaml.load(f, Loader=yaml.FullLoader)
     ret = {}
     for content in contents:
-        assert 'grad_api' in content.keys()
-        api_name = content['grad_api']
+        assert 'backward_api' in content.keys()
+        api_name = content['backward_api']
         ret[api_name] = content
     return ret
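`yaml.load` without an explicit `Loader` has emitted a deprecation warning since PyYAML 5.1, hence the `FullLoader` argument; the keying change means backward entries are now looked up by their `backward_api` field. A minimal sketch of the resulting dict, in which every field except `backward_api` is purely illustrative:

```python
# Illustrative parsed backward.yaml contents; only the 'backward_api' key
# is relied on by this script, the other fields are placeholders.
contents = [{"backward_api": "scale_grad", "args": "...", "output": "..."}]
ret = {entry["backward_api"]: entry for entry in contents}
assert "scale_grad" in ret
```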
 
@@ -134,13 +153,13 @@ def ParseYamlArgs(string):
 def ParseYamlReturns(string):
     # Example: Tensor, Tensor
 
-    # list = [ [ret_type, orig_position], ...]
+    # list = [ ["", ret_type, orig_position], ...]
     returns_list = []
 
     returns = [x.strip() for x in string.strip().split(",")]
     for i in range(len(returns)):
         ret = returns[i]
-        returns_list.append([ret, i])
+        returns_list.append(["", ret, i])
 
     return returns_list
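The leading empty string gives each return entry the same `[name, type, position]` layout as the parsed inputs, which is why `ForwardsValidationCheck` below now reads indices 1 and 2:

```python
ParseYamlReturns("Tensor, Tensor")
# -> [["", "Tensor", 0], ["", "Tensor", 1]]
# Slot 0 is the (empty) return name, slot 1 the type, slot 2 the position.
```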
 
@@ -249,8 +268,8 @@ def ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
         assert orig_attr_pos == forward_attr_pos
 
     for i in range(len(forward_returns_list)):
-        orig_return_type = orig_forward_returns_list[i][0]
-        orig_return_pos = orig_forward_returns_list[i][1]
+        orig_return_type = orig_forward_returns_list[i][1]
+        orig_return_pos = orig_forward_returns_list[i][2]
         forward_return_type = forward_returns_list[i][1]
         forward_return_pos = forward_returns_list[i][2]
 
@@ -435,19 +454,20 @@ def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map,
             aname, GetConstReference(atype), aname, saved_attr_name, aname)
 
         ATTRIBUTE_MEMBER_TEMPLATE = """
-   {} {};
+   {} {} = {};
 """
         attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format(
-            GetConstReference(atype), saved_attr_name)
+            RemoveConstAndReference(atype), saved_attr_name, default_val)
     # End: SetAttributes & Attribute Members
 
+    grad_node_name = GetGradNodeName(fwd_api_name)
     NODE_DECLARATION_TEMPLATE = """
-class GradNode{} : public egr::GradNodeBase {{
+class {} : public egr::GradNodeBase {{
  public:
-  GradNode{}() : egr::GradNodeBase() {{}}
-  GradNode{}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) : 
+  {}() : egr::GradNodeBase() {{}}
+  {}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) : 
       egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}}
-  ~GradNode{}() override = default;
+  ~{}() override = default;
 
   virtual std::vector<std::vector<egr::EagerTensor>> operator()(
       const std::vector<std::vector<egr::EagerTensor>>& grads) override;
@@ -465,7 +485,7 @@ class GradNode{} : public egr::GradNodeBase {{
 }};
 """
     node_declaration_str = NODE_DECLARATION_TEMPLATE.format(
-        forward_op_name, forward_op_name, forward_op_name, forward_op_name,
+        grad_node_name, grad_node_name, grad_node_name, grad_node_name,
         set_tensor_wrapper_methods_str, set_attribute_methods_str,
         tensor_wrapper_members_str, attribute_members_str)
 
@@ -489,17 +509,18 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
     for name, (_, is_fwd_input,
                grad_api_position), in backward_fwd_input_map.items():
         tensor_wrapper_name = GetSavedName(name)
-        if is_fwd_input:
+        grad_api_args[
+            grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr) )"
+
+    for _, (ttype, fwd_position,
+            grad_api_position) in backward_grad_input_map.items():
+        if IsPlainTensorType(ttype):
             grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true)"
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}][0] )"
         else:
+            assert IsVectorTensorType(ttype)
             grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false)"
-
-    for _, (_, fwd_position,
-            grad_api_position) in backward_grad_input_map.items():
-        grad_api_args[
-            grad_api_position] = f"*grads[{fwd_position}].Tensor().get()"
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}] )"
 
     for name, _, _, grad_api_position in backward_attrs_list:
         saved_attribute_name = GetSavedName(name)
@@ -507,40 +528,34 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
     grad_api_args_str = ", ".join(grad_api_args)
 
     # Construct grad_api returns
-    num_outputs = len(backward_grad_output_map.keys())
-    returns_list = ["" for i in range(num_outputs)]
+    num_bwd_outputs = len(backward_grad_output_map.keys())
+    returns_str = f"std::vector<std::vector<egr::EagerTensor>> returns({num_bwd_outputs});\n"
     for _, (ttype, fwd_position,
             grad_api_position) in backward_grad_output_map.items():
         # Infer Grad API Return Type
-        if num_outputs == 1:
+        if num_bwd_outputs == 1:
             # Single tensor output, return as is
             if IsPlainTensorType(ttype):
-                returns_list[0] = "{grad_api_returns}"
+                returns_str += "returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };\n"
             else:
                 assert IsVectorTensorType(ttype)
-                returns_list[0] = "grad_api_returns"
+                returns_str += "returns[0] = egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns);\n"
         else:
             # Rearrange output order accordingly
-            if IsPlainTensorType(ttype):
-                returns_list[
-                    fwd_position] = f"{{ grad_api_returns[{grad_api_position}] }}"
-            else:
-                assert IsVectorTensorType(ttype)
-                returns_list[
-                    fwd_position] = f"grad_api_returns[{grad_api_position}]"
-    returns_str = ", ".join(returns_list)
-    returns_str = f"{{ {returns_str} }}"
+            returns_str += f"returns[{fwd_position}] = egr::EagerUtils::CreateEagerTensorFromTensor( grad_api_returns[{grad_api_position}] );\n"
+    returns_str += "return returns;\n"
 
+    grad_node_name = GetGradNodeName(fwd_api_name)
     FUNCTION_TEMPLATE = """
-std::vector<std::vector<egr::EagerTensor>> GradNode{}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
+std::vector<std::vector<egr::EagerTensor>> {}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
     // Call grad_api function
-    auto grad_api_returns = {}({});
-    return {};
+    auto grad_api_returns = paddle::experimental::{}({});
+    {}
 }}
   """
 
     node_definition_str = FUNCTION_TEMPLATE.format(
-        fwd_api_name, bwd_api_name, grad_api_args_str, returns_str)
+        grad_node_name, bwd_api_name, grad_api_args_str, returns_str)
 
     return node_definition_str
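Filling `FUNCTION_TEMPLATE` in by hand shows the shape of the generated code. A self-contained sketch for a hypothetical op `scale` with backward api `scale_grad`, one plain-tensor grad input and one plain-tensor grad output (all names illustrative):

```python
# Mirrors the FUNCTION_TEMPLATE defined above.
FUNCTION_TEMPLATE = """
std::vector<std::vector<egr::EagerTensor>> {}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
    // Call grad_api function
    auto grad_api_returns = paddle::experimental::{}({});
    {}
}}
"""

returns_str = (
    "std::vector<std::vector<egr::EagerTensor>> returns(1);\n"
    "returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };\n"
    "return returns;\n")

print(FUNCTION_TEMPLATE.format(
    "FinalGradNodescale",                                 # GetGradNodeName("scale")
    "scale_grad",                                         # bwd_api_name
    "egr::EagerUtils::SyncToPtenTensors( grads[0][0] )",  # grad_api_args_str
    returns_str))
```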
 
@@ -565,12 +580,12 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     for name, (ttype, pos) in forward_inputs_position_map.items():
         input_autograd_meta_name = GetAutoGradMetaName(name)
         if IsPlainTensorType(ttype):
-            input_autograd_meta = f"    egr::EagerTensor* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
+            input_autograd_meta = f"    egr::AutogradMeta* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
         else:
             assert IsVectorTensorType(ttype)
             input_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
-            input_autograd_meta = f"    std::vector<egr::EagerTensor*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
-            input_autograd_meta += f"    std::vector<egr::EagerTensor*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"
+            input_autograd_meta = f"    std::vector<egr::AutogradMeta*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
+            input_autograd_meta += f"    std::vector<egr::AutogradMeta*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"
 
         inputs_autograd_meta_list.append(input_autograd_meta)
         compute_require_grad_args_list.append(input_autograd_meta_name)
@@ -586,19 +601,19 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
         output_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
         if num_fwd_outputs == 1:
             if IsPlainTensorType(rtype):
-                output_autograd_meta = f"    egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs);"
+                output_autograd_meta = f"    egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs);"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"    std::vector<egr::EagerTensor*> {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs);\n"
-                output_autograd_meta += f"    std::vector<egr::EagerTensor*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
+                output_autograd_meta = f"    std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs);\n"
+                output_autograd_meta += f"    std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
         else:
             # Tuple api_result
             if IsPlainTensorType(rtype):
-                outputs_autograd_meta = f"    egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs[{pos}]);"
+                output_autograd_meta = f"    egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"    std::vector<egr::EagerTensor*> {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs[{pos}]);\n"
-                output_autograd_meta += f"    std::vector<egr::EagerTensor*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
+                output_autograd_meta = f"    std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);\n"
+                output_autograd_meta += f"    std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
 
         outputs_autograd_meta_list.append(output_autograd_meta)
         pass_stop_gradient_args_list.append(output_autograd_meta_name)
@@ -610,19 +625,23 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     # Node Construction
     num_bwd_inputs = len(backward_grad_input_map.keys())
     num_bwd_outputs = len(backward_grad_output_map.keys())
-    node_construction_str = f"        auto grad_node = std::make_shared<GradNode{fwd_api_name}>({num_bwd_inputs}, {num_bwd_outputs});"
+    grad_node_name = GetGradNodeName(fwd_api_name)
+    node_construction_str = f"        auto grad_node = std::make_shared<{grad_node_name}>({num_bwd_inputs}, {num_bwd_outputs});"
 
     # SetAttributes
     set_attributes_list = []
     for name, _, _, _ in backward_attrs_list:
-        set_attributes = "        grad_node->SetAttribute{name}({name});"
+        set_attributes = f"        grad_node->SetAttribute{name}({name});"
         set_attributes_list.append(set_attributes)
     set_attributes_str = "\n".join(set_attributes_list)
 
     # SetTensorWrappers
     set_tensor_wrappers_list = []
-    for name, (_, _, _) in backward_fwd_input_map.items():
-        set_tensor_wrappers = f"        grad_node->SetTensorWrapper{name}({name});"
+    for name, (_, is_fwd_input, _) in backward_fwd_input_map.items():
+        if is_fwd_input:
+            set_tensor_wrappers = f"        grad_node->SetTensorWrapper{name}({name}, true);"
+        else:
+            set_tensor_wrappers = f"        grad_node->SetTensorWrapper{name}({name}, false);"
         set_tensor_wrappers_list.append(set_tensor_wrappers)
     set_tensor_wrappers_str = "\n".join(set_tensor_wrappers_list)
 
@@ -727,7 +746,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     inputs_args_list = ["" for i in range(num_inputs)]
     inputs_call_list = ["" for i in range(num_inputs)]
     for name, (ttype, pos) in forward_inputs_position_map.items():
-        inputs_call_list[pos] = f"*{name}.Tensor().get()"
+        inputs_call_list[pos] = f"egr::EagerUtils::SyncToPtenTensors({name})"
         if IsPlainTensorType(ttype):
             inputs_args_list[pos] = f"const egr::EagerTensor& {name}"
         else:
@@ -746,7 +765,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     inputs_call_args_str = ", ".join(inputs_call_list)
 
     # Forward Full Logic
-    forward_call_str = f"auto api_result = {fwd_api_name}({inputs_call_args_str});"
+    forward_call_str = f"auto api_result = paddle::experimental::{fwd_api_name}({inputs_call_args_str});"
 
     # Get return type list & outputs
     num_outputs = len(forward_outputs_position_map.keys())
@@ -783,7 +802,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
         backward_grad_output_map, backward_attrs_list)
 
     FORWARD_FUNCTION_TEMPLATE = """
-{} {}_dygraph_function({}) {{
+{} {}({}) {{
     // Forward API Call
     {}
     
@@ -796,11 +815,11 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
 }}
 """
 
+    forward_function_name = GetForwardFunctionName(fwd_api_name)
     forward_function_str = FORWARD_FUNCTION_TEMPLATE.format(
-        returns_type_str, fwd_api_name, inputs_args_str, forward_call_str,
-        returns_str, node_creation_str)
-
-    forward_function_declaration_str = f"{returns_type_str} {fwd_api_name}_dygraph_function({inputs_args_str});"
+        returns_type_str, forward_function_name, inputs_args_str,
+        forward_call_str, returns_str, node_creation_str)
+    forward_function_declaration_str = f"{returns_type_str} {forward_function_name}({inputs_args_str});"
 
     return forward_function_str, forward_function_declaration_str
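With the rename, the emitted declaration for a hypothetical single-input, single-output api `scale` would take the following form; the `egr::EagerTensor` return type is an assumption, since `returns_type_str` is computed elsewhere in this file:

```python
GetForwardFunctionName("scale")
# -> "scale_final_state_dygraph_function"
# so forward_function_declaration_str becomes, e.g. (return type assumed):
#   egr::EagerTensor scale_final_state_dygraph_function(const egr::EagerTensor& x);
```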
 
@@ -809,11 +828,12 @@ def GenerateNodeCCFile(filepath, node_definition_str):
     file_contents = """
 #include "glog/logging.h"
 #include "paddle/pten/api/all.h"
+#include "paddle/pten/api/backward/backward_api.h"
 #include "paddle/fluid/imperative/tracer.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
-#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
 
 """
     file_contents += node_definition_str
@@ -836,8 +856,8 @@ def GenerateNodeHFile(filepath, node_declaration_str):
 
 def GenerateForwardCCFile(filepath, forward_definition_str):
     file_contents = """
-#include "paddle/fluid/eager/api/generated/eager_generated/dygraph_forward_api.h"
-#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
 
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/legacy/op_runner.h"
@@ -905,10 +925,17 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str):
         # Collect Forward Inputs/Outputs
         forward_inputs_list, forward_attrs_list, forward_returns_list = ParseYamlForwardFromBackward(
             bwd_forward_str)
+        print("Parsed Forward Inputs List: ", forward_inputs_list)
+        print("Parsed Forward Attrs List: ", forward_attrs_list)
+        print("Parsed Forward Returns List: ", forward_returns_list)
 
         # Collect Original Forward Inputs/Outputs and then perform validation checks
         orig_forward_inputs_list, orig_forward_attrs_list, orig_forward_returns_list = ParseYamlForward(
             fwd_args_str, fwd_returns_str)
+        print("Parsed Original Forward Inputs List: ", orig_forward_inputs_list)
+        print("Parsed Original Forward Attrs List: ", orig_forward_attrs_list)
+        print("Parsed Original Forward Returns List: ",
+              orig_forward_returns_list)
 
         # Forward Validation Checks
         ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
@@ -919,15 +946,25 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str):
         # Parse Backward Inputs/Outputs
         backward_inputs_list, backward_attrs_list, backward_returns_list = ParseYamlBackward(
             bwd_args_str, bwd_returns_str)
+        print("Parsed Backward Inputs List: ", backward_inputs_list)
+        print("Parsed Backward Attrs List: ", backward_attrs_list)
+        print("Parsed Backward Returns List: ", backward_returns_list)
 
         # Determine Forward Inputs/Outputs Position
         forward_inputs_position_map, forward_outputs_position_map = DetermineForwardPositionMap(
             forward_inputs_list, forward_returns_list)
+        print("Generated Forward Input Position Map: ",
+              forward_inputs_position_map)
+        print("Generated Forward Output Position Map: ",
+              forward_outputs_position_map)
 
         # SlotName Matching
         backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map = SlotNameMatching(
             backward_inputs_list, backward_returns_list,
             forward_inputs_position_map, forward_outputs_position_map)
+        print("Generated Backward Fwd Input Map: ", backward_fwd_input_map)
+        print("Generated Backward Grad Input Map: ", backward_grad_input_map)
+        print("Generated Backward Grad Output Map: ", backward_grad_output_map)
 
         # Backward Validation Check
         BackwardValidationCheck(backward_fwd_input_map, backward_grad_input_map,
@@ -936,11 +973,13 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str):
         # Node Declaration Generation
         node_declaration_str += GenerateNodeDeclaration(
             fwd_api_name, backward_fwd_input_map, backward_attrs_list)
+        print("Generated Node Declaration: ", node_declaration_str)
 
         node_definition_str += GenerateNodeDefinition(
             fwd_api_name, bwd_api_name, backward_fwd_input_map,
             backward_grad_input_map, backward_grad_output_map,
             backward_attrs_list)
+        print("Generated Node Definition: ", node_definition_str)
 
         # Node Definition Generation
         definition_declaration_pair = GenerateForwardDefinition(
@@ -948,6 +987,8 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str):
             forward_outputs_position_map, forward_attrs_list,
             backward_fwd_input_map, backward_grad_input_map,
             backward_grad_output_map, backward_attrs_list)
         forward_definition_str += definition_declaration_pair[0]
         forward_declaration_str += definition_declaration_pair[1]
+        print("Generated Forward Definition: ", forward_definition_str)
+        print("Generated Forward Declaration: ", forward_declaration_str)
 
@@ -957,6 +998,12 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str):
     forwards_h_path = args.forwards_h_path
     forwards_cc_path = args.forwards_cc_path
 
+    for path in [
+            nodes_cc_path, nodes_h_path, forwards_h_path, forwards_cc_path
+    ]:
+        if os.path.exists(path):
+            os.remove(path)
+
     GenerateNodeCCFile(nodes_cc_path, node_definition_str)
     GenerateNodeHFile(nodes_h_path, node_declaration_str)
     GenerateForwardCCFile(forwards_cc_path, forward_definition_str)
diff --git a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
index 56ec287561c56..fdb8529515d30 100644
--- a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
+++ b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
@@ -15,9 +15,45 @@
 import sys
 import os
 
-if __name__ == "__main__":
-    assert len(sys.argv) == 2
-    eager_dir = sys.argv[1]
+
+def GenerateFileStructureForFinalDygraph(eager_dir):
+    """
+    paddle/fluid/eager
+    |- api/generated/eager_generated
+    |  |- CMakeLists.txt
+    |  |  "add_subdirectory(forwards), add_subdirectory(backwards)"
+    |  |
+    |  |- forwards
+    |  |  |- "dygraph_functions.cc"
+    |  |  |- "dygraph_functions.h"
+    |  |
+    |  |- backwards
+    |     |- "nodes.cc"
+    |     |- "nodes.h"
+    """
+    # Directory Generation
+    generated_dir = os.path.join(eager_dir, "api/generated/eager_generated")
+    forwards_dir = os.path.join(generated_dir, "forwards")
+    nodes_dir = os.path.join(generated_dir, "backwards")
+    dirs = [generated_dir, forwards_dir, nodes_dir]
+    for directory in dirs:
+        if not os.path.exists(directory):
+            os.mkdir(directory)
+
+    # Empty files
+    dygraph_functions_h_path = os.path.join(forwards_dir,
+                                            "dygraph_functions.h")
+    empty_files = [dygraph_functions_h_path]
+    empty_files.append(os.path.join(forwards_dir, "dygraph_functions.cc"))
+    empty_files.append(os.path.join(nodes_dir, "nodes.cc"))
+    empty_files.append(os.path.join(nodes_dir, "nodes.h"))
+
+    for path in empty_files:
+        if not os.path.exists(path):
+            open(path, 'a').close()
+
+
+def GenerateFileStructureForIntermediateDygraph(eager_dir):
     """
     paddle/fluid/eager
     |- generated
@@ -79,3 +115,10 @@
 
     with open(generated_level_cmakelist_path, "w") as f:
         f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)")
+
+
+if __name__ == "__main__":
+    assert len(sys.argv) == 2
+    eager_dir = sys.argv[1]
+    GenerateFileStructureForIntermediateDygraph(eager_dir)
+    GenerateFileStructureForFinalDygraph(eager_dir)
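A minimal standalone sketch of the new final-state branch; the `build/eager` path is illustrative, and the parent `api/generated` directory is pre-created because `GenerateFileStructureForFinalDygraph` uses `os.mkdir` rather than `os.makedirs`:

```python
import os

eager_dir = "build/eager"  # hypothetical scratch location
os.makedirs(os.path.join(eager_dir, "api/generated"), exist_ok=True)
GenerateFileStructureForFinalDygraph(eager_dir)
assert os.path.isfile(os.path.join(
    eager_dir, "api/generated/eager_generated/backwards/nodes.h"))
```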
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index 98e6a8fc5d28e..88030d91bf91f 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -286,4 +286,43 @@ void EagerUtils::CheckAndRetainGrad(
   }
 }
 
+paddle::experimental::Tensor EagerUtils::SyncToPtenTensors(
+    const egr::EagerTensor& tensor) {
+  const_cast<EagerTensor*>(&tensor)->SyncToTensor();
+  return *tensor.Tensor().get();
+}
+
+std::vector<paddle::experimental::Tensor> EagerUtils::SyncToPtenTensors(
+    const std::vector<egr::EagerTensor>& tensors) {
+  std::vector<paddle::experimental::Tensor> res;
+  size_t num = tensors.size();
+  res.reserve(num);
+  for (size_t i = 0; i < num; i++) {
+    const_cast<EagerTensor*>(&(tensors[i]))->SyncToTensor();
+    res.push_back(*tensors[i].Tensor().get());
+  }
+  return res;
+}
+
+egr::EagerTensor EagerUtils::CreateEagerTensorFromTensor(
+    const paddle::experimental::Tensor& tensor) {
+  egr::EagerTensor ret;
+  ret.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensor));
+  return ret;
+}
+
+std::vector<egr::EagerTensor> EagerUtils::CreateEagerTensorFromTensor(
+    const std::vector<paddle::experimental::Tensor>& tensors) {
+  std::vector<egr::EagerTensor> res;
+  size_t num = tensors.size();
+  res.reserve(num);
+  for (size_t i = 0; i < num; i++) {
+    egr::EagerTensor tmp;
+    tmp.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensors[i]));
+    res.emplace_back(std::move(tmp));
+  }
+
+  return res;
+}
+
 }  // namespace egr
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index ef3ecf27c3ccb..73839d34ec2b5 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -170,6 +170,16 @@ class EagerUtils {
 
   static void CheckAndRetainGrad(const egr::EagerTensor& tensor);
   static void CheckAndRetainGrad(const std::vector<egr::EagerTensor>& tensors);
+
+  static paddle::experimental::Tensor SyncToPtenTensors(
+      const egr::EagerTensor& tensor);
+  static std::vector<paddle::experimental::Tensor> SyncToPtenTensors(
+      const std::vector<egr::EagerTensor>& tensors);
+
+  static egr::EagerTensor CreateEagerTensorFromTensor(
+      const paddle::experimental::Tensor& tensor);
+  static std::vector<egr::EagerTensor> CreateEagerTensorFromTensor(
+      const std::vector<paddle::experimental::Tensor>& tensors);
 };
 
 }  // namespace egr