diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt
index 1084f0ec573c66c0f4235f42ea973db695c55261..e04d282748c0a6061ccb7d9429f0066c7fabf3ca 100644
--- a/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt
+++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/CMakeLists.txt
@@ -1 +1,3 @@
 cc_library(scale_node SRCS scale_node.cc DEPS global_utils pten pten_api grad_node_info)
+#cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps})
+#add_dependencies(final_dygraph_node eager_final_state_codegen)
diff --git a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt
index ed04e0b6f5a0cc6c4b2cb5a422895966b54bca02..f682c27992db15e81f28afe0bb9c3b30454a9d88 100644
--- a/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt
+++ b/paddle/fluid/eager/api/generated/eager_generated/forwards/CMakeLists.txt
@@ -1 +1,3 @@
 cc_library(eager_scale SRCS scale.cc DEPS pten_api pten autograd_meta scale_node)
+#cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps})
+#add_dependencies(final_dygraph_function eager_final_state_codegen)
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt
index 56ba4acc62b5311d2624498b8c7a9b0a3058dc63..0a96cbc9c970ca776e19cef74e18fe66016804e2 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt
@@ -2,13 +2,14 @@ set(api_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml")
 set(backward_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml")
 set(tmp_forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.cc")
 set(tmp_forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.h")
-set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.cc")
-set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.h")
+set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.cc")
+set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.h")
 set(forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.cc")
 set(forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h")
-set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.cc")
-set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.h")
+set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.cc")
+set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h")
+message("Final State Eager CodeGen")
 add_custom_target(eager_final_state_codegen
     COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py"
     "--api_yaml_path=${api_yaml_path}"
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index 0031d47a383e4707f6c6f2c1209841f36f902bb4..97756b6f0e14647966185f5f4da94ed5c6a58679 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -15,6 +15,7 @@
 import yaml
 import re
 import argparse
+import os
 
 
 def ParseArguments():
@@ -71,6 +72,24 @@ def GetConstReference(string):
     return ret
 
 
+def RemoveConstAndReference(string):
+    ret = string
+    if string.startswith("const "):
+        ret = ret[6:]
+    if string.endswith("&"):
+        ret = ret[:-1]
+
+    return ret
+
+
+def GetGradNodeName(string):
+    return f"FinalGradNode{string}"
+
+
+def GetForwardFunctionName(string):
+    return f"{string}_final_state_dygraph_function"
+
+
 def GetAutoGradMetaName(string):
     return f"{string}_autograd_meta"
@@ -84,17 +103,17 @@ def GetAutoGradMetaVectorName(string):
 ######################
 def ReadFwdFile(filepath):
     f = open(filepath, 'r')
-    contents = yaml.load(f)
+    contents = yaml.load(f, Loader=yaml.FullLoader)
     return contents
 
 
 def ReadBwdFile(filepath):
     f = open(filepath, 'r')
-    contents = yaml.load(f)
+    contents = yaml.load(f, Loader=yaml.FullLoader)
     ret = {}
     for content in contents:
-        assert 'grad_api' in content.keys()
-        api_name = content['grad_api']
+        assert 'backward_api' in content.keys()
+        api_name = content['backward_api']
         ret[api_name] = content
     return ret
 
@@ -134,13 +153,13 @@ def ParseYamlArgs(string):
 
 def ParseYamlReturns(string):
     # Example: Tensor, Tensor
-    # list = [ [ret_type, orig_position], ...]
+    # list = [ ["", ret_type, orig_position], ...]
     returns_list = []
     returns = [x.strip() for x in string.strip().split(",")]
     for i in range(len(returns)):
         ret = returns[i]
-        returns_list.append([ret, i])
+        returns_list.append(["", ret, i])
 
     return returns_list
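A side note on the two parsing changes above: `Loader=yaml.FullLoader` is the PyYAML 5.x way to load these specs without the unsafe-load deprecation warning, and `ReadBwdFile` now indexes entries by the `backward_api` key instead of `grad_api`. A minimal, runnable sketch of the input shape it expects (only the `backward_api` key is actually asserted by the code; the other field is illustrative):

    import yaml

    sample = """
    - backward_api : matmul_grad
      args : (Tensor x, Tensor y, Tensor out_grad)
    """
    contents = yaml.load(sample, Loader=yaml.FullLoader)
    # Mirrors ReadBwdFile: build a {backward_api_name: entry} dict
    ret = {entry['backward_api']: entry for entry in contents}
    assert 'matmul_grad' in ret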
@@ -249,8 +268,8 @@ def ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
         assert orig_attr_pos == forward_attr_pos
 
     for i in range(len(forward_returns_list)):
-        orig_return_type = orig_forward_returns_list[i][0]
-        orig_return_pos = orig_forward_returns_list[i][1]
+        orig_return_type = orig_forward_returns_list[i][1]
+        orig_return_pos = orig_forward_returns_list[i][2]
         forward_return_type = forward_returns_list[i][1]
         forward_return_pos = forward_returns_list[i][2]
 
@@ -435,19 +454,20 @@ def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map,
             aname, GetConstReference(atype), aname, saved_attr_name, aname)
 
         ATTRIBUTE_MEMBER_TEMPLATE = """
-   {} {};
+   {} {} = {};
 """
         attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format(
-            GetConstReference(atype), saved_attr_name)
+            RemoveConstAndReference(atype), saved_attr_name, default_val)
     # End: SetAttributes & Attribute Members
 
+    grad_node_name = GetGradNodeName(fwd_api_name)
     NODE_DECLARATION_TEMPLATE = """
-class GradNode{} : public egr::GradNodeBase {{
+class {} : public egr::GradNodeBase {{
  public:
-  GradNode{}() : egr::GradNodeBase() {{}}
-  GradNode{}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
+  {}() : egr::GradNodeBase() {{}}
+  {}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
       egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}}
-  ~GradNode{}() override = default;
+  ~{}() override = default;
 
   virtual std::vector<std::vector<egr::EagerTensor>> operator()(
       const std::vector<std::vector<egr::EagerTensor>>& grads) override;
@@ -465,7 +485,7 @@ class GradNode{} : public egr::GradNodeBase {{
 }};
 """
     node_declaration_str = NODE_DECLARATION_TEMPLATE.format(
-        forward_op_name, forward_op_name, forward_op_name, forward_op_name,
+        grad_node_name, grad_node_name, grad_node_name, grad_node_name,
         set_tensor_wrapper_methods_str, set_attribute_methods_str,
         tensor_wrapper_members_str, attribute_members_str)
@@ -489,17 +509,18 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
     for name, (_, is_fwd_input,
                grad_api_position), in backward_fwd_input_map.items():
         tensor_wrapper_name = GetSavedName(name)
-        if is_fwd_input:
+        grad_api_args[
+            grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr) )"
+
+    for _, (ttype, fwd_position,
+            grad_api_position) in backward_grad_input_map.items():
+        if IsPlainTensorType(ttype):
             grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true)"
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}][0] )"
         else:
+            assert IsVectorTensorType(ttype)
             grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false)"
-
-    for _, (_, fwd_position,
-            grad_api_position) in backward_grad_input_map.items():
-        grad_api_args[
-            grad_api_position] = f"*grads[{fwd_position}].Tensor().get()"
+            grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}] )"
 
     for name, _, _, grad_api_position in backward_attrs_list:
         saved_attribute_name = GetSavedName(name)
@@ -507,40 +528,34 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
     grad_api_args_str = ", ".join(grad_api_args)
 
     # Construct grad_api returns
-    num_outputs = len(backward_grad_output_map.keys())
-    returns_list = ["" for i in range(num_outputs)]
+    num_bwd_outputs = len(backward_grad_output_map.keys())
+    returns_str = f"std::vector<std::vector<egr::EagerTensor>> returns({num_bwd_outputs});\n"
     for _, (ttype, fwd_position,
             grad_api_position) in backward_grad_output_map.items():
         # Infer Grad API Return Type
-        if num_outputs == 1:
+        if num_bwd_outputs == 1:
             # Single tensor output, return as is
             if IsPlainTensorType(ttype):
-                returns_list[0] = "{grad_api_returns}"
+                returns_str += "returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };\n"
             else:
                 assert IsVectorTensorType(ttype)
-                returns_list[0] = "grad_api_returns"
+                returns_str += "returns[0] = egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns);\n"
         else:
             # Rearrange output order accordingly
-            if IsPlainTensorType(ttype):
-                returns_list[
-                    fwd_position] = f"{{ grad_api_returns[{grad_api_position}] }}"
-            else:
-                assert IsVectorTensorType(ttype)
-                returns_list[
-                    fwd_position] = f"grad_api_returns[{grad_api_position}]"
-    returns_str = ", ".join(returns_list)
-    returns_str = f"{{ {returns_str} }}"
+            returns_str += f"returns[{fwd_position}] = egr::EagerUtils::CreateEagerTensorFromTensor( grad_api_returns[{grad_api_position}] );\n"
+    returns_str += f"return returns;\n"
 
+    grad_node_name = GetGradNodeName(fwd_api_name)
     FUNCTION_TEMPLATE = """
-std::vector<std::vector<egr::EagerTensor>> GradNode{}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
+std::vector<std::vector<egr::EagerTensor>> {}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
     // Call grad_api function
-    auto grad_api_returns = {}({});
-    return {};
+    auto grad_api_returns = paddle::experimental::{}({});
+    {}
 }}
 """
     node_definition_str = FUNCTION_TEMPLATE.format(
-        fwd_api_name, bwd_api_name, grad_api_args_str, returns_str)
+        grad_node_name, bwd_api_name, grad_api_args_str, returns_str)
 
     return node_definition_str
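Taken together, GenerateNodeDeclaration and GenerateNodeDefinition now emit one grad-node class per API, named via GetGradNodeName. As a rough illustration (a hypothetical single-input `tanh` API; tensor-wrapper arguments elided and whitespace simplified; not produced verbatim by this patch), the generated operator() would read:

    # Illustrative result of FUNCTION_TEMPLATE.format(...) for a
    # hypothetical "tanh" API with one plain-Tensor gradient in and out:
    sketch = """
    std::vector<std::vector<egr::EagerTensor>> FinalGradNodetanh::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {
        // Call grad_api function
        auto grad_api_returns = paddle::experimental::tanh_grad(egr::EagerUtils::SyncToPtenTensors( grads[0][0] ));
        std::vector<std::vector<egr::EagerTensor>> returns(1);
        returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };
        return returns;
    }
    """

Note the round trip: incoming EagerTensor gradients are synced into pten tensors before the pten backward API is called, and the pten results are wrapped back into EagerTensors for the returns vector.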
@@ -565,12 +580,12 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     for name, (ttype, pos) in forward_inputs_position_map.items():
         input_autograd_meta_name = GetAutoGradMetaName(name)
         if IsPlainTensorType(ttype):
-            input_autograd_meta = f"  egr::EagerTensor* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
+            input_autograd_meta = f"  egr::AutogradMeta* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
         else:
             assert IsVectorTensorType(ttype)
             input_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
-            input_autograd_meta = f"  std::vector<egr::EagerTensor*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
-            input_autograd_meta += f"  std::vector<egr::EagerTensor*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"
+            input_autograd_meta = f"  std::vector<egr::AutogradMeta*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
+            input_autograd_meta += f"  std::vector<egr::AutogradMeta*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"
 
         inputs_autograd_meta_list.append(input_autograd_meta)
         compute_require_grad_args_list.append(input_autograd_meta_name)
@@ -586,19 +601,19 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
         output_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
         if num_fwd_outputs == 1:
             if IsPlainTensorType(rtype):
-                output_autograd_meta = f"  egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs);"
+                output_autograd_meta = f"  egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs);"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"  std::vector<egr::EagerTensor*> {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs);\n"
-                output_autograd_meta += f"  std::vector<egr::EagerTensor*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
+                output_autograd_meta = f"  std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs);\n"
+                output_autograd_meta += f"  std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
         else:
             # Tuple api_result
             if IsPlainTensorType(rtype):
-                outputs_autograd_meta = f"  egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs[{pos}]);"
+                outputs_autograd_meta = f"  egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"  std::vector<egr::EagerTensor*> {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs[{pos}]);\n"
-                output_autograd_meta += f"  std::vector<egr::EagerTensor*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
+                output_autograd_meta = f"  std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);\n"
+                output_autograd_meta += f"  std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
 
         outputs_autograd_meta_list.append(output_autograd_meta)
         pass_stop_gradient_args_list.append(output_autograd_meta_name)
@@ -610,19 +625,23 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     # Node Construction
     num_bwd_inputs = len(backward_grad_input_map.keys())
     num_bwd_outputs = len(backward_grad_output_map.keys())
-    node_construction_str = f"        auto grad_node = std::make_shared<GradNode{fwd_api_name}>({num_bwd_inputs}, {num_bwd_outputs});"
+    grad_node_name = GetGradNodeName(fwd_api_name)
+    node_construction_str = f"        auto grad_node = std::make_shared<{grad_node_name}>({num_bwd_inputs}, {num_bwd_outputs});"
 
     # SetAttributes
     set_attributes_list = []
     for name, _, _, _ in backward_attrs_list:
-        set_attributes = "        grad_node->SetAttribute{name}({name});"
+        set_attributes = f"        grad_node->SetAttribute{name}({name});"
         set_attributes_list.append(set_attributes)
     set_attributes_str = "\n".join(set_attributes_list)
 
     # SetTensorWrappers
     set_tensor_wrappers_list = []
-    for name, (_, _, _) in backward_fwd_input_map.items():
-        set_tensor_wrappers = f"        grad_node->SetTensorWrapper{name}({name});"
+    for name, (_, is_fwd_input, _) in backward_fwd_input_map.items():
+        if is_fwd_input:
+            set_tensor_wrappers = f"        grad_node->SetTensorWrapper{name}({name}, true);"
+        else:
+            set_tensor_wrappers = f"        grad_node->SetTensorWrapper{name}({name}, false);"
        set_tensor_wrappers_list.append(set_tensor_wrappers)
     set_tensor_wrappers_str = "\n".join(set_tensor_wrappers_list)
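These fragments are stitched into the generated forward function. For the same hypothetical `tanh` API, with one tensor wrapper saved from a forward output (hence the `false` full-reserved flag) and no attributes, the emitted node-creation snippet would be roughly:

    # Illustrative node-creation output; "SetTensorWrapperout" is the
    # generated setter name for a hypothetical wrapped tensor named "out".
    sketch = """
            auto grad_node = std::make_shared<FinalGradNodetanh>(1, 1);
            grad_node->SetTensorWrapperout(out, false);
    """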
@@ -727,7 +746,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     inputs_args_list = ["" for i in range(num_inputs)]
     inputs_call_list = ["" for i in range(num_inputs)]
     for name, (ttype, pos) in forward_inputs_position_map.items():
-        inputs_call_list[pos] = f"*{name}.Tensor().get()"
+        inputs_call_list[pos] = f"egr::EagerUtils::SyncToPtenTensors({name})"
         if IsPlainTensorType(ttype):
             inputs_args_list[pos] = f"const egr::EagerTensor& {name}"
         else:
@@ -746,7 +765,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     inputs_call_args_str = ", ".join(inputs_call_list)
 
     # Forward Full Logic
-    forward_call_str = f"auto api_result = {fwd_api_name}({inputs_call_args_str});"
+    forward_call_str = f"auto api_result = paddle::experimental::{fwd_api_name}({inputs_call_args_str});"
 
     # Get return type list & outputs
     num_outputs = len(forward_outputs_position_map.keys())
@@ -783,7 +802,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
                                                  backward_grad_output_map,
                                                  backward_attrs_list)
 
     FORWARD_FUNCTION_TEMPLATE = """
-{} {}_dygraph_function({}) {{
+{} {}({}) {{
     // Forward API Call
     {}
@@ -796,11 +815,11 @@
 }}
 """
+    forward_function_name = GetForwardFunctionName(fwd_api_name)
     forward_function_str = FORWARD_FUNCTION_TEMPLATE.format(
-        returns_type_str, fwd_api_name, inputs_args_str, forward_call_str,
-        returns_str, node_creation_str)
-
-    forward_function_declaration_str = f"{returns_type_str} {fwd_api_name}_dygraph_function({inputs_args_str});"
+        returns_type_str, forward_function_name, inputs_args_str,
+        forward_call_str, returns_str, node_creation_str)
+    forward_function_declaration_str = f"{returns_type_str} {forward_function_name}({inputs_args_str});"
 
     return forward_function_str, forward_function_declaration_str
 
@@ -809,11 +828,12 @@ def GenerateNodeCCFile(filepath, node_definition_str):
     file_contents = """
 #include "glog/logging.h"
 #include "paddle/pten/api/all.h"
+#include "paddle/pten/api/backward/backward_api.h"
 #include "paddle/fluid/imperative/tracer.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
-#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
 """
     file_contents += node_definition_str
@@ -836,8 +856,8 @@ def GenerateNodeHFile(filepath, node_declaration_str):
 
 def GenerateForwardCCFile(filepath, forward_definition_str):
     file_contents = """
-#include "paddle/fluid/eager/api/generated/eager_generated/dygraph_forward_api.h"
-#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
 
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/legacy/op_runner.h"
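With GetForwardFunctionName in play, every API gets a `_final_state_dygraph_function` entry point in place of the old `_dygraph_function` suffix. For a hypothetical one-tensor `tanh` API, the declaration string would look roughly like this (the return type is illustrative; the returns_type_str logic lies outside the hunks shown here):

    # Hypothetical instance of forward_function_declaration_str:
    expected_declaration = (
        "egr::EagerTensor tanh_final_state_dygraph_function("
        "const egr::EagerTensor& x);")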
@@ -905,10 +925,17 @@ if __name__ == "__main__":
         # Collect Forward Inputs/Outputs
         forward_inputs_list, forward_attrs_list, forward_returns_list = ParseYamlForwardFromBackward(
             bwd_forward_str)
+        print("Parsed Forward Inputs List: ", forward_inputs_list)
+        print("Parsed Forward Attrs List: ", forward_attrs_list)
+        print("Parsed Forward Returns List: ", forward_returns_list)
 
         # Collect Original Forward Inputs/Outputs and then perform validation checks
         orig_forward_inputs_list, orig_forward_attrs_list, orig_forward_returns_list = ParseYamlForward(
             fwd_args_str, fwd_returns_str)
+        print("Parsed Original Forward Inputs List: ", orig_forward_inputs_list)
+        print("Parsed Original Forward Attrs List: ", orig_forward_attrs_list)
+        print("Parsed Original Forward Returns List: ",
+              orig_forward_returns_list)
 
         # Forward Validation Checks
         ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
@@ -919,15 +946,25 @@ if __name__ == "__main__":
         # Parse Backward Inputs/Outputs
         backward_inputs_list, backward_attrs_list, backward_returns_list = ParseYamlBackward(
             bwd_args_str, bwd_returns_str)
+        print("Parsed Backward Inputs List: ", backward_inputs_list)
+        print("Parsed Backward Attrs List: ", backward_attrs_list)
+        print("Parsed Backward Returns List: ", backward_returns_list)
 
         # Determine Forward Inputs/Outputs Position
         forward_inputs_position_map, forward_outputs_position_map = DetermineForwardPositionMap(
             forward_inputs_list, forward_returns_list)
+        print("Generated Forward Input Position Map: ",
+              forward_inputs_position_map)
+        print("Generated Forward Output Position Map: ",
+              forward_outputs_position_map)
 
         # SlotName Matching
         backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map = SlotNameMatching(
             backward_inputs_list, backward_returns_list,
             forward_inputs_position_map, forward_outputs_position_map)
+        print("Generated Backward Fwd Input Map: ", backward_fwd_input_map)
+        print("Generated Backward Grad Input Map: ", backward_grad_input_map)
+        print("Generated Backward Grad Output Map: ", backward_grad_output_map)
 
         # Backward Validation Check
         BackwardValidationCheck(backward_fwd_input_map, backward_grad_input_map,
@@ -936,11 +973,13 @@ if __name__ == "__main__":
         # Node Declaration Generation
         node_declaration_str += GenerateNodeDeclaration(
             fwd_api_name, backward_fwd_input_map, backward_attrs_list)
+        print("Generated Node Declaration: ", node_declaration_str)
 
         node_definition_str += GenerateNodeDefinition(
             fwd_api_name, bwd_api_name, backward_fwd_input_map,
             backward_grad_input_map, backward_grad_output_map,
             backward_attrs_list)
+        print("Generated Node Definition: ", node_definition_str)
 
         # Node Definition Generation
         definition_declaration_pair = GenerateForwardDefinition(
@@ -948,6 +987,8 @@ if __name__ == "__main__":
             forward_outputs_position_map, forward_attrs_list,
             backward_fwd_input_map, backward_grad_input_map,
             backward_grad_output_map, backward_attrs_list)
+        print("Generated Forward Definition: ", forward_definition_str)
+        print("Generated Forward Declaration: ", forward_declaration_str)
 
         forward_definition_str += definition_declaration_pair[0]
         forward_declaration_str += definition_declaration_pair[1]
@@ -957,6 +998,12 @@ if __name__ == "__main__":
     forwards_h_path = args.forwards_h_path
     forwards_cc_path = args.forwards_cc_path
 
+    for path in [
+            nodes_cc_path, nodes_h_path, forwards_h_path, forwards_cc_path
+    ]:
+        if os.path.exists(path):
+            os.remove(path)
+
     GenerateNodeCCFile(nodes_cc_path, node_definition_str)
     GenerateNodeHFile(nodes_h_path, node_declaration_str)
     GenerateForwardCCFile(forwards_cc_path, forward_definition_str)
diff --git a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
index 56ec287561c564a8df0c624caee1a0addd7bb601..fdb8529515d30c7ede581a3511b2242499fb75d8 100644
--- a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
+++ b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
@@ -15,9 +15,45 @@
 import sys
 import os
 
-if __name__ == "__main__":
-    assert len(sys.argv) == 2
-    eager_dir = sys.argv[1]
+
+def GenerateFileStructureForFinalDygraph(eager_dir):
+    """
+    paddle/fluid/eager
+    |- generated
+    |  |- CMakeLists.txt
+    |  |  "add_subdirectory(forwards), add_subdirectory(backwards)"
+    |
+    |  |- forwards
+    |     |- "dygraph_functions.cc"
+    |     |- "dygraph_functions.h"
+    |
+    |  |- backwards
+    |     |- "nodes.cc"
+    |     |- "nodes.h"
+    """
+    # Directory Generation
+    generated_dir = os.path.join(eager_dir, "api/generated/eager_generated")
+    forwards_dir = os.path.join(generated_dir, "forwards")
+    nodes_dir = os.path.join(generated_dir, "backwards")
+    dirs = [generated_dir, forwards_dir, nodes_dir]
+    for directory in dirs:
+        if not os.path.exists(directory):
+            os.mkdir(directory)
+
+    # Empty files
+    dygraph_forward_api_h_path = os.path.join(generated_dir,
+                                              "dygraph_functions.h")
+    empty_files = [dygraph_forward_api_h_path]
+    empty_files.append(os.path.join(forwards_dir, "dygraph_functions.cc"))
+    empty_files.append(os.path.join(nodes_dir, "nodes.cc"))
+    empty_files.append(os.path.join(nodes_dir, "nodes.h"))
+
+    for path in empty_files:
+        if not os.path.exists(path):
+            open(path, 'a').close()
+
+
+def GenerateFileStructureForIntermediateDygraph(eager_dir):
     """
     paddle/fluid/eager
     |- generated
@@ -79,3 +115,10 @@
 
     with open(generated_level_cmakelist_path, "w") as f:
         f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)")
+
+
+if __name__ == "__main__":
+    assert len(sys.argv) == 2
+    eager_dir = sys.argv[1]
+    GenerateFileStructureForIntermediateDygraph(eager_dir)
+    GenerateFileStructureForFinalDygraph(eager_dir)
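The refactored script keeps its old command-line behavior while exposing the two generators as callable functions. A minimal usage sketch (the path is illustrative):

    # Equivalent to: python generate_file_structures.py /path/to/paddle/fluid/eager
    eager_dir = "/path/to/paddle/fluid/eager"  # illustrative
    GenerateFileStructureForIntermediateDygraph(eager_dir)
    # Safe to re-run: the os.path.exists guards leave existing
    # directories and files untouched.
    GenerateFileStructureForFinalDygraph(eager_dir)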
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index 98e6a8fc5d28e472419d5a4f802fd73158a300f4..88030d91bf91fb1c5e23435140b95afb3fd311fa 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -286,4 +286,43 @@ void EagerUtils::CheckAndRetainGrad(
   }
 }
 
+paddle::experimental::Tensor EagerUtils::SyncToPtenTensors(
+    const egr::EagerTensor& tensor) {
+  const_cast<EagerTensor*>(&tensor)->SyncToTensor();
+  return *tensor.Tensor().get();
+}
+
+std::vector<paddle::experimental::Tensor> EagerUtils::SyncToPtenTensors(
+    const std::vector<egr::EagerTensor>& tensors) {
+  std::vector<paddle::experimental::Tensor> res;
+  size_t num = tensors.size();
+  res.reserve(num);
+  for (size_t i = 0; i < num; i++) {
+    const_cast<EagerTensor*>(&(tensors[i]))->SyncToTensor();
+    res.push_back(*tensors[i].Tensor().get());
+  }
+  return res;
+}
+
+egr::EagerTensor EagerUtils::CreateEagerTensorFromTensor(
+    const paddle::experimental::Tensor& tensor) {
+  egr::EagerTensor ret;
+  ret.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensor));
+  return ret;
+}
+
+std::vector<egr::EagerTensor> EagerUtils::CreateEagerTensorFromTensor(
+    const std::vector<paddle::experimental::Tensor>& tensors) {
+  std::vector<egr::EagerTensor> res;
+  size_t num = tensors.size();
+  res.reserve(num);
+  for (size_t i = 0; i < num; i++) {
+    egr::EagerTensor tmp;
+    tmp.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensors[i]));
+    res.emplace_back(std::move(tmp));
+  }
+
+  return res;
+}
+
 }  // namespace egr
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index ef3ecf27c3ccb9493d552819c9296180e0b57210..73839d34ec2b5a5639607c132a077f85dd283d01 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -170,6 +170,16 @@ class EagerUtils {
 
   static void CheckAndRetainGrad(const egr::EagerTensor& tensor);
   static void CheckAndRetainGrad(const std::vector<egr::EagerTensor>& tensors);
+
+  static paddle::experimental::Tensor SyncToPtenTensors(
+      const egr::EagerTensor& tensor);
+  static std::vector<paddle::experimental::Tensor> SyncToPtenTensors(
+      const std::vector<egr::EagerTensor>& tensors);
+
+  static egr::EagerTensor CreateEagerTensorFromTensor(
+      const paddle::experimental::Tensor& tensor);
+  static std::vector<egr::EagerTensor> CreateEagerTensorFromTensor(
+      const std::vector<paddle::experimental::Tensor>& tensors);
 };
 }  // namespace egr
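These four helpers are the bridge the generated code leans on: SyncToPtenTensors materializes the pten tensor held inside a const EagerTensor (hence the const_cast before SyncToTensor) so it can be handed to paddle::experimental APIs, and CreateEagerTensorFromTensor wraps pten results back into EagerTensors. The generator pairs them like so (both strings appear verbatim in eager_gen.py above; the position index 0 is illustrative):

    # Input side: EagerTensor gradient -> pten Tensor argument
    in_expr = "egr::EagerUtils::SyncToPtenTensors( grads[0][0] )"
    # Output side: pten Tensor result -> EagerTensor in the returns vector
    out_stmt = ("returns[0] = { egr::EagerUtils::"
                "CreateEagerTensorFromTensor(grad_api_returns) };")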