Unverified commit 09198b04, authored by Zhanlue Yang, committed by GitHub

Adjusted CMakeFiles to support compilation for final state auto generated codes (#39215)

* Removed debug info

* Added automatic code generation for final state Eager Dygraph

* Modified backward yaml

* Added EagerUtils helper functions for final state CodeGen

* Adjusted CMakeFiles to support compilation for final state auto generated codes

* Fixed final state eager codegen

* Fixed CI problems

* Fixed yaml.load() method failure

* Turned final state codegen off for now

* Fixed minor issue
Parent 76103c88
 cc_library(scale_node SRCS scale_node.cc DEPS global_utils pten pten_api grad_node_info)
+#cc_library(final_dygraph_node SRCS nodes.cc DEPS ${eager_deps})
+#add_dependencies(final_dygraph_node eager_final_state_codegen)
 cc_library(eager_scale SRCS scale.cc DEPS pten_api pten autograd_meta scale_node)
+#cc_library(final_dygraph_function SRCS dygraph_functions.cc DEPS ${eager_deps})
+#add_dependencies(final_dygraph_function eager_final_state_codegen)
...
@@ -2,13 +2,14 @@ set(api_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml")
 set(backward_yaml_path "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml")
 set(tmp_forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.cc")
 set(tmp_forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.h")
-set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.cc")
-set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_node.h")
+set(tmp_nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.cc")
+set(tmp_nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/tmp_nodes.h")
 set(forwards_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.cc")
 set(forwards_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h")
-set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.cc")
-set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/node.h")
+set(nodes_cc_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.cc")
+set(nodes_h_path "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h")
+message("Final State Eager CodeGen")
 add_custom_target(eager_final_state_codegen
     COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py"
             "--api_yaml_path=${api_yaml_path}"
...
...
@@ -15,6 +15,7 @@
 import yaml
 import re
 import argparse
+import os

 def ParseArguments():
...
@@ -71,6 +72,24 @@ def GetConstReference(string):
     return ret

+def RemoveConstAndReference(string):
+    ret = string
+    if string.startswith("const "):
+        ret = ret[6:]
+    if string.endswith("&"):
+        ret = ret[:-1]
+    return ret
+
+def GetGradNodeName(string):
+    return f"FinalGradNode{string}"
+
+def GetForwardFunctionName(string):
+    return f"{string}_final_state_dygraph_function"
+
 def GetAutoGradMetaName(string):
     return f"{string}_autograd_meta"
...
@@ -84,17 +103,17 @@ def GetAutoGradMetaVectorName(string):
 ######################
 def ReadFwdFile(filepath):
     f = open(filepath, 'r')
-    contents = yaml.load(f)
+    contents = yaml.load(f, Loader=yaml.FullLoader)
     return contents

 def ReadBwdFile(filepath):
     f = open(filepath, 'r')
-    contents = yaml.load(f)
+    contents = yaml.load(f, Loader=yaml.FullLoader)
     ret = {}
     for content in contents:
-        assert 'grad_api' in content.keys()
-        api_name = content['grad_api']
+        assert 'backward_api' in content.keys()
+        api_name = content['backward_api']
         ret[api_name] = content
     return ret
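
Context for the yaml.load() fix: PyYAML 5.1 deprecated calling yaml.load() without an explicit Loader, and PyYAML 6.0 made the argument mandatory, so the bare calls above started failing on newer environments. A minimal sketch of the corrected pattern (the file path is a placeholder):

    import yaml

    # Parse a list of API entries keyed by their 'backward_api' name,
    # mirroring ReadBwdFile above. FullLoader handles the full YAML
    # language while avoiding arbitrary Python object construction.
    with open("backward.yaml", 'r') as f:
        contents = yaml.load(f, Loader=yaml.FullLoader)
    ret = {entry['backward_api']: entry for entry in contents}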
...
@@ -134,13 +153,13 @@ def ParseYamlArgs(string):
 def ParseYamlReturns(string):
     # Example: Tensor, Tensor
-    # list = [ [ret_type, orig_position], ...]
+    # list = [ ["", ret_type, orig_position], ...]
     returns_list = []

     returns = [x.strip() for x in string.strip().split(",")]
     for i in range(len(returns)):
         ret = returns[i]
-        returns_list.append([ret, i])
+        returns_list.append(["", ret, i])
     return returns_list
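
With this change each parsed return grows from a [ret_type, orig_position] pair into a [name, ret_type, orig_position] triple, with the name slot left empty when the YAML does not supply one. A worked example, assuming the function above:

    returns = ParseYamlReturns("Tensor, Tensor")
    assert returns == [["", "Tensor", 0], ["", "Tensor", 1]]
    # ret_type moves from index 0 to index 1, and the position from
    # index 1 to index 2, which is exactly the re-indexing applied in
    # ForwardsValidationCheck below.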
...
@@ -249,8 +268,8 @@ def ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
         assert orig_attr_pos == forward_attr_pos

     for i in range(len(forward_returns_list)):
-        orig_return_type = orig_forward_returns_list[i][0]
-        orig_return_pos = orig_forward_returns_list[i][1]
+        orig_return_type = orig_forward_returns_list[i][1]
+        orig_return_pos = orig_forward_returns_list[i][2]
         forward_return_type = forward_returns_list[i][1]
         forward_return_pos = forward_returns_list[i][2]
...
@@ -435,19 +454,20 @@ def GenerateNodeDeclaration(fwd_api_name, backward_fwd_input_map,
             aname, GetConstReference(atype), aname, saved_attr_name, aname)

         ATTRIBUTE_MEMBER_TEMPLATE = """
-  {} {};
+  {} {} = {};
 """
         attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format(
-            GetConstReference(atype), saved_attr_name)
+            RemoveConstAndReference(atype), saved_attr_name, default_val)
     # End: SetAttributes & Attribute Members

+    grad_node_name = GetGradNodeName(fwd_api_name)
     NODE_DECLARATION_TEMPLATE = """
-class GradNode{} : public egr::GradNodeBase {{
+class {} : public egr::GradNodeBase {{
  public:
-  GradNode{}() : egr::GradNodeBase() {{}}
-  GradNode{}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
+  {}() : egr::GradNodeBase() {{}}
+  {}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
       egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}}
-  ~GradNode{}() override = default;
+  ~{}() override = default;

   virtual std::vector<std::vector<egr::EagerTensor>> operator()(
       const std::vector<std::vector<egr::EagerTensor>>& grads) override;
...
@@ -465,7 +485,7 @@ class GradNode{} : public egr::GradNodeBase {{
 }};
 """
     node_declaration_str = NODE_DECLARATION_TEMPLATE.format(
-        forward_op_name, forward_op_name, forward_op_name, forward_op_name,
+        grad_node_name, grad_node_name, grad_node_name, grad_node_name,
         set_tensor_wrapper_methods_str, set_attribute_methods_str,
         tensor_wrapper_members_str, attribute_members_str)
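
Concretely, attribute members are now stored by value with a default initializer instead of as const references, and the grad-node class takes its name from GetGradNodeName. A minimal sketch of the rendered pieces for a hypothetical "scale" op with a float attribute whose default_val is "1.0" (assuming GetSavedName maps "scale" to "scale_"):

    # Hypothetical rendering of the updated templates.
    grad_node_name = GetGradNodeName("scale")       # "FinalGradNodescale"
    member = "{} {} = {};".format("float", "scale_", "1.0")
    # member == "float scale_ = 1.0;"
    # (previously rendered via GetConstReference(atype), i.e. something
    # like "const float& scale_;")
    # The class header now renders as:
    #   class FinalGradNodescale : public egr::GradNodeBase {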
...
@@ -489,17 +509,18 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
     for name, (_, is_fwd_input,
                grad_api_position), in backward_fwd_input_map.items():
         tensor_wrapper_name = GetSavedName(name)
-        if is_fwd_input:
-            grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, true)"
-        else:
-            grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, false)"
-
-    for _, (_, fwd_position,
-            grad_api_position) in backward_grad_input_map.items():
-        grad_api_args[
-            grad_api_position] = f"*grads[{fwd_position}].Tensor().get()"
+        grad_api_args[
+            grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr) )"
+
+    for _, (ttype, fwd_position,
+            grad_api_position) in backward_grad_input_map.items():
+        if IsPlainTensorType(ttype):
+            grad_api_args[
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}][0] )"
+        else:
+            assert IsVectorTensorType(ttype)
+            grad_api_args[
+                grad_api_position] = f"egr::EagerUtils::SyncToPtenTensors( grads[{fwd_position}] )"

     for name, _, _, grad_api_position in backward_attrs_list:
         saved_attribute_name = GetSavedName(name)
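
Taken together, every backward-API argument now flows through the new EagerUtils::SyncToPtenTensors helper: recovered tensor wrappers for saved forward inputs, and incoming grads for gradient inputs, where a plain-tensor slot consumes grads[pos][0] and a vector slot consumes all of grads[pos]. A worked sketch with hypothetical maps (again assuming GetSavedName("x") yields "x_"):

    # Hypothetical single-input, single-grad backward op.
    backward_fwd_input_map = {"x": ("Tensor", True, 0)}
    backward_grad_input_map = {"out_grad": ("Tensor", 0, 1)}
    # The loops above would fill grad_api_args as:
    #  [0]: "egr::EagerUtils::SyncToPtenTensors( egr::EagerUtils::RecoverTensorWrapper(&this->x_, nullptr) )"
    #  [1]: "egr::EagerUtils::SyncToPtenTensors( grads[0][0] )"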
...
@@ -507,40 +528,34 @@ def GenerateNodeDefinition(fwd_api_name, bwd_api_name, backward_fwd_input_map,
     grad_api_args_str = ", ".join(grad_api_args)

     # Construct grad_api returns
-    num_outputs = len(backward_grad_output_map.keys())
-    returns_list = ["" for i in range(num_outputs)]
+    num_bwd_outputs = len(backward_grad_output_map.keys())
+    returns_str = f"std::vector<std::vector<egr::EagerTensor>> returns({num_bwd_outputs});\n"
     for _, (ttype, fwd_position,
             grad_api_position) in backward_grad_output_map.items():
         # Infer Grad API Return Type
-        if num_outputs == 1:
+        if num_bwd_outputs == 1:
             # Single tensor output, return as is
             if IsPlainTensorType(ttype):
-                returns_list[0] = "{grad_api_returns}"
+                returns_str += "returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };\n"
             else:
                 assert IsVectorTensorType(ttype)
-                returns_list[0] = "grad_api_returns"
+                returns_str += "returns[0] = egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns);\n"
         else:
             # Rearrange output order accordingly
-            if IsPlainTensorType(ttype):
-                returns_list[
-                    fwd_position] = f"{{ grad_api_returns[{grad_api_position}] }}"
-            else:
-                assert IsVectorTensorType(ttype)
-                returns_list[
-                    fwd_position] = f"grad_api_returns[{grad_api_position}]"
-
-    returns_str = ", ".join(returns_list)
-    returns_str = f"{{ {returns_str} }}"
+            returns_str += f"returns[{fwd_position}] = egr::EagerUtils::CreateEagerTensorFromTensor( grad_api_returns[{grad_api_position}] );\n"
+
+    returns_str += f"return returns;\n"
+
+    grad_node_name = GetGradNodeName(fwd_api_name)

     FUNCTION_TEMPLATE = """
-std::vector<std::vector<egr::EagerTensor>> GradNode{}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
+std::vector<std::vector<egr::EagerTensor>> {}::operator()(const std::vector<std::vector<egr::EagerTensor>>& grads) {{
   // Call grad_api function
-  auto grad_api_returns = {}({});
-  return {};
+  auto grad_api_returns = paddle::experimental::{}({});
+  {}
 }}
 """
     node_definition_str = FUNCTION_TEMPLATE.format(
-        fwd_api_name, bwd_api_name, grad_api_args_str, returns_str)
+        grad_node_name, bwd_api_name, grad_api_args_str, returns_str)

     return node_definition_str
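
The return path mirrors this: rather than a single brace-initializer expression, the generator now emits statements that size a returns vector up front and convert each pten Tensor back into an EagerTensor via CreateEagerTensorFromTensor. For a hypothetical backward op with one plain-tensor output, the emitted operator() body would read roughly:

    # Sketch of the generated C++ body (names hypothetical):
    #   std::vector<std::vector<egr::EagerTensor>> returns(1);
    #   returns[0] = { egr::EagerUtils::CreateEagerTensorFromTensor(grad_api_returns) };
    #   return returns;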
...
@@ -565,12 +580,12 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     for name, (ttype, pos) in forward_inputs_position_map.items():
         input_autograd_meta_name = GetAutoGradMetaName(name)
         if IsPlainTensorType(ttype):
-            input_autograd_meta = f"  egr::EagerTensor* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
+            input_autograd_meta = f"  egr::AutogradMeta* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
         else:
             assert IsVectorTensorType(ttype)
             input_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
-            input_autograd_meta = f"  std::vector<egr::EagerTensor*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
-            input_autograd_meta += f"  std::vector<egr::EagerTensor*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"
+            input_autograd_meta = f"  std::vector<egr::AutogradMeta*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
+            input_autograd_meta += f"  std::vector<egr::AutogradMeta*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"

         inputs_autograd_meta_list.append(input_autograd_meta)
         compute_require_grad_args_list.append(input_autograd_meta_name)
...
@@ -586,19 +601,19 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
         output_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
         if num_fwd_outputs == 1:
             if IsPlainTensorType(rtype):
-                output_autograd_meta = f"  egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs);"
+                output_autograd_meta = f"  egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs);"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"  std::vector<egr::EagerTensor*> {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs);\n"
-                output_autograd_meta += f"  std::vector<egr::EagerTensor*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
+                output_autograd_meta = f"  std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs);\n"
+                output_autograd_meta += f"  std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
         else:
             # Tuple api_result
             if IsPlainTensorType(rtype):
-                outputs_autograd_meta = f"  egr::EagerTensor* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(outputs[{pos}]);"
+                outputs_autograd_meta = f"  egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"  std::vector<egr::EagerTensor*> {output_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta(outputs[{pos}]);\n"
-                output_autograd_meta += f"  std::vector<egr::EagerTensor*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
+                output_autograd_meta = f"  std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&outputs[{pos}]);\n"
+                output_autograd_meta += f"  std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"

         outputs_autograd_meta_list.append(output_autograd_meta)
         pass_stop_gradient_args_list.append(output_autograd_meta_name)
...
@@ -610,19 +625,23 @@ def GenerateNodeCreationCodes(fwd_api_name, bwd_api_name,
     # Node Construction
     num_bwd_inputs = len(backward_grad_input_map.keys())
     num_bwd_outputs = len(backward_grad_output_map.keys())
-    node_construction_str = f"  auto grad_node = std::make_shared<GradNode{fwd_api_name}>({num_bwd_inputs}, {num_bwd_outputs});"
+    grad_node_name = GetGradNodeName(fwd_api_name)
+    node_construction_str = f"  auto grad_node = std::make_shared<{grad_node_name}>({num_bwd_inputs}, {num_bwd_outputs});"

     # SetAttributes
     set_attributes_list = []
     for name, _, _, _ in backward_attrs_list:
-        set_attributes = "  grad_node->SetAttribute{name}({name});"
+        set_attributes = f"  grad_node->SetAttribute{name}({name});"
         set_attributes_list.append(set_attributes)
     set_attributes_str = "\n".join(set_attributes_list)

     # SetTensorWrappers
     set_tensor_wrappers_list = []
-    for name, (_, _, _) in backward_fwd_input_map.items():
-        set_tensor_wrappers = f"  grad_node->SetTensorWrapper{name}({name});"
+    for name, (_, is_fwd_input, _) in backward_fwd_input_map.items():
+        if is_fwd_input:
+            set_tensor_wrappers = f"  grad_node->SetTensorWrapper{name}({name}, true);"
+        else:
+            set_tensor_wrappers = f"  grad_node->SetTensorWrapper{name}({name}, false);"
        set_tensor_wrappers_list.append(set_tensor_wrappers)
     set_tensor_wrappers_str = "\n".join(set_tensor_wrappers_list)
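
One small but real bug is fixed on the SetAttribute line: the old literal lacked the f prefix, so the text {name} was emitted verbatim into the generated C++ instead of being interpolated. A two-line demonstration:

    name = "scale"
    "  grad_node->SetAttribute{name}({name});"   # old: braces survive as-is
    f"  grad_node->SetAttribute{name}({name});"  # new: "  grad_node->SetAttributescale(scale);"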
...
@@ -727,7 +746,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     inputs_args_list = ["" for i in range(num_inputs)]
     inputs_call_list = ["" for i in range(num_inputs)]
     for name, (ttype, pos) in forward_inputs_position_map.items():
-        inputs_call_list[pos] = f"*{name}.Tensor().get()"
+        inputs_call_list[pos] = f"egr::EagerUtils::SyncToPtenTensors({name})"
         if IsPlainTensorType(ttype):
             inputs_args_list[pos] = f"const egr::EagerTensor& {name}"
         else:
...
@@ -746,7 +765,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
     inputs_call_args_str = ", ".join(inputs_call_list)

     # Forward Full Logic
-    forward_call_str = f"auto api_result = {fwd_api_name}({inputs_call_args_str});"
+    forward_call_str = f"auto api_result = paddle::experimental::{fwd_api_name}({inputs_call_args_str});"

     # Get return type list & outputs
     num_outputs = len(forward_outputs_position_map.keys())
...
@@ -783,7 +802,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
         backward_grad_output_map, backward_attrs_list)

     FORWARD_FUNCTION_TEMPLATE = """
-{} {}_dygraph_function({}) {{
+{} {}({}) {{
   // Forward API Call
   {}
...
@@ -796,11 +815,11 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
 }}
 """
+    forward_function_name = GetForwardFunctionName(fwd_api_name)
     forward_function_str = FORWARD_FUNCTION_TEMPLATE.format(
-        returns_type_str, fwd_api_name, inputs_args_str, forward_call_str,
-        returns_str, node_creation_str)
-
-    forward_function_declaration_str = f"{returns_type_str} {fwd_api_name}_dygraph_function({inputs_args_str});"
+        returns_type_str, forward_function_name, inputs_args_str,
+        forward_call_str, returns_str, node_creation_str)
+    forward_function_declaration_str = f"{returns_type_str} {forward_function_name}({inputs_args_str});"

     return forward_function_str, forward_function_declaration_str
...
@@ -809,11 +828,12 @@ def GenerateNodeCCFile(filepath, node_definition_str):
     file_contents = """
 #include "glog/logging.h"
 #include "paddle/pten/api/all.h"
+#include "paddle/pten/api/backward/backward_api.h"
 #include "paddle/fluid/imperative/tracer.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
-#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
 """
     file_contents += node_definition_str
...
@@ -836,8 +856,8 @@ def GenerateNodeHFile(filepath, node_declaration_str):
 def GenerateForwardCCFile(filepath, forward_definition_str):
     file_contents = """
-#include "paddle/fluid/eager/api/generated/eager_generated/dygraph_forward_api.h"
-#include "paddle/fluid/eager/api/generated/eager_generated/nodes/nodes.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
+#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/legacy/op_runner.h"
...
@@ -905,10 +925,17 @@ if __name__ == "__main__":
         # Collect Forward Inputs/Outputs
         forward_inputs_list, forward_attrs_list, forward_returns_list = ParseYamlForwardFromBackward(
             bwd_forward_str)
+        print("Parsed Forward Inputs List: ", forward_inputs_list)
+        print("Prased Forward Attrs List: ", forward_attrs_list)
+        print("Parsed Forward Returns List: ", forward_returns_list)

         # Collect Original Forward Inputs/Outputs and then perform validation checks
         orig_forward_inputs_list, orig_forward_attrs_list, orig_forward_returns_list = ParseYamlForward(
             fwd_args_str, fwd_returns_str)
+        print("Parsed Original Forward Inputs List: ", orig_forward_inputs_list)
+        print("Prased Original Forward Attrs List: ", orig_forward_attrs_list)
+        print("Parsed Original Forward Returns List: ",
+              orig_forward_returns_list)

         # Forward Validation Checks
         ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
...
@@ -919,15 +946,25 @@ if __name__ == "__main__":
         # Parse Backward Inputs/Outputs
         backward_inputs_list, backward_attrs_list, backward_returns_list = ParseYamlBackward(
             bwd_args_str, bwd_returns_str)
+        print("Parsed Backward Inputs List: ", backward_inputs_list)
+        print("Prased Backward Attrs List: ", backward_attrs_list)
+        print("Parsed Backward Returns List: ", backward_returns_list)

         # Determine Forward Inputs/Outputs Position
         forward_inputs_position_map, forward_outputs_position_map = DetermineForwardPositionMap(
             forward_inputs_list, forward_returns_list)
+        print("Generated Forward Input Position Map: ",
+              forward_inputs_position_map)
+        print("Generated Forward Output Position Map: ",
+              forward_outputs_position_map)

         # SlotName Matching
         backward_fwd_input_map, backward_grad_input_map, backward_grad_output_map = SlotNameMatching(
             backward_inputs_list, backward_returns_list,
             forward_inputs_position_map, forward_outputs_position_map)
+        print("Generated Backward Fwd Input Map: ", backward_fwd_input_map)
+        print("Generated Backward Grad Input Map: ", backward_grad_input_map)
+        print("Generated Backward Grad Output Map: ", backward_grad_output_map)

         # Backward Validation Check
         BackwardValidationCheck(backward_fwd_input_map, backward_grad_input_map,
...
@@ -936,11 +973,13 @@ if __name__ == "__main__":
         # Node Declaration Generation
         node_declaration_str += GenerateNodeDeclaration(
             fwd_api_name, backward_fwd_input_map, backward_attrs_list)
+        print("Generated Node Declaration: ", node_declaration_str)

         node_definition_str += GenerateNodeDefinition(
             fwd_api_name, bwd_api_name, backward_fwd_input_map,
             backward_grad_input_map, backward_grad_output_map,
             backward_attrs_list)
+        print("Generated Node Definition: ", node_definition_str)

         # Node Definition Generation
         definition_declaration_pair = GenerateForwardDefinition(
...
@@ -948,6 +987,8 @@ if __name__ == "__main__":
             forward_outputs_position_map, forward_attrs_list,
             backward_fwd_input_map, backward_grad_input_map,
             backward_grad_output_map, backward_attrs_list)
+        print("Generated Forward Definition: ", forward_definition_str)
+        print("Generated Forward Declaration: ", forward_declaration_str)
         forward_definition_str += definition_declaration_pair[0]
         forward_declaration_str += definition_declaration_pair[1]
...
@@ -957,6 +998,12 @@ if __name__ == "__main__":
     forwards_h_path = args.forwards_h_path
     forwards_cc_path = args.forwards_cc_path

+    for path in [
+            nodes_cc_path, nodes_h_path, forwards_h_path, forwards_cc_path
+    ]:
+        if os.path.exists(path):
+            os.remove(path)
+
     GenerateNodeCCFile(nodes_cc_path, node_definition_str)
     GenerateNodeHFile(nodes_h_path, node_declaration_str)
     GenerateForwardCCFile(forwards_cc_path, forward_definition_str)
...
...
@@ -15,9 +15,45 @@
 import sys
 import os

-if __name__ == "__main__":
-    assert len(sys.argv) == 2
-    eager_dir = sys.argv[1]
+
+def GenerateFileStructureForFinalDygraph(eager_dir):
+    """
+    paddle/fluid/eager
+    |- generated
+    |  |- CMakeLists.txt
+    |  |  "add_subdirectory(forwards), add_subdirectory(backwards)"
+    |
+    |  |- forwards
+    |     |- "dygraph_functions.cc"
+    |     |- "dygraph_functions.h"
+    |
+    |  |- backwards
+    |     |- "nodes.cc"
+    |     |- "nodes.h"
+    """
+    # Directory Generation
+    generated_dir = os.path.join(eager_dir, "api/generated/eager_generated")
+    forwards_dir = os.path.join(generated_dir, "forwards")
+    nodes_dir = os.path.join(generated_dir, "backwards")
+    dirs = [generated_dir, forwards_dir, nodes_dir]
+    for directory in dirs:
+        if not os.path.exists(directory):
+            os.mkdir(directory)
+
+    # Empty files
+    dygraph_forward_api_h_path = os.path.join(generated_dir,
+                                              "dygraph_functions.h")
+    empty_files = [dygraph_forward_api_h_path]
+    empty_files.append(os.path.join(forwards_dir, "dygraph_functions.cc"))
+    empty_files.append(os.path.join(nodes_dir, "nodes.cc"))
+    empty_files.append(os.path.join(nodes_dir, "nodes.h"))
+
+    for path in empty_files:
+        if not os.path.exists(path):
+            open(path, 'a').close()
+
+
+def GenerateFileStructureForIntermediateDygraph(eager_dir):
     """
     paddle/fluid/eager
     |- generated
...
@@ -79,3 +115,10 @@ if __name__ == "__main__":
     with open(generated_level_cmakelist_path, "w") as f:
         f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)")
+
+
+if __name__ == "__main__":
+    assert len(sys.argv) == 2
+    eager_dir = sys.argv[1]
+    GenerateFileStructureForIntermediateDygraph(eager_dir)
+    GenerateFileStructureForFinalDygraph(eager_dir)
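
The script keeps its original single-argument contract (the paddle/fluid/eager directory) and now builds both the intermediate and the final-state skeletons in one pass. A hypothetical invocation, with the script path assumed from the surrounding tree:

    # python paddle/fluid/eager/auto_code_generator/generate_file_structures.py \
    #     paddle/fluid/eager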
...
@@ -286,4 +286,43 @@ void EagerUtils::CheckAndRetainGrad(
   }
 }

+paddle::experimental::Tensor EagerUtils::SyncToPtenTensors(
+    const egr::EagerTensor& tensor) {
+  const_cast<EagerTensor*>(&tensor)->SyncToTensor();
+  return *tensor.Tensor().get();
+}
+
+std::vector<paddle::experimental::Tensor> EagerUtils::SyncToPtenTensors(
+    const std::vector<egr::EagerTensor>& tensors) {
+  std::vector<paddle::experimental::Tensor> res;
+  size_t num = tensors.size();
+  res.reserve(num);
+  for (size_t i = 0; i < num; i++) {
+    const_cast<EagerTensor*>(&(tensors[i]))->SyncToTensor();
+    res.push_back(*tensors[i].Tensor().get());
+  }
+  return res;
+}
+
+egr::EagerTensor EagerUtils::CreateEagerTensorFromTensor(
+    const paddle::experimental::Tensor& tensor) {
+  egr::EagerTensor ret;
+  ret.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensor));
+  return ret;
+}
+
+std::vector<egr::EagerTensor> EagerUtils::CreateEagerTensorFromTensor(
+    const std::vector<paddle::experimental::Tensor>& tensors) {
+  std::vector<egr::EagerTensor> res;
+  size_t num = tensors.size();
+  res.reserve(num);
+  for (size_t i = 0; i < num; i++) {
+    egr::EagerTensor tmp;
+    tmp.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensors[i]));
+    res.emplace_back(std::move(tmp));
+  }
+  return res;
+}
+
 }  // namespace egr
...
@@ -170,6 +170,16 @@ class EagerUtils {
   static void CheckAndRetainGrad(const egr::EagerTensor& tensor);
   static void CheckAndRetainGrad(const std::vector<egr::EagerTensor>& tensors);

+  static paddle::experimental::Tensor SyncToPtenTensors(
+      const egr::EagerTensor& tensor);
+  static std::vector<paddle::experimental::Tensor> SyncToPtenTensors(
+      const std::vector<egr::EagerTensor>& tensors);
+  static egr::EagerTensor CreateEagerTensorFromTensor(
+      const paddle::experimental::Tensor& tensor);
+  static std::vector<egr::EagerTensor> CreateEagerTensorFromTensor(
+      const std::vector<paddle::experimental::Tensor>& tensors);
 };

 }  // namespace egr