Unverified commit ec8a0c1d authored by Zhanlue Yang, committed by GitHub

Adjusted python-level trace_op to accommodate final state Eager Dygraph (#39319)

* Removed debug info

* Added automatic code generation for final state Eager Dygraph

* Modified backward yaml

* Added EagerUtils helper functions for final state CodeGen

* Adjusted CMakeFiles to support compilation for final state auto generated codes

* Added python-c code generation for final state Eager Dygraph

* Fixed minor issue

* Fixed yaml.load() method failure

* Fixed minor issues

* Refactored Python-C Attributes Parsing Functions

* Fixed minor issue with Python-C AddFunctions

* Adjusted python-level trace_op to accommodate final state Eager Dygraph

* Added Logs for final state Eager Dygraph

* Fixed merge issues

* Fixed minor issue
Parent 74a150fe
@@ -17,6 +17,12 @@ import re
import argparse
import os
# For API dispatch used at python-level
# { op_name : [arg_name, ...] }
core_ops_returns_info = {}
core_ops_args_info = {}
core_ops_args_type_info = {}
def ParseArguments():
parser = argparse.ArgumentParser(
@@ -130,17 +136,16 @@ def ParseYamlArgs(string):
attrs_list = []
args = [x.strip() for x in string.strip().split(",")]
atype = r'((const )?\S+) '
aname = r'(\S+)'
aname = r'(.*)'
pattern = f'{atype}{aname}'
for i in range(len(args)):
arg = args[i]
m = re.search(pattern, arg)
arg_type = m.group(1)
arg_name = m.group(3).split("=")[0]
default_value = m.group(3).split("=")[1] if len(m.group(3).split(
"=")) > 1 else None
arg_type = m.group(1).strip()
arg_name = m.group(3).split("=")[0].strip()
default_value = m.group(3).split("=")[1].strip() if len(
m.group(3).split("=")) > 1 else None
if "Tensor" in arg_type:
assert default_value is None
inputs_list.append([arg_name, arg_type, i])
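The looser `aname` pattern plus the added `.strip()` calls let the parser tolerate defaults written with spaces around `=`. A minimal standalone sketch of the new behavior, using made-up argument strings:

```python
import re

# Sketch of the parsing above; the argument strings are made up.
atype = r'((const )?\S+) '
aname = r'(.*)'  # was r'(\S+)', which drops anything after a space (e.g. "= false")
pattern = f'{atype}{aname}'

for arg in ["const Tensor& x", "bool transpose_x = false"]:
    m = re.search(pattern, arg)
    arg_type = m.group(1).strip()
    name_and_default = m.group(3).split("=")
    arg_name = name_and_default[0].strip()
    default_value = name_and_default[1].strip() if len(name_and_default) > 1 else None
    print(arg_type, arg_name, default_value)
# const Tensor& x None
# bool transpose_x false
```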
@@ -262,7 +267,6 @@ def ForwardsValidationCheck(forward_inputs_list, forward_attrs_list,
forward_attr_type = forward_attrs_list[i][1]
forward_attr_default = forward_attrs_list[i][2]
forward_attr_pos = forward_attrs_list[i][3]
assert orig_attr_type == forward_attr_type
assert orig_attr_default == forward_attr_default
assert orig_attr_pos == forward_attr_pos
@@ -741,26 +745,34 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
# Get Function Args
num_inputs = len(forward_attrs_list) + len(forward_inputs_position_map.keys(
))
inputs_args_list = ["" for i in range(num_inputs)]
inputs_args_definition_list = ["" for i in range(num_inputs)]
inputs_args_declaration_list = ["" for i in range(num_inputs)]
inputs_call_list = ["" for i in range(num_inputs)]
for name, (ttype, pos) in forward_inputs_position_map.items():
inputs_call_list[pos] = f"{name}"
if IsPlainTensorType(ttype):
inputs_args_list[
inputs_args_definition_list[
pos] = f"const paddle::experimental::Tensor& {name}"
inputs_args_declaration_list[
pos] = f"const paddle::experimental::Tensor& {name}"
else:
assert IsVectorTensorType(ttype)
inputs_args_list[
inputs_args_definition_list[
pos] = f"const std::vector<paddle::experimental::Tensor>& {name}"
inputs_args_declaration_list[
pos] = f"const std::vector<paddle::experimental::Tensor>& {name}"
for name, atype, default_val, pos in forward_attrs_list:
inputs_call_list[pos] = name
if default_val is not None:
inputs_args_list[pos] = f"{atype} {name} = {default_val}"
inputs_args_declaration_list[
pos] = f"{atype} {name} = {default_val}"
else:
inputs_args_list[pos] = f"{atype} {name}"
inputs_args_declaration_list[pos] = f"{atype} {name}"
inputs_args_definition_list[pos] = f"{atype} {name}"
inputs_args_str = ", ".join(inputs_args_list)
inputs_args_declaration_str = ", ".join(inputs_args_declaration_list)
inputs_args_definition_str = ", ".join(inputs_args_definition_list)
inputs_call_args_str = ", ".join(inputs_call_list)
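Splitting the single `inputs_args_list` into declaration and definition variants reflects a C++ rule: a default argument may be specified only once, in the declaration, while the definition must repeat the parameter without it. A hedged illustration with a made-up attribute:

```python
# Illustrative only: a made-up "float scale" attribute with default "1.0".
atype, name, default_val = "float", "scale", "1.0"
declaration_arg = f"{atype} {name} = {default_val}"  # goes into the generated .h
definition_arg = f"{atype} {name}"                   # goes into the generated .cc
print(declaration_arg)  # float scale = 1.0
print(definition_arg)   # float scale
```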
# Forward Full Logic
@@ -812,13 +824,95 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
forward_function_name = GetForwardFunctionName(fwd_api_name)
forward_function_str = FORWARD_FUNCTION_TEMPLATE.format(
returns_type_str, forward_function_name, inputs_args_str,
returns_type_str, forward_function_name, inputs_args_definition_str,
forward_call_str, node_creation_str, returns_str)
forward_function_declaration_str = f"{returns_type_str} {forward_function_name}({inputs_args_str});"
forward_function_declaration_str = f"{returns_type_str} {forward_function_name}({inputs_args_declaration_str});"
return forward_function_str, forward_function_declaration_str
def CollectCoreOpsInformation(fwd_api_name, forward_inputs_position_map,
forward_outputs_position_map, forward_attrs_list):
# fwd_api_name : ""
# forward_inputs_position_map = { "name" : [type, fwd_position] }
# forward_outputs_position_map = { "name" : [type, fwd_position] }
# forward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...]
num_args = len(forward_inputs_position_map.keys()) + len(forward_attrs_list)
num_returns = len(forward_outputs_position_map.keys())
final_state_fwd_api_name = "final_state_" + fwd_api_name
core_ops_returns_info[
final_state_fwd_api_name] = ["" for i in range(num_returns)]
core_ops_args_info[final_state_fwd_api_name] = ["" for i in range(num_args)]
core_ops_args_type_info[
final_state_fwd_api_name] = ["" for i in range(num_args)]
for name, (ttype, pos) in forward_inputs_position_map.items():
core_ops_args_info[final_state_fwd_api_name][pos] = name
if IsPlainTensorType(ttype):
core_ops_args_type_info[final_state_fwd_api_name][pos] = "tensor"
else:
assert IsVectorTensorType(ttype)
core_ops_args_type_info[final_state_fwd_api_name][pos] = "list"
for name, _, _, pos in forward_attrs_list:
core_ops_args_info[final_state_fwd_api_name][pos] = name
for name, (ttype, pos) in forward_outputs_position_map.items():
core_ops_returns_info[final_state_fwd_api_name][pos] = name
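For a hypothetical `matmul` forward API with inputs `(x, y)`, attrs `(transpose_x, transpose_y)`, and a single output `out`, the collected maps would look as follows; note that attribute slots in the type map are left as `""`, which is exactly what the python-level dispatcher later keys on:

```python
# Hypothetical contents after CollectCoreOpsInformation("matmul", ...):
core_ops_args_info = {
    "final_state_matmul": ["x", "y", "transpose_x", "transpose_y"]
}
core_ops_args_type_info = {
    "final_state_matmul": ["tensor", "tensor", "", ""]  # "" marks an attribute
}
core_ops_returns_info = {
    "final_state_matmul": ["out"]
}
```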
def GenerateCoreOpInfoDeclaration():
core_ops_declaration_str = """
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_type_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_returns_info;
"""
return core_ops_declaration_str
def GenerateCoreOpInfoDefinition():
CORE_OPS_INFO_TEMPLATE = """
std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_info = {{
{}
}};
std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_type_info = {{
{}
}};
std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_returns_info = {{
{}
}};
"""
op_args_info_list = []
for op_name, arg_list in core_ops_args_info.items():
arg_str = ",".join(["\"" + v + "\"" for v in arg_list])
op_args_info = f"{{ \"{op_name}\", {{ {arg_str} }} }},"
op_args_info_list.append(op_args_info)
op_types_info_list = []
for op_name, type_list in core_ops_args_type_info.items():
type_str = ",".join(["\"" + v + "\"" for v in type_list])
op_types_info = f"{{ \"{op_name}\", {{ {type_str} }} }},"
op_types_info_list.append(op_types_info)
op_returns_info_list = []
for op_name, return_list in core_ops_returns_info.items():
return_str = ",".join(["\"" + v + "\"" for v in return_list])
return_types_info = f"{{ \"{op_name}\", {{ {return_str} }} }},"
op_returns_info_list.append(return_types_info)
op_args_info_str = "\n".join(op_args_info_list)
op_types_info_str = "\n".join(op_types_info_list)
op_returns_info_str = "\n".join(op_returns_info_list)
core_ops_info_definition_str = CORE_OPS_INFO_TEMPLATE.format(
op_args_info_str, op_types_info_str, op_returns_info_str)
return core_ops_info_definition_str
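Continuing the hypothetical `matmul` example, the template above would render C++ along these lines:

```python
print(GenerateCoreOpInfoDefinition())
# std::unordered_map<std::string, std::vector<std::string>> core_ops_final_state_args_info = {
# { "final_state_matmul", { "x","y","transpose_x","transpose_y" } },
# };
# ... plus the analogous args_type and returns tables.
```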
def GenerateNodeCCFile(filepath, node_definition_str):
file_contents = """
#include "glog/logging.h"
@@ -856,6 +950,8 @@ def GenerateForwardCCFile(filepath, forward_definition_str):
#include "paddle/fluid/eager/api/utils/global_utils.h"
"""
file_contents += GenerateCoreOpInfoDefinition()
file_contents += forward_definition_str
with open(filepath, 'a') as f:
f.write(file_contents)
@@ -871,6 +967,7 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str):
#include "paddle/fluid/framework/op_registry.h"
"""
file_contents += GenerateCoreOpInfoDeclaration()
file_contents += forward_function_declaration_str
with open(filepath, 'a') as f:
f.write(file_contents)
@@ -985,6 +1082,11 @@ if __name__ == "__main__":
forward_definition_str += definition_declaration_pair[0]
forward_declaration_str += definition_declaration_pair[1]
# For python-level API dispatch
CollectCoreOpsInformation(fwd_api_name, forward_inputs_position_map,
forward_outputs_position_map,
forward_attrs_list)
# Generate Files
nodes_h_path = args.nodes_h_path
nodes_cc_path = args.nodes_cc_path
......
@@ -104,6 +104,8 @@ static PyObject * eager_final_state_api_{}(PyObject *self, PyObject *args, PyObj
PyThreadState *tstate = nullptr;
try
{{
VLOG(6) << "Running Eager Final State API: {}";
// Get EagerTensors from args
{}
@@ -129,16 +131,87 @@ static PyObject * eager_final_state_api_{}(PyObject *self, PyObject *args, PyObj
"""
python_c_function_str = PYTHON_C_FUNCTION_TEMPLATE.format(
fwd_api_name, get_eager_tensor_str, parse_attributes_str,
fwd_api_name, fwd_api_name, get_eager_tensor_str, parse_attributes_str,
GetForwardFunctionName(fwd_api_name), dygraph_function_call_str)
python_c_function_reg_str = f"{{\"final_state_{fwd_api_name}\", (PyCFunction)(void(*)(void))eager_final_state_api_{fwd_api_name}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {fwd_api_name} in dygraph.\"}}"
python_c_function_reg_str = f"{{\"final_state_{fwd_api_name}\", (PyCFunction)(void(*)(void))eager_final_state_api_{fwd_api_name}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {fwd_api_name} in dygraph.\"}},\n"
return python_c_function_str, python_c_function_reg_str
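For a hypothetical `fwd_api_name == "matmul"`, the registration string renders as a single `PyMethodDef` entry; the trailing `,\n` added in this change lets the extra core-ops registrations be appended directly in `GeneratePythonCWrappers` below:

```python
# Hypothetical rendering for fwd_api_name == "matmul":
# {"final_state_matmul", (PyCFunction)(void(*)(void))eager_final_state_api_matmul,
#  METH_VARARGS | METH_KEYWORDS, "C++ interface function for matmul in dygraph."},
```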
def GenerateCoreOpsInfoMap():
result = """
static PyObject * eager_get_final_state_core_ops_args_info(PyObject *self) {
PyThreadState *tstate = nullptr;
try
{
return ToPyObject(core_ops_final_state_args_info);
}
catch(...) {
if (tstate) {
PyEval_RestoreThread(tstate);
}
ThrowExceptionToPython(std::current_exception());
return nullptr;
}
}
static PyObject * eager_get_final_state_core_ops_args_type_info(PyObject *self) {
PyThreadState *tstate = nullptr;
try
{
return ToPyObject(core_ops_final_state_args_type_info);
}
catch(...) {
if (tstate) {
PyEval_RestoreThread(tstate);
}
ThrowExceptionToPython(std::current_exception());
return nullptr;
}
}
static PyObject * eager_get_final_state_core_ops_returns_info(PyObject *self) {
PyThreadState *tstate = nullptr;
try
{
return ToPyObject(core_ops_final_state_returns_info);
}
catch(...) {
if (tstate) {
PyEval_RestoreThread(tstate);
}
ThrowExceptionToPython(std::current_exception());
return nullptr;
}
}
"""
core_ops_infos_registry = """
{\"get_final_state_core_ops_args_info\",
(PyCFunction)(void(*)(void))eager_get_final_state_core_ops_args_info, METH_NOARGS,
\"C++ interface function for eager_get_final_state_core_ops_args_info.\"},
{\"get_final_state_core_ops_args_type_info\",
(PyCFunction)(void(*)(void))eager_get_final_state_core_ops_args_type_info,
METH_NOARGS,
\"C++ interface function for eager_get_final_state_core_ops_args_type_info.\"},
{\"get_final_state_core_ops_returns_info\",
(PyCFunction)(void(*)(void))eager_get_final_state_core_ops_returns_info,
METH_NOARGS, \"C++ interface function for eager_get_final_state_core_ops_returns_info.\"},
"""
return result, core_ops_infos_registry
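Once compiled in, the three getters surface on `paddle._C_ops`; a usage sketch mirroring the calls the tracer makes further below:

```python
from paddle import _C_ops

args_info = _C_ops.get_final_state_core_ops_args_info()
types_info = _C_ops.get_final_state_core_ops_args_type_info()
returns_info = _C_ops.get_final_state_core_ops_returns_info()
# e.g. args_info["final_state_matmul"] -> ["x", "y", "transpose_x", "transpose_y"]
```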
def GeneratePythonCWrappers(python_c_function_str, python_c_function_reg_str):
core_ops_infos_definition, core_ops_infos_registry = GenerateCoreOpsInfoMap(
)
python_c_function_str += core_ops_infos_definition
python_c_function_reg_str += core_ops_infos_registry
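# Append the standard CPython sentinel that terminates a PyMethodDef table;
# this replaces the append previously done in __main__ (removed below).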
python_c_function_reg_str += "\n {nullptr,nullptr,0,nullptr}"
PYTHON_C_WRAPPER_TEMPLATE = """
#pragma once
@@ -215,12 +288,12 @@ if __name__ == "__main__":
python_c_function_reg_list.append(python_c_function_reg_str)
print("Generated Python-C Function: ", python_c_function_str)
python_c_function_reg_list.append("{nullptr,nullptr,0,nullptr}")
python_c_functions_str = "\n".join(python_c_function_list)
python_c_functions_reg_str = ",\n".join(python_c_function_reg_list)
python_c_str = GeneratePythonCWrappers(python_c_functions_str,
python_c_functions_reg_str)
print("Generated Python-C Codes: ", python_c_str)
output_path = args.output_path
......
@@ -21,6 +21,17 @@ from paddle.fluid import core
from paddle.fluid import framework
from paddle import _C_ops
final_state_name_mapping = {
"matmul_v2": {
"final_op_name": "final_state_matmul",
"transpose_x": "trans_x",
"transpose_y": "trans_y",
"x": "X",
"y": "Y",
"out": "Out",
}
}
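Each entry maps the final-state argument names back to their legacy counterparts so values can be looked up in the incoming `inputs`/`outputs`/`attrs` dicts. A hedged sketch of the rerouting (tensor variables are hypothetical):

```python
# Illustrative only: a legacy call such as
#   tracer.trace_op("matmul_v2", {"X": [x], "Y": [y]}, {"Out": [out]},
#                   {"trans_x": False, "trans_y": False})
# matches final_state_name_mapping, asserts that "final_state_matmul"
# exists in _C_ops, and is rerouted to eager_final_state_trace_op below,
# which ultimately calls _C_ops.final_state_matmul(x, y, False, False)
# and copies the result back into `out` via reconstruct_from_.
```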
class Tracer(core.Tracer):
"""
@@ -40,17 +51,13 @@ class Tracer(core.Tracer):
self._train_mode = True
def trace_op(self,
def eager_trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
if framework._in_eager_mode():
# inputs : {"sum": [tensor], ...}
# outputs : {"sum": [tensor], ...}
function_ptr = _C_ops.__dict__[type]
core_ops_args_info = _C_ops.get_core_ops_args_info()
@@ -107,22 +114,127 @@ class Tracer(core.Tracer):
# Replaced outputs by function returns
if isinstance(returns[i], list):
for j in range(len(returns[i])):
outputs[retname][j].reconstruct_from_(returns[i]
[j])
outputs[retname][j].reconstruct_from_(returns[i][j],
False)
else:
outputs[retname][0].reconstruct_from_(returns[i])
outputs[retname][0].reconstruct_from_(returns[i], False)
elif isinstance(returns, list):
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
for j in range(len(returns)):
outputs[key][j].reconstruct_from_(returns[j])
outputs[key][j].reconstruct_from_(returns[j], False)
else:
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
if isinstance(outputs[key], list):
outputs[key][0].reconstruct_from_(returns)
outputs[key][0].reconstruct_from_(returns, False)
else:
outputs[key].reconstruct_from_(returns, False)
def eager_final_state_trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
assert type in final_state_name_mapping.keys()
final_state_type = final_state_name_mapping[type]["final_op_name"]
function_ptr = _C_ops.__dict__[final_state_type]
core_ops_args_info = _C_ops.get_final_state_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_final_state_core_ops_args_type_info(
)
core_ops_returns_info = _C_ops.get_final_state_core_ops_returns_info()
op_args = core_ops_args_info[final_state_type]
op_args_type = core_ops_args_type_info[final_state_type]
op_returns = core_ops_returns_info[final_state_type]
arg_list = []
for i in range(len(op_args)):
eager_arg_name = op_args[i]
arg_type = op_args_type[i]
assert eager_arg_name in final_state_name_mapping[type].keys()
arg_name = final_state_name_mapping[type][eager_arg_name]
if arg_name in inputs.keys():
arg_to_append = inputs[arg_name]
elif arg_name in outputs.keys():
arg_to_append = outputs[arg_name]
elif arg_name in attrs.keys() and arg_type == "":
arg_to_append = attrs[arg_name]
else:
# dispensable
arg_to_append = None
if arg_type == "":
# attribute
arg_list.append(arg_to_append)
elif arg_type == "tensor":
if isinstance(arg_to_append, list):
arg_list.append(arg_to_append[0])
else:
arg_list.append(arg_to_append)
elif arg_type == "list":
assert isinstance(arg_to_append, list)
arg_list.append(arg_to_append)
else:
assert arg_to_append is None
arg_list.append(arg_to_append)
returns = function_ptr(*arg_list)
if isinstance(returns, tuple):
for i in range(len(op_returns)):
eager_retname = op_returns[i]
assert eager_retname in final_state_name_mapping[type].keys()
retname = final_state_name_mapping[type][eager_retname]
if retname in outputs.keys():
# Replaced outputs by function returns
if isinstance(returns[i], list):
for j in range(len(returns[i])):
outputs[retname][j].reconstruct_from_(returns[i][j],
False)
else:
outputs[retname][0].reconstruct_from_(returns[i], False)
elif isinstance(returns, list):
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
for j in range(len(returns)):
outputs[key][j].reconstruct_from_(returns[j], False)
else:
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
if isinstance(outputs[key], list):
outputs[key][0].reconstruct_from_(returns, False)
else:
outputs[key].reconstruct_from_(returns, False)
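A worked trace of the assembly loop above for the `matmul_v2` mapping (all values hypothetical):

```python
# op_args      == ["x", "y", "transpose_x", "transpose_y"]
# op_args_type == ["tensor", "tensor", "", ""]
# inputs       == {"X": [x], "Y": [y]}        -> "x"/"y" resolve to the X/Y inputs
# attrs        == {"trans_x": False, "trans_y": False}
# The "tensor" slots unwrap the single-element input lists, the "" slots
# pull attribute values, so arg_list == [x, y, False, False] and the call is
#   returns = _C_ops.final_state_matmul(x, y, False, False)
```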
def trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
if framework._in_eager_mode():
# inputs : {"sum": [tensor], ...}
# outputs : {"sum": [tensor], ...}
if type in final_state_name_mapping.keys():
final_state_type = final_state_name_mapping[type][
"final_op_name"]
assert final_state_type in _C_ops.__dict__
self.eager_final_state_trace_op(type, inputs, outputs, attrs,
stop_gradient, inplace_map)
else:
outputs[key].reconstruct_from_(returns)
self.eager_trace_op(type, inputs, outputs, attrs, stop_gradient,
inplace_map)
else:
self.trace(type, inputs, outputs, attrs,
framework._current_expected_place(), self._has_grad and
......
- backward_api : matmul_grad
forward : matmul (const Tensor& x, const Tensor& y, bool transpose_x, bool transpose_y) -> Tensor(out)
forward : matmul (const Tensor& x, const Tensor& y, bool transpose_x=false, bool transpose_y=false) -> Tensor(out)
args : (const Tensor& x, const Tensor& y, const Tensor& out_grad, bool transpose_x=false, bool transpose_y=false)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
......
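Adding the `=false` defaults to the re-declared `forward` signature presumably keeps it consistent with the defaults on the forward `matmul` API in api.yaml, satisfying the `assert orig_attr_default == forward_attr_default` check in `ForwardsValidationCheck` above.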