Unverified commit afe2fdd1, authored by hong, committed by GitHub

update eager code gen (#40924)

* update

* remove useless code

* remove label smooth test

* polish code

* polish code

* polish code

* remove _in_eager_mode error;
Parent: c006a609
......@@ -690,6 +690,11 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
for name, (_, is_fwd_input,
grad_api_position), in backward_forward_inputs_map.items():
tensor_wrapper_name = GetSavedName(name)
is_optional = (name in self.optional_inputs)
if is_optional:
grad_api_args[
grad_api_position] = f"egr::EagerUtils::RecoverOptionalTensorWrapper(&this->{tensor_wrapper_name}, nullptr)"
else:
grad_api_args[
grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr)"
......@@ -710,7 +715,8 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
# Construct grad_api returns
num_bwd_outputs = len(backward_grad_outputs_map.keys())
returns_str = f"std::vector<std::vector<paddle::experimental::Tensor>> returns({num_bwd_outputs});\n"
slot_num_bwd_outputs = len(self.forward_inputs_position_map.keys())
returns_str = f"std::vector<std::vector<paddle::experimental::Tensor>> returns({slot_num_bwd_outputs});\n"
for _, (ttype, fwd_position,
grad_api_position) in backward_grad_outputs_map.items():
# Infer Grad API Return Type
......@@ -768,7 +774,7 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
is_optional = (name in optional_inputs)
if IsPlainTensorType(ttype):
if is_optional:
arg_str = f"const paddle::optional<paddle::experimental::Tensor>& {name}"
arg_str = f"const paddle::optional<const paddle::experimental::Tensor&> {name}"
else:
if inplace_map and name in inplace_map.keys():
arg_str = f"paddle::experimental::Tensor& {name}"
......@@ -918,8 +924,8 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
inplace_name, inplace_name)
# Node Construction
num_backward_inputs = len(backward_grad_inputs_map.keys())
num_backward_outputs = len(backward_grad_outputs_map.keys())
num_backward_inputs = len(forward_outputs_position_map.keys())
num_backward_outputs = len(forward_inputs_position_map.keys())
grad_node_name = GetGradNodeName(forward_api_name)
node_construction_str = f" auto grad_node = std::make_shared<{grad_node_name}>({num_backward_inputs}, {num_backward_outputs});"
......@@ -946,7 +952,7 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
if is_fwd_input:
if is_optional:
set_tensor_wrappers = f" if({name}.is_initialized()) grad_node->SetTensorWrapper{name}({name}, true);"
set_tensor_wrappers = f" if({name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({name}.get_ptr()), true);"
else:
set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name}, true);"
else:
......@@ -960,7 +966,7 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
tw_name = f"api_result"
if is_optional:
set_tensor_wrappers = f" if({tw_name}.is_initialized()) grad_node->SetTensorWrapper{name}({tw_name}, false);"
set_tensor_wrappers = f" if({tw_name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({tw_name}.get_ptr()), false);"
else:
set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({tw_name}, false);"
set_tensor_wrappers_list.append(set_tensor_wrappers)
......@@ -971,6 +977,11 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
set_edges_list = []
for name, (_, pos) in forward_inputs_position_map.items():
input_autograd_meta_name = GetAutoGradMetaName(name)
is_optional = (name in self.optional_inputs)
if is_optional:
set_grad_out_meta = f" if({name}.get_ptr() != nullptr) grad_node->SetGradOutMeta(*({name}.get_ptr()), {pos});"
set_edges = f" if({name}.get_ptr() != nullptr) grad_node->AddEdges({input_autograd_meta_name}, {pos});"
else:
set_grad_out_meta = f" grad_node->SetGradOutMeta({name}, {pos});"
set_edges = f" grad_node->AddEdges({input_autograd_meta_name}, {pos});"
set_grad_out_meta_list.append(set_grad_out_meta)
......
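Taken together, the eager_gen.py hunks above also change node construction and edge wiring: grad-node slot counts now come from the forward input/output maps, and optional inputs are guarded with get_ptr() checks instead of is_initialized(). A hedged illustration of the forward-function code that would result for an op with a required `x` and an optional `weight` (node and variable names are hypothetical):

// Sketch only: grad-node construction and tensor-wrapper/edge setup as the
// updated generator would emit it.
auto grad_node = std::make_shared<GradNodeSomeOp>(
    1 /* slots = number of forward outputs */,
    2 /* slots = number of forward inputs */);
grad_node->SetTensorWrapperx(x, true);
grad_node->SetGradOutMeta(x, 0);
grad_node->AddEdges(x_autograd_meta, 0);
if (weight.get_ptr() != nullptr) {
  grad_node->SetTensorWrapperweight(*(weight.get_ptr()), true);
  grad_node->SetGradOutMeta(*(weight.get_ptr()), 1);
  grad_node->AddEdges(weight_autograd_meta, 1);
}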
......@@ -72,6 +72,14 @@ AutogradMeta* EagerUtils::nullable_autograd_meta(
return static_cast<AutogradMeta*>(p_autograd_meta);
}
AutogradMeta* EagerUtils::nullable_autograd_meta(
paddle::optional<const paddle::experimental::Tensor&> target) {
if (target.get_ptr() != nullptr) {
return EagerUtils::nullable_autograd_meta(*(target.get_ptr()));
}
return nullptr;
}
std::vector<AutogradMeta*> EagerUtils::nullable_autograd_meta(
const std::vector<paddle::experimental::Tensor>& targets) {
std::vector<AutogradMeta*> metas;
......@@ -327,6 +335,22 @@ paddle::experimental::Tensor EagerUtils::RecoverTensorWrapper(
return tw->recover(grad_node);
}
paddle::optional<const paddle::experimental::Tensor&>
EagerUtils::RecoverOptionalTensorWrapper(
TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node) {
PADDLE_ENFORCE_NOT_NULL(
tw, phi::errors::InvalidArgument("TensorWrapper in "
"RecoverOptionalTensorWrapper function "
"should not be null"));
auto tmp = tw->recover(grad_node);
paddle::optional<const paddle::experimental::Tensor&> res{paddle::none};
if (tmp.initialized()) {
res = tmp;
}
return res;
}
std::vector<paddle::experimental::Tensor> EagerUtils::RecoverTensorWrapper(
std::vector<TensorWrapper>* tw,
const std::shared_ptr<GradNodeBase>& grad_node) {
......
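The new eager_utils.cc overloads above are the runtime side of the change: nullable_autograd_meta accepts the optional reference type, and RecoverOptionalTensorWrapper yields paddle::none when the saved tensor was never initialized. A hedged sketch of how a caller (such as generated grad-node code) consumes the result; the wrapper variable is hypothetical:

// Sketch only: consuming paddle::optional<const Tensor&> via get_ptr().
paddle::optional<const paddle::experimental::Tensor&> maybe_bias =
    egr::EagerUtils::RecoverOptionalTensorWrapper(&bias_wrapper, nullptr);
// Returns nullptr when the optional holds paddle::none.
egr::AutogradMeta* bias_meta =
    egr::EagerUtils::nullable_autograd_meta(maybe_bias);
if (maybe_bias.get_ptr() != nullptr) {
  const paddle::experimental::Tensor& bias = *(maybe_bias.get_ptr());
  // ... use `bias` only inside the null check ...
}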
......@@ -121,6 +121,8 @@ class EagerUtils {
// This method will return an AutogradMeta pointer unsafely.
static AutogradMeta* nullable_autograd_meta(
const paddle::experimental::Tensor& target);
static AutogradMeta* nullable_autograd_meta(
paddle::optional<const paddle::experimental::Tensor&> target);
static std::vector<AutogradMeta*> nullable_autograd_meta(
const std::vector<paddle::experimental::Tensor>& targets);
static AutogradMeta* unsafe_autograd_meta(
......@@ -164,6 +166,9 @@ class EagerUtils {
static std::vector<paddle::experimental::Tensor> RecoverTensorWrapper(
std::vector<TensorWrapper>* tw,
const std::shared_ptr<GradNodeBase>& grad_node);
static paddle::optional<const paddle::experimental::Tensor&>
RecoverOptionalTensorWrapper(TensorWrapper* tw,
const std::shared_ptr<GradNodeBase>& grad_node);
// Intermidate needed remove this once we don't need legacy
// Inner Method
......
......@@ -606,7 +606,7 @@ PyObject* ToPyObject(
// For Final State Dygraph,
// We directly use paddle::optional(Tensor) as dispensable Tensor
paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) {
PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
......@@ -621,10 +621,10 @@ paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
"%s(): argument '%s' (position %d) must be Tensor, but got None",
op_type, arg_name, arg_idx));
}
return {};
return paddle::none;
}
return paddle::make_optional<paddle::experimental::Tensor>(
return paddle::make_optional<const paddle::experimental::Tensor&>(
reinterpret_cast<TensorObject*>(obj)->tensor);
}
......
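With this change GetOptionalTensorFromArgs hands back a reference-typed optional, so a dispensable Python argument flows into the final-state API without copying the tensor. A hedged sketch of a generated python-C binding using it (the op, argument name, and position are hypothetical):

// Sketch only: parsing a dispensable "SeedTensor" argument in a generated
// eager python-C function.
paddle::optional<const paddle::experimental::Tensor&> seed_tensor =
    GetOptionalTensorFromArgs("dropout", "SeedTensor", args, 2,
                              /*dispensable=*/true);
// `seed_tensor` is paddle::none when Python passed None; otherwise it wraps
// a reference to the TensorObject's tensor, matching the
// paddle::optional<const Tensor&> parameter of the final-state API.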
......@@ -162,7 +162,7 @@ paddle::experimental::DataType CastPyArg2DataType(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
......
......@@ -191,7 +191,6 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
dense_tensor.layout(), target_args_def.layout, transform_flag))) {
return std::static_pointer_cast<phi::DenseTensor>(tensor_in);
}
phi::DenseTensor out =
TransformData(dense_tensor, target_args_def, transform_flag);
return std::make_shared<phi::DenseTensor>(std::move(out));
......@@ -207,6 +206,17 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
return {nullptr};
}
std::shared_ptr<phi::DenseTensor> PrepareData(
const paddle::optional<const Tensor&> input,
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag) {
if (input.get_ptr() != nullptr) {
return PrepareData(*(input.get_ptr()), target_args_def, transform_flag);
}
return {nullptr};
}
std::unique_ptr<std::vector<phi::DenseTensor>> PrepareData(
const std::vector<Tensor>& inputs,
const phi::TensorArgDef& target_args_def,
......
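The new PrepareData overload above lets a generated final-state API feed an optional input through the same layout/dtype transform path as a required one, returning a null shared_ptr when the input is absent. A hedged sketch of a call site (kernel selection and backend details elided; the variable names are hypothetical):

// Sketch only: preparing an optional input inside a generated API function.
std::shared_ptr<phi::DenseTensor> input_scale =
    PrepareData(scale, kernel.InputAt(1), {});
// When `scale` was not provided, input_scale is {nullptr}; the kernel call
// site can then pass paddle::optional<const phi::DenseTensor&>{paddle::none}.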
......@@ -76,5 +76,10 @@ std::unique_ptr<std::vector<phi::DenseTensor>> PrepareData(
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag);
std::shared_ptr<phi::DenseTensor> PrepareData(
const paddle::optional<const Tensor&> input,
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag);
} // namespace experimental
} // namespace paddle
......@@ -17,11 +17,13 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
class TestLabelSmoothOp(OpTest):
def config(self):
self.op_type = "label_smooth"
self.python_api = paddle.nn.functional.label_smooth
self.epsilon = 0.1
batch_size, self.label_dim = 10, 12
self.label = np.zeros((batch_size, self.label_dim)).astype("float64")
......@@ -37,10 +39,10 @@ class TestLabelSmoothOp(OpTest):
self.outputs = {'Out': smoothed_label}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(["X"], "Out")
self.check_grad(["X"], "Out", check_eager=False)
class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp):
......@@ -72,4 +74,5 @@ class TestLabelSmoothOpWithPriorDist3D(TestLabelSmoothOpWithPriorDist):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -544,6 +544,7 @@
# =================================== sep0
......
......@@ -109,7 +109,7 @@ class BaseAPI(object):
'int[]': 'const std::vector<int>&'
}
optional_types_trans = {
'Tensor': 'const paddle::optional<Tensor>&',
'Tensor': 'paddle::optional<const Tensor&>',
'Tensor[]': 'const paddle::optional<std::vector<Tensor>>&',
'int': 'paddle::optional<int>',
'int32_t': 'paddle::optional<int32_t>',
......@@ -502,7 +502,9 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
'const Tensor&': 'const phi::DenseTensor&',
'const std::vector<Tensor>&':
'const std::vector<const phi::DenseTensor*>&',
'const paddle::optional<Tensor>&':
'const paddle::optional<Tensor&>':
'paddle::optional<const phi::DenseTensor&>',
'paddle::optional<const Tensor&>':
'paddle::optional<const phi::DenseTensor&>',
'const paddle::optional<std::vector<Tensor>>&':
'paddle::optional<const std::vector<phi::DenseTensor>&>'
......
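The optional_types_trans change above determines the public C++ signature of every generated API with an optional Tensor input. A hedged illustration of the effect on a generated declaration (the op name is hypothetical):

// Before: const paddle::optional<Tensor>& seed_tensor
// After this change the generated header declares roughly:
PADDLE_API Tensor some_op(const Tensor& x,
                          paddle::optional<const Tensor&> seed_tensor);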
......@@ -307,6 +307,7 @@
kernel :
func : mv_grad
- backward_api : cast_grad
forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......
......@@ -44,13 +44,17 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']}
'Tensor': 'MetaTensor*',
'std::vector<Tensor>': 'std::vector<MetaTensor>*',
'const paddle::optional<Tensor&>':
'const paddle::optional<MetaTensor&>'
'const paddle::optional<MetaTensor&>',
'paddle::optional<const Tensor&>':
'paddle::optional<const MetaTensor&>'
}
wrapped_infermeta_name = get_wrapped_infermeta_name(api.api)
args = []
print("@@@", api.api)
for input_name in api.inputs['names']:
if input_name in kernel_params:
print("type", api.inputs['input_info'])
args.append(tensor_type_map[api.inputs['input_info'][
input_name]] + ' ' + input_name)
for attr_name in api.attrs['names']:
......
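The wrapped_infermeta_gen hunk adds the matching MetaTensor mapping so that wrapped InferMeta functions accept the same optional reference style. A hedged sketch of a wrapper signature produced under that mapping (the function name and parameters are hypothetical):

// Sketch only: a wrapped InferMeta signature generated with the new mapping.
void SomeOpInferMeta(const MetaTensor& x,
                     paddle::optional<const MetaTensor&> scale,
                     MetaTensor* out);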