From afe2fdd1f45ff1eb369352cca6a0ebc88b210894 Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Fri, 25 Mar 2022 23:38:20 +0800
Subject: [PATCH] update eager code gen (#40924)

* update

* remove useless code

* remove label smooth test

* polish code

* polish code

* polish code

* remove _in_eager_mode error;
---
 .../final_state_generator/eager_gen.py        | 33 ++++++++++++-------
 paddle/fluid/eager/utils.cc                   | 24 ++++++++++++++
 paddle/fluid/eager/utils.h                    |  5 +++
 paddle/fluid/pybind/eager_utils.cc            |  6 ++--
 paddle/fluid/pybind/eager_utils.h             |  2 +-
 paddle/phi/api/lib/data_transform.cc          | 12 ++++++-
 paddle/phi/api/lib/data_transform.h           |  5 +++
 .../tests/unittests/test_label_smooth_op.py   |  7 ++--
 python/paddle/utils/code_gen/api.yaml         |  1 +
 python/paddle/utils/code_gen/api_base.py      |  6 ++--
 python/paddle/utils/code_gen/backward.yaml    |  1 +
 .../utils/code_gen/wrapped_infermeta_gen.py   |  6 +++-
 12 files changed, 87 insertions(+), 21 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index cd59211f02..01ef711063 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -690,8 +690,13 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
         for name, (_, is_fwd_input,
                    grad_api_position), in backward_forward_inputs_map.items():
             tensor_wrapper_name = GetSavedName(name)
-            grad_api_args[
-                grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr)"
+            is_optional = (name in self.optional_inputs)
+            if is_optional:
+                grad_api_args[
+                    grad_api_position] = f"egr::EagerUtils::RecoverOptionalTensorWrapper(&this->{tensor_wrapper_name}, nullptr)"
+            else:
+                grad_api_args[
+                    grad_api_position] = f"egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr)"
 
         for _, (ttype, fwd_position,
                 grad_api_position) in backward_grad_inputs_map.items():
@@ -710,7 +715,8 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
 
         # Construct grad_api returns
         num_bwd_outputs = len(backward_grad_outputs_map.keys())
-        returns_str = f"std::vector<std::vector<paddle::experimental::Tensor>> returns({num_bwd_outputs});\n"
+        slot_num_bwd_outputs = len(self.forward_inputs_position_map.keys())
+        returns_str = f"std::vector<std::vector<paddle::experimental::Tensor>> returns({slot_num_bwd_outputs});\n"
         for _, (ttype, fwd_position,
                 grad_api_position) in backward_grad_outputs_map.items():
             # Infer Grad API Return Type
@@ -768,7 +774,7 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
             is_optional = (name in optional_inputs)
             if IsPlainTensorType(ttype):
                 if is_optional:
-                    arg_str = f"const paddle::optional<paddle::experimental::Tensor>& {name}"
+                    arg_str = f"const paddle::optional<const paddle::experimental::Tensor&> {name}"
                 else:
                     if inplace_map and name in inplace_map.keys():
                         arg_str = f"paddle::experimental::Tensor& {name}"
@@ -917,9 +923,9 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
                 bump_inplace_version_str += BUMP_INPLACE_VERSION_TEMPLATE.format(
                     inplace_name, inplace_name)
 
-        # Node Construction
-        num_backward_inputs = len(backward_grad_inputs_map.keys())
-        num_backward_outputs = len(backward_grad_outputs_map.keys())
+        # Node Construction
+        num_backward_inputs = len(forward_outputs_position_map.keys())
+        num_backward_outputs = len(forward_inputs_position_map.keys())
 
         grad_node_name = GetGradNodeName(forward_api_name)
         node_construction_str = f" auto grad_node = std::make_shared<{grad_node_name}>({num_backward_inputs}, {num_backward_outputs});"
@@ -946,7 +952,7 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
 
             if is_fwd_input:
                 if is_optional:
-                    set_tensor_wrappers = f" if({name}.is_initialized()) grad_node->SetTensorWrapper{name}({name}, true);"
+                    set_tensor_wrappers = f" if({name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({name}.get_ptr()), true);"
                 else:
                     set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name}, true);"
             else:
@@ -960,7 +966,7 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
                     tw_name = f"api_result"
 
                 if is_optional:
-                    set_tensor_wrappers = f" if({tw_name}.is_initialized()) grad_node->SetTensorWrapper{name}({tw_name}, false);"
+                    set_tensor_wrappers = f" if({tw_name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({tw_name}.get_ptr()), false);"
                 else:
                     set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({tw_name}, false);"
             set_tensor_wrappers_list.append(set_tensor_wrappers)
@@ -971,8 +977,13 @@ class DygraphSingleFunctionGenerator(FunctionGeneratorBase):
         set_edges_list = []
         for name, (_, pos) in forward_inputs_position_map.items():
             input_autograd_meta_name = GetAutoGradMetaName(name)
-            set_grad_out_meta = f" grad_node->SetGradOutMeta({name}, {pos});"
-            set_edges = f" grad_node->AddEdges({input_autograd_meta_name}, {pos});"
+            is_optional = (name in self.optional_inputs)
+            if is_optional:
+                set_grad_out_meta = f" if({name}.get_ptr() != nullptr) grad_node->SetGradOutMeta(*({name}.get_ptr()), {pos});"
+                set_edges = f" if({name}.get_ptr() != nullptr) grad_node->AddEdges({input_autograd_meta_name}, {pos});"
+            else:
+                set_grad_out_meta = f" grad_node->SetGradOutMeta({name}, {pos});"
+                set_edges = f" grad_node->AddEdges({input_autograd_meta_name}, {pos});"
             set_grad_out_meta_list.append(set_grad_out_meta)
             set_edges_list.append(set_edges)
         set_grad_out_meta_str = "\n".join(set_grad_out_meta_list)
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index aee7c87361..3d0972783d 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -72,6 +72,14 @@ AutogradMeta* EagerUtils::nullable_autograd_meta(
   return static_cast<AutogradMeta*>(p_autograd_meta);
 }
 
+AutogradMeta* EagerUtils::nullable_autograd_meta(
+    paddle::optional<const paddle::experimental::Tensor&> target) {
+  if (target.get_ptr() != nullptr) {
+    return EagerUtils::nullable_autograd_meta(*(target.get_ptr()));
+  }
+  return nullptr;
+}
+
 std::vector<AutogradMeta*> EagerUtils::nullable_autograd_meta(
     const std::vector<paddle::experimental::Tensor>& targets) {
   std::vector<AutogradMeta*> metas;
@@ -327,6 +335,22 @@ paddle::experimental::Tensor EagerUtils::RecoverTensorWrapper(
   return tw->recover(grad_node);
 }
 
+paddle::optional<const paddle::experimental::Tensor&>
+EagerUtils::RecoverOptionalTensorWrapper(
+    TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node) {
+  PADDLE_ENFORCE_NOT_NULL(
+      tw, phi::errors::InvalidArgument("TensorWrapper in "
+                                       "RecoverOptionalTensorWrapper function "
+                                       "should not be null"));
+  auto tmp = tw->recover(grad_node);
+
+  paddle::optional<const paddle::experimental::Tensor&> res{paddle::none};
+  if (tmp.initialized()) {
+    res = tmp;
+  }
+  return res;
+}
+
 std::vector<paddle::experimental::Tensor> EagerUtils::RecoverTensorWrapper(
     std::vector<TensorWrapper>* tw,
     const std::shared_ptr<GradNodeBase>& grad_node) {
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index 396837f101..537d6c59c0 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -121,6 +121,8 @@ class EagerUtils {
   // This method will return an AutogradMeta pointer unsafely.
   static AutogradMeta* nullable_autograd_meta(
       const paddle::experimental::Tensor& target);
+  static AutogradMeta* nullable_autograd_meta(
+      paddle::optional<const paddle::experimental::Tensor&> target);
   static std::vector<AutogradMeta*> nullable_autograd_meta(
       const std::vector<paddle::experimental::Tensor>& targets);
   static AutogradMeta* unsafe_autograd_meta(
       const paddle::experimental::Tensor& target);
@@ -164,6 +166,9 @@ class EagerUtils {
   static std::vector<paddle::experimental::Tensor> RecoverTensorWrapper(
       std::vector<TensorWrapper>* tw,
       const std::shared_ptr<GradNodeBase>& grad_node);
+  static paddle::optional<const paddle::experimental::Tensor&>
+  RecoverOptionalTensorWrapper(TensorWrapper* tw,
+                               const std::shared_ptr<GradNodeBase>& grad_node);
 
   // Intermidate needed remove this once we don't need legacy
   // Inner Method
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 2e884b212a..b4d316a957 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -606,7 +606,7 @@ PyObject* ToPyObject(
 
 // For Final State Dygraph,
 // We directly use paddle::optional(Tensor) as dispensable Tensor
-paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
+paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
     const std::string& op_type, const std::string& arg_name, PyObject* args,
     ssize_t arg_idx, bool dispensable) {
   PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
@@ -621,10 +621,10 @@
           "%s(): argument '%s' (position %d) must be Tensor, but got None",
           op_type, arg_name, arg_idx));
     }
-    return {};
+    return paddle::none;
   }
 
-  return paddle::make_optional<paddle::experimental::Tensor>(
+  return paddle::make_optional<const paddle::experimental::Tensor&>(
       reinterpret_cast<TensorObject*>(obj)->tensor);
 }
 
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 3500082ba6..13565cfe70 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -162,7 +162,7 @@ paddle::experimental::DataType CastPyArg2DataType(PyObject* obj,
                                                    const std::string& op_type,
                                                    ssize_t arg_pos);
 
-paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
+paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
     const std::string& op_type, const std::string& arg_name, PyObject* args,
    ssize_t arg_idx, bool dispensable = false);
 
diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc
index 7d886e50db..4e6ebe33ae 100644
--- a/paddle/phi/api/lib/data_transform.cc
+++ b/paddle/phi/api/lib/data_transform.cc
@@ -191,7 +191,6 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
           dense_tensor.layout(), target_args_def.layout, transform_flag))) {
     return std::static_pointer_cast<phi::DenseTensor>(tensor_in);
   }
-
   phi::DenseTensor out =
       TransformData(dense_tensor, target_args_def, transform_flag);
   return std::make_shared<phi::DenseTensor>(std::move(out));
@@ -207,6 +206,17 @@
   return {nullptr};
 }
 
+std::shared_ptr<phi::DenseTensor> PrepareData(
+    const paddle::optional<const Tensor&> input,
+    const phi::TensorArgDef& target_args_def,
+    const TransformFlag& transform_flag) {
+  if (input.get_ptr() != nullptr) {
+    return PrepareData(*(input.get_ptr()), target_args_def, transform_flag);
+  }
+
+  return {nullptr};
+}
+
 std::unique_ptr<std::vector<phi::DenseTensor>> PrepareData(
     const std::vector<Tensor>& inputs,
     const phi::TensorArgDef& target_args_def,
diff --git a/paddle/phi/api/lib/data_transform.h b/paddle/phi/api/lib/data_transform.h
index 8eb1c4a179..f5537961d0 100644
--- a/paddle/phi/api/lib/data_transform.h
+++ b/paddle/phi/api/lib/data_transform.h
@@ -76,5 +76,10 @@ std::unique_ptr<std::vector<phi::DenseTensor>> PrepareData(
     const phi::TensorArgDef& target_args_def,
     const TransformFlag& transform_flag);
 
+std::shared_ptr<phi::DenseTensor> PrepareData(
+    const paddle::optional<const Tensor&> input,
+    const phi::TensorArgDef& target_args_def,
+    const TransformFlag& transform_flag);
+
 }  // namespace experimental
 }  // namespace paddle
diff --git a/python/paddle/fluid/tests/unittests/test_label_smooth_op.py b/python/paddle/fluid/tests/unittests/test_label_smooth_op.py
index 222e1321fe..8ff6bb4967 100644
--- a/python/paddle/fluid/tests/unittests/test_label_smooth_op.py
+++ b/python/paddle/fluid/tests/unittests/test_label_smooth_op.py
@@ -17,11 +17,13 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle
 
 
 class TestLabelSmoothOp(OpTest):
     def config(self):
         self.op_type = "label_smooth"
+        self.python_api = paddle.nn.functional.label_smooth
         self.epsilon = 0.1
         batch_size, self.label_dim = 10, 12
         self.label = np.zeros((batch_size, self.label_dim)).astype("float64")
@@ -37,10 +39,10 @@ class TestLabelSmoothOp(OpTest):
         self.outputs = {'Out': smoothed_label}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=False)
 
 
 class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp):
@@ -72,4 +74,5 @@ class TestLabelSmoothOpWithPriorDist3D(TestLabelSmoothOpWithPriorDist):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 11e8d67ca4..8fb8a1e9e4 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -544,6 +544,7 @@
+
 # =================================== sep0
diff --git a/python/paddle/utils/code_gen/api_base.py b/python/paddle/utils/code_gen/api_base.py
index be407b5755..35d4cc7b5f 100644
--- a/python/paddle/utils/code_gen/api_base.py
+++ b/python/paddle/utils/code_gen/api_base.py
@@ -109,7 +109,7 @@ class BaseAPI(object):
             'int[]': 'const std::vector<int>&'
         }
         optional_types_trans = {
-            'Tensor': 'const paddle::optional<Tensor>&',
+            'Tensor': 'paddle::optional<const Tensor&>',
             'Tensor[]': 'const paddle::optional<std::vector<Tensor>>&',
             'int': 'paddle::optional<int>',
             'int32_t': 'paddle::optional<int32_t>',
@@ -502,7 +502,9 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
             'const Tensor&': 'const phi::DenseTensor&',
             'const std::vector<Tensor>&':
             'const std::vector<phi::DenseTensor>&',
-            'const paddle::optional<Tensor>&':
+            'const paddle::optional<Tensor&>':
+            'paddle::optional<const phi::DenseTensor&>',
+            'paddle::optional<const Tensor&>':
             'paddle::optional<const phi::DenseTensor&>',
             'const paddle::optional<std::vector<Tensor>>&':
             'paddle::optional<const std::vector<phi::DenseTensor>&>'
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 6b597ce44a..56ee071626 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -307,6 +307,7 @@
   kernel :
     func : mv_grad
 
+
 - backward_api : cast_grad
   forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
diff --git a/python/paddle/utils/code_gen/wrapped_infermeta_gen.py b/python/paddle/utils/code_gen/wrapped_infermeta_gen.py
index aab4b21974..13c35813aa 100644
--- a/python/paddle/utils/code_gen/wrapped_infermeta_gen.py
+++ b/python/paddle/utils/code_gen/wrapped_infermeta_gen.py
@@ -44,13 +44,17 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']}
             'Tensor': 'MetaTensor*',
             'std::vector<Tensor>': 'std::vector<MetaTensor>*',
             'const paddle::optional<Tensor&>':
-            'const paddle::optional<MetaTensor&>'
+            'const paddle::optional<MetaTensor&>',
+            'paddle::optional<const Tensor&>':
+            'paddle::optional<const MetaTensor&>'
         }
 
         wrapped_infermeta_name = get_wrapped_infermeta_name(api.api)
         args = []
+        print("@@@", api.api)
         for input_name in api.inputs['names']:
             if input_name in kernel_params:
+                print("type", api.inputs['input_info'])
                 args.append(tensor_type_map[api.inputs['input_info'][
                     input_name]] + ' ' + input_name)
         for attr_name in api.attrs['names']:
-- 
GitLab