From 176df91ce2c4b0ec1418783e644751d046f07793 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Wed, 6 Apr 2022 10:19:02 +0800
Subject: [PATCH] Add some op yaml (#41173)

* add real and imag yaml

* add roi_align and roi_pool yaml

* add qr yaml

* add psroi_pool yaml

* fix bug

* fix param bug of psroi_pool

* fix infrt problem

* fix merge bug
---
 paddle/phi/api/lib/CMakeLists.txt             |  2 +-
 paddle/phi/api/lib/api_custom_impl.cc         | 57 +++++++++++++++++++
 paddle/phi/api/lib/api_custom_impl.h          |  6 ++
 paddle/phi/infermeta/backward.cc              |  7 +++
 paddle/phi/infermeta/backward.h               |  2 +
 .../tests/unittests/test_psroi_pool_op.py     |  8 ++-
 .../tests/unittests/test_real_imag_op.py      |  7 ++-
 .../fluid/tests/unittests/test_roi_pool_op.py | 10 +++-
 python/paddle/tensor/attribute.py             | 11 +++-
 python/paddle/utils/code_gen/api.yaml         | 48 ++++++++++++++++
 python/paddle/utils/code_gen/backward.yaml    | 31 ++++++++--
 python/paddle/vision/ops.py                   | 12 +++-
 tools/infrt/skipped_phi_api.json              |  2 +-
 13 files changed, 185 insertions(+), 18 deletions(-)

diff --git a/paddle/phi/api/lib/CMakeLists.txt b/paddle/phi/api/lib/CMakeLists.txt
index d4d8a0fa8a3..7dfe7d8cf4d 100644
--- a/paddle/phi/api/lib/CMakeLists.txt
+++ b/paddle/phi/api/lib/CMakeLists.txt
@@ -165,7 +165,7 @@ cc_library(context_pool SRCS context_pool.cc DEPS phi_context phi_enforce place)
 cc_library(kernel_dispatch SRCS kernel_dispatch.cc DEPS phi_tensor_raw phi_context kernel_factory context_pool)
 cc_library(api_gen_utils SRCS api_gen_utils.cc DEPS phi_tensor_raw selected_rows sparse_csr_tensor sparse_coo_tensor)
 cc_library(phi_data_transform SRCS data_transform.cc DEPS phi_tensor_raw transfer_layout_kernel cast_kernel data_device_transform)
-cc_library(api_custom_impl SRCS api_custom_impl.cc DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform backward_infermeta)
+cc_library(api_custom_impl SRCS api_custom_impl.cc DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils backward_infermeta phi_data_transform)
 cc_library(sparse_api_custom_impl SRCS sparse_api_custom_impl.cc DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform)
 
 cc_library(phi_function_api SRCS ${api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform api_custom_impl)

diff --git a/paddle/phi/api/lib/api_custom_impl.cc b/paddle/phi/api/lib/api_custom_impl.cc
index 8ea9204fa9a..f559027fdd4 100644
--- a/paddle/phi/api/lib/api_custom_impl.cc
+++ b/paddle/phi/api/lib/api_custom_impl.cc
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/phi/api/lib/data_transform.h"
 #include "paddle/phi/api/lib/kernel_dispatch.h"
 #include "paddle/phi/api/lib/utils/storage.h"
+#include "paddle/phi/common/type_traits.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/meta_tensor.h"
@@ -716,6 +717,62 @@ std::vector<Tensor> concat_grad_impl(const std::vector<Tensor>& x,
   return x_grad;
 }
 
+Tensor imag_grad_impl(const Tensor& out_grad) {
+  phi::KernelKey kernel_key{ParseBackend(out_grad),
+                            out_grad.layout(),
+                            phi::dtype::ToComplex(out_grad.dtype())};
+  auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "imag_grad", kernel_key);
+
+  VLOG(6) << "imag_grad API kernel key: " << kernel_key;
+  VLOG(6) << "imag_grad API kernel: " << kernel;
+
+  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
+
+  auto dense_out_grad = TensorToDenseTensor(out_grad);
+
+  Tensor out;
+  auto kernel_out = SetKernelOutput(kernel_key.backend(), &out);
+  phi::MetaTensor meta_out(kernel_out);
+  phi::RealAndImagGradInferMeta(*dense_out_grad, &meta_out);
+
+  using kernel_signature = void (*)(
+      const phi::DeviceContext&, const phi::DenseTensor&, phi::DenseTensor*);
+
+  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
+  (*kernel_fn)(*dev_ctx, *dense_out_grad, kernel_out);
+
+  return out;
+}
+
+Tensor real_grad_impl(const Tensor& out_grad) {
+  phi::KernelKey kernel_key{ParseBackend(out_grad),
+                            out_grad.layout(),
+                            phi::dtype::ToComplex(out_grad.dtype())};
+  auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "real_grad", kernel_key);
+
+  VLOG(6) << "real_grad API kernel key: " << kernel_key;
+  VLOG(6) << "real_grad API kernel: " << kernel;
+
+  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
+
+  auto dense_out_grad = TensorToDenseTensor(out_grad);
+
+  Tensor out;
+  auto kernel_out = SetKernelOutput(kernel_key.backend(), &out);
+  phi::MetaTensor meta_out(kernel_out);
+  phi::RealAndImagGradInferMeta(*dense_out_grad, &meta_out);
+
+  using kernel_signature = void (*)(
+      const phi::DeviceContext&, const phi::DenseTensor&, phi::DenseTensor*);
+
+  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
+  (*kernel_fn)(*dev_ctx, *dense_out_grad, kernel_out);
+
+  return out;
+}
+
 std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
                                     const Tensor& out_grad,
                                     int axis) {

diff --git a/paddle/phi/api/lib/api_custom_impl.h b/paddle/phi/api/lib/api_custom_impl.h
index 91b94fd74c9..4745782d914 100644
--- a/paddle/phi/api/lib/api_custom_impl.h
+++ b/paddle/phi/api/lib/api_custom_impl.h
@@ -92,10 +92,16 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> batch_norm_impl(
     bool trainable_statistics,
     bool fuse_with_relu);
 
+/************************ backward api impl ***************************/
+
 std::vector<Tensor> concat_grad_impl(const std::vector<Tensor>& x,
                                      const Tensor& out_grad,
                                      const Scalar& axis);
 
+Tensor imag_grad_impl(const Tensor& x);
+
+Tensor real_grad_impl(const Tensor& x);
+
 std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
                                     const Tensor& out_grad,
                                     int axis);

diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc
index 4e029d4c27c..43d7d0393dd 100644
--- a/paddle/phi/infermeta/backward.cc
+++ b/paddle/phi/infermeta/backward.cc
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #include "paddle/phi/infermeta/backward.h"
 
+#include "paddle/phi/common/type_traits.h"
 #include "paddle/phi/kernels/funcs/axis_utils.h"
 
 namespace phi {
@@ -402,6 +403,12 @@ void PsroiPoolGradInferMeta(const MetaTensor& x,
   dx->share_meta(x);
 }
 
+void RealAndImagGradInferMeta(const MetaTensor& out_grad, MetaTensor* dx) {
+  dx->set_dims(out_grad.dims());
+  dx->set_dtype(dtype::ToComplex(out_grad.dtype()));
+  dx->set_layout(out_grad.layout());
+}
+
 void ScatterGradInferMeta(const MetaTensor& index,
                           const MetaTensor& updates,
                           const MetaTensor& out_grad,

diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h
index 3cd4875e999..432c1aacfcf 100644
--- a/paddle/phi/infermeta/backward.h
+++ b/paddle/phi/infermeta/backward.h
@@ -174,6 +174,8 @@ void PoolGradInferMeta(const MetaTensor& x,
                        const std::string& padding_algorithm,
                        MetaTensor* dx);
 
+void RealAndImagGradInferMeta(const MetaTensor& out_grad, MetaTensor* dx);
+
 void ScatterGradInferMeta(const MetaTensor& index,
                           const MetaTensor& updates,
                           const MetaTensor& out_grad,

diff --git a/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py b/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py
index 95b8c5c3c0a..39dec982b66 100644
--- a/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py
+++ b/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py
@@ -95,7 +95,8 @@ class TestPSROIPoolOp(OpTest):
                                     self.pooled_width).astype('float64')
         self.inputs = {
             'X': self.x,
-            'ROIs': (self.rois_with_batch_id[:, 1:5], self.rois_lod)
+            'ROIs': (self.rois_with_batch_id[:, 1:5], self.rois_lod),
+            'RoisNum': self.boxes_num
         }
         self.attrs = {
             'output_channels': self.output_channels,
@@ -145,13 +146,14 @@ class TestPSROIPoolOp(OpTest):
 
     def setUp(self):
         self.op_type = 'psroi_pool'
+        self.python_api = lambda x, boxes, boxes_num, pooled_height, pooled_width, output_channels, spatial_scale: paddle.vision.ops.psroi_pool(x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale)
         self.set_data()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):

diff --git a/python/paddle/fluid/tests/unittests/test_real_imag_op.py b/python/paddle/fluid/tests/unittests/test_real_imag_op.py
index ab24506f801..523f48374ea 100644
--- a/python/paddle/fluid/tests/unittests/test_real_imag_op.py
+++ b/python/paddle/fluid/tests/unittests/test_real_imag_op.py
@@ -39,6 +39,7 @@ class TestRealOp(OpTest):
         paddle.enable_static()
         # op test attrs
         self.op_type = "real"
+        self.python_api = paddle.real
         self.dtype = np.float64
         self.init_input_output()
         # backward attrs
@@ -58,14 +59,15 @@ class TestRealOp(OpTest):
                 self.grad_out.shape)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
         self.check_grad(
             ['X'],
             'Out',
             user_defined_grads=[self.grad_x],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)
 
 
 class TestImagOp(TestRealOp):
@@ -74,6 +76,7 @@ class TestImagOp(TestRealOp):
         paddle.enable_static()
         # op test attrs
         self.op_type = "imag"
+        self.python_api = paddle.imag
         self.dtype = np.float64
         self.init_input_output()
         # backward attrs

diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py
index c6622cf8d9c..f0afcff63c6 100644
--- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py
+++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py
@@ -14,6 +14,7 @@
 
 from __future__ import print_function
 
+import paddle
 import unittest
 import numpy as np
 import math
@@ -32,6 +33,7 @@ class TestROIPoolOp(OpTest):
         self.inputs = {
             'X': self.x,
             'ROIs': (self.rois[:, 1:5], self.rois_lod),
+            'RoisNum': self.boxes_num
         }
 
         self.attrs = {
@@ -130,16 +132,20 @@ class TestROIPoolOp(OpTest):
                 rois.append(roi)
         self.rois_num = len(rois)
         self.rois = np.array(rois).astype("float64")
+        self.boxes_num = np.array(
+            [bno + 1 for bno in range(self.batch_size)]).astype('int32')
 
     def setUp(self):
         self.op_type = "roi_pool"
+        self.python_api = lambda x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale: paddle.vision.ops.roi_pool(x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale)
+        self.python_out_sig = ["Out"]
         self.set_data()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class BadInputTestRoiPool(unittest.TestCase):

diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py
index b851f6db4ac..07db7794b6d 100644
--- a/python/paddle/tensor/attribute.py
+++ b/python/paddle/tensor/attribute.py
@@ -18,12 +18,13 @@ from ..framework import core
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype
 
-# TODO: define functions to get tensor attributes  
+# TODO: define functions to get tensor attributes
 from ..fluid.layers import rank  # noqa: F401
 from ..fluid.layers import shape  # noqa: F401
 import paddle
 from paddle import _C_ops
 from paddle.static import Variable
+from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
 
 __all__ = []
 
@@ -185,7 +186,9 @@ def real(x, name=None):
             #         [[1., 2., 3.],
             #          [4., 5., 6.]])
     """
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_real(x)
+    if _in_legacy_dygraph():
         return _C_ops.real(x)
 
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
@@ -229,7 +232,9 @@ def imag(x, name=None):
             #         [[6., 5., 4.],
             #          [3., 2., 1.]])
     """
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_imag(x)
+    if _in_legacy_dygraph():
         return _C_ops.imag(x)
 
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')

diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index a3e5c3fad7e..93d14b1744e 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -802,6 +802,15 @@
     func : huber_loss
   # backward : huber_loss_grad
 
+- api : imag
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : RealAndImagInferMeta
+  kernel :
+    func : imag
+  backward : imag_grad
+
 # increment
 - api : increment
   args : (Tensor x, float value)
@@ -1336,6 +1345,16 @@
     func : prelu
   backward : prelu_grad
 
+- api : psroi_pool
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
+  output : Tensor
+  infer_meta :
+    func : PsroiPoolInferMeta
+  kernel :
+    func : psroi_pool
+  optional : boxes_num
+  backward : psroi_pool_grad
+
 # put_along_axis
 - api : put_along_axis
   args : (Tensor x, Tensor index, Tensor value, int axis, str reduce)
@@ -1348,6 +1367,15 @@
     data_type : x
   backward : put_along_axis_grad
 
+- api : qr
+  args : (Tensor x, str mode)
+  output : Tensor(q), Tensor(r)
+  infer_meta :
+    func : QrInferMeta
+  kernel :
+    func : qr
+  # backward : qr_grad
+
 - api : randint
   args : (int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
   output : Tensor(out)
@@ -1372,6 +1400,15 @@
     data_type : dtype
     backend : place
 
+- api : real
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : RealAndImagInferMeta
+  kernel :
+    func : real
+  backward : real_grad
+
 - api : reciprocal
   args : (Tensor x)
   output : Tensor
@@ -1423,6 +1460,17 @@
   optional : boxes_num
   backward : roi_align_grad
 
+- api : roi_pool
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale)
+  output : Tensor(out), Tensor(arg_max)
+  infer_meta :
+    func : RoiPoolInferMeta
+  kernel :
+    func : roi_pool
+  optional : boxes_num
+  intermediate : arg_max
+  backward : roi_pool_grad
+
 - api : roll
   args : (Tensor x, IntArray shifts, int64_t[] axis)
   output : Tensor(out)

diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index f49b804937d..4cb411634a0 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -537,6 +537,12 @@
   kernel :
     func : hard_sigmoid_grad
 
+- backward_api : imag_grad
+  forward : imag (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  invoke : imag_grad_impl(out_grad)
+
 - backward_api : index_sample_grad
   forward : index_sample (Tensor x, Tensor index) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad)
@@ -961,15 +967,15 @@
     func : prelu_grad
 
 - backward_api : psroi_pool_grad
-  forward : psroi_pool (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale ) -> Tensor(out)
-  args : (Tensor x, Tensor rois, Tensor rois_num, Tensor out_grad, int pooled_weight, int pooled_width, int output_channels, float spatial_scale)
+  forward : psroi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale) -> Tensor(out)
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
   output : Tensor(x_grad)
   infer_meta :
-    func : UnchangedInferMeta
+    func : GeneralUnaryGradInferMeta
     param : [x]
   kernel :
     func : psroi_pool_grad
-  optional : rois_num
+  optional : boxes_num
 
 # output is optional
 - backward_api : put_along_axis_grad
@@ -982,6 +988,12 @@
   kernel :
     func : put_along_axis_grad
 
+- backward_api : real_grad
+  forward : real (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  invoke : real_grad_impl(out_grad)
+
 - backward_api : reciprocal_grad
   forward : reciprocal (Tensor x) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)
@@ -1048,6 +1060,17 @@
     func : roi_align_grad
   optional : boxes_num
 
+- backward_api : roi_pool_grad
+  forward : roi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale) -> Tensor(out), Tensor(arg_max)
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor arg_max, Tensor out_grad, int pooled_height, int pooled_width, float spatial_scale)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : roi_pool_grad
+  optional : boxes_num
+
 - backward_api : roll_grad
   forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)

diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index 7d29e4b1c9c..2ed01d42cfb 100644
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -959,7 +959,11 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
     assert len(x.shape) == 4, \
             "Input features with shape should be (N, C, H, W)"
     output_channels = int(x.shape[1] / (pooled_height * pooled_width))
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_psroi_pool(x, boxes, boxes_num, pooled_height,
+                                             pooled_width, output_channels,
+                                             spatial_scale)
+    if _in_legacy_dygraph():
         return _C_ops.psroi_pool(x, boxes, boxes_num, "output_channels",
                                  output_channels, "spatial_scale",
                                  spatial_scale, "pooled_height", pooled_height,
@@ -1069,7 +1073,11 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
         output_size = (output_size, output_size)
 
     pooled_height, pooled_width = output_size
-    if _non_static_mode():
+    if in_dygraph_mode():
+        assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
+        return _C_ops.final_state_roi_pool(x, boxes, boxes_num, pooled_height,
+                                           pooled_width, spatial_scale)
+    if _in_legacy_dygraph():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
         pool_out, argmaxes = _C_ops.roi_pool(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",

diff --git a/tools/infrt/skipped_phi_api.json b/tools/infrt/skipped_phi_api.json
index b1ce8596f85..72317c9eb05 100644
--- a/tools/infrt/skipped_phi_api.json
+++ b/tools/infrt/skipped_phi_api.json
@@ -1,4 +1,4 @@
 {
-"phi_apis":["conj", "nll_loss", "flatten", "expand_as", "dropout", "roi_align"],
+"phi_apis":["conj", "dropout", "expand_as", "flatten", "nll_loss", "psroi_pool", "roi_align", "roi_pool"],
 "phi_kernels":["equal_all"]
 }
-- 
GitLab
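
Not part of the patch: a small usage sketch of what this change wires up on the Python side. With the patch applied, paddle.real / paddle.imag and paddle.vision.ops.roi_pool / psroi_pool dispatch to the generated final-state (eager) kernels; the tensor shapes, box coordinates, and channel counts below are made up purely for illustration.

    import numpy as np
    import paddle
    from paddle.vision.ops import psroi_pool, roi_pool

    # real / imag on a complex tensor (eager mode is the default in Paddle 2.x).
    x = paddle.to_tensor(np.array([[1 + 6j, 2 + 5j, 3 + 4j]], dtype='complex64'))
    print(paddle.real(x))  # float32 tensor of real parts
    print(paddle.imag(x))  # float32 tensor of imaginary parts

    # roi_pool / psroi_pool take per-image box counts via boxes_num.
    feat = paddle.rand([1, 256, 32, 32])              # N, C, H, W
    boxes = paddle.to_tensor([[1., 1., 10., 10.],
                              [2., 2., 6., 6.]], dtype='float32')
    boxes_num = paddle.to_tensor([2], dtype='int32')  # both boxes belong to image 0

    print(roi_pool(feat, boxes, boxes_num, output_size=(3, 3)).shape)  # [2, 256, 3, 3]
    # psroi_pool needs C divisible by pooled_h * pooled_w: 256 / (8 * 8) = 4 channels.
    print(psroi_pool(feat, boxes, boxes_num, output_size=8).shape)     # [2, 4, 8, 8]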