From 58c9903034afd8145ec74d2a7dc4d120e42a4582 Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Tue, 12 Apr 2022 17:33:13 +0800
Subject: [PATCH] Add conj pixel shuffle yaml (#41499) (#41616)

* ad conj flip yaml

* add flip conj pixel shuffle
---
 paddle/fluid/operators/pixel_shuffle_op.cc    | 42 +++----------------
 paddle/phi/infermeta/unary.cc                 | 30 ++++++++++++++
 paddle/phi/infermeta/unary.h                  |  5 +++
 .../fluid/tests/unittests/test_conj_op.py     |  6 ++-
 .../paddle/fluid/tests/unittests/test_flip.py |  6 ++-
 .../tests/unittests/test_pixel_shuffle.py     |  6 ++-
 python/paddle/tensor/manipulation.py          |  4 ++
 python/paddle/tensor/math.py                  |  3 ++
 python/paddle/utils/code_gen/api.yaml         |  4 +-
 python/paddle/utils/code_gen/backward.yaml    | 29 +++++++++++++
 10 files changed, 91 insertions(+), 44 deletions(-)

diff --git a/paddle/fluid/operators/pixel_shuffle_op.cc b/paddle/fluid/operators/pixel_shuffle_op.cc
index 21ca26f49f6..1724aedbe9b 100644
--- a/paddle/fluid/operators/pixel_shuffle_op.cc
+++ b/paddle/fluid/operators/pixel_shuffle_op.cc
@@ -82,42 +82,6 @@ class PixelShuffleGradMaker : public framework::SingleGradOpMaker<T> {
 class PixelShuffleGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")), true,
-        platform::errors::NotFound("Input(Out@Grad) should not be null"));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput(framework::GradVarName("X")), true,
-        platform::errors::NotFound("Output(X@Grad) should not be null"));
-
-    auto do_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    PADDLE_ENFORCE_EQ(do_dims.size(), 4,
-                      platform::errors::InvalidArgument(
-                          "Input should be a 4-D tensor of format [N, C, H, W] "
-                          "or [N, H, W, C], but got %u.",
-                          do_dims.size()));
-
-    auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
-
-    const std::string data_format =
-        ctx->Attrs().Get<std::string>("data_format");
-    const bool channel_last = (data_format == "NHWC");
-
-    auto dx_dims = do_dims;
-    dx_dims[0] = do_dims[0];
-
-    if (!channel_last) {
-      dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] / upscale_factor;
-    } else {
-      dx_dims[1] = do_dims[1] / upscale_factor;
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
-    }
-    ctx->SetOutputDim(framework::GradVarName("X"), dx_dims);
-  }
 };
 
 }  // namespace operators
@@ -132,7 +96,11 @@ REGISTER_OPERATOR(pixel_shuffle, ops::PixelShuffleOp, ops::PixelShuffleOpMaker,
                   ops::PixelShuffleGradMaker<paddle::imperative::OpBase>,
                   PixelShuffleInferShapeFunctor);
 
-REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp);
+DECLARE_INFER_SHAPE_FUNCTOR(pixel_shuffle_grad,
+                            PixelShuffleGradInferShapeFunctor,
+                            PD_INFER_META(phi::PixelShuffleGradInferMeta));
+REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp,
+                  PixelShuffleGradInferShapeFunctor);
 
 REGISTER_OP_VERSION(pixel_shuffle)
     .AddCheckpoint(
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 0fedcca255c..fa3ea84c931 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -1280,6 +1280,36 @@ void PixelShuffleInferMeta(const MetaTensor& x,
   out->set_dims(output_dims);
 }
 
+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad) {
+  auto do_dims = out_grad.dims();
+  PADDLE_ENFORCE_EQ(do_dims.size(),
+                    4,
+                    phi::errors::InvalidArgument(
+                        "Input should be a 4-D tensor of format [N, C, H, W] "
+                        "or [N, H, W, C], but got %u.",
+                        do_dims.size()));
+
+  const bool channel_last = (data_format == "NHWC");
+
+  auto dx_dims = do_dims;
+  dx_dims[0] = do_dims[0];
+
+  if (!channel_last) {
+    dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] / upscale_factor;
+  } else {
+    dx_dims[1] = do_dims[1] / upscale_factor;
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
+  }
+  x_grad->set_dims(dx_dims);
+  x_grad->set_dtype(out_grad.dtype());
+}
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index 1d69c9504d9..a79f21c4a3f 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -195,6 +195,11 @@ void PixelShuffleInferMeta(const MetaTensor& x,
                            const std::string& data_format,
                            MetaTensor* out);
 
+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad);
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
diff --git a/python/paddle/fluid/tests/unittests/test_conj_op.py b/python/paddle/fluid/tests/unittests/test_conj_op.py
index 774a29ada4a..fe9efc301fe 100644
--- a/python/paddle/fluid/tests/unittests/test_conj_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conj_op.py
@@ -32,6 +32,7 @@ paddle.enable_static()
 class TestConjOp(OpTest):
     def setUp(self):
         self.op_type = "conj"
+        self.python_api = paddle.tensor.conj
         self.init_dtype_type()
         self.init_input_output()
         self.init_grad_input_output()
@@ -53,14 +54,15 @@ class TestConjOp(OpTest):
         self.grad_in = np.conj(self.grad_out)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
         self.check_grad(
             ['X'],
             'Out',
             user_defined_grads=[self.grad_in],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)
 
 
 class TestComplexConjOp(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py
index 5e2aacf9cef..010d23bca51 100644
--- a/python/paddle/fluid/tests/unittests/test_flip.py
+++ b/python/paddle/fluid/tests/unittests/test_flip.py
@@ -67,6 +67,7 @@ class TestFlipOp_API(unittest.TestCase):
 class TestFlipOp(OpTest):
     def setUp(self):
         self.op_type = 'flip'
+        self.python_api = paddle.tensor.flip
         self.init_test_case()
         self.inputs = {'X': np.random.random(self.in_shape).astype('float64')}
         self.init_attrs()
@@ -76,10 +77,10 @@ class TestFlipOp(OpTest):
         self.attrs = {"axis": self.axis}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=True)
 
     def init_test_case(self):
         self.in_shape = (6, 4, 2, 3)
@@ -131,4 +132,5 @@ class TestFlipOpNegAxis(TestFlipOp):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
index f1a409c712f..06d975fe2b8 100644
--- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
+++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
@@ -52,6 +52,7 @@ def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
 class TestPixelShuffleOp(OpTest):
     def setUp(self):
         self.op_type = "pixel_shuffle"
+        self.python_api = paddle.nn.functional.pixel_shuffle
         self.init_data_format()
 
         n, c, h, w = 2, 9, 4, 4
@@ -73,10 +74,10 @@ class TestPixelShuffleOp(OpTest):
         self.format = "NCHW"
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestChannelLast(TestPixelShuffleOp):
@@ -220,4 +221,5 @@ class TestPixelShuffleError(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index a885a031d42..389b5dbd7db 100755
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -458,6 +458,10 @@ def flip(x, axis, name=None):
     """
     if isinstance(axis, int):
         axis = [axis]
+
+    if in_dygraph_mode():
+        return _C_ops.final_state_flip(x, axis)
+
     if paddle.in_dynamic_mode():
         return _C_ops.flip(x, "axis", axis)
 
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index a1d27ab904e..298d7af96ea 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -3349,6 +3349,9 @@ def conj(x, name=None):
             #        [(4-4j), (5-5j), (6-6j)]])
 
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_conj(x)
+
     if paddle.in_dynamic_mode():
         return _C_ops.conj(x)
 
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index f82a7b01e1a..43fcb48f6ce 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -345,6 +345,7 @@
     func : UnchangedInferMeta
   kernel :
     func : conj
+  backward : conj_grad
 
 - api : conv2d
   args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -628,6 +629,7 @@
     func : FlipInferMeta
   kernel :
     func : flip
+  backward : flip_grad
 
 - api : floor
   args : (Tensor x)
@@ -1382,7 +1384,7 @@
     func : PixelShuffleInferMeta
   kernel :
     func : pixel_shuffle
-  # backward : pixel_shuffle_grad
+  backward : pixel_shuffle_grad
 
 # poisson  // no need grad
 - api : poisson
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 7a7f07f7058..20489a35a56 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -208,6 +208,16 @@
   output : Tensor[](x_grad)
   invoke : concat_grad_impl(x, out_grad, axis)
 
+- backward_api : conj_grad
+  forward : conj (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [out_grad]
+  kernel :
+    func : conj
+
 - backward_api : conv2d_grad
   forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
   args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -436,6 +446,16 @@
     backend: out_grad
     layout: out_grad
 
+- backward_api : flip_grad
+  forward : flip (Tensor x, int[] axis) -> Tensor(out)
+  args : (Tensor out_grad, int[] axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [out_grad]
+  kernel :
+    func : flip
+
 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
@@ -990,6 +1010,15 @@
   kernel :
     func : pad3d_grad
 
+- backward_api : pixel_shuffle_grad
+  forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
+  args : (Tensor out_grad, int upscale_factor, str data_format)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : PixelShuffleGradInferMeta
+  kernel :
+    func : pixel_shuffle_grad
+
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
-- 
GitLab
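
For reference, the shape relation that the new PixelShuffleGradInferMeta encodes
can be sanity-checked in plain Python. This is a minimal sketch mirroring the
C++ arithmetic; infer_grad_shape is a hypothetical helper for illustration
only, not part of Paddle's API.

    def infer_grad_shape(out_grad_shape, upscale_factor, data_format="NCHW"):
        """Map d(out) dims back to d(x) dims, inverting pixel_shuffle."""
        assert len(out_grad_shape) == 4, "expect [N, C, H, W] or [N, H, W, C]"
        n, d1, d2, d3 = out_grad_shape
        r = upscale_factor
        if data_format == "NCHW":
            # channels grow back by r*r, spatial dims shrink by r
            return [n, d1 * r * r, d2 // r, d3 // r]
        else:  # NHWC: spatial dims shrink by r, channels grow back by r*r
            return [n, d1 // r, d2 // r, d3 * r * r]

    # pixel_shuffle with r=3 turns [2, 9, 4, 4] into [2, 1, 12, 12]
    # (the shapes used by TestPixelShuffleOp), so the gradient maps back:
    assert infer_grad_shape([2, 1, 12, 12], 3) == [2, 9, 4, 4]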
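
And a rough end-to-end usage sketch of the eager-mode paths this patch wires
up, assuming a Paddle build that includes it (so flip, conj, and pixel_shuffle
dispatch to the final-state kernels in dygraph mode):

    # assumes a Paddle build containing this patch
    import numpy as np
    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor(np.random.rand(2, 9, 4, 4).astype("float32"),
                         stop_gradient=False)

    # flip and pixel_shuffle now take the final-state eager path
    y = F.pixel_shuffle(paddle.flip(x, axis=[2, 3]), upscale_factor=3)
    print(y.shape)  # [2, 1, 12, 12]

    # the pixel_shuffle_grad entry in backward.yaml drives this gradient
    y.sum().backward()
    print(x.grad.shape)  # [2, 9, 4, 4]

    # conj also dispatches to the new eager kernel
    z = paddle.conj(paddle.to_tensor([1 + 2j, 3 - 4j]))
    print(z.numpy())  # [1.-2.j 3.+4.j]

These are the same code paths the check_eager=True assertions in the updated
unit tests exercise.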