From bc88fbb5b6ea0dd1edb019aba97d8affa4ac13c0 Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Fri, 8 Apr 2022 08:46:09 +0800
Subject: [PATCH] Add conj pixel shuffle yaml (#41499)

* add conj flip yaml

* add flip conj pixel shuffle

---
 paddle/fluid/operators/pixel_shuffle_op.cc    | 42 +++----------
 paddle/phi/infermeta/unary.cc                 | 30 +++++++++++++
 paddle/phi/infermeta/unary.h                  |  5 +++
 .../fluid/tests/unittests/test_conj_op.py     |  6 ++-
 .../paddle/fluid/tests/unittests/test_flip.py |  6 ++-
 .../tests/unittests/test_pixel_shuffle.py     |  6 ++-
 python/paddle/tensor/manipulation.py          |  4 ++
 python/paddle/tensor/math.py                  |  3 ++
 python/paddle/utils/code_gen/api.yaml         |  4 +-
 python/paddle/utils/code_gen/backward.yaml    | 29 +++++++++++++
 10 files changed, 91 insertions(+), 44 deletions(-)

diff --git a/paddle/fluid/operators/pixel_shuffle_op.cc b/paddle/fluid/operators/pixel_shuffle_op.cc
index 21ca26f49f..1724aedbe9 100644
--- a/paddle/fluid/operators/pixel_shuffle_op.cc
+++ b/paddle/fluid/operators/pixel_shuffle_op.cc
@@ -82,42 +82,6 @@ class PixelShuffleGradMaker : public framework::SingleGradOpMaker<T> {
 class PixelShuffleGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")), true,
-        platform::errors::NotFound("Input(Out@Grad) should not be null"));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput(framework::GradVarName("X")), true,
-        platform::errors::NotFound("Output(X@Grad) should not be null"));
-
-    auto do_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    PADDLE_ENFORCE_EQ(do_dims.size(), 4,
-                      platform::errors::InvalidArgument(
-                          "Input should be a 4-D tensor of format [N, C, H, W] "
-                          "or [N, H, W, C], but got %u.",
-                          do_dims.size()));
-
-    auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
-
-    const std::string data_format =
-        ctx->Attrs().Get<std::string>("data_format");
-    const bool channel_last = (data_format == "NHWC");
-
-    auto dx_dims = do_dims;
-    dx_dims[0] = do_dims[0];
-
-    if (!channel_last) {
-      dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] / upscale_factor;
-    } else {
-      dx_dims[1] = do_dims[1] / upscale_factor;
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
-    }
-    ctx->SetOutputDim(framework::GradVarName("X"), dx_dims);
-  }
 };
 
 }  // namespace operators
@@ -132,7 +96,11 @@ REGISTER_OPERATOR(pixel_shuffle, ops::PixelShuffleOp, ops::PixelShuffleOpMaker,
                   ops::PixelShuffleGradMaker<paddle::framework::OpDesc>,
                   ops::PixelShuffleGradMaker<paddle::imperative::OpBase>,
                   PixelShuffleInferShapeFunctor);
-REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp);
+DECLARE_INFER_SHAPE_FUNCTOR(pixel_shuffle_grad,
+                            PixelShuffleGradInferShapeFunctor,
+                            PD_INFER_META(phi::PixelShuffleGradInferMeta));
+REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp,
+                  PixelShuffleGradInferShapeFunctor);
 
 REGISTER_OP_VERSION(pixel_shuffle)
     .AddCheckpoint(
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index a81a0e1503..c6e2cb7619 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -1315,6 +1315,36 @@ void PixelShuffleInferMeta(const MetaTensor& x,
   out->set_dims(output_dims);
 }
 
+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad) {
+  auto do_dims = out_grad.dims();
+  PADDLE_ENFORCE_EQ(do_dims.size(),
+                    4,
+                    phi::errors::InvalidArgument(
+                        "Input should be a 4-D tensor of format [N, C, H, W] "
+                        "or [N, H, W, C], but got %u.",
+                        do_dims.size()));
+
+  const bool channel_last = (data_format == "NHWC");
+
+  auto dx_dims = do_dims;
+  dx_dims[0] = do_dims[0];
+
+  if (!channel_last) {
+    dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] / upscale_factor;
+  } else {
+    dx_dims[1] = do_dims[1] / upscale_factor;
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
+  }
+  x_grad->set_dims(dx_dims);
+  x_grad->set_dtype(out_grad.dtype());
+}
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index 63a1dd52bb..c49e4c88dd 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -200,6 +200,11 @@ void PixelShuffleInferMeta(const MetaTensor& x,
                            const std::string& data_format,
                            MetaTensor* out);
 
+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad);
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
diff --git a/python/paddle/fluid/tests/unittests/test_conj_op.py b/python/paddle/fluid/tests/unittests/test_conj_op.py
index 774a29ada4..fe9efc301f 100644
--- a/python/paddle/fluid/tests/unittests/test_conj_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conj_op.py
@@ -32,6 +32,7 @@ paddle.enable_static()
 class TestConjOp(OpTest):
     def setUp(self):
         self.op_type = "conj"
+        self.python_api = paddle.tensor.conj
         self.init_dtype_type()
         self.init_input_output()
         self.init_grad_input_output()
@@ -53,14 +54,15 @@ class TestConjOp(OpTest):
         self.grad_in = np.conj(self.grad_out)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
         self.check_grad(
             ['X'],
             'Out',
             user_defined_grads=[self.grad_in],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)
 
 
 class TestComplexConjOp(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py
index 5e2aacf9ce..010d23bca5 100644
--- a/python/paddle/fluid/tests/unittests/test_flip.py
+++ b/python/paddle/fluid/tests/unittests/test_flip.py
@@ -67,6 +67,7 @@ class TestFlipOp_API(unittest.TestCase):
 class TestFlipOp(OpTest):
     def setUp(self):
         self.op_type = 'flip'
+        self.python_api = paddle.tensor.flip
         self.init_test_case()
         self.inputs = {'X': np.random.random(self.in_shape).astype('float64')}
         self.init_attrs()
@@ -76,10 +77,10 @@ class TestFlipOp(OpTest):
         self.attrs = {"axis": self.axis}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=True)
 
     def init_test_case(self):
         self.in_shape = (6, 4, 2, 3)
@@ -131,4 +132,5 @@ class TestFlipOpNegAxis(TestFlipOp):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
index f1a409c712..06d975fe2b 100644
--- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
+++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
@@ -52,6 +52,7 @@ def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
data_format="NCHW"): class TestPixelShuffleOp(OpTest): def setUp(self): self.op_type = "pixel_shuffle" + self.python_api = paddle.nn.functional.pixel_shuffle self.init_data_format() n, c, h, w = 2, 9, 4, 4 @@ -73,10 +74,10 @@ class TestPixelShuffleOp(OpTest): self.format = "NCHW" def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestChannelLast(TestPixelShuffleOp): @@ -220,4 +221,5 @@ class TestPixelShuffleError(unittest.TestCase): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 0f90cf6950..d8021f36c2 100755 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -458,6 +458,10 @@ def flip(x, axis, name=None): """ if isinstance(axis, int): axis = [axis] + + if in_dygraph_mode(): + return _C_ops.final_state_flip(x, axis) + if paddle.in_dynamic_mode(): return _C_ops.flip(x, "axis", axis) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index a1d27ab904..298d7af96e 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -3349,6 +3349,9 @@ def conj(x, name=None): # [(4-4j), (5-5j), (6-6j)]]) """ + if in_dygraph_mode(): + return _C_ops.final_state_conj(x) + if paddle.in_dynamic_mode(): return _C_ops.conj(x) diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml index 76f03f9ff8..3a76e89bbb 100644 --- a/python/paddle/utils/code_gen/api.yaml +++ b/python/paddle/utils/code_gen/api.yaml @@ -345,6 +345,7 @@ func : UnchangedInferMeta kernel : func : conj + backward : conj_grad - api : conv2d args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) @@ -659,6 +660,7 @@ func : FlipInferMeta kernel : func : flip + backward : flip_grad - api : floor args : (Tensor x) @@ -1430,7 +1432,7 @@ func : PixelShuffleInferMeta kernel : func : pixel_shuffle - # backward : pixel_shuffle_grad + backward : pixel_shuffle_grad # poisson // no need grad - api : poisson diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml index b32e015325..3456fe3260 100644 --- a/python/paddle/utils/code_gen/backward.yaml +++ b/python/paddle/utils/code_gen/backward.yaml @@ -208,6 +208,16 @@ output : Tensor[](x_grad) invoke : concat_grad_impl(x, out_grad, axis) +- backward_api : conj_grad + forward : conj (Tensor x) -> Tensor(out) + args : (Tensor out_grad) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [out_grad] + kernel : + func : conj + - backward_api : conv2d_grad forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out) args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) @@ -456,6 +466,16 @@ backend: out_grad layout: out_grad +- backward_api : flip_grad + forward : flip (Tensor x, int[] axis) -> Tensor(out) + args : (Tensor out_grad, int[] axis) + output : Tensor(x_grad) + infer_meta : + func : 
+    func : UnchangedInferMeta
+    param: [out_grad]
+  kernel :
+    func : flip
+
 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
@@ -1010,6 +1030,15 @@
   kernel :
     func : pad3d_grad
 
+- backward_api : pixel_shuffle_grad
+  forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
+  args : (Tensor out_grad, int upscale_factor, str data_format)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : PixelShuffleGradInferMeta
+  kernel :
+    func : pixel_shuffle_grad
+
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
-- 
GitLab
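
Reviewer note, not part of the patch: a minimal dygraph sketch exercising the three backward rules wired up above (conj_grad, flip_grad, pixel_shuffle_grad) through the public Paddle 2.x Python API. All calls below exist in paddle 2.x; whether execution actually takes the new final_state_* route depends on eager mode being enabled in the build.

import paddle
import paddle.nn.functional as F

# pixel_shuffle maps (N, C*r*r, H, W) -> (N, C, H*r, W*r); the new
# PixelShuffleGradInferMeta reverses that mapping for the gradient.
x = paddle.rand([2, 9, 4, 4])
x.stop_gradient = False
out = F.pixel_shuffle(x, upscale_factor=3)
print(out.shape)       # [2, 1, 12, 12]
out.sum().backward()
print(x.grad.shape)    # [2, 9, 4, 4], as computed by pixel_shuffle_grad

# flip_grad flips the output gradient along the same axes.
y = paddle.rand([2, 3])
y.stop_gradient = False
paddle.flip(y, axis=[1]).sum().backward()
print(y.grad.shape)    # [2, 3]

# conj_grad conjugates the output gradient (compare the unit test's
# user_defined_grads, which uses np.conj on grad_out).
z = paddle.to_tensor([1 + 2j, 3 - 4j])
print(paddle.conj(z))  # [(1-2j), (3+4j)]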