From 7779768b534943742fc355a6f07bd8152ca0570b Mon Sep 17 00:00:00 2001
From: lijianshe02 <48898730+lijianshe02@users.noreply.github.com>
Date: Tue, 15 Dec 2020 19:19:17 +0800
Subject: [PATCH] add transpose double grad test=develop (#29600)

* add transpose double grad test=develop
---
 paddle/fluid/operators/transpose_op.cc        | 18 +++++++-
 .../fluid/tests/unittests/test_nn_grad.py     | 44 +++++++++++++++++++
 2 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index 42f4a819baa..d9940ddca3e 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -272,6 +272,20 @@ class Transpose2GradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
+template <typename T>
+class Transpose2DoubleGradMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> grad_op) const override {
+    grad_op->SetType("transpose2");
+    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
+    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    grad_op->SetOutput("XShape", this->Input("XShape"));
+    grad_op->SetAttrMap(this->Attrs());
+  }
+};
+
 class Transpose2OpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -338,7 +352,9 @@ REGISTER_OP_CPU_KERNEL(
 REGISTER_OPERATOR(transpose2, ops::Transpose2Op, ops::Transpose2OpMaker,
                   ops::Transpose2GradMaker<paddle::framework::OpDesc>,
                   ops::Transpose2GradMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad);
+REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad,
+                  ops::Transpose2DoubleGradMaker<paddle::framework::OpDesc>,
+                  ops::Transpose2DoubleGradMaker<paddle::imperative::OpBase>);
 REGISTER_OP_CPU_KERNEL(
     transpose2,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index 6fa14d8eb60..6a5e1ba1473 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -350,5 +350,49 @@ class TestClipDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestTransposeDoubleGradCheck(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        x_shape = [3, 40]
+        perm = [1, 0]
+        dtype = np.float64
+
+        x = layers.data('x', x_shape, False, dtype)
+        x.persistable = True
+        out = paddle.transpose(x, perm)
+        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
+
+        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestTransposeDoubleGradCheckCase1(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        x_shape = [2, 3, 4, 5]
+        perm = [0, 2, 3, 1]
+        dtype = np.float64
+
+        x = layers.data('x', x_shape, False, dtype)
+        x.persistable = True
+        out = paddle.transpose(x, perm)
+        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
+
+        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     unittest.main()
--
GitLab
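
Note on the double-grad construction: transpose is a linear op, so its second-order grad is again a transpose with the original axis attribute. Transpose2DoubleGradMaker therefore reuses the forward transpose2 op, wiring the grad-of-grad of X (ddX) in as "X" and emitting ddOut as "Out", with the attribute map copied unchanged. A minimal NumPy sketch of the identity the maker relies on (not part of the patch; the perm value, shapes, and variable names are illustrative):

    import numpy as np

    perm = [0, 2, 3, 1]                # forward "axis" attribute of transpose2
    inv_perm = list(np.argsort(perm))  # inverse perm used by the first-order grad

    ddx = np.random.rand(2, 3, 4, 5)   # perturbation of the input grad dX
    # First-order rule: dX = transpose(dOut, inv_perm). That map is linear,
    # so differentiating it once more gives ddOut = transpose(ddX, perm),
    # i.e. exactly the forward transpose2 with the same attributes.
    ddout = ddx.transpose(perm)

    # Round trip: applying the first-order rule to ddout recovers ddx.
    assert np.array_equal(ddout.transpose(inv_perm), ddx)

This is why Apply() only has to set the op type to "transpose2" and copy the attrs; the gradient_checker.double_grad_check calls in the new unit tests verify the same identity numerically on both a 2-D and a 4-D input.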