From badc6f2233f7b4ede9ce267845f3368947d4d63c Mon Sep 17 00:00:00 2001
From: lijianshe02 <48898730+lijianshe02@users.noreply.github.com>
Date: Fri, 15 Jan 2021 12:57:50 +0800
Subject: [PATCH] add transpose double grad, cherry-pick from #29600 (#30435)

* add transpose double grad test=develop (#29600)

* add transpose double grad test=develop

* cherry-pick test=develop
---
 paddle/fluid/operators/transpose_op.cc        | 18 +++++++-
 .../fluid/tests/unittests/test_nn_grad.py     | 44 +++++++++++++++++++
 2 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index 42f4a819baa..d9940ddca3e 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -272,6 +272,20 @@ class Transpose2GradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
+template <typename T>
+class Transpose2DoubleGradMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> grad_op) const override {
+    grad_op->SetType("transpose2");
+    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
+    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    grad_op->SetOutput("XShape", this->Input("XShape"));
+    grad_op->SetAttrMap(this->Attrs());
+  }
+};
+
 class Transpose2OpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -338,7 +352,9 @@ REGISTER_OP_CPU_KERNEL(
 REGISTER_OPERATOR(transpose2, ops::Transpose2Op, ops::Transpose2OpMaker,
                   ops::Transpose2GradMaker<paddle::framework::OpDesc>,
                   ops::Transpose2GradMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad);
+REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad,
+                  ops::Transpose2DoubleGradMaker<paddle::framework::OpDesc>,
+                  ops::Transpose2DoubleGradMaker<paddle::imperative::OpBase>);
 REGISTER_OP_CPU_KERNEL(
     transpose2,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index 899c1f798e6..60b95e9e470 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -329,5 +329,49 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestTransposeDoubleGradCheck(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        x_shape = [3, 40]
+        perm = [1, 0]
+        dtype = np.float64
+
+        x = layers.data('x', x_shape, False, dtype)
+        x.persistable = True
+        out = paddle.transpose(x, perm)
+        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
+
+        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestTransposeDoubleGradCheckCase1(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        x_shape = [2, 3, 4, 5]
+        perm = [0, 2, 3, 1]
+        dtype = np.float64
+
+        x = layers.data('x', x_shape, False, dtype)
+        x.persistable = True
+        out = paddle.transpose(x, perm)
+        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
+
+        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     unittest.main()
--
GitLab
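
For context on the op maker above: transpose is linear, so differentiating transpose2_grad again simply re-applies the forward permutation to the incoming grad-of-grad, which is why Transpose2DoubleGradMaker emits another transpose2 op with the same attributes. The sketch below is not part of the patch; it is a minimal dygraph illustration of the second-order gradient this change enables, assuming a Paddle build (2.0 or later) that includes it. The unit tests in the patch check the same behaviour numerically through the static-graph gradient_checker.double_grad_check helper.

    import numpy as np
    import paddle

    # Illustrative only (not part of the patch): exercise the transpose
    # double-grad path in dygraph mode on a build that includes this change.
    x = paddle.to_tensor(np.random.uniform(-1, 1, [3, 40]).astype('float64'))
    x.stop_gradient = False

    y = paddle.transpose(x, perm=[1, 0])
    z = (y * y).sum()  # z = sum(x**2), routed through the transpose op

    # First-order gradient, kept differentiable so it can be differentiated again.
    dx, = paddle.grad(z, x, create_graph=True)  # dx == 2 * x

    # Second-order gradient: backpropagating through dx differentiates
    # transpose2_grad, i.e. the path registered by Transpose2DoubleGradMaker.
    ddx, = paddle.grad(dx.sum(), x)

    print(ddx.shape)          # [3, 40]
    print(float(ddx.mean()))  # 2.0, since d^2(sum(x**2))/dx^2 == 2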