From de5dea5ff3bd4675b95012b163a4fe83bae39de5 Mon Sep 17 00:00:00 2001
From: jingqinghe
Date: Mon, 7 Sep 2020 14:23:13 +0800
Subject: [PATCH] support reduce_sum double grad test=develop

---
 .../operators/reduce_ops/reduce_sum_op.cc     | 46 +++++++++++++++++++
 .../fluid/tests/unittests/test_nn_grad.py     | 23 ++++++++++
 2 files changed, 69 insertions(+)

diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
index 6e470e3af4e..32b9a0639ad 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
+++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
@@ -63,6 +63,50 @@ class ReduceSumVarTypeInference : public paddle::framework::VarTypeInference {
   }
 };
 
+class ReduceSumDoubleGradDescMaker : public framework::GradOpDescMakerBase {
+ public:
+  using framework::GradOpDescMakerBase::GradOpDescMakerBase;
+
+  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
+    std::vector<std::unique_ptr<framework::OpDesc>> ops;
+    auto x_gg = OutputGrad(framework::GradVarName("X"));  // input ddx
+    auto out_grads = InputGrad(framework::GradVarName("Out"));
+    if (!out_grads.empty()) {
+      auto* out_grad_op = new framework::OpDesc();
+      out_grad_op->SetType("reduce_sum");
+      out_grad_op->SetInput("X", x_gg);
+      out_grad_op->SetAttrMap(Attrs());
+      out_grad_op->SetOutput("Out", out_grads);
+      ops.emplace_back(out_grad_op);
+    }
+
+    return ops;
+  }
+};
+
+class ReduceSumDoubleGradOpBaseMaker : public imperative::GradOpBaseMakerBase {
+ public:
+  using imperative::GradOpBaseMakerBase::GradOpBaseMakerBase;
+
+  std::shared_ptr<imperative::GradOpNode> operator()() const override {
+    auto out_grads = InputGrad(framework::GradVarName("Out"));
+    if (!out_grads.empty()) {
+      auto x_gg = OutputGrad(framework::GradVarName("X"));  // input ddx
+      auto node = this->NewGradNode();
+      {
+        imperative::TracedGradOp op(node);
+        op.SetType("reduce_sum");
+        op.SetInput("X", x_gg);
+        op.SetAttrMap(Attrs());
+        op.SetOutput("Out", out_grads);
+      }
+      return node;
+    } else {
+      return nullptr;
+    }
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
 
@@ -77,6 +121,8 @@ REGISTER_OPERATOR(reduce_sum, ops::ReduceOp, ReduceSumOpMaker,
                   ops::ReduceSumOpGradMaker<paddle::framework::OpDesc>,
                   ops::ReduceSumOpGradMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(reduce_sum_grad, ops::ReduceGradOp,
+                  ops::ReduceSumDoubleGradDescMaker,
+                  ops::ReduceSumDoubleGradOpBaseMaker,
                   ops::ReduceSumGradNoNeedBufferVarInferer);
 
 REGISTER_OP_CPU_KERNEL(
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index c6cfe01dce4..0c39dc5e731 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -101,6 +101,29 @@ class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase):
         self.func(p)
 
 
+class TestReduceSumWithDimDoubleGradCheck(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        shape = [7, 11]
+        eps = 0.05
+        dtype = np.float64
+
+        x = layers.data('x', shape, False, dtype)
+        x.persistable = True
+        y = layers.reduce_sum(x, dim=0)
+        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
+
+        gradient_checker.double_grad_check(
+            [x], y, x_init=x_arr, place=place, eps=eps)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 class TestMulDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
-- 
GitLab