From a86e8c0eeff0a897cd3dbda2f656d28254eb4505 Mon Sep 17 00:00:00 2001
From: Zhen Wang
Date: Tue, 11 Aug 2020 16:11:54 +0800
Subject: [PATCH] Add more error info for ops without double grad ops.
 (#25987)

When `create_graph` is True and a grad op has no double grad op, raise
a NotFound error that also tells the user to set `create_graph` to
False, instead of only reporting that the grad op is missing. Also add
a unit test that triggers the error through batch_norm.
---
 .../fluid/imperative/partial_grad_engine.cc   |  5 ++-
 .../unittests/test_imperative_double_grad.py  | 34 ++++++++++++++-----
 2 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/imperative/partial_grad_engine.cc b/paddle/fluid/imperative/partial_grad_engine.cc
index 0b45c189dd7..4f133bf80c7 100644
--- a/paddle/fluid/imperative/partial_grad_engine.cc
+++ b/paddle/fluid/imperative/partial_grad_engine.cc
@@ -887,7 +887,10 @@ void PartialGradTask::RunEachOp(OpBase *op) {
                                              op->Attrs(), op->place());
     PADDLE_ENFORCE_NOT_NULL(
         double_grad_node,
-        platform::errors::NotFound("The Op %s doesn't have any grad op.",
+        platform::errors::NotFound("The Op %s doesn't have any grad op. If "
+                                   "you don't intend to calculate higher "
+                                   "order derivatives, please set "
+                                   "`create_graph` to False.",
                                    op->Type()));
     VLOG(10) << "Create " << double_grad_node->size()
              << " double grad op(s) for " << op->Type()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
index 5c94f1836bf..429736803a1 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
@@ -298,15 +298,16 @@ class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
 
 class TestDygraphDoubleGradVisitedUniq(TestCase):
     def test_compare(self):
-        value = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
+        value = np.random.uniform(-0.5, 0.5, 100).reshape(10, 2,
+                                                          5).astype("float32")
 
         def model_f(input):
-            conv2d = fluid.dygraph.Conv2D(3, 2, 3)
+            linear = fluid.dygraph.Linear(5, 3, bias_attr=False)
             for i in range(10):
                 if i == 0:
-                    out = conv2d(input)
+                    out = linear(input)
                 else:
-                    out = out + conv2d(input)
+                    out = out + linear(input)
             return out
 
         backward_strategy = fluid.dygraph.BackwardStrategy()
@@ -321,8 +322,7 @@
             dx = fluid.dygraph.grad(
                 outputs=[out],
                 inputs=[a],
-                create_graph=True,
-                retain_graph=True,
+                create_graph=False,
                 only_inputs=True,
                 allow_unused=False,
                 backward_strategy=backward_strategy)
@@ -339,9 +339,25 @@
 
             grad_2 = a.gradient()
 
-        self.assertTrue(
-            np.allclose(
-                grad_1, grad_2, rtol=1.e-5, atol=1.e-8, equal_nan=True))
+        self.assertTrue(np.array_equal(grad_1, grad_2))
+
+
+class TestRaiseNoDoubleGradOp(TestCase):
+    def raise_no_grad_op(self):
+        with fluid.dygraph.guard():
+            x = fluid.layers.ones(shape=[2, 3, 2, 2], dtype='float32')
+            x.stop_gradient = False
+            y = paddle.fluid.layers.batch_norm(x)
+
+            dx = fluid.dygraph.grad(
+                outputs=[y], inputs=[x], create_graph=True,
+                retain_graph=True)[0]
+
+            loss = fluid.layers.reduce_mean(dx)
+            loss.backward()
+
+    def test_raise(self):
+        self.assertRaises(fluid.core.EnforceNotMet, self.raise_no_grad_op)
 
 
 if __name__ == '__main__':
-- 
GitLab
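
A minimal usage sketch of the behavior this patch targets (not part of
the patch itself; it reuses the 1.8-era fluid dygraph API from the test
above, and batch_norm stands in for any op whose grad op has no double
grad op):

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.layers.ones(shape=[2, 3, 2, 2], dtype='float32')
        x.stop_gradient = False
        y = fluid.layers.batch_norm(x)

        # With create_graph=True the engine must create double grad ops
        # for dx; batch_norm's grad op has none, so this raises the
        # NotFound error added by this patch:
        #
        #     dx = fluid.dygraph.grad(
        #         outputs=[y], inputs=[x], create_graph=True)[0]

        # Following the hint in the error message: create_graph=False
        # skips building grad ops for dx, and the first order gradient
        # of y w.r.t. x is returned as plain, non-differentiable data.
        dx = fluid.dygraph.grad(
            outputs=[y], inputs=[x], create_graph=False)[0]

Setting create_graph=False does not change first order gradients; it
only stops the engine from recording grad ops for the result, so the
unsupported higher order case is rejected early with a clearer message.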