# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import paddle
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.vision.models import resnet50, resnet101
import unittest
from unittest import TestCase
import numpy as np
from paddle.fluid.framework import _test_eager_guard


def _dygraph_guard_(func):
    def __impl__(*args, **kwargs):
        if fluid._non_static_mode():
            return func(*args, **kwargs)
        else:
            with fluid.dygraph.guard():
                return func(*args, **kwargs)

    return __impl__


dygraph_guard = wrap_decorator(_dygraph_guard_)


def random_var(size, low=-1, high=1, dtype='float32'):
    x_np = np.random.uniform(low=low, high=high, size=size).astype(dtype)
    return fluid.dygraph.to_variable(x_np)


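# Checks fluid.dygraph.grad in eager mode: basic matmul gradients,
# allow_unused handling, and errors on duplicate inputs/outputs.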
class TestEagerGrad(TestCase):
    def func_simple_example_eager_grad(self):
        np.random.seed(2021)
        paddle.set_device('cpu')
        np_x = np.random.random((3, 3))
        np_y = np.random.random((3, 1))
        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
        y = paddle.to_tensor(np_y, dtype="float64", stop_gradient=False)
        out = paddle.matmul(x, y)
        dx = fluid.dygraph.grad(out, x)

        dout = np.ones_like(np_y)
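        # For out = matmul(x, y), d(out)/d(x) = dout @ y^T; with dout of all
        # ones the expected gradient is ones(3, 1) @ y^T.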
        expected_dx = np.matmul(dout, np.transpose(np_y))

        # stop_gradient = not create_graph; create_graph defaults to False
        self.assertEqual(dx[0].stop_gradient, True)
        np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)

    def test_simple_example_eager_grad(self):
        with _test_eager_guard():
            self.func_simple_example_eager_grad()
        self.func_simple_example_eager_grad()

    def func_simple_example_eager_grad_allow_unused(self):
        np.random.seed(2021)
        paddle.set_device('cpu')
        np_x = np.random.random((3, 3))
        np_y = np.random.random((3, 1))
        np_z = np.random.random((3, 1))
        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
        y = paddle.to_tensor(np_y, dtype="float64", stop_gradient=False)
        z = paddle.to_tensor(np_z, dtype="float64", stop_gradient=False)
        out_z = paddle.nn.functional.sigmoid(z)
        out = paddle.matmul(x, y)

        dx = fluid.dygraph.grad(out, [x, z], allow_unused=True)
        dout = np.ones_like(np_y)
        expected_dx = np.matmul(dout, np.transpose(np_y))
        np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)
        # stop_gradient = not create_graph; create_graph defaults to False
        self.assertEqual(dx[0].stop_gradient, True)
        # z is an unused input in the graph, so its gradient is None
        self.assertIsNone(dx[1])

    def test_simple_example_eager_grad_allow_unused(self):
        with _test_eager_guard():
            self.func_simple_example_eager_grad_allow_unused()
        self.func_simple_example_eager_grad_allow_unused()

    def func_simple_example_eager_grad_not_allow_unused(self):
        np.random.seed(2021)
        paddle.set_device('cpu')
        np_x = np.random.random((3, 3))
        np_y = np.random.random((3, 1))
        np_z = np.random.random((3, 1))
        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
        y = paddle.to_tensor(np_y, dtype="float64", stop_gradient=False)
        z = paddle.to_tensor(np_z, dtype="float64", stop_gradient=False)
        out_z = paddle.nn.functional.sigmoid(z)
        out = paddle.matmul(x, y)

        try:
            # allow_unused is False by default
            dx = fluid.dygraph.grad(out, [x, z])
        except ValueError as e:
            error_msg = str(e)
            assert error_msg.find("allow_unused") > 0

    def test_simple_example_eager_grad_not_allow_unused(self):
        with _test_eager_guard():
            self.func_simple_example_eager_grad_not_allow_unused()
        self.func_simple_example_eager_grad_not_allow_unused()

    def func_simple_example_eager_grad_duplicate_input(self):
        np.random.seed(2021)
        paddle.set_device('cpu')
        np_x = np.random.random((3, 3))
        np_y = np.random.random((3, 1))
        np_z = np.random.random((3, 1))
        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
        y = paddle.to_tensor(np_y, dtype="float64", stop_gradient=False)
        z = paddle.to_tensor(np_z, dtype="float64", stop_gradient=False)
        out_z = paddle.nn.functional.sigmoid(z)
        out = paddle.matmul(x, y)

        try:
            # duplicate inputs will raise a RuntimeError
            dx = fluid.dygraph.grad(out, [x, x])
        except RuntimeError as e:
            error_msg = str(e)
            assert error_msg.find("duplicate") > 0

    def test_simple_example_eager_grad_duplicate_input(self):
        with _test_eager_guard():
            self.func_simple_example_eager_grad_duplicate_input()
        self.func_simple_example_eager_grad_duplicate_input()

    def func_simple_example_eager_grad_duplicate_output(self):
        np.random.seed(2021)
        paddle.set_device('cpu')
        np_x = np.random.random((3, 3))
        np_y = np.random.random((3, 1))
        np_z = np.random.random((3, 1))
        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
        y = paddle.to_tensor(np_y, dtype="float64", stop_gradient=False)
        z = paddle.to_tensor(np_z, dtype="float64", stop_gradient=False)
        out_z = paddle.nn.functional.sigmoid(z)
        out = paddle.matmul(x, y)

        try:
            # duplicate outputs will raise a RuntimeError
            dx = fluid.dygraph.grad([out, out], [x])
        except RuntimeError as e:
            error_msg = str(e)
            assert error_msg.find("duplicate") > 0

    def test_simple_example_eager_grad_duplicate_output(self):
        with _test_eager_guard():
            self.func_simple_example_eager_grad_duplicate_output()
        self.func_simple_example_eager_grad_duplicate_output()

    def test_simple_example_eager_two_grad_output(self):
        with _test_eager_guard():
            x1 = paddle.to_tensor([1.0, 2.0])
            x1.stop_gradient = False
            x2 = paddle.to_tensor([1.0, 2.0])
            x2.stop_gradient = False
            out1 = x1 * 2
            out2 = x2 * 2

            dout2_record_by_hook = []

            def record_hook(grad):
                dout2_record_by_hook.append(grad)

            out2.register_hook(record_hook)

            out3 = paddle.multiply(out1, out2)
            out4 = paddle.mean(out3)
            egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3])
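            # out4 = mean(out1 * out2) with out1 = [2., 4.], so the gradient
            # reaching out2 (recorded by the hook) is out1 / 2 = [1., 2.]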

            np.testing.assert_array_equal(
                dout2_record_by_hook[0].numpy(), np.array([1.0, 2.0])
            )

        x1 = paddle.to_tensor([1.0, 2.0])
        x1.stop_gradient = False
        x2 = paddle.to_tensor([1.0, 2.0])
        x2.stop_gradient = False
        out1 = x1 * 2
        out2 = x2 * 2

        out3 = paddle.multiply(out1, out2)
        out4 = paddle.mean(out3)
        dout2, dout3 = paddle.grad([out4], [out2, out3])

        self.assertEqual(dout2.stop_gradient, egr_dout2.stop_gradient)
        self.assertEqual(dout3.stop_gradient, egr_dout3.stop_gradient)
        np.testing.assert_array_equal(dout2.numpy(), egr_dout2.numpy())
        np.testing.assert_array_equal(dout3.numpy(), egr_dout3.numpy())


class TestDygraphDoubleGrad(TestCase):
    def setUp(self):
        self.sort_sum_gradient = False
        self.shape = [5, 10]

    def grad(
        self,
        outputs,
        inputs,
        grad_outputs=None,
        no_grad_vars=None,
        retain_graph=None,
        create_graph=False,
        allow_unused=False,
    ):
        fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient})
        return fluid.dygraph.grad(
            outputs=outputs,
            inputs=inputs,
            grad_outputs=grad_outputs,
            no_grad_vars=no_grad_vars,
            retain_graph=retain_graph,
            create_graph=create_graph,
            allow_unused=allow_unused,
        )

    @dygraph_guard
    def func_exception(self):
        with self.assertRaises(AssertionError):
            self.grad(None, None)

        shape = self.shape

        with self.assertRaises(AssertionError):
            self.grad(1, random_var(shape))

        with self.assertRaises(AssertionError):
            self.grad(random_var(shape), 1)

        with self.assertRaises(AssertionError):
            self.grad([1], [random_var(shape)])

        with self.assertRaises(AssertionError):
            self.grad([random_var(shape)], [1])

        with self.assertRaises(AssertionError):
            self.grad(
                [random_var(shape), random_var(shape)],
                [random_var(shape)],
                [random_var(shape)],
            )

        with self.assertRaises(AssertionError):
            self.grad(
                [random_var(shape)], [random_var(shape)], no_grad_vars=[1]
            )

        with self.assertRaises(AssertionError):
            self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1)

    def test_exception(self):
        with _test_eager_guard():
            self.func_exception()
        self.func_exception()

    @dygraph_guard
    def func_simple_example(self):
        x = random_var(self.shape)
        x.stop_gradient = False
        y = x + 1

        for create_graph in [False, True]:
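            # d(x)/d(x) is all ones; adding y = x + 1 as a second output doubles
            # it to 2; outputs that do not depend on an input yield None when
            # allow_unused=True.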
            (dx,) = self.grad(
                [x], [x], create_graph=create_graph, retain_graph=True
            )
            self.assertEqual(dx.shape, x.shape)
            self.assertTrue(np.all(dx.numpy() == 1))
            self.assertNotEqual(dx.stop_gradient, create_graph)

            (dx_mul_2,) = self.grad(
                [y, x], [x], create_graph=create_graph, retain_graph=True
            )
            self.assertEqual(dx_mul_2.shape, x.shape)
            self.assertTrue(np.all(dx_mul_2.numpy() == 2))
            self.assertNotEqual(dx_mul_2.stop_gradient, create_graph)

            (none_grad,) = self.grad(
                [x], [y], create_graph=create_graph, allow_unused=True
            )
            self.assertIsNone(none_grad)

            (grad_with_none_and_not_none,) = self.grad(
                [x, y], [y], create_graph=create_graph
            )
            self.assertEqual(grad_with_none_and_not_none.shape, x.shape)
            self.assertTrue(np.all(grad_with_none_and_not_none.numpy() == 1))
            self.assertNotEqual(
                grad_with_none_and_not_none.stop_gradient, create_graph
            )

    def test_simple_example(self):
        with _test_eager_guard():
            self.func_simple_example()
        self.func_simple_example()

    @dygraph_guard
    def func_example_no_grad_vars(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y1 = fluid.layers.relu(x)
        y2 = fluid.layers.relu(x)
        z = y1 + y2
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y1, z, w

        (dx_actual,) = self.grad(
            [w_mean], [x], create_graph=True, no_grad_vars=[y2]
        )

        self.assertFalse(y2.stop_gradient)
        self.assertFalse(dx_actual.stop_gradient)

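        # w_mean = mean((y1 + y2) ** 2) with y1 = relu(x); y2 is held constant
        # via no_grad_vars, so d(w_mean)/dx = 2 * (relu(x) + y2) * 1[x > 0] / numel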
        dx_expected = (
            1.0
            / float(numel)
            * (np.maximum(x_np, 0) + y2.numpy())
            * (x_np > 0)
            * 2
        ).astype('float32')

        np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)

    def test_example_no_grad_vars(self):
        with _test_eager_guard():
            self.func_example_no_grad_vars()
        self.func_example_no_grad_vars()

    @dygraph_guard
    def func_none_one_initial_gradient(self):
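        # y = leaky_relu(x) ** 2 and z = leaky_relu(x) ** 4, so
        # dy/dx = 2 * lrelu(x) * lrelu'(x) and dz/dx = 4 * lrelu(x)**3 * lrelu'(x);
        # grad() must scale each by its grad_outputs entry and sum the results.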
        numel = 1
        for s in self.shape:
            numel *= s

        half_numel = int(numel / 2)
        half_x_positive = np.random.uniform(low=1, high=2, size=[half_numel])
        half_x_negative = np.random.uniform(
            low=-2, high=-1, size=[numel - half_numel]
        )
        x_np = np.array(list(half_x_positive) + list(half_x_negative)).astype(
            'float32'
        )
        np.random.shuffle(x_np)

        x = fluid.dygraph.to_variable(x_np)
        x.stop_gradient = False

        alpha = 0.2
        y = paddle.nn.functional.leaky_relu(x, alpha)
        y = y * y
        z = y * y

        x_np = x.numpy()
        relu_x_np = np.maximum(x_np, alpha * x_np).astype('float32')
        relu_x_grad_np = ((x_np > 0) + (x_np < 0) * alpha).astype('float32')
        dy_expected = (relu_x_np * relu_x_grad_np * 2).astype('float32')
        dz_expected = (np.power(relu_x_np, 3) * relu_x_grad_np * 4).astype(
            'float32'
        )

        random_grad_y = random_var(y.shape, low=1, high=2)
        random_grad_z = random_var(z.shape, low=1, high=2)
        ones_grad_y = np.ones(y.shape).astype('float32')
        ones_grad_z = np.ones(z.shape).astype('float32')

        original_random_grad_y = random_grad_y.numpy()
        original_random_grad_z = random_grad_z.numpy()

        for grad_y in [random_grad_y]:
            for grad_z in [random_grad_z]:
                for create_graph in [False, True]:
                    (dx_actual,) = self.grad(
                        outputs=[y, z],
                        inputs=[x],
                        grad_outputs=[grad_y, grad_z],
                        create_graph=create_graph,
                        retain_graph=True,
                    )

                    grad_y_np = (
                        ones_grad_y if grad_y is None else grad_y.numpy()
                    )
                    grad_z_np = (
                        ones_grad_z if grad_z is None else grad_z.numpy()
                    )

                    dx_expected = (
                        dy_expected * grad_y_np + dz_expected * grad_z_np
                    )
                    np.testing.assert_allclose(
                        dx_actual.numpy(), dx_expected, rtol=1e-05
                    )

                    if grad_y is not None:
                        self.assertTrue(grad_y.stop_gradient)
                        np.testing.assert_array_equal(
                            grad_y.numpy(), original_random_grad_y
                        )

                    if grad_z is not None:
                        self.assertTrue(grad_z.stop_gradient)
                        np.testing.assert_array_equal(
                            grad_z.numpy(), original_random_grad_z
                        )

    def test_none_one_initial_gradient(self):
        with _test_eager_guard():
            self.func_none_one_initial_gradient()
        self.func_none_one_initial_gradient()

    @dygraph_guard
    def func_example_with_gradient_accumulation_and_create_graph(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y = fluid.layers.relu(x)
        z = y + 1
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y, z, w

        (dx_actual,) = self.grad([w_mean], [x], create_graph=True)
        del w_mean

        self.assertFalse(dx_actual.stop_gradient)

        # Theoretical result based on math calculation
        dx_expected = (
            1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2
        ).astype('float32')
        np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)

        loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
        loss.backward(retain_graph=True)

        x_grad_actual = x.gradient()
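        # loss = mean(dx_actual**2 + x**2) and d(dx_actual)/dx = 2 * 1[x > 0] / numel,
        # so d(loss)/dx = 2 / numel * (x + dx_expected * 1[x > 0] * 2 / numel)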
        x_grad_expected = (
            2.0
            / float(numel)
            * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))
        ).astype('float32')
        np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)

        for i in range(5):
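            # Each extra backward() call accumulates into x.grad, so after the
            # i-th additional pass the gradient is (i + 2) times the single-pass value.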
            loss.backward(retain_graph=True)
            x_grad_actual = x.gradient()
            x_grad_expected = (i + 2) * (
                2.0
                / float(numel)
                * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))
            ).astype('float32')
            np.testing.assert_allclose(
                x_grad_actual, x_grad_expected, rtol=1e-05
            )

    def test_example_with_gradient_accumulation_and_create_graph(self):
        with _test_eager_guard():
            self.func_example_with_gradient_accumulation_and_create_graph()
        self.func_example_with_gradient_accumulation_and_create_graph()

    @dygraph_guard
    def func_example_with_gradient_accumulation_and_no_grad_vars(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y1 = fluid.layers.relu(x)
        y2 = fluid.layers.relu(x)
        z = y1 + y2
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y1, z, w

        (dx_actual,) = self.grad(
            [w_mean],
            [x],
            retain_graph=True,
            create_graph=True,
            no_grad_vars=[y2],
        )

        self.assertFalse(y2.stop_gradient)
        self.assertFalse(dx_actual.stop_gradient)

        dx_expected = (
            1.0
            / float(numel)
            * (np.maximum(x_np, 0) + y2.numpy())
            * (x_np > 0)
            * 2
        ).astype('float32')
        np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)

        loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
        loss.backward()

        x_grad_actual = x.gradient()
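        # With create_graph=True, dx_actual still depends on x through both
        # relu branches (y1 and y2), so d(dx_actual)/dx = 4 * 1[x > 0] / numel,
        # which explains the factor of 4 below.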
        x_grad_expected = (
            2.0
            / float(numel)
            * (x_np + dx_expected * (x_np > 0) * 4 / float(numel))
        ).astype('float32')
        np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)

    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
        with _test_eager_guard():
            self.func_example_with_gradient_accumulation_and_no_grad_vars()
        self.func_example_with_gradient_accumulation_and_no_grad_vars()

    @dygraph_guard
    def func_example_with_gradient_accumulation_and_not_create_graph(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y = fluid.layers.relu(x)
        z = y + 1
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y, z, w

        (dx_actual,) = self.grad([w_mean], [x], create_graph=False)
        del w_mean

        self.assertTrue(dx_actual.stop_gradient)

        dx_expected = (
            1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2
        ).astype('float32')

        np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)

        loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
        loss.backward()

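        # create_graph=False detaches dx_actual (stop_gradient is True), so only
        # the x * x term contributes to x.grad: d(loss)/dx = 2 * x / numel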
        x_grad_actual = x.gradient()
        x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
        np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)

    def test_example_with_gradient_accumulation_and_not_create_graph(self):
        with _test_eager_guard():
            self.func_example_with_gradient_accumulation_and_not_create_graph()
        self.func_example_with_gradient_accumulation_and_not_create_graph()


class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
    def setUp(self):
        self.sort_sum_gradient = True
        self.shape = [5, 10]


class TestDygraphDoubleGradVisitedUniq(TestCase):
    def func_compare(self):
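        # The same Linear layer is applied 10 times and the outputs summed, so
        # its parameters appear many times in the backward graph; grad() must
        # accumulate each node exactly once and match a plain backward() pass.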
        value = (
            np.random.uniform(-0.5, 0.5, 100)
            .reshape(10, 2, 5)
            .astype("float32")
        )

        def model_f(input):
            linear = fluid.dygraph.Linear(5, 3, bias_attr=False)
            for i in range(10):
                if i == 0:
                    out = linear(input)
                else:
                    out = out + linear(input)
            return out

        fluid.set_flags({'FLAGS_sort_sum_gradient': True})

        with fluid.dygraph.guard():
            paddle.seed(123)
            paddle.framework.random._manual_program_seed(123)
            a = fluid.dygraph.to_variable(value)
            a.stop_gradient = False

            out = model_f(a)

            dx = fluid.dygraph.grad(
                outputs=[out],
                inputs=[a],
                create_graph=False,
                only_inputs=True,
                allow_unused=False,
            )

            grad_1 = dx[0].numpy()

        with fluid.dygraph.guard():
            paddle.seed(123)
            paddle.framework.random._manual_program_seed(123)
            a = fluid.dygraph.to_variable(value)
            a.stop_gradient = False

            out = model_f(a)
            out.backward()

            grad_2 = a.gradient()

        np.testing.assert_array_equal(grad_1, grad_2)

    def test_compare(self):
        with _test_eager_guard():
            self.func_compare()
        self.func_compare()


class TestRaiseNoDoubleGradOp(TestCase):
    def raise_no_grad_op(self):
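        # group_norm does not register a double-grad op, so back-propagating
        # through its first-order gradient is expected to raise RuntimeError.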
        with fluid.dygraph.guard():
            x = fluid.layers.ones(shape=[2, 3, 2, 2], dtype='float32')
            x.stop_gradient = False
            y = paddle.fluid.layers.group_norm(x, groups=1)

            dx = fluid.dygraph.grad(
                outputs=[y], inputs=[x], create_graph=True, retain_graph=True
            )[0]

            loss = fluid.layers.reduce_mean(dx)
            loss.backward()

    def test_raise(self):
        self.assertRaises(RuntimeError, self.raise_no_grad_op)


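# Verifies that first-order grad() results through resnet50 / resnet101 agree
# between eager mode and the legacy dygraph mode.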
class TestDoubleGradResNet(TestCase):
    def setUp(self):
        paddle.seed(123)
        paddle.framework.random._manual_program_seed(123)
        self.data = np.random.rand(1, 3, 224, 224).astype(np.float32)

    @dygraph_guard
    def test_resnet_resnet50(self):
        with _test_eager_guard():
            model = resnet50(pretrained=False)
            egr_data = paddle.to_tensor(self.data)
            egr_data.stop_gradient = False
            egr_out = model(egr_data)
            egr_preds = paddle.argmax(egr_out, axis=1)
            egr_label_onehot = paddle.nn.functional.one_hot(
                paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]
            )
            egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)

            egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
            egr_g_numpy = egr_g.numpy()
            self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))

        model = resnet50(pretrained=False)
        data = paddle.to_tensor(self.data)
        data.stop_gradient = False
        out = model(data)
        preds = paddle.argmax(out, axis=1)
        label_onehot = paddle.nn.functional.one_hot(
            paddle.to_tensor(preds), num_classes=out.shape[1]
        )
        target = paddle.sum(out * label_onehot, axis=1)

        g = paddle.grad(outputs=target, inputs=out)[0]
        g_numpy = g.numpy()
        self.assertEqual(list(g_numpy.shape), list(out.shape))

        np.testing.assert_array_equal(egr_out, out)
        np.testing.assert_array_equal(egr_g_numpy, g_numpy)

    @dygraph_guard
    def test_resnet_resnet101(self):
        with _test_eager_guard():
            model = resnet101(pretrained=False)
            egr_data = paddle.to_tensor(self.data)
            egr_data.stop_gradient = False
            egr_out = model(egr_data)
            egr_preds = paddle.argmax(egr_out, axis=1)
            egr_label_onehot = paddle.nn.functional.one_hot(
                paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]
            )
            egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)

            egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
            egr_g_numpy = egr_g.numpy()
            self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))

        model = resnet101(pretrained=False)
        data = paddle.to_tensor(self.data)
        data.stop_gradient = False
        out = model(data)
        preds = paddle.argmax(out, axis=1)
        label_onehot = paddle.nn.functional.one_hot(
            paddle.to_tensor(preds), num_classes=out.shape[1]
        )
        target = paddle.sum(out * label_onehot, axis=1)

        g = paddle.grad(outputs=target, inputs=out)[0]
        g_numpy = g.numpy()
        self.assertEqual(list(g_numpy.shape), list(out.shape))

        np.testing.assert_array_equal(egr_out, out)
        np.testing.assert_array_equal(egr_g_numpy, g_numpy)


class TestDoubleGradBasics(TestCase):
    def test_matmul(self):
        input_numpy = np.ones([3, 3]) * 2
        with _test_eager_guard():
            x = paddle.to_tensor(
                input_numpy, stop_gradient=False, dtype='float32'
            )
            y = paddle.to_tensor(
                input_numpy, stop_gradient=False, dtype='float32'
            )
            grad_out = paddle.to_tensor(
                np.ones([3, 3]), stop_gradient=False, dtype='float32'
            )

            out = paddle.matmul(x, y, False, False)
            new_x_g, new_y_g = paddle.grad(
                [out], [x, y], [grad_out], retain_graph=True, create_graph=True
            )
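            # With x = y = 2 * ones(3, 3) and grad_out = ones(3, 3): every entry
            # of out = x @ y is 12, new_x_g = grad_out @ y^T and
            # new_y_g = x^T @ grad_out are all 6; new_x_g.backward() then gives
            # x.grad = 0, y.grad = 3 and grad_out.grad = 6, as checked below.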
            new_x_g.backward()

            out_ref = np.ones([3, 3]) * 12.0
            np.testing.assert_array_equal(out.numpy(), out_ref)

            new_x_g_ref = np.ones([3, 3]) * 6.0
            new_y_g_ref = np.ones([3, 3]) * 6.0
            np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
            np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)

            x_grad_ref = np.ones([3, 3]) * 0.0
            np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)

            y_grad_ref = np.ones([3, 3]) * 3.0
            np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)

            grad_out_grad_ref = np.ones([3, 3]) * 6.0
            np.testing.assert_array_equal(
                grad_out.grad.numpy(), grad_out_grad_ref
            )


if __name__ == '__main__':
    unittest.main()