# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import paddle
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.vision.models import resnet50, resnet101
import unittest
from unittest import TestCase
import numpy as np


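# Run the decorated function in dygraph (imperative) mode, entering
# fluid.dygraph.guard() first when the caller is not already in dygraph mode.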
def _dygraph_guard_(func):
    def __impl__(*args, **kwargs):
        if fluid.in_dygraph_mode():
            return func(*args, **kwargs)
        else:
            with fluid.dygraph.guard():
                return func(*args, **kwargs)

    return __impl__


dygraph_guard = wrap_decorator(_dygraph_guard_)


def random_var(size, low=-1, high=1, dtype='float32'):
    x_np = np.random.uniform(low=low, high=high, size=size).astype(dtype)
    return fluid.dygraph.to_variable(x_np)


class TestDygraphDoubleGrad(TestCase):
    def setUp(self):
        self.sort_sum_gradient = False
        self.shape = [5, 10]

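    # Thin wrapper around fluid.dygraph.grad that also applies the
    # FLAGS_sort_sum_gradient setting chosen in setUp.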
    def grad(self,
             outputs,
             inputs,
             grad_outputs=None,
             no_grad_vars=None,
             retain_graph=None,
             create_graph=False,
             allow_unused=False):
        fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient})
        return fluid.dygraph.grad(
            outputs=outputs,
            inputs=inputs,
            grad_outputs=grad_outputs,
            no_grad_vars=no_grad_vars,
            retain_graph=retain_graph,
            create_graph=create_graph,
            allow_unused=allow_unused)

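    # Invalid arguments to grad() (non-Variable inputs/outputs, mismatched
    # grad_outputs, bad no_grad_vars) should raise AssertionError.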
    @dygraph_guard
    def test_exception(self):
        with self.assertRaises(AssertionError):
            self.grad(None, None)

        shape = self.shape

        with self.assertRaises(AssertionError):
            self.grad(1, random_var(shape))

        with self.assertRaises(AssertionError):
            self.grad(random_var(shape), 1)

        with self.assertRaises(AssertionError):
            self.grad([1], [random_var(shape)])

        with self.assertRaises(AssertionError):
            self.grad([random_var(shape)], [1])

        with self.assertRaises(AssertionError):
            self.grad([random_var(shape), random_var(shape)],
                      [random_var(shape)], [random_var(shape)])

        with self.assertRaises(AssertionError):
            self.grad(
                [random_var(shape)], [random_var(shape)], no_grad_vars=[1])

        with self.assertRaises(AssertionError):
            self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1)

    @dygraph_guard
    def test_simple_example(self):
        x = random_var(self.shape)
        x.stop_gradient = False
        y = x + 1

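        # d(x)/dx is all ones; summing the grads of y and x w.r.t. x gives 2,
        # and the grad of x w.r.t. y is None because x does not depend on y.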
        for create_graph in [False, True]:
            dx, = self.grad(
                [x], [x], create_graph=create_graph, retain_graph=True)
            self.assertEqual(dx.shape, x.shape)
            self.assertTrue(np.all(dx.numpy() == 1))
            self.assertNotEqual(dx.stop_gradient, create_graph)

            dx_mul_2, = self.grad(
                [y, x], [x], create_graph=create_graph, retain_graph=True)
            self.assertEqual(dx_mul_2.shape, x.shape)
            self.assertTrue(np.all(dx_mul_2.numpy() == 2))
            self.assertNotEqual(dx_mul_2.stop_gradient, create_graph)

            none_grad, = self.grad(
                [x], [y], create_graph=create_graph, allow_unused=True)
            self.assertTrue(none_grad is None)

            grad_with_none_and_not_none, = self.grad(
                [x, y], [y], create_graph=create_graph)
            self.assertEqual(grad_with_none_and_not_none.shape, x.shape)
            self.assertTrue(np.all(grad_with_none_and_not_none.numpy() == 1))
            self.assertNotEqual(grad_with_none_and_not_none.stop_gradient,
                                create_graph)

    @dygraph_guard
    def test_none_one_initial_gradient(self):
        numel = 1
        for s in self.shape:
            numel *= s

        half_numel = int(numel / 2)
        half_x_positive = np.random.uniform(low=1, high=2, size=[half_numel])
        half_x_negative = np.random.uniform(
            low=-2, high=-1, size=[numel - half_numel])
        x_np = np.array(list(half_x_positive) + list(half_x_negative)).astype(
            'float32')
        np.random.shuffle(x_np)

        x = fluid.dygraph.to_variable(x_np)
        x.stop_gradient = False

        alpha = 0.2
        y = fluid.layers.leaky_relu(x, alpha=alpha)
        y = y * y
        z = y * y

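        # With lrelu = leaky_relu(x): y = lrelu ** 2 and z = lrelu ** 4, so
        # dy/dx = 2 * lrelu * lrelu' and dz/dx = 4 * lrelu ** 3 * lrelu'.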
        x_np = x.numpy()
        relu_x_np = np.maximum(x_np, alpha * x_np).astype('float32')
        relu_x_grad_np = ((x_np > 0) + (x_np < 0) * alpha).astype('float32')
        dy_expected = (relu_x_np * relu_x_grad_np * 2).astype('float32')
        dz_expected = (np.power(relu_x_np, 3) * relu_x_grad_np *
                       4).astype('float32')

        random_grad_y = random_var(y.shape, low=1, high=2)
        random_grad_z = random_var(z.shape, low=1, high=2)
        ones_grad_y = np.ones(y.shape).astype('float32')
        ones_grad_z = np.ones(z.shape).astype('float32')

        original_random_grad_y = random_grad_y.numpy()
        original_random_grad_z = random_grad_z.numpy()

        for grad_y in [random_grad_y]:
            for grad_z in [random_grad_z]:
                for create_graph in [False, True]:
                    dx_actual, = self.grad(
                        outputs=[y, z],
                        inputs=[x],
                        grad_outputs=[grad_y, grad_z],
                        create_graph=create_graph,
                        retain_graph=True)

                    grad_y_np = (ones_grad_y
                                 if grad_y is None else grad_y.numpy())
                    grad_z_np = (ones_grad_z
                                 if grad_z is None else grad_z.numpy())

                    dx_expected = dy_expected * grad_y_np + dz_expected * grad_z_np
                    self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected))

                    if grad_y is not None:
                        self.assertTrue(grad_y.stop_gradient)
                        self.assertTrue(
                            np.array_equal(grad_y.numpy(),
                                           original_random_grad_y))

                    if grad_z is not None:
                        self.assertTrue(grad_z.stop_gradient)
                        self.assertTrue(
                            np.array_equal(grad_z.numpy(),
                                           original_random_grad_z))

    @dygraph_guard
    def test_example_with_gradient_accumulation_and_create_graph(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y = fluid.layers.relu(x)
        z = y + 1
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y, z, w

        dx_actual, = self.grad([w_mean], [x], create_graph=True)
        del w_mean

        self.assertFalse(dx_actual.stop_gradient)

        # Theoretical result based on math calculation
        dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) *
                       (x_np > 0) * 2).astype('float32')
        self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected))

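        # Because dx_actual was created with create_graph=True, it stays in
        # the graph, so backward() through loss differentiates it again:
        # d(loss)/dx = 2/numel * (x + dx * d(dx)/dx), with d(dx)/dx = 2/numel
        # where x > 0.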
        loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
        loss.backward(retain_graph=True)

        x_grad_actual = x.gradient()
        x_grad_expected = (2.0 / float(numel) *
                           (x_np + dx_expected *
                            (x_np > 0) * 2 / float(numel))).astype('float32')
        self.assertTrue(np.allclose(x_grad_actual, x_grad_expected))

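        # Gradients accumulate across backward() calls: after the (i + 1)-th
        # extra pass the total gradient is (i + 2) times the single-pass value.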
        for i in range(5):
            loss.backward(retain_graph=True)
            x_grad_actual = x.gradient()
            x_grad_expected = (i + 2) * (2.0 / float(numel) * (
                x_np + dx_expected *
                (x_np > 0) * 2 / float(numel))).astype('float32')
            self.assertTrue(np.allclose(x_grad_actual, x_grad_expected))

    @dygraph_guard
    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y1 = fluid.layers.relu(x)
        y2 = fluid.layers.relu(x)
        z = y1 + y2
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y1, z, w

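        # y2 is listed in no_grad_vars, so it is treated as a constant:
        # d(w_mean)/dx = 2/numel * (relu(x) + y2) * (x > 0), differentiating
        # only through the y1 branch.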
        dx_actual, = self.grad(
            [w_mean], [x], create_graph=True, no_grad_vars=[y2])

        self.assertFalse(y2.stop_gradient)
        self.assertFalse(dx_actual.stop_gradient)

        dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) *
                       (x_np > 0) * 2).astype('float32')
        self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected))

        loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
        loss.backward()

        x_grad_actual = x.gradient()
        x_grad_expected = (2.0 / float(numel) *
                           (x_np + dx_expected *
                            (x_np > 0) * 4 / float(numel))).astype('float32')
        self.assertTrue(np.allclose(x_grad_actual, x_grad_expected))

    @dygraph_guard
    def test_example_with_gradient_accumulation_and_not_create_graph(self):
        x = random_var(self.shape)
        x_np = x.numpy()
        numel = x_np.size
        x.stop_gradient = False

        y = fluid.layers.relu(x)
        z = y + 1
        w = z * z

        w_mean = fluid.layers.reduce_mean(w)
        del y, z, w

        dx_actual, = self.grad([w_mean], [x], create_graph=False)
        del w_mean

        self.assertTrue(dx_actual.stop_gradient)

        dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) *
                       (x_np > 0) * 2).astype('float32')

        self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected))

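        # dx_actual is detached (create_graph=False), so only the x * x term
        # contributes to x's gradient: d(loss)/dx = 2 * x / numel.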
        loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
        loss.backward()

        x_grad_actual = x.gradient()
        x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
        self.assertTrue(np.allclose(x_grad_actual, x_grad_expected))


class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
    def setUp(self):
        self.sort_sum_gradient = True
        self.shape = [5, 10]


class TestDygraphDoubleGradVisitedUniq(TestCase):
    def test_compare(self):
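        # The same gradient should be produced whether it is computed via
        # fluid.dygraph.grad() or via out.backward(), with
        # FLAGS_sort_sum_gradient enabled.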
        value = np.random.uniform(-0.5, 0.5, 100).reshape(10, 2,
                                                          5).astype("float32")

        def model_f(input):
            linear = fluid.dygraph.Linear(5, 3, bias_attr=False)
            for i in range(10):
                if i == 0:
                    out = linear(input)
                else:
                    out = out + linear(input)
            return out

        fluid.set_flags({'FLAGS_sort_sum_gradient': True})

        with fluid.dygraph.guard():
            paddle.seed(123)
            paddle.framework.random._manual_program_seed(123)
            a = fluid.dygraph.to_variable(value)
            a.stop_gradient = False

            out = model_f(a)

            dx = fluid.dygraph.grad(
                outputs=[out],
                inputs=[a],
                create_graph=False,
                only_inputs=True,
                allow_unused=False)

            grad_1 = dx[0].numpy()

        with fluid.dygraph.guard():
            paddle.seed(123)
            paddle.framework.random._manual_program_seed(123)
            a = fluid.dygraph.to_variable(value)
            a.stop_gradient = False

            out = model_f(a)
            out.backward()

            grad_2 = a.gradient()

        self.assertTrue(np.array_equal(grad_1, grad_2))


class TestRaiseNoDoubleGradOp(TestCase):
    def raise_no_grad_op(self):
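        # group_norm has no double-grad op, so building a graph for dx and
        # backpropagating through it should raise RuntimeError.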
        with fluid.dygraph.guard():
            x = fluid.layers.ones(shape=[2, 3, 2, 2], dtype='float32')
            x.stop_gradient = False
            y = paddle.fluid.layers.group_norm(x, groups=1)

            dx = fluid.dygraph.grad(
                outputs=[y], inputs=[x], create_graph=True,
                retain_graph=True)[0]

            loss = fluid.layers.reduce_mean(dx)
            loss.backward()

    def test_raise(self):
        self.assertRaises(RuntimeError, self.raise_no_grad_op)


class TestDoubleGradResNetBase(TestCase):
    @dygraph_guard
    def check_resnet(self):
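        # Differentiate the predicted-class logit w.r.t. the network output
        # and check that the gradient has the same shape as the output.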
        data = np.random.rand(1, 3, 224, 224).astype(np.float32)
        data = paddle.to_tensor(data)
        data.stop_gradient = False
        out = self.model(data)
        preds = paddle.argmax(out, axis=1)
        label_onehot = paddle.nn.functional.one_hot(
            paddle.to_tensor(preds), num_classes=out.shape[1])
        target = paddle.sum(out * label_onehot, axis=1)

        g = paddle.grad(outputs=target, inputs=out)[0]
        g_numpy = g.numpy()
        self.assertEqual(list(g_numpy.shape), list(out.shape))


class TestDoubleGradResNet50(TestDoubleGradResNetBase):
    def setUp(self):
        self.model = resnet50(pretrained=False)

    def test_main(self):
        self.check_resnet()


class TestDoubleGradResNet101(TestDoubleGradResNetBase):
    def setUp(self):
        self.model = resnet101(pretrained=False)

    def test_main(self):
        self.check_resnet()


if __name__ == '__main__':
    unittest.main()