# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from fake_reader import fake_imdb_reader

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.nn.clip import _allow_pure_fp16_global_norm_clip

paddle.enable_static()


def bow_net(
    data, label, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2
):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(
        input=data, is_sparse=True, size=[dict_dim, emb_dim]
    )
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = paddle.tanh(bow)
    fc_1 = paddle.static.nn.fc(x=bow_tanh, size=hid_dim, activation="tanh")
    fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim2, activation="tanh")
    prediction = paddle.static.nn.fc(
        x=[fc_2], size=class_dim, activation="softmax"
    )
    cost = paddle.nn.functional.cross_entropy(
        input=prediction, label=label, reduction='none', use_softmax=False
    )
    avg_cost = paddle.mean(x=cost)

    return avg_cost


class TestGradientClip(unittest.TestCase):
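    # Base harness for the static-graph tests: subclasses provide a clipping
    # strategy via self.clip_gradient / backward_and_optimize and verify the
    # clipped gradients in check_clip_result.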
    def setUp(self):
        self.word_dict_len = 5147
        self.BATCH_SIZE = 2
        reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
        self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
        self.clip_gradient = lambda x: None
        self.init()

    def init(self):
        pass

    def get_places(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        return places

    def check_clip_result(self, out, out_clip):
        pass

    def check_gradient_clip(self, place, dtype='float32'):
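        # Build a small classifier, clone the program, append backward ops to
        # both copies, apply self.clip_gradient only to the clone, and compare
        # the fetched gradients via check_clip_result.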
        prog = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(
            main_program=prog, startup_program=startup_program
        ):
            image = fluid.data(name="a", shape=[-1, 784], dtype='float32')
            label = fluid.data(name="b", shape=[-1, 1], dtype='int64')
            if dtype != 'float32':
                image_cast = paddle.cast(image, dtype)
                hidden = paddle.static.nn.fc(
                    x=image_cast, size=32, activation='relu'
                )
            else:
                hidden = paddle.static.nn.fc(
                    x=image, size=32, activation='relu'
                )
            predict = paddle.static.nn.fc(
                x=hidden, size=10, activation='softmax'
            )

            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)

        prog_clip = prog.clone()
        avg_cost_clip = prog_clip.block(0).var(avg_cost.name)

        p_g = fluid.backward.append_backward(loss=avg_cost)
        p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)

        p_g = sorted(p_g, key=lambda x: x[0].name)
        p_g_clip = sorted(p_g_clip, key=lambda x: x[0].name)
        with fluid.program_guard(
            main_program=prog_clip, startup_program=startup_program
        ):
            p_g_clip = self.clip_gradient(p_g_clip)

        grad_list = [elem[1] for elem in p_g]
        grad_clip_list = [elem[1] for elem in p_g_clip]

        train_reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=3)
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
        exe.run(startup_program)

        data = next(train_reader())
        out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
        out_clip = exe.run(
            prog_clip, feed=feeder.feed(data), fetch_list=grad_clip_list
        )
        self.check_clip_result(out, out_clip)

    def check_sparse_gradient_clip(self, place):
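        # Run one training step of the BOW net (sparse embedding) using the
        # subclass-provided backward_and_optimize hook and check that the
        # resulting loss is finite.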
        prog = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(
            main_program=prog, startup_program=startup_program
        ):
            data = fluid.data(
                name="words", shape=[-1, 1], dtype="int64", lod_level=1
            )
            label = fluid.data(name="label", shape=[-1, 1], dtype="int64")
            cost = bow_net(data, label, self.word_dict_len)

            self.backward_and_optimize(cost)

        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
        exe.run(startup_program)

        data = next(self.train_data())
        val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]
        self.assertEqual((1,), val.shape)
        self.assertFalse(np.isnan(val))

    def backward_and_optimize(self, cost):
        pass


class TestGradientClipByGlobalNorm(TestGradientClip):
    def init(self):
        self.clip_norm = 0.2

    def check_clip_result(self, out, out_clip):
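        # Recompute the expected global-norm scaling factor from the unclipped
        # gradients and check every clipped gradient against it.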
        global_norm = 0
        for v in out:
            global_norm += np.sum(np.square(v))
        global_norm = np.sqrt(global_norm)
        scale = self.clip_norm / np.maximum(self.clip_norm, global_norm)
        for i in range(len(out)):
            out[i] = scale * out[i]

        for u, v in zip(out, out_clip):
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-05,
                atol=1e-08,
                err_msg='gradient clip by global norm has wrong results!\nu={}\nv={}\ndiff={}'.format(
                    u, v, u - v
                ),
            )

    # test whether the output is right when using 'set_gradient_clip'
    def test_old_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
            paddle.nn.clip.set_gradient_clip(clip)
            return paddle.nn.clip.append_gradient_clip_ops(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # test whether the output is right when using grad_clip
    def test_new_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # test whether the output is right when using grad_clip with float64
    def test_new_gradient_clip_fp64(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace(), "float64")

    # invoke 'set_gradient_clip' in the wrong order
    def test_wrong_API_order(self):
        def backward_func(cost):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=5.0)
            paddle.nn.clip.set_gradient_clip(clip)
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=0.01, grad_clip=clip
            )
            # if 'set_gradient_clip' and 'optimize(grad_clip)' are used together, 'set_gradient_clip' will be ineffective
            sgd_optimizer.minimize(cost)
            # 'set_gradient_clip' must be called before 'minimize'; otherwise it will be ineffective
            paddle.nn.clip.set_gradient_clip(clip)

        self.backward_and_optimize = backward_func
        for place in self.get_places():
            self.check_sparse_gradient_clip(place)

    # raise TypeError
    def test_typeError(self):
        # the grad_clip argument of the optimizer must be an instance of a subclass of GradientClipBase
        with self.assertRaises(TypeError):
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=0.1, grad_clip="test"
            )

    # if grad is None or does not need clipping
    def test_none_grad_fp32(self):
        ops = self._test_none_grad_helper("float32")
        self.assertListEqual(
            ops,
            [
                'squared_l2_norm',
                'squared_l2_norm',
                'sum',
                'sqrt',
                'fill_constant',
                'elementwise_max',
                'elementwise_div',
                'elementwise_mul',
                'elementwise_mul',
            ],
        )

    def test_none_grad_fp16(self):
        ops = self._test_none_grad_helper("float16")
        self.assertListEqual(
            ops,
            [
                'square',
                'reduce_sum',
                'square',
                'reduce_sum',
                'sum',
                'cast',
                'sqrt',
                'fill_constant',
                'elementwise_max',
                'elementwise_div',
                'cast',
                'elementwise_mul',
                'cast',
                'elementwise_mul',
            ],
        )

    def _test_none_grad_helper(self, dtype):
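        # Create two parameters of the given dtype, clip a params_grads list
        # that contains a None gradient, and return the op types inserted into
        # the block so the callers can check the generated clip ops.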
        prog = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(
            main_program=prog, startup_program=startup_program
        ):
            clip = paddle.nn.ClipGradByGlobalNorm(self.clip_norm)
            x = (
                fluid.default_main_program()
                .global_block()
                .create_parameter(name="x", shape=[2, 3], dtype=dtype)
            )
            y = (
                fluid.default_main_program()
                .global_block()
                .create_parameter(name="y", shape=[2, 3], dtype=dtype)
            )

            # (x, None) should not be returned
            params_grads = [(x, None), (x, y), (y, x)]
            params_grads = clip(params_grads)
            self.assertTrue(
                len(params_grads) == 2,
                "ClipByGlobalNorm: when grad is None, it shouldn't be returned by gradient clip!",
            )

            ops = [op.type for op in x.block.ops]
        return ops


class TestGradientClipByNorm(TestGradientClip):
    def init(self):
        self.clip_norm = 0.2

    def check_clip_result(self, out, out_clip):
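        # ClipGradByNorm scales each gradient by its own L2 norm, so the
        # expected result is computed per gradient rather than globally.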
        for u, v in zip(out, out_clip):
            norm = np.sqrt(np.sum(np.power(u, 2)))
            scale = self.clip_norm / np.maximum(self.clip_norm, norm)
            u = u * scale
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-05,
                atol=1e-08,
                err_msg='gradient clip by norm has wrong results!',
            )

    # test whether the output is right when using grad_clip
    def test_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByNorm(clip_norm=self.clip_norm)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # if grad is None or does not need clipping
    def test_none_grad(self):
        clip = paddle.nn.ClipGradByNorm(self.clip_norm)
        x = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="x", shape=[2, 3], dtype="float32", need_clip=False
            )
        )
        y = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="y", shape=[2, 3], dtype="float32", need_clip=False
            )
        )

        # (x, None) should not be returned
        params_grads = [(x, None), (x, y)]
        params_grads = clip(params_grads)
        self.assertTrue(
            len(params_grads) == 1,
            "ClipGradByNorm: when grad is None, it shouldn't be returned by gradient clip!",
        )
        self.assertTrue(
            params_grads[0][1].name == 'y',
            "ClipGradByNorm: grad should not be clipped when filtered out!",
        )


class TestGradientClipByValue(TestGradientClip):
    def init(self):
        self.max = 0.2
        self.min = 0.1

    def check_clip_result(self, out, out_clip):
        for i, v in enumerate(out):
            out[i] = np.clip(v, self.min, self.max)
        for u, v in zip(out, out_clip):
            u = np.clip(u, self.min, self.max)
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-06,
                atol=1e-08,
                err_msg='gradient clip by value has wrong results!',
            )

    # test whether the output is right when using grad_clip
    def test_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByValue(max=self.max, min=self.min)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # if grad is None or does not need clipping
    def test_none_grad(self):
        clip = paddle.nn.ClipGradByValue(self.max, self.min)
        x = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="x", shape=[2, 3], dtype="float32", need_clip=False
            )
        )
        y = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="y", shape=[2, 3], dtype="float32", need_clip=False
            )
        )

        # (x, None) should not be returned
        params_grads = [(x, None), (x, y)]
        params_grads = clip(params_grads)
        self.assertTrue(
            len(params_grads) == 1,
            "ClipGradByValue: when grad is None, it shouldn't be returned by gradient clip!",
        )
        self.assertTrue(
            params_grads[0][1].name == 'y',
            "ClipGradByValue: grad should not be clipped when filtered out!",
        )


class TestDygraphGradientClip(unittest.TestCase):
    def test_gradient_clip(self):
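        # Run one forward/backward pass of a small Linear layer in dygraph
        # mode; subclasses check the clipping behaviour in check_clip_result.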
        with fluid.dygraph.guard():
            linear = paddle.nn.Linear(5, 5)
            inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
            out = linear(fluid.dygraph.to_variable(inputs))
            loss = paddle.mean(out)
            loss.backward()
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=0.0,
                parameter_list=linear.parameters(),
                grad_clip=paddle.nn.ClipGradByGlobalNorm(0.1),
            )
            self.check_clip_result(loss, sgd_optimizer)

    def check_clip_result(self, loss, optimizer):
        pass


class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip):
    def setUp(self):
        self.clip_norm = 0.8
        self.clip1 = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
        self.clip2 = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)

    def check_clip_result(self, loss, optimizer):
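        # The global norm of the clipped gradients should equal
        # min(global_norm, clip_norm) computed from the raw gradients.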
        # if grad is None
        x = fluid.dygraph.to_variable(
            np.array([2, 3]).astype("float32"), name="x"
        )
        y = fluid.dygraph.to_variable(
            np.array([3, 4]).astype("float32"), name="y"
        )
        assert len(self.clip1([(x, x), (x, y), (x, None)])) == 2
        # get params and grads from network
        opt, params_grads = optimizer.minimize(loss)
        _, grads = zip(*params_grads)
        params_grads = self.clip2(params_grads)
        _, grads_clip = zip(*params_grads)

        global_norm = 0
        for u in grads:
            u = u.numpy()
            global_norm += np.sum(np.power(u, 2))
        global_norm = np.sqrt(global_norm)

        global_norm_clip = 0
        for v in grads_clip:
            v = v.numpy()
            global_norm_clip += np.sum(np.power(v, 2))
        global_norm_clip = np.sqrt(global_norm_clip)

        a = np.minimum(global_norm, self.clip_norm)
        b = global_norm_clip
        self.assertTrue(
            np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
            "gradient clip by global norm has wrong results, expetcd:%f, but received:%f"
            % (a, b),
        )


class TestDygraphGradientClipByNorm(TestDygraphGradientClip):
    def setUp(self):
        self.clip_norm = 0.8
        self.clip = paddle.nn.ClipGradByNorm(clip_norm=self.clip_norm)

    def check_clip_result(self, loss, optimizer):
        # if grad is None
        x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32"))
        assert len(self.clip([(x, None)])) == 0
        # get params and grads from network
        self.clip([(fluid.dygraph.to_variable(np.array([2, 3])), None)])
        opt, params_grads = optimizer.minimize(loss)
        _, grads = zip(*params_grads)
        params_grads = self.clip(params_grads)
        _, grads_clip = zip(*params_grads)

        for u, v in zip(grads, grads_clip):
            u = u.numpy()
            v = v.numpy()
            a = np.sqrt(np.sum(np.power(u, 2)))
            a = np.minimum(a, self.clip_norm)
            b = np.sqrt(np.sum(np.power(v, 2)))
            self.assertTrue(
                np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
                "gradient clip by norm has wrong results, expetcd:%f, but received:%f"
                % (a, b),
            )


class TestDygraphGradientClipByValue(TestDygraphGradientClip):
    def setUp(self):
        self.max = 0.2
        self.min = 0.1
        self.clip = paddle.nn.ClipGradByValue(max=self.max, min=self.min)

    def check_clip_result(self, loss, optimizer):
        # if grad is None
        x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32"))
        assert len(self.clip([(x, None)])) == 0
        # get params and grads from network
        opt, params_grads = optimizer.minimize(loss)
        _, grads = zip(*params_grads)
        params_grads = self.clip(params_grads)
        _, grads_clip = zip(*params_grads)
        for u, v in zip(grads, grads_clip):
            u = np.clip(u.numpy(), self.min, self.max)
            v = v.numpy()
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-06,
                atol=1e-08,
                err_msg='gradient clip by value has wrong results!',
            )


class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.linear = paddle.nn.Linear(5, 5)
        self.batch_norm = paddle.nn.BatchNorm(5)

    def forward(self, x):
        x = self.linear(x)
        x = self.batch_norm(x)
        return x


class TestDygraphGradientClipFP16(unittest.TestCase):
    def test_gradient_clip(self):
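        # AMP O2 path: scale the loss, unscale the gradients, apply
        # ClipGradByGlobalNorm manually, then compare the clipped global norm
        # with min(global_norm, 0.8) under a looser FP16 tolerance.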
        if fluid.core.is_compiled_with_cuda():
            with fluid.dygraph.guard():
                paddle.seed(10)
                model = SimpleNet()
                sgd_optimizer = paddle.optimizer.SGD(
                    learning_rate=0.0, parameters=model.parameters()
                )
                model, sgd_optimizer = paddle.amp.decorate(
                    models=model, optimizers=sgd_optimizer, level='O2'
                )
                scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
                inputs = paddle.uniform([1, 5], min=-10, max=10).astype(
                    'float32'
                )
                with paddle.amp.auto_cast(level='O2'):
                    out = model(fluid.dygraph.to_variable(inputs))
                    loss = paddle.mean(out)
                scaled = scaler.scale(loss)
                scaled.backward()
                scaler.unscale_(sgd_optimizer)
                # before clip
                params_grads = []
                for param in model.parameters():
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        params_grads.append((param, param._grad_ivar()))
                _, grads = zip(*params_grads)
                # clip grads
                clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.8)
                params_grads = clip(params_grads)
                _, grads_clip = zip(*params_grads)
                # param update
                scaler.step(sgd_optimizer)
                scaler.update()

                global_norm = 0
                for u in grads:
                    u = u.numpy()
                    global_norm += np.sum(np.power(u, 2))
                global_norm = np.sqrt(global_norm)
                global_norm_clip = 0
                for v in grads_clip:
                    v = v.numpy()
                    global_norm_clip += np.sum(np.power(v, 2))
                global_norm_clip = np.sqrt(global_norm_clip)

                a = np.minimum(global_norm, 0.8)
                b = global_norm_clip
                self.assertTrue(
                    np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8),
                    "gradient clip by global norm has wrong results, expetcd:%f, but received:%f"
                    % (a, b),
                )


class TestDygraphGradientClipFP64(unittest.TestCase):
    def test_gradient_clip(self):
        with fluid.dygraph.guard():
            inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
            linear = paddle.nn.Linear(5, 5)
            out = linear(fluid.dygraph.to_variable(inputs))
            loss = paddle.mean(out)
            loss.backward()
            # before clip
            params_grads = []
            for param in linear.parameters():
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    params_grads.append((param, param._grad_ivar()))
            _, grads = zip(*params_grads)
            # clip grads
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1)
            params_grads = clip(params_grads)
            _, grads_clip = zip(*params_grads)

            global_norm = 0
            for u in grads:
                u = u.numpy()
                global_norm += np.sum(np.power(u, 2))
            global_norm = np.sqrt(global_norm)

            global_norm_clip = 0
            for v in grads_clip:
                v = v.numpy()
                global_norm_clip += np.sum(np.power(v, 2))
            global_norm_clip = np.sqrt(global_norm_clip)

            a = np.minimum(global_norm, 0.1)
            b = global_norm_clip

            self.assertTrue(
                np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
                "gradient clip by global norm has wrong results, expetcd:%f, but received:%f"
                % (a, b),
            )


class TestPureFP16ClipGradByGlobalNorm(unittest.TestCase):
    def check_main(self, expected_has_cast_op):
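        # Build FP16 parameters and gradients, apply ClipGradByGlobalNorm, and
        # check whether any cast op was inserted; _allow_pure_fp16_global_norm_clip
        # toggles whether the global-norm computation may stay in pure FP16.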
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            names = ["p0", "p1"]
            shapes = [[2, 3], [4, 5]]

            param_and_grads = []
            main_block = main_prog.global_block()
            for name, shape in zip(names, shapes):
                p = main_block.create_parameter(
                    name=name, shape=shape, dtype='float16'
                )
                g = main_block.create_parameter(
                    name=p.name + '@GRAD', shape=p.shape, dtype=p.dtype
                )
                param_and_grads.append((p, g))

            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
            clip(param_and_grads)
            actual_has_cast = any(op.type == 'cast' for op in main_block.ops)
            self.assertEqual(actual_has_cast, expected_has_cast_op)

    def test_main(self):
        self.check_main(True)
        _allow_pure_fp16_global_norm_clip(True)
        self.check_main(False)
        _allow_pure_fp16_global_norm_clip(False)
        self.check_main(True)


if __name__ == '__main__':
    unittest.main()