# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

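# Unit tests for gradient clipping (ClipGradByGlobalNorm, ClipGradByNorm and
# ClipGradByValue) in both static-graph and dygraph modes.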
import unittest

import numpy as np
from fake_reader import fake_imdb_reader

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.nn.clip import _allow_pure_fp16_global_norm_clip

paddle.enable_static()


def bow_net(
    data, label, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2
):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    emb = fluid.layers.embedding(
        input=data, is_sparse=True, size=[dict_dim, emb_dim]
    )
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = paddle.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
    fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
    prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
    cost = paddle.nn.functional.cross_entropy(
        input=prediction, label=label, reduction='none', use_softmax=False
    )
    avg_cost = paddle.mean(x=cost)

    return avg_cost


class TestGradientClip(unittest.TestCase):
    def setUp(self):
        self.word_dict_len = 5147
        self.BATCH_SIZE = 2
        reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
        self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
        self.clip_gradient = lambda x: None
        self.init()

    def init(self):
        pass

    def get_places(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        return places

    def check_clip_result(self, out, out_clip):
        pass

    def check_gradient_clip(self, place, dtype='float32'):
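        # Build a program without clipping and a clone with self.clip_gradient
        # applied, run both on the same batch, and compare their gradients.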
        prog = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(
            main_program=prog, startup_program=startup_program
        ):
            image = fluid.data(name="a", shape=[-1, 784], dtype='float32')
            label = fluid.data(name="b", shape=[-1, 1], dtype='int64')
            if dtype != 'float32':
                image_cast = paddle.cast(image, dtype)
                hidden = fluid.layers.fc(input=image_cast, size=32, act='relu')
            else:
                hidden = fluid.layers.fc(input=image, size=32, act='relu')
            predict = fluid.layers.fc(input=hidden, size=10, act='softmax')

            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)

        prog_clip = prog.clone()
        avg_cost_clip = prog_clip.block(0).var(avg_cost.name)

        p_g = fluid.backward.append_backward(loss=avg_cost)
        p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)

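        # Sort the (param, grad) pairs by parameter name so the two programs
        # can be compared pair by pair.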
        p_g = sorted(p_g, key=lambda x: x[0].name)
        p_g_clip = sorted(p_g_clip, key=lambda x: x[0].name)
        with fluid.program_guard(
            main_program=prog_clip, startup_program=startup_program
        ):
            p_g_clip = self.clip_gradient(p_g_clip)

        grad_list = [elem[1] for elem in p_g]
        grad_clip_list = [elem[1] for elem in p_g_clip]

        train_reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=3)
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
        exe.run(startup_program)

        data = next(train_reader())
        out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
        out_clip = exe.run(
            prog_clip, feed=feeder.feed(data), fetch_list=grad_clip_list
        )
        self.check_clip_result(out, out_clip)

    def check_sparse_gradient_clip(self, place):
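        # Train bow_net on one fake IMDB batch with the configured
        # backward_and_optimize hook and check that the loss is finite.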
        prog = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(
            main_program=prog, startup_program=startup_program
        ):
            data = fluid.data(
                name="words", shape=[-1, 1], dtype="int64", lod_level=1
            )
            label = fluid.data(name="label", shape=[-1, 1], dtype="int64")
            cost = bow_net(data, label, self.word_dict_len)

            self.backward_and_optimize(cost)

        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
        exe.run(startup_program)

        data = next(self.train_data())
        val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]
        self.assertEqual((1,), val.shape)
        self.assertFalse(np.isnan(val))

    def backward_and_optimize(self, cost):
        pass


class TestGradientClipByGlobalNorm(TestGradientClip):
    def init(self):
        self.clip_norm = 0.2

    def check_clip_result(self, out, out_clip):
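        # Reference result: scale every gradient by
        # clip_norm / max(clip_norm, global_norm) and compare with out_clip.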
        global_norm = 0
        for v in out:
            global_norm += np.sum(np.square(v))
        global_norm = np.sqrt(global_norm)
        scale = self.clip_norm / np.maximum(self.clip_norm, global_norm)
        res = []
        for i in range(len(out)):
            out[i] = scale * out[i]

        for u, v in zip(out, out_clip):
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-05,
                atol=1e-08,
                err_msg='gradient clip by global norm has wrong results!\nu={}\nv={}\ndiff={}'.format(
                    u, v, u - v
                ),
            )

    # test whether the output is right when using 'set_gradient_clip'
    def test_old_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
            paddle.nn.clip.set_gradient_clip(clip)
            return paddle.nn.clip.append_gradient_clip_ops(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # test whether the output is right when using grad_clip
    def test_new_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # test whether the output is right when using grad_clip with float64
    def test_new_gradient_clip_fp64(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace(), "float64")

    # invoke 'set_gradient_clip' in the wrong order
    def test_wrong_API_order(self):
        def backward_func(cost):
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=5.0)
            paddle.nn.clip.set_gradient_clip(clip)
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=0.01, grad_clip=clip
            )
            # if 'set_gradient_clip' and the optimizer's grad_clip are used
            # together, 'set_gradient_clip' has no effect
            sgd_optimizer.minimize(cost)
            # 'set_gradient_clip' must be called before 'minimize';
            # calling it afterwards has no effect
            paddle.nn.clip.set_gradient_clip(clip)

        self.backward_and_optimize = backward_func
        for place in self.get_places():
            self.check_sparse_gradient_clip(place)

    # raise TypeError
    def test_typeError(self):
        # the optimizer's grad_clip argument must be an instance of a subclass of GradientClipBase
        with self.assertRaises(TypeError):
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=0.1, grad_clip="test"
            )

    # if grad is None or does not need to be clipped
    def test_none_grad_fp32(self):
        ops = self._test_none_grad_helper("float32")
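        # In FP32 the global norm is expected to be built from the fused
        # squared_l2_norm op, with no cast ops.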
        self.assertListEqual(
            ops,
            [
                'squared_l2_norm',
                'squared_l2_norm',
                'sum',
                'sqrt',
                'fill_constant',
                'elementwise_max',
                'elementwise_div',
                'elementwise_mul',
                'elementwise_mul',
            ],
        )

    def test_none_grad_fp16(self):
        ops = self._test_none_grad_helper("float16")
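        # FP16 gradients are expected to go through per-tensor square/reduce_sum
        # plus extra cast ops when computing the global norm and applying the scale.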
        self.assertListEqual(
            ops,
            [
                'square',
                'reduce_sum',
                'square',
                'reduce_sum',
                'sum',
                'cast',
                'sqrt',
                'fill_constant',
                'elementwise_max',
                'elementwise_div',
                'cast',
                'elementwise_mul',
                'cast',
                'elementwise_mul',
            ],
        )

    def _test_none_grad_helper(self, dtype):
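        # Clip a parameter list that contains a (param, None) pair and return
        # the op types that the clipper inserted into the block.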
        prog = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(
            main_program=prog, startup_program=startup_program
        ):
            clip = paddle.nn.ClipGradByGlobalNorm(self.clip_norm)
            x = (
                fluid.default_main_program()
                .global_block()
                .create_parameter(name="x", shape=[2, 3], dtype=dtype)
            )
            y = (
                fluid.default_main_program()
                .global_block()
                .create_parameter(name="y", shape=[2, 3], dtype=dtype)
            )

            # (x, None) should not be returned
            params_grads = [(x, None), (x, y), (y, x)]
            params_grads = clip(params_grads)
            self.assertTrue(
                len(params_grads) == 2,
                "ClipByGlobalNorm: when grad is None, it shouldn't be returned by gradient clip!",
            )

            ops = [op.type for op in x.block.ops]
        return ops


class TestGradientClipByNorm(TestGradientClip):
    def init(self):
        self.clip_norm = 0.2

    def check_clip_result(self, out, out_clip):
        for u, v in zip(out, out_clip):
            norm = np.sqrt(np.sum(np.power(u, 2)))
            scale = self.clip_norm / np.maximum(self.clip_norm, norm)
            u = u * scale
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-05,
                atol=1e-08,
                err_msg='gradient clip by norm has wrong results!',
            )

    # test whether the output is right when using grad_clip
    def test_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByNorm(clip_norm=self.clip_norm)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # if grad is None or does not need to be clipped
    def test_none_grad(self):
        clip = paddle.nn.ClipGradByNorm(self.clip_norm)
        x = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="x", shape=[2, 3], dtype="float32", need_clip=False
            )
        )
        y = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="y", shape=[2, 3], dtype="float32", need_clip=False
            )
        )

        # (x, None) should not be returned
        params_grads = [(x, None), (x, y)]
        params_grads = clip(params_grads)
        self.assertTrue(
            len(clip(params_grads)) == 1,
            "ClipGradByNorm: when grad is None, it shouldn't be returned by gradient clip!",
        )
        self.assertTrue(
            params_grads[0][1].name == 'y',
            "ClipGradByNorm: grad should not be clipped when filtered out!",
        )


class TestGradientClipByValue(TestGradientClip):
    def init(self):
        self.max = 0.2
        self.min = 0.1

    def check_clip_result(self, out, out_clip):
        for i, v in enumerate(out):
            out[i] = np.clip(v, self.min, self.max)
        for u, v in zip(out, out_clip):
            u = np.clip(u, self.min, self.max)
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-06,
                atol=1e-08,
                err_msg='gradient clip by value has wrong results!',
            )

    # test whether the output is right when using grad_clip
    def test_gradient_clip(self):
        def func(params_grads):
            clip = paddle.nn.ClipGradByValue(max=self.max, min=self.min)
            return clip(params_grads)

        self.clip_gradient = func
        self.check_gradient_clip(fluid.CPUPlace())

    # if grad is None or does not need to be clipped
    def test_none_grad(self):
        clip = paddle.nn.ClipGradByValue(self.max, self.min)
        x = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="x", shape=[2, 3], dtype="float32", need_clip=False
            )
        )
        y = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                name="y", shape=[2, 3], dtype="float32", need_clip=False
            )
        )

        # (x, None) should not be returned
        params_grads = [(x, None), (x, y)]
        params_grads = clip(params_grads)
        self.assertTrue(
            len(clip(params_grads)) == 1,
            "ClipGradByValue: when grad is None, it shouldn't be returned by gradient clip!",
        )
        self.assertTrue(
            params_grads[0][1].name == 'y',
            "ClipGradByValue: grad should not be clipped when filtered out!",
        )


class TestDygraphGradientClip(unittest.TestCase):
    def test_gradient_clip(self):
        with fluid.dygraph.guard():
            linear = paddle.nn.Linear(5, 5)
            inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
            out = linear(fluid.dygraph.to_variable(inputs))
            loss = paddle.mean(out)
            loss.backward()
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=0.0,
                parameter_list=linear.parameters(),
                grad_clip=paddle.nn.ClipGradByGlobalNorm(0.1),
            )
            self.check_clip_result(loss, sgd_optimizer)

    def check_clip_result(self, loss, optimizer):
        pass


class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip):
    def setUp(self):
        self.clip_norm = 0.8
        self.clip1 = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)
        self.clip2 = paddle.nn.ClipGradByGlobalNorm(clip_norm=self.clip_norm)

    def check_clip_result(self, loss, optimizer):
        # if grad is None
        x = fluid.dygraph.to_variable(
            np.array([2, 3]).astype("float32"), name="x"
        )
        y = fluid.dygraph.to_variable(
            np.array([3, 4]).astype("float32"), name="y"
        )
        assert len(self.clip1([(x, x), (x, y), (x, None)])) == 2
        # get params and grads from network
        opt, params_grads = optimizer.minimize(loss)
        _, grads = zip(*params_grads)
        params_grads = self.clip2(params_grads)
        _, grads_clip = zip(*params_grads)

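        # After clipping, the global norm should equal
        # min(unclipped global norm, clip_norm).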
        global_norm = 0
        for u in grads:
            u = u.numpy()
            global_norm += np.sum(np.power(u, 2))
        global_norm = np.sqrt(global_norm)

        global_norm_clip = 0
        for v in grads_clip:
            v = v.numpy()
            global_norm_clip += np.sum(np.power(v, 2))
        global_norm_clip = np.sqrt(global_norm_clip)

        a = np.minimum(global_norm, self.clip_norm)
        b = global_norm_clip
        self.assertTrue(
            np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
            "gradient clip by global norm has wrong results, expected:%f, but received:%f"
            % (a, b),
        )


class TestDygraphGradientClipByNorm(TestDygraphGradientClip):
    def setUp(self):
        self.clip_norm = 0.8
        self.clip = paddle.nn.ClipGradByNorm(clip_norm=self.clip_norm)

    def check_clip_result(self, loss, optimizer):
        # if grad is None
        x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32"))
        assert len(self.clip([(x, None)])) == 0
        # get params and grads from network
        self.clip([(fluid.dygraph.to_variable(np.array([2, 3])), None)])
        opt, params_grads = optimizer.minimize(loss)
        _, grads = zip(*params_grads)
        params_grads = self.clip(params_grads)
        _, grads_clip = zip(*params_grads)

        for u, v in zip(grads, grads_clip):
            u = u.numpy()
            v = v.numpy()
            a = np.sqrt(np.sum(np.power(u, 2)))
            a = np.minimum(a, self.clip_norm)
            b = np.sqrt(np.sum(np.power(v, 2)))
            self.assertTrue(
                np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
                "gradient clip by norm has wrong results, expected:%f, but received:%f"
                % (a, b),
            )


class TestDygraphGradientClipByValue(TestDygraphGradientClip):
    def setUp(self):
        self.max = 0.2
        self.min = 0.1
        self.clip = paddle.nn.ClipGradByValue(max=self.max, min=self.min)

    def check_clip_result(self, loss, optimizer):
        # if grad is None
        x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32"))
        assert len(self.clip([(x, None)])) == 0
        # get params and grads from network
        opt, params_grads = optimizer.minimize(loss)
        _, grads = zip(*params_grads)
        params_grads = self.clip(params_grads)
        _, grads_clip = zip(*params_grads)
        for u, v in zip(grads, grads_clip):
            u = np.clip(u.numpy(), self.min, self.max)
            v = v.numpy()
            np.testing.assert_allclose(
                u,
                v,
                rtol=1e-06,
                atol=1e-08,
                err_msg='gradient clip by value has wrong results!',
            )


class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.linear = paddle.nn.Linear(5, 5)
        self.batch_norm = paddle.nn.BatchNorm(5)

    def forward(self, x):
        x = self.linear(x)
        x = self.batch_norm(x)
        return x


class TestDygraphGradientClipFP16(unittest.TestCase):
    def test_gradient_clip(self):
        if fluid.core.is_compiled_with_cuda():
            with fluid.dygraph.guard():
                paddle.seed(10)
                model = SimpleNet()
                sgd_optimizer = paddle.optimizer.SGD(
                    learning_rate=0.0, parameters=model.parameters()
                )
                model, sgd_optimizer = paddle.amp.decorate(
                    models=model, optimizers=sgd_optimizer, level='O2'
                )
                scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
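                # AMP O2 flow: scale the loss, run backward, unscale the
                # gradients, clip them manually, then step through the scaler.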
                inputs = paddle.uniform([1, 5], min=-10, max=10).astype(
                    'float32'
                )
                with paddle.amp.auto_cast(level='O2'):
                    out = model(fluid.dygraph.to_variable(inputs))
                    loss = paddle.mean(out)
                scaled = scaler.scale(loss)
                scaled.backward()
                scaler.unscale_(sgd_optimizer)
                # before clip
                params_grads = []
                for param in model.parameters():
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        params_grads.append((param, param._grad_ivar()))
                _, grads = zip(*params_grads)
                # clip grads
                clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.8)
                params_grads = clip(params_grads)
                _, grads_clip = zip(*params_grads)
                # param update
                scaler.step(sgd_optimizer)
                scaler.update()

                global_norm = 0
                for u in grads:
                    u = u.numpy()
                    global_norm += np.sum(np.power(u, 2))
                global_norm = np.sqrt(global_norm)
                global_norm_clip = 0
                for v in grads_clip:
                    v = v.numpy()
                    global_norm_clip += np.sum(np.power(v, 2))
                global_norm_clip = np.sqrt(global_norm_clip)

                a = np.minimum(global_norm, 0.8)
                b = global_norm_clip
                self.assertTrue(
                    np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8),
                    "gradient clip by global norm has wrong results, expected:%f, but received:%f"
                    % (a, b),
                )


class TestDygraphGradientClipFP64(unittest.TestCase):
    def test_gradient_clip(self):
        with fluid.dygraph.guard():
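            # Collect raw dygraph gradients via _grad_ivar(), clip by global
            # norm, and check the clipped norm equals min(norm, clip_norm).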
            inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
            linear = paddle.nn.Linear(5, 5)
            out = linear(fluid.dygraph.to_variable(inputs))
            loss = paddle.mean(out)
            loss.backward()
            # before clip
            params_grads = []
            for param in linear.parameters():
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    params_grads.append((param, param._grad_ivar()))
            _, grads = zip(*params_grads)
            # clip grads
            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1)
            params_grads = clip(params_grads)
            _, grads_clip = zip(*params_grads)

            global_norm = 0
            for u in grads:
                u = u.numpy()
                global_norm += np.sum(np.power(u, 2))
            global_norm = np.sqrt(global_norm)

            global_norm_clip = 0
            for v in grads_clip:
                v = v.numpy()
                print(v)
                global_norm_clip += np.sum(np.power(v, 2))
            global_norm_clip = np.sqrt(global_norm_clip)
            print(global_norm_clip)

            a = np.minimum(global_norm, 0.1)
            b = global_norm_clip

            self.assertTrue(
                np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
                "gradient clip by global norm has wrong results, expected:%f, but received:%f"
                % (a, b),
            )


class TestPureFP16ClipGradByGlobalNorm(unittest.TestCase):
    def check_main(self, expected_has_cast_op):
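        # Build two FP16 parameters with FP16 gradients, apply global-norm
        # clipping, and check whether any cast op was inserted.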
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            names = ["p0", "p1"]
            shapes = [[2, 3], [4, 5]]

            param_and_grads = []
            main_block = main_prog.global_block()
            for name, shape in zip(names, shapes):
                p = main_block.create_parameter(
                    name=name, shape=shape, dtype='float16'
                )
                g = main_block.create_parameter(
                    name=p.name + '@GRAD', shape=p.shape, dtype=p.dtype
                )
                param_and_grads.append((p, g))

            clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
            clip(param_and_grads)
            actual_has_cast = any(op.type == 'cast' for op in main_block.ops)
            self.assertEqual(actual_has_cast, expected_has_cast_op)

    def test_main(self):
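        # By default pure-FP16 global-norm clipping inserts cast ops;
        # _allow_pure_fp16_global_norm_clip(True) switches them off.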
        self.check_main(True)
        _allow_pure_fp16_global_norm_clip(True)
        self.check_main(False)
        _allow_pure_fp16_global_norm_clip(False)
        self.check_main(True)


if __name__ == '__main__':
    unittest.main()