# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.distributed as dist
from paddle.nn import Linear

paddle.seed(1024)
np.random.seed(2021)

batch = 5
in_dim = 10
out_dim = 20


class SimpleNet(paddle.nn.Layer):
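    """Layer whose forward path depends on the rank and the input.

    w1 is multiplied in only on trainer 1 when the input is the all-ones
    batch; every other case uses w2. unused_param never appears in
    forward(), and the queue/queue_ptr buffers exist only to exercise
    sync_params_buffers.
    """
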
    def __init__(self, train_id):
        super().__init__()
        self.w1 = self.create_parameter(
            shape=[in_dim, out_dim], dtype="float32"
        )
        self.w2 = self.create_parameter(
            shape=[in_dim, out_dim], dtype="float32"
        )
        self.share_net = Linear(out_dim, 10)

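        # Never referenced in forward(), so the test wraps the model with
        # DataParallel(find_unused_parameters=True).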
        self.unused_param = self.create_parameter(
            shape=[out_dim, in_dim], dtype="float64"
        )

        # registered only to test sync_params_buffers
        self.register_buffer("queue", paddle.randn([10, 5]))
        self.queue = paddle.nn.functional.normalize(self.queue, axis=0)
        self.register_buffer("queue_ptr", paddle.zeros([1], 'int64'))

        self.trainer_id = train_id

    def forward(self, x):
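        # w1 is used only on trainer 1 when the whole batch is ones;
        # every other (rank, input) combination goes through w2.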
        is_use = (
            paddle.equal_all(x, paddle.ones(shape=(batch, in_dim))).item()
            and self.trainer_id == 1
        )

        if is_use:
            tmp = paddle.matmul(x, self.w1)
        else:
            tmp = paddle.matmul(x, self.w2)

        return self.share_net(tmp)


class TestDistTraining(unittest.TestCase):
    def test_multiple_gpus(self):
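        """Train two identically initialized DataParallel copies of SimpleNet.

        Every step the gradients are checked to be identical across ranks,
        and, since only model_a's gradients are cleared, model_b must hold
        the accumulated sum of model_a's per-step gradients.
        """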
        dist.init_parallel_env()
        self.trainer_id = dist.get_rank()

        model_a = SimpleNet(self.trainer_id)
        model_b = SimpleNet(self.trainer_id)

        state_dict = model_a.state_dict()
        model_b.set_state_dict(state_dict)

        model_a = paddle.DataParallel(model_a, find_unused_parameters=True)
        model_b = paddle.DataParallel(model_b, find_unused_parameters=True)

        ones_input = paddle.ones(shape=(batch, in_dim))
        ones_input.stop_gradient = True

        w1_grad_sum = np.zeros((in_dim, out_dim), dtype='float32')
        w2_grad_sum = np.zeros((in_dim, out_dim), dtype='float32')

        for step_id in range(5):
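            # Alternate between a random batch and the all-ones batch so the
            # branch taken in SimpleNet.forward() changes from step to step.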
            random_input = paddle.rand(shape=(batch, in_dim))
            random_input.stop_gradient = True

            if step_id % 2 == 0:
                out_a = model_a(random_input)
                out_b = model_b(random_input)
            else:
                out_a = model_a(ones_input)
                out_b = model_b(ones_input)

            out_a.sum().backward()
            out_b.sum().backward()

            self.check_gradient(model_a.parameters())
            self.check_gradient(model_b.parameters())

            # check that gradients accumulate correctly across steps
            w1_grad_sum = self.check_acc(
                model_a._layers.w1.grad, w1_grad_sum, model_b._layers.w1.grad
            )
            w2_grad_sum = self.check_acc(
                model_a._layers.w2.grad, w2_grad_sum, model_b._layers.w2.grad
            )

            model_a.clear_gradients()

    def check_acc(self, grad, grad_sum, acc_grad):
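        # grad: current per-step gradient, grad_sum: running numpy sum of
        # those gradients, acc_grad: the accumulated gradient kept by the
        # model whose gradients are never cleared. Returns the updated sum.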
        if grad is not None:
            grad_sum = grad_sum + grad.numpy(False)
            acc_grad = acc_grad.numpy(False) if acc_grad is not None else None
            np.testing.assert_allclose(grad_sum, acc_grad, rtol=1e-6)
        return grad_sum

    def print_trainer_0(self, *args):
        if self.trainer_id == 0:
            print(*args)

    def broadcast_param(self, param, root):
        paddle.distributed.broadcast(param, root)
        return param

    def check_gradient(self, params):
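        # Broadcast every trainable gradient from rank 1 and check on rank 0
        # that the local gradient matches, i.e. DataParallel kept the
        # gradients synchronized across ranks.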
        other_param = []
        for param in params:
            if param.trainable and (param._grad_ivar() is not None):
                grad = param._grad_ivar()
                other_grad = self.broadcast_param(grad.clone(), root=1)
                if self.trainer_id == 0:
                    np.testing.assert_allclose(
                        other_grad.numpy(False), grad.numpy(False)
                    )


if __name__ == '__main__':
    unittest.main()