#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import random
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
from decorator_helper import prog_scope


class Memory:
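    """Python model of an RNN memory cell: ``ex`` holds the state from the
    previous time step, ``cur`` holds the value written by the current step,
    and ``next()`` rolls ``cur`` into ``ex``."""
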
    def __init__(self, shape, dtype='float32'):
        self.ex = np.zeros(shape=shape, dtype=dtype)
        self.cur = None

    def update(self, val):
        assert val.shape == self.ex.shape
        assert val.dtype == self.ex.dtype
        self.cur = val

    def next(self):
        self.ex = self.cur
        self.cur = None

    def __next__(self):
        self.next()

    def reset(self):
        self.ex = np.zeros(shape=self.ex.shape, dtype=self.ex.dtype)
        self.cur = None


class Output:
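    """Collects step outputs grouped by sequence: ``next_sequence`` starts a
    new sequence, ``out`` records one step's value, and ``last`` returns the
    final value of the current sequence."""
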
    def __init__(self):
        self.outs = []

    def next_sequence(self):
        self.outs.append([])

    def out(self, val):
        self.outs[-1].append(val)

    def last(self):
        return self.outs[-1][-1]


class BaseRNN:
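    """Pure-Python reference RNN used for gradient checking.

    ``ins``, ``mems`` and ``params`` map names to ``{'shape', 'dtype'}``
    specs; ``outs`` lists the output names. Random input sequences are
    generated in ``__init__``, and subclasses implement ``step``, which
    receives every input, memory, parameter and output by name.
    """
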
    def __init__(self, ins, mems, params, outs, num_seq=5, max_seq_len=15):
        self.num_seq = num_seq
        self.inputs = collections.defaultdict(list)

        for _ in range(num_seq):
            seq_len = random.randint(1, max_seq_len - 1)
            for iname in ins:
                ishape = ins[iname].get('shape', None)
                idtype = ins[iname].get('dtype', 'float32')
                lst = []
                for _ in range(seq_len):
                    lst.append(np.random.random(size=ishape).astype(idtype))
                self.inputs[iname].append(lst)

        self.mems = dict()
        for mname in mems:
            mshape = mems[mname].get('shape', None)
            mdtype = mems[mname].get('dtype', 'float32')
            self.mems[mname] = Memory(shape=mshape, dtype=mdtype)

        self.params = dict()
        for pname in params:
            pshape = params[pname].get('shape', None)
            pdtype = params[pname].get('dtype', 'float32')
            self.params[pname] = np.random.random(size=pshape).astype(pdtype)

        self.outputs = dict()

        for oname in outs:
            self.outputs[oname] = Output()

    def step(self, **kwargs):
        raise NotImplementedError()

    def exe(self):
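        """Run the Python RNN over all generated sequences and return, for
        each output name, an array stacking the last step of each sequence."""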
        retv = dict()
        for out in self.outputs:
            retv[out] = []

        for seq_id in range(self.num_seq):
            for mname in self.mems:
                self.mems[mname].reset()
            for out in self.outputs:
                self.outputs[out].next_sequence()

            iname0 = list(self.inputs.keys())[0]
            seq_len = len(self.inputs[iname0][seq_id])

            for step_id in range(seq_len):
                xargs = dict()

                for iname in self.inputs:
                    xargs[iname] = self.inputs[iname][seq_id][step_id]

                for mname in self.mems:
                    xargs[mname] = self.mems[mname]

                for pname in self.params:
                    xargs[pname] = self.params[pname]

                for out in self.outputs:
                    xargs[out] = self.outputs[out]

                self.step(**xargs)

                for mname in self.mems:
                    next(self.mems[mname])

            for out in self.outputs:
                retv[out].append(self.outputs[out].last())

        for out in retv:
            retv[out] = np.array(retv[out])
        return retv

    def to_feed(self, place):
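        """Build the feed dict: one LoD tensor per input (flattened steps
        plus per-sequence lengths as the LoD) and the raw parameter arrays."""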
        feed_dict = dict()

        for iname in self.inputs:
            lod = []
            np_flatten = []
            for seq_id in range(len(self.inputs[iname])):
                seq_len = len(self.inputs[iname][seq_id])
                lod.append(seq_len)
                np_flatten.extend(self.inputs[iname][seq_id])

            t = fluid.Tensor()
            t.set(np.array(np_flatten), place)
            t.set_recursive_sequence_lengths([lod])
            feed_dict[iname] = t

        for pname in self.params:
            feed_dict[pname] = self.params[pname]
        return feed_dict

    def get_numeric_gradient_of_param(self, param_name, delta=0.001):
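        """Estimate d(mean output)/d(param) entry-wise by central finite
        differences: (f(p + delta) - f(p - delta)) / (2 * delta)."""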
        p = self.params[param_name]
        if len(p.shape) != 2:
            raise ValueError(
                "Numeric gradient is only supported for 2-D (matrix)"
                " parameters"
            )
        g = np.zeros(shape=p.shape, dtype=p.dtype)

        for i in range(p.shape[0]):
            for j in range(p.shape[1]):
                o = p[i][j]
                p[i][j] += delta
                pos = self._exe_mean_out_()
                p[i][j] -= 2 * delta
                neg = self._exe_mean_out_()
                p[i][j] = o
                g[i][j] = (pos - neg) / (delta * 2)
        return g

    def get_numeric_gradient_of_input(
        self, input_name, delta=0.001, return_one_tensor=True
    ):
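        """Estimate the gradient of the mean output w.r.t. one input by
        central finite differences. Returns a nested per-sequence list, or a
        single concatenated array when ``return_one_tensor`` is True."""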
        ipt = self.inputs[input_name]
        grad = []

        for seq in ipt:
            seq_grad = []
            for item in seq:
                item_grad = np.zeros(shape=item.shape, dtype=item.dtype)
                if len(item.shape) != 1:
                    raise ValueError("Not support")

                for i in range(len(item)):
                    o = item[i]
                    item[i] += delta
                    pos = self._exe_mean_out_()
                    item[i] -= 2 * delta
                    neg = self._exe_mean_out_()
                    item[i] = o
                    item_grad[i] = (pos - neg) / (delta * 2)
                seq_grad.append(item_grad)
            grad.append(seq_grad)

        if not return_one_tensor:
            return grad

        for i in range(len(grad)):
            grad[i] = np.concatenate(grad[i])
        grad = np.concatenate(grad)
        return grad

    def _exe_mean_out_(self):
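        """Scalar objective used for gradient checking: the mean over the
        means of all outputs."""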
        outs = self.exe()
        return np.array([o.mean() for o in outs.values()]).mean()


class SeedFixedTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Fix random seeds to remove randomness from tests"""
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)


class TestSimpleMul(SeedFixedTestCase):
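    """Compare a DynamicRNN whose step is a single fc (matmul) against the
    Python reference, for the forward output and both numeric gradients."""
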
    DATA_NAME = 'X'
    DATA_WIDTH = 32
    PARAM_NAME = 'W'
    HIDDEN_WIDTH = 10
    OUT_NAME = 'Out'

    class SimpleMul(BaseRNN):
        def __init__(self):
            base = TestSimpleMul
            super().__init__(
                {base.DATA_NAME: {'shape': [base.DATA_WIDTH]}},
                {},
                {
                    base.PARAM_NAME: {
                        'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH]
                    }
                },
                [base.OUT_NAME],
            )

        def step(self, X, W, Out):
            Out.out(np.matmul(X, W))

    # Tested many times locally to make sure the fixed random seed cannot break CI.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMul.SimpleMul()
        dat = fluid.layers.data(
            name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1
        )
        dat.stop_gradient = False

        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(dat)
            o = fluid.layers.fc(
                input=d,
                param_attr=self.PARAM_NAME,
                bias_attr=False,
                size=self.HIDDEN_WIDTH,
                act=None,
            )
            rnn.output(o)

        out = rnn()
        out = fluid.layers.sequence_pool(out, pool_type='last')
        loss = paddle.mean(out)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        out, w_g, i_g = list(
            map(
                np.array,
                exe.run(
                    feed=py_rnn.to_feed(cpu),
                    fetch_list=[
                        out,
                        self.PARAM_NAME + "@GRAD",
                        self.DATA_NAME + "@GRAD",
                    ],
                    return_numpy=False,
                ),
            )
        )
        out_by_python = py_rnn.exe()[self.OUT_NAME]
        np.testing.assert_allclose(out, out_by_python, rtol=1e-05)
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        np.testing.assert_allclose(w_g_num, w_g, rtol=0.05)
        i_g_num = py_rnn.get_numeric_gradient_of_input(
            input_name=self.DATA_NAME
        )
        i_g_num = i_g_num.reshape(i_g.shape)
        np.testing.assert_allclose(i_g_num, i_g, rtol=0.05)


class TestSimpleMulWithMemory(SeedFixedTestCase):
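    """Same as TestSimpleMul, but each step also accumulates a memory, so
    gradients flow across time steps and need looser tolerances."""
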
    DATA_WIDTH = 32
    HIDDEN_WIDTH = 20
    DATA_NAME = 'X'
    PARAM_NAME = 'W'

    class SimpleMulWithMemory(BaseRNN):
        def __init__(self):
            super().__init__(
                {
                    TestSimpleMulWithMemory.DATA_NAME: {
                        'shape': [TestSimpleMulWithMemory.DATA_WIDTH]
                    }
                },
                {'Mem': {'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH]}},
                {
                    TestSimpleMulWithMemory.PARAM_NAME: {
                        'shape': [
                            TestSimpleMulWithMemory.DATA_WIDTH,
                            TestSimpleMulWithMemory.HIDDEN_WIDTH,
                        ]
                    }
                },
                ['Out'],
            )

        def step(self, X, Mem, W, Out):
            o = np.matmul(X, W)
            assert isinstance(Mem, Memory)
            o += Mem.ex
            Mem.update(o)
            assert isinstance(Out, Output)
            Out.out(o)

    # many_times is used locally for debugging, to make sure the calculation is stable.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory()
        data = fluid.layers.data(
            name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1
        )
        data.stop_gradient = False
        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(data)
            mem = rnn.memory(value=0.0, shape=[self.HIDDEN_WIDTH])
            hidden = fluid.layers.fc(
                input=d,
                size=self.HIDDEN_WIDTH,
                param_attr=self.PARAM_NAME,
                bias_attr=False,
                act=None,
            )
            o = fluid.layers.elementwise_add(x=hidden, y=mem)
            rnn.update_memory(mem, o)
            rnn.output(o)

        out = rnn()
        last = fluid.layers.sequence_pool(input=out, pool_type='last')
        loss = paddle.mean(last)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        feed = py_rnn.to_feed(cpu)
        last_np, w_g, i_g = list(
            map(
                np.array,
                exe.run(
                    feed=feed,
                    fetch_list=[
                        last,
                        self.PARAM_NAME + "@GRAD",
                        self.DATA_NAME + "@GRAD",
                    ],
                    return_numpy=False,
                ),
            )
        )
        (last_by_py,) = list(py_rnn.exe().values())
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        np.testing.assert_allclose(last_np, last_by_py, rtol=1e-05)

        np.testing.assert_allclose(w_g_num, w_g, rtol=0.1)
        i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)

        # Since this RNN performs many floating-point additions, the result
        # may not be numerically stable, hence the loose tolerance rtol = 0.1.
        np.testing.assert_allclose(i_g_num, i_g, rtol=0.1)


if __name__ == '__main__':
    unittest.main()