#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import random
import collections
import paddle
import paddle.fluid as fluid
import unittest
from decorator_helper import *


class Memory(object):
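    """Pure-Python model of an RNN memory slot: ``ex`` holds the value carried
    over from the previous time step, ``cur`` the value written in the current
    step."""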

    def __init__(self, shape, dtype='float32'):
        self.ex = np.zeros(shape=shape, dtype=dtype)
        self.cur = None

    def update(self, val):
        assert val.shape == self.ex.shape
        assert val.dtype == self.ex.dtype
        self.cur = val

    def next(self):
        self.ex = self.cur
        self.cur = None

    def __next__(self):
        self.next()

    def reset(self):
        self.ex = np.zeros(shape=self.ex.shape, dtype=self.ex.dtype)
        self.cur = None


class Output(object):
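    """Collects the step outputs of each sequence so that the last output of
    every sequence can be retrieved."""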

    def __init__(self):
        self.outs = []

    def next_sequence(self):
        self.outs.append([])

    def out(self, val):
        self.outs[-1].append(val)

    def last(self):
        return self.outs[-1][-1]


class BaseRNN(object):
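    """A plain-Python reference RNN used as ground truth.

    Generates random variable-length input sequences, memories and parameters,
    then runs a subclass-defined ``step`` over every time step of every
    sequence.
    """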

    def __init__(self, ins, mems, params, outs, num_seq=5, max_seq_len=15):
        self.num_seq = num_seq
        self.inputs = collections.defaultdict(list)

        for _ in range(num_seq):
            seq_len = random.randint(1, max_seq_len - 1)
            for iname in ins:
                ishape = ins[iname].get('shape', None)
                idtype = ins[iname].get('dtype', 'float32')
                lst = []
                for _ in range(seq_len):
                    lst.append(np.random.random(size=ishape).astype(idtype))
                self.inputs[iname].append(lst)

        self.mems = dict()
        for mname in mems:
            mshape = mems[mname].get('shape', None)
            mdtype = mems[mname].get('dtype', 'float32')
            self.mems[mname] = Memory(shape=mshape, dtype=mdtype)

        self.params = dict()
        for pname in params:
            pshape = params[pname].get('shape', None)
            pdtype = params[pname].get('dtype', 'float32')
            self.params[pname] = np.random.random(size=pshape).astype(pdtype)

        self.outputs = dict()

        for oname in outs:
            self.outputs[oname] = Output()

    def step(self, **kwargs):
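        """Compute one time step; must be implemented by subclasses."""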
        raise NotImplementedError()

    def exe(self):
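        """Run the RNN over all sequences; for each output name, return an
        array stacking the last step's value of every sequence."""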
        retv = dict()
        for out in self.outputs:
            retv[out] = []

        for seq_id in range(self.num_seq):
            for mname in self.mems:
                self.mems[mname].reset()
            for out in self.outputs:
                self.outputs[out].next_sequence()

            iname0 = list(self.inputs.keys())[0]
            seq_len = len(self.inputs[iname0][seq_id])

            for step_id in range(seq_len):
                xargs = dict()

                for iname in self.inputs:
                    xargs[iname] = self.inputs[iname][seq_id][step_id]

                for mname in self.mems:
                    xargs[mname] = self.mems[mname]

                for pname in self.params:
                    xargs[pname] = self.params[pname]

                for out in self.outputs:
                    xargs[out] = self.outputs[out]

                self.step(**xargs)

                for mname in self.mems:
                    next(self.mems[mname])

            for out in self.outputs:
                retv[out].append(self.outputs[out].last())

        for out in retv:
            retv[out] = np.array(retv[out])
        return retv

    def to_feed(self, place):
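        """Flatten the nested inputs into LoDTensors carrying the per-sequence
        lengths, add the parameters, and return an Executor feed dict."""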
        feed_dict = dict()

        for iname in self.inputs:
            lod = []
            np_flatten = []
            for seq_id in range(len(self.inputs[iname])):
                seq_len = len(self.inputs[iname][seq_id])
                lod.append(seq_len)
                np_flatten.extend(self.inputs[iname][seq_id])

            t = fluid.Tensor()
            t.set(np.array(np_flatten), place)
            t.set_recursive_sequence_lengths([lod])
            feed_dict[iname] = t

        for pname in self.params:
            feed_dict[pname] = self.params[pname]
        return feed_dict

    def get_numeric_gradient_of_param(self, param_name, delta=0.001):
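        """Estimate the gradient of the mean output w.r.t. a matrix parameter
        by central differences:
        g[i][j] = (f(p[i][j] + delta) - f(p[i][j] - delta)) / (2 * delta)."""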
        p = self.params[param_name]
        if len(p.shape) != 2:
            raise ValueError("Numeric gradients are only supported for"
                             " matrix (2-D) parameters")
        g = np.zeros(shape=p.shape, dtype=p.dtype)

        for i in range(p.shape[0]):
            for j in range(p.shape[1]):
                o = p[i][j]
                p[i][j] += delta
                pos = self._exe_mean_out_()
                p[i][j] -= 2 * delta
                neg = self._exe_mean_out_()
                p[i][j] = o
                g[i][j] = (pos - neg) / (delta * 2)
        return g

    def get_numeric_gradient_of_input(self,
                                      input_name,
                                      delta=0.001,
                                      return_one_tensor=True):
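        """Estimate the gradient of the mean output w.r.t. a 1-D input,
        element by element, via central differences; optionally concatenate
        the per-sequence gradients into a single tensor."""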
        ipt = self.inputs[input_name]
        grad = []

        for seq in ipt:
            seq_grad = []
            for item in seq:
                item_grad = np.zeros(shape=item.shape, dtype=item.dtype)
                if len(item.shape) != 1:
                    raise ValueError("Only 1-D input items are supported")

                for i in range(len(item)):
                    o = item[i]
                    item[i] += delta
                    pos = self._exe_mean_out_()
                    item[i] -= 2 * delta
                    neg = self._exe_mean_out_()
                    item[i] = o
                    item_grad[i] = (pos - neg) / (delta * 2)
                seq_grad.append(item_grad)
            grad.append(seq_grad)

        if not return_one_tensor:
            return grad

        for i in range(len(grad)):
            grad[i] = np.concatenate(grad[i])
        grad = np.concatenate(grad)
        return grad

    def _exe_mean_out_(self):
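        """Scalar objective for the numeric gradients: the mean of every
        output's mean."""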
        outs = self.exe()
        return np.array([o.mean() for o in outs.values()]).mean()


class SeedFixedTestCase(unittest.TestCase):
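    """Test case base that fixes the numpy and Python random seeds for the
    whole test class and restores the previous RNG states afterwards."""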

    @classmethod
    def setUpClass(cls):
        """Fix random seeds to remove randomness from tests"""
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)


class TestSimpleMul(SeedFixedTestCase):
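    """Checks a DynamicRNN whose step is a single fc layer (X * W, no bias)
    against the Python reference: the forward output and the analytic
    gradients are compared with the numeric gradients."""
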
    DATA_NAME = 'X'
    DATA_WIDTH = 32
    PARAM_NAME = 'W'
    HIDDEN_WIDTH = 10
    OUT_NAME = 'Out'

    class SimpleMul(BaseRNN):

        def __init__(self):
            base = TestSimpleMul
            super(base.SimpleMul,
                  self).__init__({base.DATA_NAME: {
                      'shape': [base.DATA_WIDTH]
                  }}, {}, {
                      base.PARAM_NAME: {
                          'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH]
                      }
                  }, [base.OUT_NAME])

        def step(self, X, W, Out):
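            """One step simply emits X * W."""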
            Out.out(np.matmul(X, W))

    # Test many times locally to ensure the random seed does not break CI
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMul.SimpleMul()
        dat = fluid.layers.data(name=self.DATA_NAME,
                                shape=[self.DATA_WIDTH],
                                lod_level=1)
        dat.stop_gradient = False

        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(dat)
            o = fluid.layers.fc(input=d,
                                param_attr=self.PARAM_NAME,
                                bias_attr=False,
                                size=self.HIDDEN_WIDTH,
                                act=None)
            rnn.output(o)

        out = rnn()
        out = fluid.layers.sequence_pool(out, pool_type='last')
        loss = paddle.mean(out)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        out, w_g, i_g = list(
            map(
                np.array,
                exe.run(feed=py_rnn.to_feed(cpu),
                        fetch_list=[
                            out, self.PARAM_NAME + "@GRAD",
                            self.DATA_NAME + "@GRAD"
                        ],
                        return_numpy=False)))
        out_by_python = py_rnn.exe()[self.OUT_NAME]
        np.testing.assert_allclose(out, out_by_python, rtol=1e-05)
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        np.testing.assert_allclose(w_g_num, w_g, rtol=0.05)
        i_g_num = py_rnn.get_numeric_gradient_of_input(
            input_name=self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)
        np.testing.assert_allclose(i_g_num, i_g, rtol=0.05)


class TestSimpleMulWithMemory(SeedFixedTestCase):
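    """Like TestSimpleMul, but each step also accumulates a memory:
    o = X * W + previous memory, and the memory is updated to o."""
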
    DATA_WIDTH = 32
    HIDDEN_WIDTH = 20
    DATA_NAME = 'X'
    PARAM_NAME = 'W'

    class SimpleMulWithMemory(BaseRNN):

        def __init__(self):
            super(TestSimpleMulWithMemory.SimpleMulWithMemory, self).__init__(
                {
                    TestSimpleMulWithMemory.DATA_NAME: {
                        'shape': [TestSimpleMulWithMemory.DATA_WIDTH]
                    }
                }, {'Mem': {
                    'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH]
                }}, {
                    TestSimpleMulWithMemory.PARAM_NAME: {
                        'shape': [
                            TestSimpleMulWithMemory.DATA_WIDTH,
                            TestSimpleMulWithMemory.HIDDEN_WIDTH
                        ]
                    }
                }, ['Out'])

        def step(self, X, Mem, W, Out):
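            """One step: o = X * W + previous memory; write o back to the
            memory and emit it."""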
            o = np.matmul(X, W)
            assert isinstance(Mem, Memory)
            o += Mem.ex
            Mem.update(o)
            assert isinstance(Out, Output)
            Out.out(o)

    # many_times is used locally for debugging, to make sure the calculation is stable.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory()
        data = fluid.layers.data(name=self.DATA_NAME,
                                 shape=[self.DATA_WIDTH],
                                 lod_level=1)
        data.stop_gradient = False
        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(data)
            mem = rnn.memory(value=0.0, shape=[self.HIDDEN_WIDTH])
            hidden = fluid.layers.fc(input=d,
                                     size=self.HIDDEN_WIDTH,
                                     param_attr=self.PARAM_NAME,
                                     bias_attr=False,
                                     act=None)
            o = fluid.layers.elementwise_add(x=hidden, y=mem)
            rnn.update_memory(mem, o)
            rnn.output(o)

        out = rnn()
        last = fluid.layers.sequence_pool(input=out, pool_type='last')
        loss = paddle.mean(last)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        feed = py_rnn.to_feed(cpu)
        last_np, w_g, i_g = list(
            map(
                np.array,
                exe.run(feed=feed,
                        fetch_list=[
                            last, self.PARAM_NAME + "@GRAD",
                            self.DATA_NAME + "@GRAD"
                        ],
                        return_numpy=False)))
        last_by_py, = list(py_rnn.exe().values())
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        np.testing.assert_allclose(last_np, last_by_py, rtol=1e-05)

        np.testing.assert_allclose(w_g_num, w_g, rtol=0.1)
        i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)

        # Since this RNN performs many floating-point additions, the result
        # may not be numerically stable, hence rtol = 0.1.
        np.testing.assert_allclose(i_g_num, i_g, rtol=0.1)


if __name__ == '__main__':
    unittest.main()