#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
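"""Gradient checks for fluid.layers.DynamicRNN.

A plain-NumPy reference implementation (BaseRNN) runs the same computation
as the fluid program and estimates gradients with central finite
differences; the tests compare those estimates against the analytic
gradients computed by the framework.
"""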

from __future__ import print_function

import numpy as np
import random
import collections
import paddle
import paddle.fluid as fluid
import unittest
from decorator_helper import *


class Memory(object):
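    """Mimics an RNN memory cell: ``ex`` holds the value from the previous
    time step, ``cur`` the value written during the current step; ``next``
    advances one step."""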

    def __init__(self, shape, dtype='float32'):
        self.ex = np.zeros(shape=shape, dtype=dtype)
        self.cur = None

    def update(self, val):
        assert val.shape == self.ex.shape
        assert val.dtype == self.ex.dtype
        self.cur = val

    def next(self):
        self.ex = self.cur
        self.cur = None

    def __next__(self):
        self.next()

    def reset(self):
        self.ex = np.zeros(shape=self.ex.shape, dtype=self.ex.dtype)
        self.cur = None


class Output(object):
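    """Collects step outputs, grouped per sequence; ``last()`` returns the
    final output of the most recent sequence."""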

    def __init__(self):
        self.outs = []

    def next_sequence(self):
        self.outs.append([])

    def out(self, val):
        self.outs[-1].append(val)

    def last(self):
        return self.outs[-1][-1]


class BaseRNN(object):
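    """A plain-NumPy reference RNN used for gradient checking.

    ``ins``, ``mems`` and ``params`` map names to ``{'shape': ..., 'dtype': ...}``
    specs from which random input sequences, memories and parameters are
    created; ``outs`` lists the output names. Subclasses implement ``step``.
    """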
    def __init__(self, ins, mems, params, outs, num_seq=5, max_seq_len=15):
        self.num_seq = num_seq
        self.inputs = collections.defaultdict(list)

        for _ in range(num_seq):
            seq_len = random.randint(1, max_seq_len - 1)
            for iname in ins:
                ishape = ins[iname].get('shape', None)
                idtype = ins[iname].get('dtype', 'float32')
                lst = []
                for _ in range(seq_len):
                    lst.append(np.random.random(size=ishape).astype(idtype))
                self.inputs[iname].append(lst)

        self.mems = dict()
        for mname in mems:
            mshape = mems[mname].get('shape', None)
            mdtype = mems[mname].get('dtype', 'float32')
            self.mems[mname] = Memory(shape=mshape, dtype=mdtype)

        self.params = dict()
        for pname in params:
            pshape = params[pname].get('shape', None)
            pdtype = params[pname].get('dtype', 'float32')
            self.params[pname] = np.random.random(size=pshape).astype(pdtype)

        self.outputs = dict()

        for oname in outs:
            self.outputs[oname] = Output()

    def step(self, **kwargs):
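        """Computes one time step; inputs, memories, parameters and outputs
        are passed by name as keyword arguments."""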
        raise NotImplementedError()

    def exe(self):
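        """Runs all sequences through ``step`` and returns, for every output,
        the value produced at the last time step of each sequence."""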
        retv = dict()
        for out in self.outputs:
            retv[out] = []

        for seq_id in range(self.num_seq):
            for mname in self.mems:
                self.mems[mname].reset()
            for out in self.outputs:
                self.outputs[out].next_sequence()

            iname0 = list(self.inputs.keys())[0]
            seq_len = len(self.inputs[iname0][seq_id])

            for step_id in range(seq_len):
                xargs = dict()

                for iname in self.inputs:
                    xargs[iname] = self.inputs[iname][seq_id][step_id]

                for mname in self.mems:
                    xargs[mname] = self.mems[mname]

                for pname in self.params:
                    xargs[pname] = self.params[pname]

                for out in self.outputs:
                    xargs[out] = self.outputs[out]

                self.step(**xargs)

                for mname in self.mems:
                    next(self.mems[mname])

            for out in self.outputs:
                retv[out].append(self.outputs[out].last())

        for out in retv:
            retv[out] = np.array(retv[out])
        return retv

    def to_feed(self, place):
        feed_dict = dict()

        for iname in self.inputs:
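            # Flatten this input's ragged sequences into a single LoDTensor:
            # concatenate the step values along time and record every
            # sequence's length as a level-1 LoD.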
            lod = []
            np_flatten = []
            for seq_id in range(len(self.inputs[iname])):
                seq_len = len(self.inputs[iname][seq_id])
                lod.append(seq_len)
                np_flatten.extend(self.inputs[iname][seq_id])

            t = fluid.Tensor()
            t.set(np.array(np_flatten), place)
            t.set_recursive_sequence_lengths([lod])
            feed_dict[iname] = t

        for pname in self.params:
            feed_dict[pname] = self.params[pname]
        return feed_dict

    def get_numeric_gradient_of_param(self, param_name, delta=0.001):
        p = self.params[param_name]
        if len(p.shape) != 2:
            raise ValueError("Getting the numeric gradient is only supported"
                             " for matrix (2-D) parameters")
        g = np.zeros(shape=p.shape, dtype=p.dtype)

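        # Central finite difference: g[i][j] ~= (f(p[i][j] + delta) -
        # f(p[i][j] - delta)) / (2 * delta), where f is the mean of all RNN
        # outputs (see _exe_mean_out_).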
        for i in range(p.shape[0]):
            for j in range(p.shape[1]):
                o = p[i][j]
                p[i][j] += delta
                pos = self._exe_mean_out_()
                p[i][j] -= 2 * delta
                neg = self._exe_mean_out_()
                p[i][j] = o
                g[i][j] = (pos - neg) / (delta * 2)
        return g

    def get_numeric_gradient_of_input(self,
                                      input_name,
                                      delta=0.001,
                                      return_one_tensor=True):
        ipt = self.inputs[input_name]
        grad = []

        for seq in ipt:
            seq_grad = []
            for item in seq:
                item_grad = np.zeros(shape=item.shape, dtype=item.dtype)
                if len(item.shape) != 1:
                    raise ValueError(
                        "Numeric gradients are only supported for 1-D inputs")

                for i in range(len(item)):
                    o = item[i]
                    item[i] += delta
                    pos = self._exe_mean_out_()
                    item[i] -= 2 * delta
                    neg = self._exe_mean_out_()
                    item[i] = o
                    item_grad[i] = (pos - neg) / (delta * 2)
                seq_grad.append(item_grad)
            grad.append(seq_grad)

        if not return_one_tensor:
            return grad

        for i in range(len(grad)):
            grad[i] = np.concatenate(grad[i])
        grad = np.concatenate(grad)
        return grad

    def _exe_mean_out_(self):
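        """Scalar objective for the finite-difference checks: the mean of
        the per-output means."""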
        outs = self.exe()
        return np.array([o.mean() for o in outs.values()]).mean()


class SeedFixedTestCase(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        """Fix random seeds to remove randomness from tests"""
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        np.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)


class TestSimpleMul(SeedFixedTestCase):
    DATA_NAME = 'X'
    DATA_WIDTH = 32
    PARAM_NAME = 'W'
    HIDDEN_WIDTH = 10
    OUT_NAME = 'Out'

    class SimpleMul(BaseRNN):

        def __init__(self):
            base = TestSimpleMul
            super(base.SimpleMul,
                  self).__init__({base.DATA_NAME: {
                      'shape': [base.DATA_WIDTH]
                  }}, {}, {
                      base.PARAM_NAME: {
                          'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH]
                      }
                  }, [base.OUT_NAME])

        def step(self, X, W, Out):
            Out.out(np.matmul(X, W))

    # Run this many times locally to make sure the fixed random seed does
    # not break CI.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMul.SimpleMul()
        dat = fluid.layers.data(name=self.DATA_NAME,
                                shape=[self.DATA_WIDTH],
                                lod_level=1)
        dat.stop_gradient = False

        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(dat)
            o = fluid.layers.fc(input=d,
                                param_attr=self.PARAM_NAME,
                                bias_attr=False,
                                size=self.HIDDEN_WIDTH,
                                act=None)
            rnn.output(o)

        out = rnn()
        out = fluid.layers.sequence_pool(out, pool_type='last')
        loss = paddle.mean(out)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
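        # Fetch the forward output together with the analytic gradients the
        # framework computed for the parameter and the input.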
        out, w_g, i_g = list(
            map(
                np.array,
                exe.run(feed=py_rnn.to_feed(cpu),
                        fetch_list=[
                            out, self.PARAM_NAME + "@GRAD",
                            self.DATA_NAME + "@GRAD"
                        ],
                        return_numpy=False)))
        out_by_python = py_rnn.exe()[self.OUT_NAME]
        np.testing.assert_allclose(out, out_by_python, rtol=1e-05)
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        np.testing.assert_allclose(w_g_num, w_g, rtol=0.05)
        i_g_num = py_rnn.get_numeric_gradient_of_input(
            input_name=self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)
        np.testing.assert_allclose(i_g_num, i_g, rtol=0.05)


class TestSimpleMulWithMemory(SeedFixedTestCase):
    DATA_WIDTH = 32
    HIDDEN_WIDTH = 20
    DATA_NAME = 'X'
    PARAM_NAME = 'W'

    class SimpleMulWithMemory(BaseRNN):

        def __init__(self):
            super(TestSimpleMulWithMemory.SimpleMulWithMemory, self).__init__(
                {
                    TestSimpleMulWithMemory.DATA_NAME: {
                        'shape': [TestSimpleMulWithMemory.DATA_WIDTH]
                    }
                }, {'Mem': {
                    'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH]
                }}, {
                    TestSimpleMulWithMemory.PARAM_NAME: {
                        'shape': [
                            TestSimpleMulWithMemory.DATA_WIDTH,
                            TestSimpleMulWithMemory.HIDDEN_WIDTH
                        ]
                    }
                }, ['Out'])

        def step(self, X, Mem, W, Out):
            o = np.matmul(X, W)
            assert isinstance(Mem, Memory)
            o += Mem.ex
            Mem.update(o)
            assert isinstance(Out, Output)
            Out.out(o)

    # many_times is used locally for debugging, to make sure the calculation
    # is stable.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory()
        data = fluid.layers.data(name=self.DATA_NAME,
                                 shape=[self.DATA_WIDTH],
                                 lod_level=1)
        data.stop_gradient = False
        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(data)
            mem = rnn.memory(value=0.0, shape=[self.HIDDEN_WIDTH])
            hidden = fluid.layers.fc(input=d,
                                     size=self.HIDDEN_WIDTH,
                                     param_attr=self.PARAM_NAME,
                                     bias_attr=False,
                                     act=None)
            o = fluid.layers.elementwise_add(x=hidden, y=mem)
            rnn.update_memory(mem, o)
            rnn.output(o)

        out = rnn()
        last = fluid.layers.sequence_pool(input=out, pool_type='last')
        loss = paddle.mean(last)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        feed = py_rnn.to_feed(cpu)
        last_np, w_g, i_g = list(
            map(
                np.array,
                exe.run(feed=feed,
                        fetch_list=[
                            last, self.PARAM_NAME + "@GRAD",
                            self.DATA_NAME + "@GRAD"
                        ],
                        return_numpy=False)))
        last_by_py, = list(py_rnn.exe().values())
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        np.testing.assert_allclose(last_np, last_by_py, rtol=1e-05)

        np.testing.assert_allclose(w_g_num, w_g, rtol=0.1)
        i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)

        # This RNN performs many float additions, so the result is not
        # numerically stable; compare with a loose tolerance (rtol = 0.1).
        np.testing.assert_allclose(i_g_num, i_g, rtol=0.1)


if __name__ == '__main__':
    unittest.main()