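"""Gradient checks for fluid.layers.DynamicRNN.

Each test builds the same recurrent computation twice: once as a fluid
program using DynamicRNN, and once as a plain-NumPy reference (a BaseRNN
subclass). The analytic gradients computed by fluid are then compared
against numeric gradients obtained by central finite differences on the
NumPy reference.
"""
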
import numpy
import random
import collections
import paddle.v2.fluid as fluid
import unittest
from decorators import *


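# Double-buffered memory for the reference RNN: `ex` holds the state from
# the previous time step, `cur` holds the state written by the current step,
# and next() rotates `cur` into `ex` at each step boundary.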
class Memory(object):
    def __init__(self, shape, dtype='float32'):
        self.ex = numpy.zeros(shape=shape, dtype=dtype)
        self.cur = None

    def update(self, val):
        assert val.shape == self.ex.shape
        assert val.dtype == self.ex.dtype
        self.cur = val

    def next(self):
        self.ex = self.cur
        self.cur = None

    def __next__(self):
        self.next()

    def reset(self):
        self.ex = numpy.zeros(shape=self.ex.shape, dtype=self.ex.dtype)
        self.cur = None


class Output(object):
    def __init__(self):
        self.outs = []

    def next_sequence(self):
        self.outs.append([])

    def out(self, val):
        self.outs[-1].append(val)

    def last(self):
        return self.outs[-1][-1]


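# Pure-NumPy reference RNN. Subclasses declare inputs, memories, parameters
# and outputs as {name: {'shape': ..., 'dtype': ...}} specs and implement
# step(); exe() unrolls the computation over randomly generated sequences so
# the results can be compared against the fluid DynamicRNN.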
class BaseRNN(object):
    def __init__(self, ins, mems, params, outs, num_seq=5, max_seq_len=15):
        self.num_seq = num_seq
        self.inputs = collections.defaultdict(list)

        for _ in xrange(num_seq):
            seq_len = random.randint(1, max_seq_len - 1)
            for iname in ins:
                ishape = ins[iname].get('shape', None)
                idtype = ins[iname].get('dtype', 'float32')
                lst = []
                for _ in xrange(seq_len):
                    lst.append(numpy.random.random(size=ishape).astype(idtype))
                self.inputs[iname].append(lst)

        self.mems = dict()
        for mname in mems:
            mshape = mems[mname].get('shape', None)
            mdtype = mems[mname].get('dtype', 'float32')
            self.mems[mname] = Memory(shape=mshape, dtype=mdtype)

        self.params = dict()
        for pname in params:
            pshape = params[pname].get('shape', None)
            pdtype = params[pname].get('dtype', 'float32')
            self.params[pname] = numpy.random.random(size=pshape).astype(pdtype)

        self.outputs = dict()

        for oname in outs:
            self.outputs[oname] = Output()

    def step(self, **kwargs):
        raise NotImplementedError()

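    # Run the reference RNN eagerly: for every sequence, reset all memories,
    # call step() once per time step, and collect the last output of each
    # sequence into one numpy array per output name.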
    def exe(self):
        retv = dict()
        for out in self.outputs:
            retv[out] = []

        for seq_id in xrange(self.num_seq):
            for mname in self.mems:
                self.mems[mname].reset()
            for out in self.outputs:
                self.outputs[out].next_sequence()

            iname0 = self.inputs.keys()[0]
            seq_len = len(self.inputs[iname0][seq_id])

            for step_id in xrange(seq_len):
                xargs = dict()

                for iname in self.inputs:
                    xargs[iname] = self.inputs[iname][seq_id][step_id]

                for mname in self.mems:
                    xargs[mname] = self.mems[mname]

                for pname in self.params:
                    xargs[pname] = self.params[pname]

                for out in self.outputs:
                    xargs[out] = self.outputs[out]

                self.step(**xargs)

                for mname in self.mems:
                    next(self.mems[mname])

            for out in self.outputs:
                retv[out].append(self.outputs[out].last())

        for out in retv:
            retv[out] = numpy.array(retv[out])
        return retv

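    # Pack the generated sequences into LoD tensors. The level-0 LoD stores
    # cumulative offsets: two sequences of lengths 3 and 2 yield
    # lod = [0, 3, 5], and the flattened data tensor holds 5 rows.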
    def to_feed(self, place):
        feed_dict = dict()

        for iname in self.inputs:
            lod = [0]
            np_flatten = []
            for seq_id in xrange(len(self.inputs[iname])):
                seq_len = len(self.inputs[iname][seq_id])
                lod.append(lod[-1] + seq_len)
                np_flatten.extend(self.inputs[iname][seq_id])

            t = fluid.Tensor()
            t.set(numpy.array(np_flatten), place)
            t.set_lod([lod])
            feed_dict[iname] = t

        for pname in self.params:
            feed_dict[pname] = self.params[pname]
        return feed_dict

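    # Numeric gradient of a parameter by central finite differences: each
    # element is perturbed by +/-delta, the mean over all outputs is
    # re-evaluated, and
    #   g[i][j] ~= (f(p[i][j] + delta) - f(p[i][j] - delta)) / (2 * delta)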
    def get_numeric_gradient_of_param(self, param_name, delta=0.001):
        p = self.params[param_name]
        if len(p.shape) != 2:
            raise ValueError("Getting the numeric gradient is only supported"
                             " for 2-D (matrix) parameters")
        g = numpy.zeros(shape=p.shape, dtype=p.dtype)

        for i in xrange(p.shape[0]):
            for j in xrange(p.shape[1]):
                o = p[i][j]
                p[i][j] += delta
                pos = self._exe_mean_out_()
                p[i][j] -= 2 * delta
                neg = self._exe_mean_out_()
                p[i][j] = o
                g[i][j] = (pos - neg) / (delta * 2)
        return g

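    # Same central-difference scheme, applied element-wise to every time step
    # of every input sequence. With return_one_tensor=True, the per-sequence
    # gradients are concatenated to match the flattened layout of the
    # corresponding LoD tensor gradient.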
    def get_numeric_gradient_of_input(self,
                                      input_name,
                                      delta=0.001,
                                      return_one_tensor=True):
        ipt = self.inputs[input_name]
        grad = []

        for seq in ipt:
            seq_grad = []
            for item in seq:
                item_grad = numpy.zeros(shape=item.shape, dtype=item.dtype)
                if len(item.shape) != 1:
                    raise ValueError("Not support")

                for i in xrange(len(item)):
                    o = item[i]
                    item[i] += delta
                    pos = self._exe_mean_out_()
                    item[i] -= 2 * delta
                    neg = self._exe_mean_out_()
                    item[i] = o
                    item_grad[i] = (pos - neg) / (delta * 2)
                seq_grad.append(item_grad)
            grad.append(seq_grad)

        if not return_one_tensor:
            return grad

        for i in xrange(len(grad)):
            grad[i] = numpy.concatenate(grad[i])
        grad = numpy.concatenate(grad)
        return grad

    def _exe_mean_out_(self):
        outs = self.exe()
        return numpy.array([o.mean() for o in outs.itervalues()]).mean()


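# Base TestCase that pins numpy's and Python's RNG state for the duration of
# a test class and restores it afterwards, so the data generated by BaseRNN
# is deterministic across runs.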
class SeedFixedTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Fix random seeds to remove randomness from tests"""
        cls._np_rand_state = numpy.random.get_state()
        cls._py_rand_state = random.getstate()

        numpy.random.seed(123)
        random.seed(124)

    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        numpy.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)


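# Checks a memory-less RNN that computes Out = matmul(X, W) at each step:
# the forward output, dW and dX from fluid are compared against the
# SimpleMul reference and its numeric gradients.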
class TestSimpleMul(SeedFixedTestCase):
    DATA_NAME = 'X'
    DATA_WIDTH = 32
    PARAM_NAME = 'W'
    HIDDEN_WIDTH = 10
    OUT_NAME = 'Out'

    class SimpleMul(BaseRNN):
        def __init__(self):
            base = TestSimpleMul
            super(base.SimpleMul, self).__init__({
                base.DATA_NAME: {
                    'shape': [base.DATA_WIDTH]
                }
            }, {}, {
                base.PARAM_NAME: {
                    'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH]
                }
            }, [base.OUT_NAME])

        def step(self, X, W, Out):
            Out.out(numpy.matmul(X, W))

    # Tested many times locally to make sure the fixed random seed does not
    # break CI.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMul.SimpleMul()
        dat = fluid.layers.data(
            name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1)
        dat.stop_gradient = False

        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(dat)
            o = fluid.layers.fc(input=d,
                                param_attr=self.PARAM_NAME,
                                bias_attr=False,
                                size=self.HIDDEN_WIDTH,
                                act=None)
            rnn.output(o)

        out = rnn()
        out = fluid.layers.sequence_pool(out, pool_type='last')
        loss = fluid.layers.mean(x=out)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        out, w_g, i_g = map(numpy.array,
                            exe.run(feed=py_rnn.to_feed(cpu),
                                    fetch_list=[
                                        out, self.PARAM_NAME + "@GRAD",
                                        self.DATA_NAME + "@GRAD"
                                    ],
                                    return_numpy=False))
        out_by_python = py_rnn.exe()[self.OUT_NAME]
        self.assertTrue(numpy.allclose(out, out_by_python))
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05))
        i_g_num = py_rnn.get_numeric_gradient_of_input(
            input_name=self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)
        self.assertTrue(numpy.allclose(i_g_num, i_g, rtol=0.05))


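# Same check as TestSimpleMul, but with a recurrent memory:
#   Mem_t = matmul(X_t, W) + Mem_{t-1};  Out_t = Mem_t
# The accumulated float additions make the gradients noisier, hence the
# looser tolerances below.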
class TestSimpleMulWithMemory(SeedFixedTestCase):
    DATA_WIDTH = 32
    HIDDEN_WIDTH = 20
    DATA_NAME = 'X'
    PARAM_NAME = 'W'

    class SimpleMulWithMemory(BaseRNN):
        def __init__(self):
            super(TestSimpleMulWithMemory.SimpleMulWithMemory, self).__init__({
                TestSimpleMulWithMemory.DATA_NAME: {
                    'shape': [TestSimpleMulWithMemory.DATA_WIDTH]
                }
            }, {'Mem': {
                'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH]
            }}, {
                TestSimpleMulWithMemory.PARAM_NAME: {
                    'shape': [
                        TestSimpleMulWithMemory.DATA_WIDTH,
                        TestSimpleMulWithMemory.HIDDEN_WIDTH
                    ]
                }
            }, ['Out'])

        def step(self, X, Mem, W, Out):
            o = numpy.matmul(X, W)
            assert isinstance(Mem, Memory)
            o += Mem.ex
            Mem.update(o)
            assert isinstance(Out, Output)
            Out.out(o)

    # many_times is used locally for debugging, to make sure the calculation
    # is stable.
    # @many_times(10)
    @prog_scope()
    def test_forward_backward(self):
        py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory()
        data = fluid.layers.data(
            name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1)
        data.stop_gradient = False
        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            d = rnn.step_input(data)
            mem = rnn.memory(value=0.0, shape=[self.HIDDEN_WIDTH])
            hidden = fluid.layers.fc(input=d,
                                     size=self.HIDDEN_WIDTH,
                                     param_attr=self.PARAM_NAME,
                                     bias_attr=False,
                                     act=None)
            o = fluid.layers.elementwise_add(x=hidden, y=mem)
            rnn.update_memory(mem, o)
            rnn.output(o)

        out = rnn()
        last = fluid.layers.sequence_pool(input=out, pool_type='last')
        loss = fluid.layers.mean(x=last)
        fluid.backward.append_backward(loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        feed = py_rnn.to_feed(cpu)
        last_np, w_g, i_g = map(numpy.array,
                                exe.run(feed=feed,
                                        fetch_list=[
                                            last, self.PARAM_NAME + "@GRAD",
                                            self.DATA_NAME + "@GRAD"
                                        ],
                                        return_numpy=False))
        last_by_py, = py_rnn.exe().values()
        w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
        self.assertTrue(numpy.allclose(last_np, last_by_py))

        self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.1))
        i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME)
        i_g_num = i_g_num.reshape(i_g.shape)

        # Since this RNN performs many float additions, the result can be
        # numerically unstable, so a looser tolerance (rtol=0.1) is used.
        self.assertTrue(numpy.allclose(i_g_num, i_g, rtol=0.1))


if __name__ == '__main__':
    unittest.main()