#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle.fluid as fluid
import paddle
import unittest
import numpy

from paddle.fluid.layers.control_flow import lod_rank_table
from paddle.fluid.layers.control_flow import max_sequence_len
from paddle.fluid.layers.control_flow import lod_tensor_to_array
from paddle.fluid.layers.control_flow import array_to_lod_tensor
from paddle.fluid.layers.control_flow import shrink_memory
from fake_reader import fake_imdb_reader


class TestDynamicRNN(unittest.TestCase):
    def setUp(self):
        self.word_dict_len = 5147
        self.BATCH_SIZE = 2
        reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
        self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)

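    # Shared training helper: runs `main_program` for `max_iters` iterations
    # on the same mini-batch and checks that the loss is a scalar and not
    # NaN. When the RNN's input and output are also fetched, it verifies
    # that the output keeps the input's LoD.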
    def _train(self,
               main_program,
               startup_program,
               feed_list,
               fetch_list,
               is_nested=False,
               max_iters=1):
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        data = next(self.train_data())

        for iter_id in range(max_iters):
            fetch_outs = exe.run(main_program,
                                 feed=feeder.feed(data),
                                 fetch_list=fetch_list,
                                 return_numpy=False)
            if len(fetch_list) == 3:
                rnn_in_seq = fetch_outs[0]
                rnn_out_seq = fetch_outs[1]
                if not is_nested:
                    # Check the LoD set at runtime: when lod_level is 1,
                    # the LoD of the RNN's output should match its input's.
                    self.assertEqual(rnn_in_seq.lod(), rnn_out_seq.lod())

                loss_i = numpy.array(fetch_outs[2])
            elif len(fetch_list) == 1:
                loss_i = numpy.array(fetch_outs[0])

            self.assertEqual((1, ), loss_i.shape)
            self.assertFalse(numpy.isnan(loss_i))
            if iter_id == 0:
                loss_0 = loss_i

        if max_iters > 10:
            # After more than 10 mini-batches, the loss should have dropped
            # below its initial value.
            self.assertLess(loss_i[0], loss_0[0])

    def test_plain_while_op(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(
                name='word', shape=[1], dtype='int64', lod_level=1)
            sent_emb = fluid.layers.embedding(
                input=sentence, size=[self.word_dict_len, 32], dtype='float32')

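            # Build a rank table that sorts the sequences by length
            # (longest first), then unpack the LoDTensor into a TensorArray
            # with one batch of time steps per element.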
            rank_table = lod_rank_table(x=sent_emb)
            sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)

            seq_len = max_sequence_len(rank_table=rank_table)
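            # Step counter plus a zero-initialized hidden state sized like
            # the batch of the first time step.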
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i.stop_gradient = False

            boot_mem = fluid.layers.fill_constant_batch_size_like(
                input=fluid.layers.array_read(
                    array=sent_emb_array, i=i),
                value=0,
                shape=[-1, 100],
                dtype='float32')
            boot_mem.stop_gradient = False
            mem_array = fluid.layers.array_write(x=boot_mem, i=i)

            cond = fluid.layers.less_than(x=i, y=seq_len)
            cond.stop_gradient = False
            while_op = fluid.layers.While(cond=cond)
            out = fluid.layers.create_array(dtype='float32')

            with while_op.block():
                mem = fluid.layers.array_read(array=mem_array, i=i)
                ipt = fluid.layers.array_read(array=sent_emb_array, i=i)

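                # Shrink the memory so its batch matches the sequences that
                # are still active at step i.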
                mem = shrink_memory(x=mem, i=i, table=rank_table)

                hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh')

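                # Save the hidden state as output, advance the counter,
                # write the next memory slot, and refresh the loop condition
                # in place.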
                fluid.layers.array_write(x=hidden, i=i, array=out)
                fluid.layers.increment(x=i, in_place=True)
                fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                fluid.layers.less_than(x=i, y=seq_len, cond=cond)

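            # Pack the per-step outputs back into a LoDTensor and take each
            # sequence's last step for classification.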
            result_all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
            last = fluid.layers.sequence_last_step(input=result_all_timesteps)

            logits = fluid.layers.fc(input=last, size=1, act=None)
            label = fluid.layers.data(name='label', shape=[1], dtype='float32')
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(
                x=logits, label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-4)
            sgd.minimize(loss=loss)

        # Check the lod_level set at compile time.
        self.assertEqual(sent_emb.lod_level, result_all_timesteps.lod_level)

        self._train(
            main_program=main_program,
            startup_program=startup_program,
            feed_list=[sentence, label],
            fetch_list=[sent_emb, result_all_timesteps, loss],
            is_nested=False,
            max_iters=1)

    def test_train_dynamic_rnn(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(
                name='word', shape=[1], dtype='int64', lod_level=1)
            sent_emb = fluid.layers.embedding(
                input=sentence, size=[self.word_dict_len, 32], dtype='float32')

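            # DynamicRNN expresses the same unrolled loop as
            # test_plain_while_op, without the manual rank-table and memory
            # bookkeeping.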
            drnn = fluid.layers.DynamicRNN()
            with drnn.block():
                in_ = drnn.step_input(sent_emb)
                mem = drnn.memory(shape=[100], dtype='float32')
                out_ = fluid.layers.fc(input=[in_, mem], size=100, act='tanh')
                drnn.update_memory(mem, out_)
                drnn.output(out_)

            drnn_result = drnn()
            last = fluid.layers.sequence_last_step(input=drnn_result)
            logits = fluid.layers.fc(input=last, size=1, act=None)

            label = fluid.layers.data(name='label', shape=[1], dtype='float32')
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(
                x=logits, label=label)
            loss = fluid.layers.mean(loss)
            adam = fluid.optimizer.Adam(1e-3)
            adam.minimize(loss=loss)

        # Check the lod_level set at compile time.
        self.assertEqual(sent_emb.lod_level, drnn_result.lod_level)

        self._train(
            main_program=main_program,
            startup_program=startup_program,
            feed_list=[sentence, label],
            fetch_list=[sent_emb, drnn_result, loss],
            is_nested=False,
            max_iters=100)

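    # An endless reader yielding one fixed nested sample: two word-id
    # sequences of length two and one label per sequence.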
    def _fake_reader(self):
        seq_len, label = [[2, 2]], [0, 1]
        data = []
        for ele in seq_len:
            for j in ele:
                data.append([numpy.random.randint(30) for _ in range(j)])

        while True:
            yield data, label

    # This unit test covers a two-layer nested DynamicRNN.
    def test_train_nested_dynamic_rnn(self):
        word_dict = [i for i in range(30)]

        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(
                name='word', shape=[1], dtype='int64', lod_level=2)
            label = fluid.layers.data(
                name='label', shape=[1], dtype='float32', lod_level=1)

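            # Outer RNN: each step consumes a whole sentence, so the step
            # input still carries one LoD level.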
            drnn0 = fluid.layers.DynamicRNN()
            with drnn0.block():
                in_0 = drnn0.step_input(sentence)
                assert in_0.lod_level == 1, "the lod level of in_0 should be 1"
                sentence_emb = fluid.layers.embedding(
                    input=in_0, size=[len(word_dict), 32], dtype='float32')
                out_0 = fluid.layers.fc(input=sentence_emb,
                                        size=100,
                                        act='tanh')

                drnn1 = fluid.layers.DynamicRNN()
                with drnn1.block():
                    in_1 = drnn1.step_input(out_0)
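                    # Inner RNN: each step is a single time step of one
                    # sentence, so no LoD level remains.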
                    assert in_1.lod_level == 0, "the lod level of in_1 should be 0"
                    out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')
                    drnn1.output(out_1)

                drnn1_result = drnn1()
                last_1 = fluid.layers.sequence_last_step(input=drnn1_result)
                drnn0.output(last_1)

            last = drnn0()
            logits = fluid.layers.fc(input=last, size=1, act=None)
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(
                x=logits, label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-3)
            sgd.minimize(loss=loss)

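        # Temporarily swap in the tiny fake reader for this test and
        # restore the original reader afterwards.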
        train_data_orig = self.train_data
        self.train_data = paddle.batch(self._fake_reader, batch_size=2)
        self._train(
            main_program=main_program,
            startup_program=startup_program,
            feed_list=[sentence, label],
            fetch_list=[loss],
            is_nested=True,
            max_iters=100)
        self.train_data = train_data_orig

    # This unit test covers a two-layer nested DynamicRNN with a dynamic
    # LSTM inside the outer RNN's step.
    def test_train_nested_dynamic_rnn2(self):
        word_dict = [i for i in range(30)]

        hidden_size = 32
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(
                name='word', shape=[1], dtype='int64', lod_level=2)
            label = fluid.layers.data(
                name='label', shape=[1], dtype='float32', lod_level=1)

            drnn0 = fluid.layers.DynamicRNN()
            with drnn0.block():
                in_0 = drnn0.step_input(sentence)
                sentence_emb = fluid.layers.embedding(
                    input=in_0,
                    size=[len(word_dict), hidden_size],
                    dtype='float32')
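                # Project the embedding to the four LSTM gates and run a
                # dynamic LSTM over the sentence; the inner RNN then steps
                # through the LSTM's hidden states.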
                input_forward_proj = fluid.layers.fc(input=sentence_emb,
                                                     size=hidden_size * 4,
                                                     act=None,
                                                     bias_attr=False)
                forward, _ = fluid.layers.dynamic_lstm(
                    input=input_forward_proj,
                    size=hidden_size * 4,
                    use_peepholes=False)

                drnn1 = fluid.layers.DynamicRNN()
                with drnn1.block():
                    in_1 = drnn1.step_input(forward)
                    out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh')
                    drnn1.output(out_1)

                last = fluid.layers.sequence_last_step(input=drnn1())
                drnn0.output(last)

            last = drnn0()
            logits = fluid.layers.fc(input=last, size=1, act=None)
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(
                x=logits, label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-3)
            sgd.minimize(loss=loss)

        train_data_orig = self.train_data
        self.train_data = paddle.batch(self._fake_reader, batch_size=2)
        self._train(
            main_program=main_program,
            startup_program=startup_program,
            feed_list=[sentence, label],
            fetch_list=[loss],
            is_nested=True,
            max_iters=100)
        self.train_data = train_data_orig


if __name__ == '__main__':
    unittest.main()