test_imperative_ptb_rnn.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding
import paddle.fluid.framework as framework
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
import numpy as np
import six
from utils import DyGraphProgramDescTracerTestHelper


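# A stacked LSTM unrolled over `num_steps` time steps, implemented with basic
# fluid ops (slice, concat, matmul, split) instead of a fused LSTM layer.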
class SimpleLSTMRNN(fluid.Layer):
    def __init__(self,
                 name_scope,
                 hidden_size,
                 num_steps,
                 num_layers=2,
                 init_scale=0.1,
                 dropout=None):
        super(SimpleLSTMRNN, self).__init__(name_scope)
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._input = None
        self._num_steps = num_steps
        self.cell_array = []
        self.hidden_array = []

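    # Lazily create the fused gate weight ([2 * hidden, 4 * hidden]) and bias
    # ([4 * hidden]) for every layer the first time the layer is called.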
    def _build_once(self, input_embedding, init_hidden=None, init_cell=None):
        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.mask_array = []

        for i in range(self._num_layers):
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale)),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale))
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale)),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.Constant(0.0))
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))

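    # Unroll the LSTM: at every time step, feed one slice of the input
    # embedding through all layers and collect the per-step hidden states.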
    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        self.cell_array = []
        self.hidden_array = []

        for i in range(self._num_layers):
            pre_hidden = fluid.layers.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1])
            pre_cell = fluid.layers.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1])
            pre_hidden = fluid.layers.reshape(
                pre_hidden, shape=[-1, self._hidden_size])
            pre_cell = fluid.layers.reshape(
                pre_cell, shape=[-1, self._hidden_size])
            self.hidden_array.append(pre_hidden)
            self.cell_array.append(pre_cell)

        res = []
        for index in range(self._num_steps):
            self._input = fluid.layers.slice(
                input_embedding, axes=[1], starts=[index], ends=[index + 1])
            self._input = fluid.layers.reshape(
                self._input, shape=[-1, self._hidden_size])
            for k in range(self._num_layers):
                pre_hidden = self.hidden_array[k]
                pre_cell = self.cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]

                nn = fluid.layers.concat([self._input, pre_hidden], 1)
                gate_input = fluid.layers.matmul(x=nn, y=weight_1)

                gate_input = fluid.layers.elementwise_add(gate_input, bias)
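                # Standard LSTM cell: split the fused projection into input
                # (i), cell candidate (j), forget (f) and output (o) gates.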
                i, j, f, o = fluid.layers.split(
                    gate_input, num_or_sections=4, dim=-1)
                c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
                    i) * fluid.layers.tanh(j)
                m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
                self.hidden_array[k] = m
                self.cell_array[k] = c
                self._input = m

                if self._dropout is not None and self._dropout > 0.0:
                    self._input = fluid.layers.dropout(
                        self._input,
                        dropout_prob=self._dropout,
                        dropout_implementation='upscale_in_train')
            res.append(
                fluid.layers.reshape(
                    self._input, shape=[1, -1, self._hidden_size]))
        real_res = fluid.layers.concat(res, 0)
        real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
        last_hidden = fluid.layers.concat(self.hidden_array, 1)
        last_hidden = fluid.layers.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size])
        last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = fluid.layers.concat(self.cell_array, 1)
        last_cell = fluid.layers.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size])
        last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell


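# PTB language model: word embedding -> SimpleLSTMRNN -> softmax projection,
# trained with a cross-entropy loss over the vocabulary.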
class PtbModel(fluid.Layer):
    def __init__(self,
                 name_scope,
                 hidden_size,
                 vocab_size,
                 num_layers=2,
                 num_steps=20,
                 init_scale=0.1,
                 dropout=None):
        super(PtbModel, self).__init__(name_scope)
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            self.full_name(),
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout)
        self.embedding = Embedding(
            self.full_name(),
            size=[vocab_size, hidden_size],
            dtype='float32',
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale)))
        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))

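    # Reshape the flat initial states to [num_layers, batch, hidden], run the
    # RNN, project onto the vocabulary, and return the cross-entropy loss
    # (averaged over the batch, summed over the time steps).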
    def forward(self, input, label, init_hidden, init_cell):
        init_h = fluid.layers.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size])

        init_c = fluid.layers.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size])

        x_emb = self.embedding(input)
        x_emb = fluid.layers.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size])
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = fluid.layers.dropout(
                x_emb,
                dropout_prob=self.dropout,
                dropout_implementation='upscale_in_train')
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
                                                               init_c)
        rnn_out = fluid.layers.reshape(
            rnn_out, shape=[-1, self.num_steps, self.hidden_size])
        projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
        projection = fluid.layers.elementwise_add(projection,
                                                  self.softmax_bias)
        projection = fluid.layers.reshape(
            projection, shape=[-1, self.vocab_size])
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False)
        loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
        loss = fluid.layers.reduce_mean(loss, dim=[0])
        loss = fluid.layers.reduce_sum(loss)
        loss.permissions = True

        return loss, last_hidden, last_cell


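# Trains the same PtbModel in dygraph (imperative) mode and in static graph
# mode with identical seeds and data, then checks that the losses, final
# states and parameters of the two runs match exactly.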
class TestDygraphPtbRnn(unittest.TestCase):
    def test_ptb_rnn_cpu_float32(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                "ptb_model",
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale)

            sgd = SGDOptimizer(learning_rate=1e-3)
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            helper = DyGraphProgramDescTracerTestHelper(ptb_model, self)

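            # Train on a fixed synthetic batch; every fifth step also runs the
            # model through the program-desc tracer and compares its outputs
            # with the eager results.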
            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                x_data = x_data.reshape((-1, num_steps, 1))
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32')
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32')
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                if i % 5 == 0:
                    outs, outs_static = helper.run(
                        [x, y, init_hidden, init_cell],
                        feed_names=['x', 'y', 'init_hidden', 'init_cell'],
                        fetch_names=['dy_loss', 'last_hidden', 'last_cell'])
                    helper.assertEachVar(outs, outs_static)
                else:
                    outs = ptb_model(x, y, init_hidden, init_cell)

                dy_loss, last_hidden, last_cell = outs

                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                sgd.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            dy_loss_value = dy_loss.numpy()
            dy_last_cell_value = last_cell.numpy()
            dy_last_hidden_value = last_hidden.numpy()

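        # Build and train the same model as a static graph program with the
        # same seed and data, recording the initial and final parameter values.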
        with new_program_scope():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            ptb_model = PtbModel(
                "ptb_model",
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale)

            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
            sgd = SGDOptimizer(learning_rate=1e-3)
            x = fluid.layers.data(
                name="x", shape=[-1, num_steps, 1], dtype='int64')
            y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int64')
            init_hidden = fluid.layers.data(
                name="init_hidden", shape=[1], dtype='float32')
            init_cell = fluid.layers.data(
                name="init_cell", shape=[1], dtype='float32')

            static_loss, static_last_hidden, static_last_cell = ptb_model(
                x, y, init_hidden, init_cell)
            sgd.minimize(static_loss)
            static_param_updated = dict()
            static_param_init = dict()
            static_param_name_list = list()
            for param in ptb_model.parameters():
                static_param_name_list.append(param.name)

            out = exe.run(framework.default_startup_program(),
                          fetch_list=static_param_name_list)
            for i in range(len(static_param_name_list)):
                static_param_init[static_param_name_list[i]] = out[i]
            static_loss_value = None
            static_last_cell_value = None
            static_last_hidden_value = None
            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                x_data = x_data.reshape((-1, num_steps, 1))
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32')
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32')
                fetch_list = [static_loss, static_last_hidden, static_last_cell]
                fetch_list.extend(static_param_name_list)
                out = exe.run(fluid.default_main_program(),
                              feed={
                                  "x": x_data,
                                  "y": y_data,
                                  "init_hidden": init_hidden_data,
                                  "init_cell": init_cell_data
                              },
                              fetch_list=fetch_list)
                static_loss_value = out[0]
                static_last_hidden_value = out[1]
                static_last_cell_value = out[2]

                if i == batch_num - 1:
                    for k in range(3, len(out)):
                        static_param_updated[static_param_name_list[k -
                                                                    3]] = out[k]

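        # The dygraph and static graph runs must produce identical results.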
        self.assertTrue(np.array_equal(static_loss_value, dy_loss_value))
        self.assertTrue(
            np.array_equal(static_last_cell_value, dy_last_cell_value))
        self.assertTrue(
            np.array_equal(static_last_hidden_value, dy_last_hidden_value))
        for key, value in six.iteritems(static_param_init):
            self.assertTrue(np.array_equal(value, dy_param_init[key]))
        for key, value in six.iteritems(static_param_updated):
            self.assertTrue(np.array_equal(value, dy_param_updated[key]))


if __name__ == '__main__':
    unittest.main()