#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
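
# Dygraph-to-static unit test for a PTB-style LSTM language model: the same
# model is trained once in imperative (dygraph) mode and once through the
# ProgramTranslator/@to_static path, and the resulting losses and LSTM end
# states are checked for numerical agreement.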

import logging
import time
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.optimizer import SGDOptimizer
from paddle.jit import ProgramTranslator
from paddle.jit.api import to_static

PRINT_STEP = 20
SEED = 2020

program_translator = ProgramTranslator()


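# Stacked LSTM written with explicit Python loops over time steps and layers
# and manual gate arithmetic, so @to_static must translate plain control flow.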
class SimpleLSTMRNN(fluid.Layer):
    def __init__(
        self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None
    ):
        super().__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._num_steps = num_steps
        self.cell_array = []
        self.hidden_array = []

        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.mask_array = []

        for i in range(self._num_layers):
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale
                ),
            )
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.Constant(0.0),
            )
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))

    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        cell_array = []
        hidden_array = []

        for i in range(self._num_layers):
            hidden_array.append(init_hidden[i])
            cell_array.append(init_cell[i])

        res = []
        for index in range(self._num_steps):
            step_input = input_embedding[:, index, :]
            for k in range(self._num_layers):
                pre_hidden = hidden_array[k]
                pre_cell = cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]

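                # Concatenate the step input with the previous hidden state,
                # then compute all four LSTM gates (i, j, f, o) in one fused
                # matmul followed by a split.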
                nn = fluid.layers.concat([step_input, pre_hidden], 1)
                gate_input = paddle.matmul(x=nn, y=weight_1)

                gate_input = paddle.add(gate_input, bias)
                i, j, f, o = paddle.split(
                    gate_input, num_or_sections=4, axis=-1
                )
                c = pre_cell * paddle.nn.functional.sigmoid(
                    f
                ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
                m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
                hidden_array[k] = m
                cell_array[k] = c
                step_input = m

                if self._dropout is not None and self._dropout > 0.0:
                    step_input = paddle.nn.functional.dropout(
                        step_input,
                        p=self._dropout,
                        mode='upscale_in_train',
                    )
            res.append(step_input)
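        # Stack the per-step outputs into [batch, num_steps, hidden] and
        # expose the final states as [num_layers, batch, hidden].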
        real_res = fluid.layers.concat(res, 1)
        real_res = paddle.reshape(
            real_res, [-1, self._num_steps, self._hidden_size]
        )
        last_hidden = fluid.layers.concat(hidden_array, 1)
        last_hidden = paddle.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_hidden = paddle.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = fluid.layers.concat(cell_array, 1)
        last_cell = paddle.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_cell = paddle.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell


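# Word-level language model: embedding lookup -> stacked LSTM -> softmax
# projection, trained with softmax-with-cross-entropy loss over the vocabulary.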
class PtbModel(fluid.Layer):
    def __init__(
        self,
        hidden_size,
        vocab_size,
        num_layers=2,
        num_steps=20,
        init_scale=0.1,
        dropout=None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout,
        )
        self.embedding = paddle.nn.Embedding(
            vocab_size,
            hidden_size,
            sparse=False,
            weight_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale
                ),
            ),
        )
        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale
            ),
        )
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale
            ),
        )

    def build_once(self, input, label, init_hidden, init_cell):
        pass

    @to_static
    def forward(self, input, label, init_hidden, init_cell):

        init_h = paddle.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size]
        )

        init_c = paddle.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size]
        )

        x_emb = self.embedding(input)

        x_emb = paddle.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size]
        )
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = paddle.nn.functional.dropout(
                x_emb,
                p=self.dropout,
                mode='upscale_in_train',
            )
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
            x_emb, init_h, init_c
        )

        projection = paddle.matmul(rnn_out, self.softmax_weight)
        projection = paddle.add(projection, self.softmax_bias)

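        # Per-token cross entropy, averaged over the batch dimension and then
        # summed over the time steps to give the sequence loss.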
        loss = paddle.nn.functional.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False
        )
        loss = paddle.reshape(loss, shape=[-1, self.num_steps])
        loss = paddle.mean(loss, axis=[0])
        loss = paddle.sum(loss)

        return loss, last_hidden, last_cell

    def debug_emb(self):
        # Debug helper (not called by the test): dump the gradient of the
        # embedding weight to disk. The embedding parameter is used directly
        # here, since forward() does not cache its output on `self`.
        np.save("emb_grad", self.embedding.weight.gradient())


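# Trains the model for `max_epoch` epochs over `batch_num` steps on a fixed
# synthetic batch and returns the final loss and LSTM end states, so the
# dygraph and static-graph runs can be compared.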
def train(place):

    num_layers = 1
    batch_size = 4
    hidden_size = 10
    num_steps = 3
    init_scale = 0.1
    max_epoch = 1
    dropout = 0.0
    vocab_size = 1000
    batch_num = 200

    with fluid.dygraph.guard(place):
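        # Fix all RNG seeds so the dygraph and static-graph runs produce
        # identical parameter initializations and are numerically comparable.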
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale,
            dropout=dropout,
        )

        sgd = SGDOptimizer(
            learning_rate=1e-3, parameter_list=ptb_model.parameters()
        )

        for epoch_id in range(max_epoch):

            total_loss = 0.0
            iters = 0.0
            total_sample = 0

            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32'
            )
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32'
            )

            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            for step_id in range(batch_num):
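                # Fixed synthetic batch: inputs are the tokens 0..11 and the
                # labels are the inputs shifted by one (next-word prediction).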
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))

                x_data = x_data.reshape((-1, num_steps, 1))
                y_data = y_data.reshape((-1, num_steps, 1))

                x = to_variable(x_data)
                y = to_variable(y_data)

                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                out_loss = dy_loss.numpy()

                dy_loss.backward()
                sgd.minimize(dy_loss)
                ptb_model.clear_gradients()

                total_loss += out_loss
                iters += num_steps
                total_sample += 1
                if step_id % PRINT_STEP == 0:
                    if step_id == 0:
                        logging.info(
                            "epoch %d | step %d, loss %0.3f"
                            % (epoch_id, step_id, total_loss / total_sample)
                        )
                        avg_batch_time = time.time()
                    else:
                        speed = PRINT_STEP / (time.time() - avg_batch_time)
                        logging.info(
                            "epoch %d | step %d, loss %0.3f, speed %.3f steps/s"
                            % (
                                epoch_id,
                                step_id,
                                total_loss / total_sample,
                                speed,
                            )
                        )
                        avg_batch_time = time.time()

        return out_loss, last_hidden.numpy(), last_cell.numpy()


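# Run the training loop in pure imperative (dygraph) mode.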
def train_dygraph(place):
    program_translator.enable(False)
    return train(place)


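# Run the training loop with @to_static translation enabled.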
def train_static(place):
    program_translator.enable(True)
    return train(place)


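# The test passes when both execution modes agree within rtol=1e-5.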
class TestPtb(unittest.TestCase):
    def setUp(self):
        self.place = (
            fluid.CUDAPlace(0)
            if fluid.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )

    def test_check_result(self):
        loss_1, hidden_1, cell_1 = train_static(self.place)
        loss_2, hidden_2, cell_2 = train_dygraph(self.place)

        np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05)
        np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05)
        np.testing.assert_allclose(cell_1, cell_2, rtol=1e-05)


if __name__ == '__main__':
    unittest.main()