#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
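"""Tests for saving and loading model and optimizer state in dygraph
(imperative) mode.

A small PTB-style LSTM language model is trained for a few batches; its
parameter and Adam optimizer state_dicts are saved with ``fluid.save_dygraph``
and then restored via ``set_state_dict`` from live tensors, from numpy arrays,
and from the saved files, checking the results against recorded baselines.
"""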

import os
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.optimizer import Adam


class SimpleLSTMRNN(fluid.Layer):
    def __init__(
        self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None
    ):
        super().__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._input = None
        self._num_steps = num_steps
        self.cell_array = []
        self.hidden_array = []
        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.mask_array = []

        for i in range(self._num_layers):
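            # Fused per-layer weight: the concatenated [input, hidden] vector
            # (2 * hidden wide) maps onto all four LSTM gates (4 * hidden).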
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale
                ),
            )
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.Constant(0.0),
            )
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))

    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        self.cell_array = []
        self.hidden_array = []

        for i in range(self._num_layers):
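            # Slice this layer's initial hidden/cell state out of the stacked
            # [num_layers, batch, hidden] inputs and flatten to 2-D.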
            pre_hidden = paddle.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_cell = paddle.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_hidden = paddle.reshape(
                pre_hidden, shape=[-1, self._hidden_size]
            )
            pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
            self.hidden_array.append(pre_hidden)
            self.cell_array.append(pre_cell)

        res = []
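        # Unroll the LSTM over num_steps, consuming one time slice of the
        # embedded input per step.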
        for index in range(self._num_steps):
            self._input = paddle.slice(
                input_embedding, axes=[1], starts=[index], ends=[index + 1]
            )
            self._input = paddle.reshape(
                self._input, shape=[-1, self._hidden_size]
            )
            for k in range(self._num_layers):
                pre_hidden = self.hidden_array[k]
                pre_cell = self.cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]

                nn = fluid.layers.concat([self._input, pre_hidden], 1)
                gate_input = fluid.layers.matmul(x=nn, y=weight_1)

                gate_input = fluid.layers.elementwise_add(gate_input, bias)
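                # The fused pre-activations are split into the input (i),
                # cell candidate (j), forget (f) and output (o) gates.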
                i, j, f, o = fluid.layers.split(
                    gate_input, num_or_sections=4, dim=-1
                )
                c = pre_cell * paddle.nn.functional.sigmoid(
                    f
                ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
                m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
                self.hidden_array[k] = m
                self.cell_array[k] = c
                self._input = m

                if self._dropout is not None and self._dropout > 0.0:
                    self._input = fluid.layers.dropout(
                        self._input,
                        dropout_prob=self._dropout,
                        dropout_implementation='upscale_in_train',
                    )
            res.append(
                paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
            )
        real_res = fluid.layers.concat(res, 0)
        real_res = paddle.transpose(x=real_res, perm=[1, 0, 2])
        last_hidden = fluid.layers.concat(self.hidden_array, 1)
        last_hidden = paddle.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_hidden = paddle.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = fluid.layers.concat(self.cell_array, 1)
        last_cell = paddle.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_cell = paddle.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell


class PtbModel(fluid.Layer):
    def __init__(
        self,
        hidden_size,
        vocab_size,
        num_layers=2,
        num_steps=20,
        init_scale=0.1,
        dropout=None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout,
        )
        self.embedding = Embedding(
            size=[vocab_size, hidden_size],
            dtype='float32',
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale
                ),
            ),
        )

        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale
            ),
        )
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale
            ),
        )

    def forward(self, input, label, init_hidden, init_cell):
        init_h = paddle.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size]
        )

        init_c = paddle.reshape(
204 205
            init_cell, shape=[self.num_layers, -1, self.hidden_size]
        )

        x_emb = self.embedding(input)
        x_emb = paddle.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size]
        )
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = fluid.layers.dropout(
                x_emb,
                dropout_prob=self.dropout,
                dropout_implementation='upscale_in_train',
            )
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
            x_emb, init_h, init_c
        )
        rnn_out = paddle.reshape(
            rnn_out, shape=[-1, self.num_steps, self.hidden_size]
        )
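
        # Project the RNN output onto the vocabulary and compute the
        # per-token softmax cross-entropy loss.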
        projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
        projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False
        )
        loss = paddle.reshape(loss, shape=[-1, self.num_steps])
        loss = fluid.layers.reduce_mean(loss, dim=[0])
        loss = paddle.sum(loss)

        return loss, last_hidden, last_cell


class TestDygraphPtbRnn(unittest.TestCase):
    def func_setUp(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # snapshot the optimizer state as the baseline for later checks
            self.opti_dict = adam.state_dict()
            self.base_opti = {}
            for k, v in self.opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    self.base_opti[v.name] = v.numpy()
                    self.assertTrue(np.sum(np.abs(v.numpy())) != 0)
                else:
                    self.base_opti[k] = v
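
            # save_dygraph writes "./test_dy.pdopt" for an optimizer
            # state_dict and "./test_dy.pdparams" for a parameter state_dict.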
            fluid.save_dygraph(self.opti_dict, "./test_dy")

            self.state_dict = ptb_model.state_dict()

            self.model_base = {}
            for k, v in self.state_dict.items():
                np_t = v.numpy()
                self.model_base[k] = np_t

            fluid.save_dygraph(self.state_dict, "./test_dy")

    def func_testLoadAndSetVarBase(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_t = v.numpy()
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)

                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

            if isinstance(adam._learning_rate, LearningRateDecay):
                adam._learning_rate.step_num = 0

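            # load_dygraph returns a (parameter state_dict, optimizer
            # state_dict) pair; the file suffix may be omitted from the path.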
            para_state_dict, opti_state_dict = fluid.load_dygraph("./test_dy")
            print(opti_state_dict.keys())
            adam.set_state_dict(opti_state_dict)

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            for k, v in state_dict.items():
                np_t = v.numpy()
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_state_dict(para_state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetVariable(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

483 484 485 486 487 488 489 490 491 492 493
            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_t = v.numpy()
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)

                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

            if isinstance(adam._learning_rate, LearningRateDecay):
                adam._learning_rate.step_num = 0

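            # Restore directly from the in-memory state_dict captured in
            # func_setUp, with no file round trip.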
            adam.set_state_dict(self.opti_dict)
            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            for k, v in state_dict.items():
                np_t = v.numpy()
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_state_dict(self.state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetNumpy(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            np_opti_dict = {}
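            # set_state_dict should accept plain numpy values as well as
            # tensors, so the snapshot below stores ndarrays.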
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_t = v.numpy()
                    np_opti_dict[v.name] = np_t
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)
                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)
                else:
                    np_opti_dict[k] = v

            if isinstance(adam._learning_rate, LearningRateDecay):
                adam._learning_rate.step_num = 0

            adam.set_state_dict(np_opti_dict)

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            np_state_dict = {}
            for k, v in state_dict.items():
                np_t = v.numpy()
                np_state_dict[k] = np_t
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_state_dict(np_state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetVariableBeforeTrain(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=0.0,
                beta1=0.8,
                beta2=0.6,
                parameter_list=ptb_model.parameters(),
            )
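
            # learning_rate is 0.0, so the single training step below must
            # leave the parameters unchanged while the Adam accumulators
            # still advance.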
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            adam.set_state_dict(self.opti_dict)
            ptb_model.set_state_dict(self.state_dict)

            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
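            # After exactly one optimizer step, global_step advances by one
            # and each beta pow accumulator is scaled by its beta once.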
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testLoadAndSetVarBaseBeforeTrain(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [0.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                # set lr to zero so the parameters are not updated
                new_lr = 0.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=0.0,
                beta1=0.8,
                beta2=0.6,
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            state_dict, opti_dict = fluid.load_dygraph("./test_dy")
            adam.set_state_dict(opti_dict)
            ptb_model.set_state_dict(state_dict)

            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            # check parameter

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetNumpyBeforeTrain(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to

            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [0.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                # set lr to 0.0 so the parameters are not updated
                new_lr = 0.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                beta1=0.8,
                beta2=0.6,
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            np_opti_dict = {}
            np_state_dict = {}

            for k, v in self.opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_opti_dict[v.name] = v.numpy()
                else:
                    np_opti_dict[k] = v

            for k, v in self.state_dict.items():
                np_state_dict[k] = v.numpy()

            adam.set_state_dict(np_opti_dict)
            ptb_model.set_state_dict(np_state_dict)
            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            # check parameter

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testOnlyLoadParams(self):
        with fluid.dygraph.guard():
            emb = fluid.dygraph.Embedding([10, 10])
            state_dict = emb.state_dict()
            fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))
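
            # Only a parameter state_dict was saved, so the optimizer slot of
            # the loaded pair must be None.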
            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy')
            )

            self.assertIsNone(opti_state_dict)

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy.pdparams')
            )

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy.pdopt')
            )

    def func_test_load_compatible_with_keep_name_table(self):
        with fluid.dygraph.guard():
            emb = fluid.dygraph.Embedding([10, 10])
            state_dict = emb.state_dict()
            fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))
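
            # keep_name_table=True retains the structured-name table in the
            # loaded parameter dict; loading should still succeed with it.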
            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy'), keep_name_table=True
            )
            self.assertIsNotNone(para_state_dict)
            self.assertIsNone(opti_state_dict)
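
    # Each case is run twice: once in legacy dygraph mode and once under the
    # eager-mode guard.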
    def test_main(self):
        self.func_setUp()
        self.func_testLoadAndSetVarBase()
        self.func_testSetVariable()
        self.func_testSetNumpy()
        self.func_testSetVariableBeforeTrain()
        self.func_testLoadAndSetVarBaseBeforeTrain()
        self.func_testSetNumpyBeforeTrain()
        self.func_testOnlyLoadParams()
        self.func_test_load_compatible_with_keep_name_table()
        with _test_eager_guard():
            self.func_setUp()
            self.func_testLoadAndSetVarBase()
            self.func_testSetVariable()
            self.func_testSetNumpy()
            self.func_testSetVariableBeforeTrain()
            self.func_testLoadAndSetVarBaseBeforeTrain()
            self.func_testSetNumpyBeforeTrain()
            self.func_testOnlyLoadParams()
            self.func_test_load_compatible_with_keep_name_table()


if __name__ == '__main__':
    unittest.main()