#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

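# Test suite for saving and loading dygraph (imperative-mode) model and
# optimizer state via fluid.save_dygraph / fluid.load_dygraph.
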
import os
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
from paddle.fluid.optimizer import Adam
from paddle.nn import Embedding


class SimpleLSTMRNN(fluid.Layer):
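    """A stacked LSTM implemented from raw matmul/add/split ops in dygraph mode."""
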
    def __init__(
        self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None
    ):
        super().__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._input = None
        self._num_steps = num_steps
        self.cell_array = []
        self.hidden_array = []
        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.mask_array = []

        for i in range(self._num_layers):
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale
                ),
            )
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.Constant(0.0),
            )
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))

    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        self.cell_array = []
        self.hidden_array = []

        for i in range(self._num_layers):
            pre_hidden = paddle.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_cell = paddle.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_hidden = paddle.reshape(
                pre_hidden, shape=[-1, self._hidden_size]
            )
            pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
            self.hidden_array.append(pre_hidden)
            self.cell_array.append(pre_cell)

        res = []
        for index in range(self._num_steps):
            self._input = paddle.slice(
                input_embedding, axes=[1], starts=[index], ends=[index + 1]
            )
            self._input = paddle.reshape(
                self._input, shape=[-1, self._hidden_size]
            )
            for k in range(self._num_layers):
                pre_hidden = self.hidden_array[k]
                pre_cell = self.cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]

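                # One LSTM step: project concat([x_t, h_{t-1}]) to
                # 4 * hidden_size gate pre-activations, then split into
                # input (i), candidate (j), forget (f) and output (o).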
                nn = fluid.layers.concat([self._input, pre_hidden], 1)
                gate_input = paddle.matmul(x=nn, y=weight_1)

                gate_input = paddle.add(gate_input, bias)
                i, j, f, o = paddle.split(
                    gate_input, num_or_sections=4, axis=-1
                )
                c = pre_cell * paddle.nn.functional.sigmoid(
                    f
                ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
                m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
                self.hidden_array[k] = m
                self.cell_array[k] = c
                self._input = m

                if self._dropout is not None and self._dropout > 0.0:
                    self._input = paddle.nn.functional.dropout(
                        self._input,
                        p=self._dropout,
                        mode='upscale_in_train',
                    )
            res.append(
                paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
            )
        real_res = fluid.layers.concat(res, 0)
        real_res = paddle.transpose(x=real_res, perm=[1, 0, 2])
        last_hidden = fluid.layers.concat(self.hidden_array, 1)
        last_hidden = paddle.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_hidden = paddle.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = fluid.layers.concat(self.cell_array, 1)
        last_cell = paddle.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_cell = paddle.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell


class PtbModel(fluid.Layer):
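    """PTB language model: embedding -> SimpleLSTMRNN -> softmax projection."""
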
    def __init__(
        self,
        hidden_size,
        vocab_size,
        num_layers=2,
        num_steps=20,
        init_scale=0.1,
        dropout=None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout,
        )
        self.embedding = Embedding(
            vocab_size,
            hidden_size,
            sparse=False,
            weight_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale
                ),
            ),
        )

        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale
            ),
        )
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
193 194 195
                low=-self.init_scale, high=self.init_scale
            ),
        )

    def forward(self, input, label, init_hidden, init_cell):
        init_h = paddle.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size]
        )

        init_c = paddle.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size]
        )

        x_emb = self.embedding(input)
        x_emb = paddle.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size]
        )
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = paddle.nn.functional.dropout(
                x_emb,
                p=self.dropout,
                mode='upscale_in_train',
            )
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
            x_emb, init_h, init_c
        )
        rnn_out = paddle.reshape(
            rnn_out, shape=[-1, self.num_steps, self.hidden_size]
        )

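        # Project the LSTM output onto the vocabulary, then compute the
        # softmax cross-entropy loss, averaged over the batch and summed
        # over the time steps.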
        projection = paddle.matmul(rnn_out, self.softmax_weight)
        projection = paddle.add(projection, self.softmax_bias)
        projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
        loss = paddle.nn.functional.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False
        )
        loss = paddle.reshape(loss, shape=[-1, self.num_steps])
        loss = paddle.mean(loss, axis=[0])
        loss = paddle.sum(loss)

        return loss, last_hidden, last_cell


class TestDygraphPtbRnn(unittest.TestCase):
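    """Round-trip save/load tests for dygraph parameters and optimizer state."""
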
    def func_setUp(self):
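        # Train a PTB model for batch_num steps, then snapshot the optimizer
        # and parameter state dicts in memory and save them to "./test_dy".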
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            self.opti_dict = adam.state_dict()
            self.base_opti = {}
            for k, v in self.opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    self.base_opti[v.name] = v.numpy()
                    self.assertTrue(np.sum(np.abs(v.numpy())) != 0)
                else:
                    self.base_opti[k] = v

            fluid.save_dygraph(self.opti_dict, "./test_dy")

            self.state_dict = ptb_model.state_dict()

            self.model_base = {}
            for k, v in self.state_dict.items():
                np_t = v.numpy()
                self.model_base[k] = np_t

            fluid.save_dygraph(self.state_dict, "./test_dy")

    def func_testLoadAndSetVarBase(self):
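        # Train an identical model, zero out its optimizer and parameter
        # tensors, then restore both from the files saved in func_setUp.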
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_t = v.numpy()
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)

                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

            if isinstance(adam._learning_rate, LearningRateDecay):
                adam._learning_rate.step_num = 0

            para_state_dict, opti_state_dict = fluid.load_dygraph("./test_dy")
            print(opti_state_dict.keys())
            adam.set_state_dict(opti_state_dict)

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            for k, v in state_dict.items():
                np_t = v.numpy()
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_state_dict(para_state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetVariable(self):
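        # Same round trip, but restore from the in-memory state dicts
        # captured in func_setUp instead of the saved files.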
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_t = v.numpy()
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)

                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

            if isinstance(adam._learning_rate, LearningRateDecay):
                adam._learning_rate.step_num = 0

            adam.set_state_dict(self.opti_dict)
            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            for k, v in state_dict.items():
                np_t = v.numpy()
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_state_dict(self.state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetNumpy(self):
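        # Same round trip, with the state dicts converted to plain numpy
        # arrays before being passed to set_state_dict.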
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            np_opti_dict = {}
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_t = v.numpy()
                    np_opti_dict[v.name] = np_t
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)
                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)
                else:
                    np_opti_dict[k] = v

            if isinstance(adam._learning_rate, LearningRateDecay):
                adam._learning_rate.step_num = 0

            adam.set_state_dict(np_opti_dict)

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            np_state_dict = {}
            for k, v in state_dict.items():
                np_t = v.numpy()
                np_state_dict[k] = np_t
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_state_dict(np_state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetVariableBeforeTrain(self):
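        # Restore state before any training, run a single step with lr 0.0,
        # and check that only the Adam accumulators (global step, beta pow
        # accumulators) advance while the parameters keep their saved values.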
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=0.0,
                beta1=0.8,
                beta2=0.6,
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            adam.set_state_dict(self.opti_dict)
            ptb_model.set_state_dict(self.state_dict)

            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testLoadAndSetVarBaseBeforeTrain(self):
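        # Same pre-training restore check, loading the state from the files
        # saved in func_setUp instead of the in-memory dicts.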
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [0.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                # set lr to zero so parameters are not updated
                new_lr = 0.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=0.0,
                beta1=0.8,
                beta2=0.6,
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            state_dict, opti_dict = fluid.load_dygraph("./test_dy")
            adam.set_state_dict(opti_dict)
            ptb_model.set_state_dict(state_dict)

            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            # check parameter

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetNumpyBeforeTrain(self):
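        # Pre-training restore check using numpy copies of the state dicts
        # and a piecewise-decay schedule whose values are all 0.0.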
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to

            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [0.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                # set lr to 0.0 so parameters are not updated
                new_lr = 0.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr_arr
                ),
                beta1=0.8,
                beta2=0.6,
                parameter_list=ptb_model.parameters(),
            )
            dy_param_updated = dict()
            dy_param_init = dict()
            dy_loss = None
            last_hidden = None
            last_cell = None

            np_opti_dict = {}
            np_state_dict = {}

            for k, v in self.opti_dict.items():
                if isinstance(v, (core.VarBase, core.eager.Tensor)):
                    np_opti_dict[v.name] = v.numpy()
                else:
                    np_opti_dict[k] = v

            for k, v in self.state_dict.items():
                np_state_dict[k] = v.numpy()

            adam.set_state_dict(np_opti_dict)
            ptb_model.set_state_dict(np_state_dict)
            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            # check parameter

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testOnlyLoadParams(self):
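        # Saving only a parameter state dict must still load cleanly: the
        # returned optimizer state dict is None since none was saved.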
        with fluid.dygraph.guard():
            emb = paddle.nn.Embedding(10, 10)
            state_dict = emb.state_dict()
            fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy')
            )

            self.assertIsNone(opti_state_dict)

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy.pdparams')
            )

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy.pdopt')
            )

    def func_test_load_compatible_with_keep_name_table(self):
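        # load_dygraph with keep_name_table=True must stay backward
        # compatible and still return a usable parameter dict.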
        with fluid.dygraph.guard():
            emb = paddle.nn.Embedding(10, 10)
            state_dict = emb.state_dict()
            fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy'), keep_name_table=True
            )
            self.assertIsNotNone(para_state_dict)
            self.assertIsNone(opti_state_dict)

    def test_main(self):
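        # The cases share state and must run in this order: each one consumes
        # the snapshots produced by func_setUp.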
        self.func_setUp()
        self.func_testLoadAndSetVarBase()
        self.func_testSetVariable()
        self.func_testSetNumpy()
        self.func_testSetVariableBeforeTrain()
        self.func_testLoadAndSetVarBaseBeforeTrain()
        self.func_testSetNumpyBeforeTrain()
        self.func_testOnlyLoadParams()
        self.func_test_load_compatible_with_keep_name_table()


if __name__ == '__main__':
    unittest.main()