#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
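
"""Dygraph (imperative) save/load tests for the v2 API.

Trains a small PTB-style LSTM language model, saves model and optimizer
state with paddle.save, and checks that paddle.load / set_state_dict /
set_dict restore the exact values across several scenarios: file
round-trips, in-memory state dicts, numpy state dicts, and restoring
state before any training step.
"""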

import os
import tempfile
import unittest

import numpy as np

import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.dygraph.base import to_variable
from paddle.nn import Embedding
from paddle.optimizer import Adam
from paddle.optimizer.lr import LRScheduler


class SimpleLSTMRNN(paddle.nn.Layer):
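    """A manually unrolled multi-layer LSTM.

    Each layer owns one fused gate weight of shape
    [2 * hidden_size, 4 * hidden_size] and one bias; forward() steps the
    cells over `num_steps` slices of the input embedding.
    """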
    def __init__(
        self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None
    ):
        super().__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._input = None
        self._num_steps = num_steps
        self.cell_array = []
        self.hidden_array = []
        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.mask_array = []

        for i in range(self._num_layers):
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=paddle.nn.initializer.Uniform(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=paddle.nn.initializer.Uniform(
                    low=-self._init_scale, high=self._init_scale
                ),
            )
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=paddle.nn.initializer.Uniform(
                        low=-self._init_scale, high=self._init_scale
                    )
                ),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=paddle.nn.initializer.Constant(0.0),
            )
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))

    def forward(self, input_embedding, init_hidden=None, init_cell=None):
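        """Unroll the LSTM stack over `num_steps` and return the per-step
        outputs together with the final hidden and cell states of every
        layer."""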
        self.cell_array = []
        self.hidden_array = []

        for i in range(self._num_layers):
            pre_hidden = paddle.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_cell = paddle.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1]
            )
            pre_hidden = paddle.reshape(
                pre_hidden, shape=[-1, self._hidden_size]
            )
            pre_cell = paddle.reshape(pre_cell, shape=[-1, self._hidden_size])
            self.hidden_array.append(pre_hidden)
            self.cell_array.append(pre_cell)

        res = []
        for index in range(self._num_steps):
            self._input = paddle.slice(
                input_embedding, axes=[1], starts=[index], ends=[index + 1]
            )
            self._input = paddle.reshape(
                self._input, shape=[-1, self._hidden_size]
            )
            for k in range(self._num_layers):
                pre_hidden = self.hidden_array[k]
                pre_cell = self.cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]

                nn = paddle.concat([self._input, pre_hidden], 1)
                gate_input = paddle.matmul(x=nn, y=weight_1)

                gate_input = paddle.add(gate_input, bias)
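                # Standard LSTM cell: split the fused projection into the
                # input (i), candidate (j), forget (f) and output (o) gates,
                # then c = sig(f) * c_prev + sig(i) * tanh(j) and
                # m = sig(o) * tanh(c).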
                i, j, f, o = paddle.split(
                    gate_input, num_or_sections=4, axis=-1
                )
                c = pre_cell * paddle.nn.functional.sigmoid(
                    f
                ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
                m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
                self.hidden_array[k] = m
                self.cell_array[k] = c
                self._input = m

                if self._dropout is not None and self._dropout > 0.0:
                    self._input = paddle.nn.functional.dropout(
                        self._input,
                        p=self._dropout,
                        mode='upscale_in_train',
                    )
            res.append(
                paddle.reshape(self._input, shape=[1, -1, self._hidden_size])
            )
        real_res = paddle.concat(res, 0)
        real_res = paddle.transpose(x=real_res, perm=[1, 0, 2])
        last_hidden = paddle.concat(self.hidden_array, 1)
        last_hidden = paddle.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_hidden = paddle.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = paddle.concat(self.cell_array, 1)
        last_cell = paddle.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size]
        )
        last_cell = paddle.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell


class PtbModel(paddle.nn.Layer):
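    """A PTB-style language model: embedding -> SimpleLSTMRNN -> softmax
    projection, returning the summed per-step cross-entropy loss."""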
    def __init__(
        self,
        hidden_size,
        vocab_size,
        num_layers=2,
        num_steps=20,
        init_scale=0.1,
        dropout=None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout,
        )
        self.embedding = Embedding(
            vocab_size,
            hidden_size,
            sparse=False,
            weight_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=paddle.nn.initializer.Uniform(
                    low=-init_scale, high=init_scale
                ),
            ),
        )

        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype="float32",
            default_initializer=paddle.nn.initializer.Uniform(
                low=-self.init_scale, high=self.init_scale
            ),
        )
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.vocab_size],
            dtype="float32",
            default_initializer=paddle.nn.initializer.Uniform(
                low=-self.init_scale, high=self.init_scale
            ),
        )

    def forward(self, input, label, init_hidden, init_cell):
        init_h = paddle.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size]
        )

        init_c = paddle.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size]
        )

        x_emb = self.embedding(input)
        x_emb = paddle.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size]
        )
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = paddle.nn.functional.dropout(
                x_emb,
                p=self.dropout,
                mode='upscale_in_train',
            )
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(
            x_emb, init_h, init_c
        )
        rnn_out = paddle.reshape(
            rnn_out, shape=[-1, self.num_steps, self.hidden_size]
        )

        projection = paddle.matmul(rnn_out, self.softmax_weight)
        projection = paddle.add(projection, self.softmax_bias)
        projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
        loss = paddle.nn.functional.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False
        )
        loss = paddle.reshape(loss, shape=[-1, self.num_steps])
        loss = paddle.mean(loss, axis=[0])
        loss = paddle.sum(loss)

        return loss, last_hidden, last_cell


class TestDygraphPtbRnn(unittest.TestCase):
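    """Verifies that dygraph model and optimizer state survive save/load
    round-trips through paddle.save, paddle.load and set_state_dict."""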
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def func_setUp(self):
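        """Train the PTB model for batch_num batches, record the resulting
        optimizer and parameter values as the baseline (self.base_opti /
        self.model_base), and save both state dicts with paddle.save."""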
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            scheduler = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=bd, values=lr_arr
            )
            adam = Adam(
                learning_rate=scheduler, parameters=ptb_model.parameters()
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                scheduler.step()
                ptb_model.clear_gradients()

                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            self.opti_dict = adam.state_dict()
            self.base_opti = {}
            for k, v in self.opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    self.base_opti[v.name] = v.numpy()
                    self.assertTrue(np.sum(np.abs(v.numpy())) != 0)
                else:
                    self.base_opti[k] = v

            paddle.save(
                self.opti_dict,
                os.path.join(self.temp_dir.name, "test_dy_v2.pdopt"),
            )

            self.state_dict = ptb_model.state_dict()

            self.model_base = {}
            for k, v in self.state_dict.items():
                np_t = v.numpy()
                self.model_base[k] = np_t

            paddle.save(
                self.state_dict,
                os.path.join(self.temp_dir.name, "test_dy_v2.pdparams"),
            )

    def func_testLoadAndSetVarBase(self):
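        """Retrain from scratch, zero out the live tensors, then reload the
        files written in func_setUp with paddle.load and check that every
        optimizer slot and parameter is restored exactly."""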
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            scheduler = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=bd, values=lr_arr
            )
            adam = Adam(
                learning_rate=scheduler, parameters=ptb_model.parameters()
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                scheduler.step()
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np_t = v.numpy()
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)

                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

            para_state_dict = paddle.load(
                os.path.join(self.temp_dir.name, "test_dy_v2.pdparams")
            )
            opti_state_dict = paddle.load(
                os.path.join(self.temp_dir.name, "test_dy_v2.pdopt")
            )
            adam.set_state_dict(opti_state_dict)

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            for k, v in state_dict.items():
                np_t = v.numpy()
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_dict(para_state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetVariable(self):
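        """Same round-trip as func_testLoadAndSetVarBase, but restore from
        the in-memory state dicts (self.opti_dict / self.state_dict)
        instead of the saved files."""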
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            scheduler = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=bd, values=lr_arr
            )
            adam = Adam(
                learning_rate=scheduler, parameters=ptb_model.parameters()
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                scheduler.step()
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np_t = v.numpy()
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)

                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

            if isinstance(adam._learning_rate, LRScheduler):
                adam._learning_rate.step_num = 0

            adam.set_state_dict(self.opti_dict)
            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            for k, v in state_dict.items():
                np_t = v.numpy()
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_dict(self.state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetNumpy(self):
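        """Same round-trip again, but feed set_state_dict / set_dict plain
        numpy arrays to check that non-Tensor state is accepted and
        restored exactly."""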
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [1.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                new_lr = 1.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            scheduler = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=bd, values=lr_arr
            )
            adam = Adam(
                learning_rate=scheduler, parameters=ptb_model.parameters()
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            for i in range(batch_num):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )
                if i == 0:
                    for param in ptb_model.parameters():
                        dy_param_init[param.name] = param.numpy()
                dy_loss.backward()
                adam.minimize(dy_loss)
                scheduler.step()
                ptb_model.clear_gradients()
                if i == batch_num - 1:
                    for param in ptb_model.parameters():
                        dy_param_updated[param.name] = param.numpy()

            # check optimizer
            opti_dict = adam.state_dict()
            np_opti_dict = {}
            # set to zero
            for k, v in opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np_t = v.numpy()
                    np_opti_dict[v.name] = np_t
                    var = v.value().get_tensor()
                    var.set(np.zeros_like(np_t), place)
                    self.assertTrue(np.sum(np.abs(v.numpy())) == 0)
                else:
                    np_opti_dict[k] = v

            if isinstance(adam._learning_rate, LRScheduler):
                adam._learning_rate.step_num = 0

            adam.set_state_dict(np_opti_dict)

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
                else:
                    self.assertEqual(v, self.base_opti[k])

            # check parameter
            state_dict = ptb_model.state_dict()
            np_state_dict = {}
            for k, v in state_dict.items():
                np_t = v.numpy()
                np_state_dict[k] = np_t
                var = v.value().get_tensor()

                var.set(np.zeros_like(np_t), place)

            ptb_model.set_dict(np_state_dict)

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]

                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetVariableBeforeTrain(self):
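        """Restore the saved state before any training, run one batch with
        learning_rate=0.0, and check that only Adam's bookkeeping moves:
        global_step advances by one and the beta pow accumulators are
        scaled by beta1/beta2 while the parameters stay identical."""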
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=0.0,
                beta1=0.8,
                beta2=0.6,
                parameters=ptb_model.parameters(),
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            adam.set_state_dict(self.opti_dict)
            ptb_model.set_dict(self.state_dict)

            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testLoadAndSetVarBaseBeforeTrain(self):
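        """Like func_testSetVariableBeforeTrain, but the state is reloaded
        from the saved files instead of the in-memory dicts."""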
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [0.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                # set lr to zero so the parameters are not updated
                new_lr = 0.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            adam = Adam(
                learning_rate=0.0,
                beta1=0.8,
                beta2=0.6,
                parameters=ptb_model.parameters(),
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            model_prefix = os.path.join(self.temp_dir.name, "test_dy_v2")
            state_dict = paddle.load(model_prefix + '.pdparams')
            opti_dict = paddle.load(model_prefix + '.pdopt')
            adam.set_state_dict(opti_dict)
            ptb_model.set_dict(state_dict)

            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "global_step":
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            # check parameter

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testSetNumpyBeforeTrain(self):
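        """Like the two tests above, but the restored state is converted to
        numpy first and a PiecewiseDecay scheduler drives the (zero)
        learning rate."""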
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4
        batch_num = 200

        with fluid.dygraph.guard():
            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale,
            )

            bd = []
            lr_arr = [0.0]
            # this is a fake lr decay strategy
            for i in range(1, 10):
                bd.append(100 * i)
                # set lr to 0.0 so the parameters are not updated
                new_lr = 0.0
                lr_arr.append(new_lr)

            place = (
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            scheduler = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=bd, values=lr_arr
            )
            adam = Adam(
                learning_rate=scheduler,
                beta1=0.8,
                beta2=0.6,
                parameters=ptb_model.parameters(),
            )
            dy_param_updated = {}
            dy_param_init = {}
            dy_loss = None
            last_hidden = None
            last_cell = None

            np_opti_dict = {}
            np_state_dict = {}

            for k, v in self.opti_dict.items():
                if isinstance(v, core.eager.Tensor):
                    np_opti_dict[v.name] = v.numpy()
                else:
                    np_opti_dict[k] = v

            for k, v in self.state_dict.items():
                np_state_dict[k] = v.numpy()

            adam.set_state_dict(np_opti_dict)
            ptb_model.set_dict(np_state_dict)
            for i in range(1):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32'
                )
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(
                    x, y, init_hidden, init_cell
                )

                dy_loss.backward()
                scheduler.step()
                adam.minimize(dy_loss)
                ptb_model.clear_gradients()

            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
                if k == "LR_Scheduler":
                    np.testing.assert_array_equal(
                        v['last_epoch'], self.base_opti[k]['last_epoch'] + 1
                    )

                if k.find("beta1_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta1
                    )
                if k.find("beta2_pow_acc_0") > 0:
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name] * adam._beta2
                    )

            # check parameter

            state_dict = ptb_model.state_dict()

            for k, v in state_dict.items():
                new_t = v.numpy()

                base_t = self.model_base[k]
                np.testing.assert_array_equal(new_t, base_t)

    def func_testOnlyLoadParams(self):
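        """A parameter-only state dict saves and loads cleanly without any
        optimizer state alongside it."""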
        with fluid.dygraph.guard():
            emb = paddle.nn.Embedding(10, 10)
            state_dict = emb.state_dict()
            paddle.save(
                state_dict,
                os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'),
            )

            para_state_dict = paddle.load(
                os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')
            )

    def func_test_no_state_in_input_dict(self):
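        """set_state_dict should tolerate a loaded dict that is missing an
        entry ('weight' is popped before restoring)."""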
        with fluid.dygraph.guard():
            emb = paddle.nn.Embedding(10, 10)
            state_dict = emb.state_dict()
            paddle.save(
                state_dict,
                os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'),
            )

            para_state_dict = paddle.load(
                os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')
            )
            para_state_dict.pop('weight')

            emb.set_state_dict(para_state_dict)

    def func_test_state_shape_mismatch(self):
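        """set_state_dict is handed a 'weight' entry whose shape no longer
        matches the layer, exercising the shape-mismatch handling path."""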
        with fluid.dygraph.guard():
            emb = paddle.nn.Embedding(10, 10)
            state_dict = emb.state_dict()
            paddle.save(
                state_dict,
                os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'),
            )

            para_state_dict = paddle.load(
                os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'),
                return_numpy=True,
            )
            para_state_dict['weight'] = np.expand_dims(
                para_state_dict['weight'], axis=-1
            )

            emb.set_state_dict(para_state_dict)

    def test_main(self):
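        """Run the sub-tests in order; func_setUp must come first because
        the later checks compare against self.base_opti / self.model_base
        recorded there."""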
        self.func_setUp()
        self.func_testLoadAndSetVarBase()
        self.func_testSetVariable()
        self.func_testSetNumpy()
        self.func_testSetVariableBeforeTrain()
        self.func_testLoadAndSetVarBaseBeforeTrain()
        self.func_testSetNumpyBeforeTrain()
        self.func_testOnlyLoadParams()
        self.func_test_no_state_in_input_dict()
        self.func_test_state_shape_mismatch()

M
if __name__ == '__main__':
    unittest.main()