#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
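
"""Test that parameters trained with dynamic-to-static (dy2stat) enabled can
be saved with paddle.save and reloaded into a pure dygraph model that then
produces identical outputs.
"""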

import os
import tempfile
import unittest

import numpy as np
from test_fetch_feed import Linear
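# Linear comes from the sibling test file test_fetch_feed.py; as used below,
# its forward returns both an output tensor and a scalar loss.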

import paddle
from paddle import fluid
from paddle.fluid.optimizer import AdamOptimizer

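# Fix the NumPy seed so the generated test input is deterministic.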
np.random.seed(2020)

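# Run on the GPU when Paddle was built with CUDA support, else on the CPU.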
place = (
    fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
)


class TestDyToStaticSaveLoad(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "test_dy2stat_save_load"
        )

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_same_result(self):
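        """Train with dy2stat enabled, save, then reload into a pure dygraph
        net and check that both produce the same output and loss."""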
        x_data = np.random.randn(30, 10, 32).astype('float32')
        batch_num = 3

        with fluid.dygraph.guard(place):
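            # First pass: train with dynamic-to-static transcription enabled.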
            paddle.jit.enable_to_static(True)
            x = fluid.dygraph.to_variable(x_data)
            net = Linear(32, 64)
            adam = AdamOptimizer(
                learning_rate=0.1, parameter_list=net.parameters()
            )

            for i in range(batch_num):
                static_out, static_loss = net(x)
                # Update parameters
                static_loss.backward()
                adam.minimize(static_loss)
                net.clear_gradients()
            # Save parameters
            paddle.save(net.state_dict(), self.model_path + '.pdparams')
            # minimize() has updated the parameters, so call net() again to
            # record the final output and loss.
            # Switch into eval mode.
            net.eval()
            static_out, static_loss = net(x)

        # Load the saved parameters into a pure dygraph network.
        with fluid.dygraph.guard(place):
            dygraph_net = Linear(32, 64)

            # Load parameters
            model_dict = paddle.load(self.model_path + '.pdparams')
            dygraph_net.set_dict(model_dict)
            # Switch into eval mode.
            dygraph_net.eval()

            x = fluid.dygraph.to_variable(x_data)
            # Predict with the reloaded parameters.
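            # Disable dy2stat so this pass runs in pure imperative mode.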
            paddle.jit.enable_to_static(False)
            dygraph_out, dygraph_loss = dygraph_net(x)

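        # Outputs of the two runs must agree within a small float tolerance.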
        np.testing.assert_allclose(
            dygraph_out.numpy(), static_out.numpy(), rtol=1e-05
        )
        np.testing.assert_allclose(
            dygraph_loss.numpy(), static_loss.numpy(), rtol=1e-05
        )


if __name__ == '__main__':
    unittest.main()