# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle
import shutil
import unittest
import tempfile
import numpy as np
import paddle
from paddle.static import InputSpec
import paddle.fluid as fluid
from paddle.fluid.layers.utils import flatten
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
from paddle.fluid.dygraph.io import INFER_PARAMS_INFO_SUFFIX
from paddle.fluid import unique_name

BATCH_SIZE = 32
BATCH_NUM = 10
SEED = 10


def random_batch_reader(input_size, label_size):
    def _get_random_inputs_and_labels(input_size, label_size):
        np.random.seed(SEED)
        input = np.random.random(size=input_size).astype('float32')
        label = np.random.random(size=label_size).astype('int64')
        return input, label

    def __reader__():
        for _ in range(BATCH_NUM):
            batch_input, batch_label = _get_random_inputs_and_labels(
                [BATCH_SIZE, input_size], [BATCH_SIZE, label_size]
            )
            yield batch_input, batch_label

    return __reader__
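
# A minimal usage sketch (it mirrors what train() below does): the factory
# returned by random_batch_reader() plugs into a generator-based DataLoader:
#
#   loader = fluid.io.DataLoader.from_generator(capacity=5)
#   loader.set_batch_generator(random_batch_reader(784, 1))
#   for image, label in loader():
#       ...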


class LinearNet(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNet, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative
    def forward(self, x):
        return self._linear(x)
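
# NOTE: @declarative is the dynamic-to-static decorator from
# paddle.fluid.dygraph (essentially the same mechanism as the
# paddle.jit.to_static decorator used later in this file): the decorated
# forward is traced into a static program, which paddle.jit.save serializes.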


class LinearNetWithInputSpec(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetWithInputSpec, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative(input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
    def forward(self, x):
        return self._linear(x)


class LinearNetNotDeclarative(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetNotDeclarative, self).__init__()
        self._linear = Linear(in_size, out_size)

    def forward(self, x):
        return self._linear(x)


class LinerNetWithLabel(paddle.nn.Layer):
    def __init__(self, in_size, out_size):
        super(LinerNetWithLabel, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative(
        input_spec=[
            InputSpec(shape=[None, 784], dtype='float32', name="image"),
            InputSpec(shape=[None, 1], dtype='int64', name="label"),
        ]
    )
    def forward(self, x, label):
        out = self._linear(x)
        loss = fluid.layers.cross_entropy(out, label)
        avg_loss = paddle.mean(loss)
        return out, avg_loss
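
# NOTE: the InputSpec entries above carry explicit names ("image" and
# "label"); paddle.jit.save can then select or prune inputs by name,
# which the prune-related cases in TestJitSaveMultiCases rely on.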


class LinerNetWithPruneInput(paddle.nn.Layer):
    def __init__(self, in_size, out_size):
        super(LinerNetWithPruneInput, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative(
        input_spec=[
            InputSpec(shape=[None, 784], dtype='float32', name="image"),
            InputSpec(shape=[None, 1], dtype='int64', name="label"),
        ]
    )
    def forward(self, x, label):
        out = self._linear(x)
        loss = fluid.layers.cross_entropy(out, label)
        avg_loss = paddle.mean(loss)
        return out


class LinerNetWithUselessInput(paddle.nn.Layer):
    def __init__(self, in_size, out_size):
        super(LinerNetWithUselessInput, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative(
        input_spec=[
            InputSpec(shape=[None, 784], dtype='float32', name="image"),
            InputSpec(shape=[None, 1], dtype='int64', name="label"),
        ]
    )
    def forward(self, x, label):
        out = self._linear(x)
        return out


class LinearNetReturnLoss(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetReturnLoss, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative
    def forward(self, x):
        y = self._linear(x)
        z = self._linear(y)
        loss = paddle.mean(z)
        return z, loss


class LinearNetMultiInput(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetMultiInput, self).__init__()
        self._linear1 = Linear(in_size, out_size)
        self._linear2 = Linear(in_size, out_size)

    @declarative(
        input_spec=[
            InputSpec([None, 8], dtype='float32'),
            InputSpec([None, 8], dtype='float32'),
        ]
    )
    def forward(self, x, y):
        x_out = self._linear1(x)
        y_out = self._linear2(y)
        loss = paddle.mean(x_out + y_out)
        return x_out, y_out, loss


class LinearNetMultiInput1(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetMultiInput1, self).__init__()
        self._linear1 = Linear(in_size, out_size)
        self._linear2 = Linear(in_size, out_size)

    @declarative(
        input_spec=(
            InputSpec([None, 8], dtype='float32'),
            InputSpec([None, 8], dtype='float32'),
        )
    )
    def forward(self, x, y):
        x_out = self._linear1(x)
        y_out = self._linear2(y)
        loss = paddle.mean(x_out + y_out)
        return x_out, y_out, loss


class MultiLoadingLinearNet(fluid.dygraph.Layer):
    def __init__(self, size, model_path):
        super(MultiLoadingLinearNet, self).__init__()
        self._linear = Linear(size, size)
        self._load_linear1 = paddle.jit.load(model_path)
        self._load_linear2 = paddle.jit.load(model_path)

    @declarative
    def forward(self, x):
        tmp1 = self._linear(x)
        tmp2 = self._load_linear1(tmp1)
        tmp3 = self._load_linear2(tmp2)
        y = self._linear(tmp3)
        return y


class LinearNetReturnHidden(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetReturnHidden, self).__init__()
        self._linear_1 = Linear(in_size, out_size)
        self._linear_2 = Linear(in_size, out_size)

    @declarative
    def forward(self, x):
        y = self._linear_1(x)
        z = self._linear_2(y)
        loss = paddle.mean(z)
        return y, loss


class LinearNetWithNestOut(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetWithNestOut, self).__init__()
        self._linear_1 = Linear(in_size, out_size)
        self._linear_2 = Linear(in_size, out_size)

    @declarative
    def forward(self, x):
        y = self._linear_1(x)
        z = self._linear_2(y)
        out = y + z
        loss = paddle.mean(out)
        return y, [(z, loss), out]


class LinearNetWithDictInput(paddle.nn.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetWithDictInput, self).__init__()
        self._linear = Linear(in_size, out_size)

    @paddle.jit.to_static(
        input_spec=[
            {'img': InputSpec(shape=[None, 8], dtype='float32', name='img')},
            {'label': InputSpec(shape=[None, 1], dtype='int64', name='label')},
        ]
    )
    def forward(self, img, label):
        out = self._linear(img['img'])
        # compute the loss but do not return it, to avoid output pruning
        loss = paddle.nn.functional.cross_entropy(out, label['label'])
        return out
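
# NOTE: for dict inputs, each entry is described by its own named
# InputSpec; in concrete_program.inputs the layer itself plus each dict
# argument appear as separate entries (see the check in
# TestSaveLoadWithDictInput below).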


class LinearNetWithDictInputNoPrune(paddle.nn.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetWithDictInputNoPrune, self).__init__()
        self._linear = Linear(in_size, out_size)

    def forward(self, img):
        out = self._linear(img['img'] + img['img2'])
        return out


class EmptyLayer(paddle.nn.Layer):
    def __init__(self):
        super(EmptyLayer, self).__init__()

    @paddle.jit.to_static
    def forward(self, x):
        return x


class NoParamLayer(paddle.nn.Layer):
    def __init__(self):
        super(NoParamLayer, self).__init__()

    @paddle.jit.to_static
    def forward(self, x, y):
        return x + y


class LinearNetWithMultiStaticFunc(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetWithMultiStaticFunc, self).__init__()
        self._linear_0 = Linear(in_size, out_size)
        self._linear_1 = Linear(in_size, out_size)
        self._scale = paddle.to_tensor(9.9)

    @paddle.jit.to_static
    def forward(self, x):
        return self._linear_0(x)

    @paddle.jit.to_static
    def forward_no_param(self, x):
        return x

    @paddle.jit.to_static
    def forward_general(self, x):
        return self._linear_0(x) + self._linear_1(x) * self._scale
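
# NOTE: paddle.jit.save exports every @paddle.jit.to_static method of a
# Layer, not only forward; TestJitSaveLoadMultiMethods below loads all
# three methods back and compares their outputs.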


def train(layer, input_size=784, label_size=1):
    # create optimizer
    sgd = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01, parameter_list=layer.parameters()
    )
    # create data loader
    train_loader = fluid.io.DataLoader.from_generator(capacity=5)
    train_loader.set_batch_generator(
        random_batch_reader(input_size, label_size)
    )
    # train
    for data in train_loader():
        img, label = data
        label.stop_gradient = True

        cost = layer(img)

        loss = fluid.layers.cross_entropy(cost, label)
        avg_loss = paddle.mean(loss)

        avg_loss.backward()
        sgd.minimize(avg_loss)
        layer.clear_gradients()
    return [img], layer, avg_loss
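
# A typical save flow built on train() (a sketch of what several tests
# below do; model_path is illustrative):
#
#   layer = LinearNet(784, 1)
#   example_inputs, layer, _ = train(layer)
#   paddle.jit.save(layer=layer, path=model_path, input_spec=example_inputs)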


def train_with_label(layer, input_size=784, label_size=1):
    # create optimizer
    sgd = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01, parameter_list=layer.parameters()
    )
    # create data loader
    train_loader = fluid.io.DataLoader.from_generator(capacity=5)
    train_loader.set_batch_generator(
        random_batch_reader(input_size, label_size)
    )
    # train
    for data in train_loader():
        img, label = data
        label.stop_gradient = True

        out, avg_loss = layer(img, label)

        avg_loss.backward()
        sgd.minimize(avg_loss)
        layer.clear_gradients()
    return out


class TestJitSaveLoad(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load/model"
        )
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)

    def tearDown(self):
        self.temp_dir.cleanup()

    def train_and_save_model(self, model_path=None):
        layer = LinearNet(784, 1)
        example_inputs, layer, _ = train(layer)
        final_model_path = model_path if model_path else self.model_path
        orig_input_types = [type(x) for x in example_inputs]
        paddle.jit.save(
            layer=layer, path=final_model_path, input_spec=example_inputs
        )
        new_input_types = [type(x) for x in example_inputs]
        self.assertEqual(orig_input_types, new_input_types)
        return layer

    def test_save_load(self):
        # train and save model
        train_layer = self.train_and_save_model()
        # load model
        loaded_layer = paddle.jit.load(self.model_path)
        self.load_and_inference(train_layer, loaded_layer)
        self.load_dygraph_state_dict(train_layer)
        self.load_and_finetune(train_layer, loaded_layer)

    def load_and_inference(self, train_layer, infer_layer):
        train_layer.eval()
        infer_layer.eval()
        # inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x).numpy(), infer_layer(x).numpy()
        )

    def load_and_finetune(self, train_layer, load_train_layer):
        train_layer.train()
        load_train_layer.train()
        # train & compare
        img0, _, train_loss = train(train_layer)
        img1, _, load_train_loss = train(load_train_layer)
        np.testing.assert_array_equal(
            train_loss.numpy(), load_train_loss.numpy()
        )

    def load_dygraph_state_dict(self, train_layer):
        train_layer.eval()
        # construct new model
        new_layer = LinearNet(784, 1)
        orig_state_dict = new_layer.state_dict()
        load_state_dict = paddle.load(self.model_path)
        for structured_name in orig_state_dict:
            self.assertTrue(structured_name in load_state_dict)
        new_layer.set_state_dict(load_state_dict)
        new_layer.eval()
        # inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x).numpy(), new_layer(x).numpy()
        )

    def test_load_dygraph_no_path(self):
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load.no_path/model_path"
        )
        with self.assertRaises(ValueError):
            model_dict, _ = fluid.dygraph.load_dygraph(model_path)

    def test_jit_load_no_path(self):
        path = os.path.join(
            self.temp_dir.name, "test_jit_save_load.no_path/model_path"
        )
        with self.assertRaises(ValueError):
            loaded_layer = paddle.jit.load(path)


class TestSaveLoadWithNestOut(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_nest_output(self):
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )

        net = LinearNetWithNestOut(8, 8)
        dy_outs = flatten(net(x))
        net = declarative(net, input_spec=[InputSpec([None, 8], name='x')])

        model_path = os.path.join(self.temp_dir.name, "net_with_nest_out/model")
        paddle.jit.save(net, model_path)

        load_net = paddle.jit.load(model_path)
        load_outs = flatten(load_net(x))

        self.assertTrue(len(dy_outs) == 4)
        for dy_out, load_out in zip(dy_outs, load_outs):
            np.testing.assert_allclose(
                dy_out.numpy(), load_out.numpy(), rtol=1e-05
            )


class TestSaveLoadWithDictInput(unittest.TestCase):
    def test_dict_input(self):
        # NOTE: this net cannot actually be executed; it is only a special
        # case for exporting models during model validation.
        # We do NOT recommend writing a Layer this way.
        net = LinearNetWithDictInput(8, 8)
        # net.forward.concrete_program.inputs:
        # (<__main__.LinearNetWithDictInput object at 0x7f2655298a98>,
        #  {'img': var img : fluid.VarType.LOD_TENSOR.shape(-1, 8).astype(VarType.FP32)},
        #  {'label': var label : fluid.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)})
        self.assertEqual(len(net.forward.concrete_program.inputs), 3)
        temp_dir = tempfile.TemporaryDirectory()
        path = os.path.join(
            temp_dir.name, "test_jit_save_load_with_dict_input/model"
        )
        # prune inputs
        paddle.jit.save(
            layer=net,
            path=path,
            input_spec=[
                {'img': InputSpec(shape=[None, 8], dtype='float32', name='img')}
            ],
        )

        img = paddle.randn(shape=[4, 8], dtype='float32')
        loaded_net = paddle.jit.load(path)
        loaded_out = loaded_net(img)

        # loaded_net._input_spec():
        # [InputSpec(shape=(-1, 8), dtype=VarType.FP32, name=img)]
        self.assertEqual(len(loaded_net._input_spec()), 1)
        temp_dir.cleanup()


class TestSaveLoadWithDictInputNoPrune(unittest.TestCase):
    def test_dict_input(self):
        net = LinearNetWithDictInputNoPrune(8, 8)
        temp_dir = tempfile.TemporaryDirectory()
        path = os.path.join(
            temp_dir.name, "test_jit_save_load_with_dict_input_no_prune/model"
        )
        # save with both dict inputs; nothing is pruned
        paddle.jit.save(
            layer=net,
            path=path,
            input_spec=[
                {
                    'img': InputSpec(
                        shape=[None, 8], dtype='float32', name='img'
                    ),
                    'img2': InputSpec(
                        shape=[None, 8], dtype='float32', name='img2'
                    ),
                }
            ],
        )

        img = paddle.randn(shape=[4, 8], dtype='float32')
        img2 = paddle.randn(shape=[4, 8], dtype='float32')
        loaded_net = paddle.jit.load(path)
        loaded_out = loaded_net(img, img2)

        self.assertEqual(len(loaded_net._input_spec()), 2)
        temp_dir.cleanup()


class TestSaveLoadWithInputSpec(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_with_input_spec(self):
        net = LinearNetReturnLoss(8, 8)
        # set x.shape = [None, 8]
        net.forward = declarative(
            net.forward, input_spec=[InputSpec([None, 8], name='x')]
        )

        model_path = os.path.join(
            self.temp_dir.name, "input_spec.output_spec/model"
        )
        # check inputs and outputs
        self.assertTrue(len(net.forward.inputs) == 1)
        input_x = net.forward.inputs[0]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_x.name == 'x')

        # 1. prune loss
        output_spec = net.forward.outputs[:1]
        paddle.jit.save(net, model_path, output_spec=output_spec)

        # 2. load to infer
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        pred = infer_layer(x)

    def test_multi_in_out(self):
        net = LinearNetMultiInput(8, 8)

        model_path = os.path.join(
            self.temp_dir.name, "multi_inout.output_spec1/model"
        )
        # 1. check inputs and outputs
        self.assertTrue(len(net.forward.inputs) == 2)
        input_x = net.forward.inputs[0]
        input_y = net.forward.inputs[1]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_y.shape == (-1, 8))

        # 2. prune loss
582 583
        output_spec = net.forward.outputs[:2]
        paddle.jit.save(net, model_path, output_spec=output_spec)

        # 3. load to infer
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        y = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        # 4. predict
        pred_x, pred_y = infer_layer(x, y)

        # 1. prune y and loss
        model_path = os.path.join(
            self.temp_dir.name, "multi_inout.output_spec2/model"
        )
        output_spec = net.forward.outputs[:1]
        paddle.jit.save(net, model_path, [input_x], output_spec=output_spec)
        # 2. load again
        infer_layer2 = paddle.jit.load(model_path)
        # 3. predict
        pred_xx = infer_layer2(x)

        # 4. assert pred_x == pred_xx
        np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)

    def test_multi_in_out1(self):
        net = LinearNetMultiInput1(8, 8)

        model_path = os.path.join(
            self.temp_dir.name, "multi_inout1.output_spec1/model"
        )
        # 1. check inputs and outputs
        self.assertTrue(len(net.forward.inputs) == 2)
        input_x = net.forward.inputs[0]
        input_y = net.forward.inputs[1]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_y.shape == (-1, 8))

        # 2. prune loss
        output_spec = net.forward.outputs[:2]
        paddle.jit.save(net, model_path, output_spec=output_spec)

        # 3. load to infer
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        y = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        # 4. predict
        pred_x, pred_y = infer_layer(x, y)

        # 1. prune y and loss
        model_path = os.path.join(
            self.temp_dir.name, "multi_inout1.output_spec2/model"
        )
        output_spec = net.forward.outputs[:1]
        paddle.jit.save(net, model_path, (input_x,), output_spec=output_spec)
        # 2. load again
        infer_layer2 = paddle.jit.load(model_path)
        # 3. predict
        pred_xx = infer_layer2(x)

        # 4. assert pred_x == pred_xx
        np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)


class TestJitSaveLoadConfig(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_output_spec(self):
        train_layer = LinearNetReturnLoss(8, 8)
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=0.1, parameter_list=train_layer.parameters()
        )
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        for i in range(10):
            out, loss = train_layer(x)
            loss.backward()
            adam.minimize(loss)
            train_layer.clear_gradients()

        model_path = os.path.join(
            self.temp_dir.name, "save_load_config.output_spec"
        )
        output_spec = [out]
        paddle.jit.save(
            layer=train_layer,
            path=model_path,
            input_spec=[x],
            output_spec=output_spec,
        )

        train_layer.eval()
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x)[0].numpy(), infer_layer(x).numpy()
        )

    def test_save_no_support_config_error(self):
        layer = LinearNet(784, 1)
        path = os.path.join(self.temp_dir.name, "no_support_config_test")
        with self.assertRaises(ValueError):
            paddle.jit.save(layer=layer, path=path, model_filename="")

    def test_load_empty_model_filename_error(self):
        path = os.path.join(self.temp_dir.name, "error_model_filename_test")
        with self.assertRaises(ValueError):
            paddle.jit.load(path, model_filename="")

    def test_load_empty_params_filename_error(self):
        path = os.path.join(self.temp_dir.name, "error_params_filename_test")
        with self.assertRaises(ValueError):
            paddle.jit.load(path, params_filename="")

    def test_load_with_no_support_config(self):
        path = os.path.join(self.temp_dir.name, "no_support_config_test")
        with self.assertRaises(ValueError):
            paddle.jit.load(path, separate_params=True)


class TestJitMultipleLoading(unittest.TestCase):
    def setUp(self):
        self.linear_size = 4
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_multi_load/model"
        )
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        # train and save base model
        self.train_and_save_orig_model()

    def tearDown(self):
        self.temp_dir.cleanup()

    def train_and_save_orig_model(self):
        layer = LinearNet(self.linear_size, self.linear_size)
        example_inputs, layer, _ = train(layer, self.linear_size, 1)
        paddle.jit.save(
            layer=layer, path=self.model_path, input_spec=example_inputs
        )

    def test_load_model_retransform_inference(self):
        multi_loaded_layer = MultiLoadingLinearNet(
            self.linear_size, self.model_path
        )
        state_dict = multi_loaded_layer.state_dict()
        name_set = set()
        for _, var in state_dict.items():
            self.assertTrue(var.name not in name_set)
            name_set.add(var.name)


class TestJitPruneModelAndLoad(unittest.TestCase):
    def setUp(self):
        self.linear_size = 4
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_prune_model_and_load/model"
        )
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)

    def tearDown(self):
        self.temp_dir.cleanup()

    def train_and_save(self):
        train_layer = LinearNetReturnHidden(8, 8)
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=0.1, parameter_list=train_layer.parameters()
        )
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        for i in range(10):
            hidden, loss = train_layer(x)
            loss.backward()
            adam.minimize(loss)
            train_layer.clear_gradients()

        output_spec = [hidden]
        paddle.jit.save(
            layer=train_layer,
            path=self.model_path,
            input_spec=[x],
            output_spec=output_spec,
        )

        return train_layer

    def test_load_pruned_model(self):
        train_layer = self.train_and_save()
        train_layer.eval()

        infer_layer = paddle.jit.load(self.model_path)

        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x)[0].numpy(), infer_layer(x).numpy()
        )

    def test_load_var_not_in_extra_var_info(self):
        self.train_and_save()

        # change the extra var info
        var_info_path = self.model_path + INFER_PARAMS_INFO_SUFFIX
        with open(var_info_path, 'rb') as f:
            extra_var_info = pickle.load(f)
            extra_var_info.clear()
        with open(var_info_path, 'wb') as f:
            pickle.dump(extra_var_info, f, protocol=2)
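        # the emptied var info no longer covers the saved parameters,
        # so the following load is expected to fail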

        with self.assertRaises(RuntimeError):
            paddle.jit.load(self.model_path)


class TestJitSaveMultiCases(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def verify_inference_correctness(
        self, layer, model_path, with_label_and_loss=False, with_label=False
    ):
        layer.eval()
        loaded_layer = paddle.jit.load(model_path)
        loaded_layer.eval()
        # inference & compare
        x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
        if with_label_and_loss:
            y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
            pred, _ = layer(x, y)
            pred = pred.numpy()
        elif with_label:
            y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
            pred = layer(x, y)
            pred = pred.numpy()
        else:
            pred = layer(x).numpy()
        loaded_pred = loaded_layer(x).numpy()
        np.testing.assert_array_equal(
            pred,
            loaded_pred,
            err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format(
                pred, loaded_pred
            ),
        )

    def test_no_prune_to_static_after_train(self):
        layer = LinearNet(784, 1)

        train(layer)

        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_to_static_after_train/model"
        )
        paddle.jit.save(layer, model_path)

        self.verify_inference_correctness(layer, model_path)

    def test_no_prune_to_static_no_train(self):
        layer = LinearNetWithInputSpec(784, 1)

        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_to_static_no_train/model"
        )
        paddle.jit.save(layer, model_path)

        self.verify_inference_correctness(layer, model_path)

    def test_no_prune_no_to_static_after_train(self):
        layer = LinearNetNotDeclarative(784, 1)

        train(layer)

        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_no_to_static_after_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
        )

        self.verify_inference_correctness(layer, model_path)

    def test_no_prune_no_to_static_after_train_with_examples(self):
        layer = LinearNetNotDeclarative(784, 1)

        example_inputs, _, _ = train(layer)

        model_path = os.path.join(
            self.temp_dir.name,
            "test_no_prune_no_to_static_after_train_with_examples/model",
        )
        paddle.jit.save(layer=layer, path=model_path, input_spec=example_inputs)

        self.verify_inference_correctness(layer, model_path)

    def test_no_prune_no_to_static_no_train(self):
        layer = LinearNetNotDeclarative(784, 1)

        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_no_to_static_no_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
        )

        self.verify_inference_correctness(layer, model_path)

    def test_prune_to_static_after_train(self):
        layer = LinerNetWithLabel(784, 1)

        out = train_with_label(layer)

        model_path = os.path.join(
            self.temp_dir.name, "test_prune_to_static_after_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
            output_spec=[out],
        )

        self.verify_inference_correctness(
            layer, model_path, with_label_and_loss=True
        )

    def test_prune_to_static_no_train(self):
        layer = LinerNetWithLabel(784, 1)

        model_path = os.path.join(
            self.temp_dir.name, "test_prune_to_static_no_train/model"
        )
        # TODO: without training we cannot obtain the output variables here,
        # so for now outputs can only be selected by index
        output_spec = layer.forward.outputs[:1]
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
            output_spec=output_spec,
        )

        self.verify_inference_correctness(
            layer, model_path, with_label_and_loss=True
        )

    def test_prune_input_to_static_no_train(self):
        layer = LinerNetWithPruneInput(784, 1)

        model_path = os.path.join(
            self.temp_dir.name, "test_prune_input_to_static_no_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
        )

        self.verify_inference_correctness(layer, model_path, with_label=True)

    def test_prune_useless_input_to_static_no_train(self):
        layer = LinerNetWithUselessInput(784, 1)

        model_path = os.path.join(
            self.temp_dir.name,
            "test_prune_useless_input_to_static_no_train/model",
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
        )

        self.verify_inference_correctness(layer, model_path, with_label=True)

    def test_no_prune_input_spec_name_warning(self):
        layer = LinearNetWithInputSpec(784, 1)

        train(layer)

        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_input_spec_name_warning/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name='feed_input')
            ],
        )

        self.verify_inference_correctness(layer, model_path)

    def test_not_prune_output_spec_name_warning(self):
        layer = LinearNet(784, 1)

        train(layer)

        model_path = os.path.join(
            self.temp_dir.name, "test_not_prune_output_spec_name_warning/model"
        )
        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
        paddle.jit.save(layer, model_path, output_spec=[out])

        self.verify_inference_correctness(layer, model_path)

    def test_prune_input_spec_name_error(self):
        layer = LinerNetWithLabel(784, 1)

        model_path = os.path.join(
            self.temp_dir.name, "test_prune_input_spec_name_error/model"
        )
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer,
                model_path,
                input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
            )
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer,
                model_path,
                input_spec=[
                    InputSpec(
                        shape=[None, 784], dtype='float32', name='feed_input'
                    )
                ],
            )

    def test_prune_output_spec_name_error(self):
        layer = LinerNetWithLabel(784, 1)

        train_with_label(layer)

        model_path = os.path.join(
            self.temp_dir.name, "test_prune_to_static_after_train/model"
        )
        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer,
                model_path,
                input_spec=[
                    InputSpec(shape=[None, 784], dtype='float32', name="image")
                ],
                output_spec=[out],
            )


class TestJitSaveLoadEmptyLayer(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_save_load_empty_layer/model"
        )
        # enable dygraph mode
        paddle.disable_static()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_empty_layer(self):
        layer = EmptyLayer()
        x = paddle.to_tensor(np.random.random((10)).astype('float32'))
        out = layer(x)
        paddle.jit.save(layer, self.model_path)
        load_layer = paddle.jit.load(self.model_path)
        load_out = load_layer(x)
        np.testing.assert_array_equal(out, load_out)


class TestJitSaveLoadNoParamLayer(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_save_load_no_param_layer/model"
        )
        # enable dygraph mode
        paddle.disable_static()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_no_param_layer(self):
        layer = NoParamLayer()
        x = paddle.to_tensor(np.random.random((5)).astype('float32'))
        y = paddle.to_tensor(np.random.random((5)).astype('float32'))
        out = layer(x, y)
        paddle.jit.save(layer, self.model_path)
        load_layer = paddle.jit.load(self.model_path)
        load_out = load_layer(x, y)
        np.testing.assert_array_equal(out, load_out)


class TestJitSaveLoadMultiMethods(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_inference(self):
        model_path_inference = os.path.join(
            self.temp_dir.name, "jit_save_load_multi_methods/model"
        )
        IMAGE_SIZE = 224
        layer = LinearNetWithMultiStaticFunc(IMAGE_SIZE, 10)
        inps = paddle.randn([1, IMAGE_SIZE])
        result_origin = {}
        for func in dir(layer):
            if func.startswith('forward'):
                result_origin[func] = getattr(layer, func, None)(inps)
        paddle.jit.save(layer, model_path_inference)
        load_net = paddle.jit.load(model_path_inference)
        for func, result in result_origin.items():
            self.assertTrue(
                float(
                    (result - getattr(load_net, func, None)(inps)).abs().max()
                )
                < 1e-5
            )

    def test_jit_save_load_multi_methods_inputspec(self):
        model_path = os.path.join(
            self.temp_dir.name, 'jit_save_load_multi_methods/model'
        )
        layer = LinearNetWithMultiStaticFunc(784, 1)
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer, model_path, input_spec=[InputSpec(shape=[None, 784])]
            )

    def test_parse_name(self):
        model_path_inference = os.path.join(
            self.temp_dir.name, "jit_save_load_parse_name/model"
        )
        IMAGE_SIZE = 224
        layer = LinearNet(IMAGE_SIZE, 1)
        inps = paddle.randn([1, IMAGE_SIZE])
        layer(inps)
        paddle.jit.save(layer, model_path_inference)
        paddle.jit.save(layer, model_path_inference + '_v2')
        load_net = paddle.jit.load(model_path_inference)

        self.assertFalse(hasattr(load_net, 'v2'))


class LayerSaved(paddle.nn.Layer):
    def __init__(self, in_size, out_size):
        super(LayerSaved, self).__init__()
        self.hidden = 100
        self._linear_0 = Linear(in_size, self.hidden)
        self._linear_1_0 = Linear(self.hidden, self.hidden)
        self._linear_1_1 = Linear(self.hidden, self.hidden)
        self._linear_2 = Linear(self.hidden, out_size)
        self._scale = paddle.to_tensor(9.9)

    @paddle.jit.to_static
    def forward(self, x):
        y = self._linear_0(x)
        # Multiple blocks
        if paddle.shape(x)[0] == 1:
            y = self._linear_1_0(y)
        else:
            y += self._linear_1_1(y + self._scale)
        return self._linear_2(y)


class Net(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc1 = paddle.nn.Linear(4, 4)
        self.fc2 = paddle.nn.Linear(4, 4)
        self.bias = 0.4
        self.flag = paddle.ones([2], dtype="int32")

    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def log_softmax(self, input):
        return paddle.nn.functional.log_softmax(input, axis=-1)

    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def forward(self, x):
        out = self.fc1(x)
        out = paddle.nn.functional.relu(out)
        out = paddle.mean(out)
        return out

    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def infer(self, input):
        out = self.fc2(input)
        out = out + self.bias
        out = paddle.mean(out)
        return out

    # For extra Python float
    @paddle.jit.to_static(property=True)
    def fbias(self):
        return self.bias + 1
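
    # NOTE: @paddle.jit.to_static(property=True) marks a method as a
    # serializable property: the returned plain-Python value (float, int,
    # str, or a list of them) is stored with the saved program instead of
    # being traced as a tensor computation.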

    @paddle.jit.to_static(property=True)
    def down_sampling(self):
        return 4

    @paddle.jit.to_static(property=True)
    def fstr(self):
        return "save str property"

    @paddle.jit.to_static(property=True)
    def ints(self):
        return [10, 20]

    @paddle.jit.to_static(property=True)
    def floats(self):
        return [1.1, 2.2]

    @paddle.jit.to_static(property=True)
    def strs(self):
        return ["hello", "world"]


class NetTensor(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc1 = paddle.nn.Linear(4, 4)
        self.fc2 = paddle.nn.Linear(4, 4)
        self.bias = 0.4
        self.flag = paddle.ones([2], dtype="int32")

    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def forward(self, x):
        out = self.fc1(x)
        out = paddle.nn.functional.relu(out)
        out = paddle.mean(out)
        return out

    @paddle.jit.to_static(property=True)
    def fflag(self):
        return True


class TestJitSaveCombineProperty(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_combine_property(self):
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_combine/model"
        )
        # Use new namespace
        with unique_name.guard():
            net = Net()
        # save
        paddle.jit.save(net, model_path, combine_params=True)
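        # combine_params=True is expected to pack all parameters into a
        # single file next to the saved program, rather than one file per
        # parameter.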

    def test_jit_save_tensor_property(self):
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_combine/model"
        )
        # Use new namespace
        with unique_name.guard():
            net = NetTensor()

        paddle.jit.save(net, model_path, combine_params=True)


class LayerLoadFinetune(paddle.nn.Layer):
    def __init__(self, in_size, out_size, load_path):
        super(LayerLoadFinetune, self).__init__()
        # Test duplicate name
        self._linear_0 = Linear(in_size, in_size)
        self._linear_1_0 = Linear(out_size, in_size)
        self._linear_1_1 = Linear(out_size, in_size)
        self._linear_2 = Linear(out_size, out_size)
        self._scale = paddle.to_tensor(9.9)

        # Load multiple times
        self._load_l1 = paddle.jit.load(load_path)
        self._load_l2 = paddle.jit.load(load_path)

    @paddle.jit.to_static
    def forward(self, x):
        y = self._linear_0(x)
        y = self._load_l1(y)
        # Multiple blocks
        if paddle.shape(x)[0] == 1:
            y = self._linear_1_0(y)
            y = self._load_l1(y)
        else:
            y += self._linear_1_1(x + self._scale)
            y = self._load_l2(y)
        y = self._linear_1_0(y)
        y = self._load_l1(y)
        y = self._linear_1_0(y)
        # Use the same layer multiple times.
        y = self._load_l1(y)
        return y


class TestJitSaveLoadSaveWithoutRunning(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_finetune_load(self):
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load_save_without_running/model"
        )
        IMAGE_SIZE = 224
        inps0 = paddle.randn([1, IMAGE_SIZE])
        inps1 = paddle.randn([2, IMAGE_SIZE])
        # Use new namespace
        with unique_name.guard():
            layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
        # save
        paddle.jit.save(
            layer_save,
            model_path,
            input_spec=[
                paddle.static.InputSpec(
                    shape=[None, IMAGE_SIZE], dtype='float32'
                )
            ],
        )
        result_00 = layer_save(inps0)
        result_01 = layer_save(inps1)
        # load and save without running
        with unique_name.guard():
            layer_load = paddle.jit.load(model_path)
            paddle.jit.save(
                layer_load,
                model_path,
                input_spec=[
                    paddle.static.InputSpec(
                        shape=[None, IMAGE_SIZE], dtype='float32'
                    )
                ],
            )
        # reload
        layer_reload = paddle.jit.load(model_path)
        result_10 = layer_reload(inps0)
        result_11 = layer_reload(inps1)

        self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)


class TestJitSaveLoadFinetuneLoad(unittest.TestCase):
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_finetune_load(self):
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load_finetune_load/model"
        )
        IMAGE_SIZE = 224
        inps0 = paddle.randn([1, IMAGE_SIZE])
        inps1 = paddle.randn([2, IMAGE_SIZE])
        # Use new namespace
        with unique_name.guard():
            layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
        layer_save(inps0)
        # save
        paddle.jit.save(layer_save, model_path)
        # load
        with unique_name.guard():
            layer_load = LayerLoadFinetune(IMAGE_SIZE, IMAGE_SIZE, model_path)
        # train
        train(layer_load, input_size=IMAGE_SIZE)
        result_00 = layer_load(inps0)
        result_01 = layer_load(inps1)
        # save
        paddle.jit.save(layer_load, model_path)
        # load
        layer_finetune = paddle.jit.load(model_path)
        result_10 = layer_finetune(inps0)
        result_11 = layer_finetune(inps1)

        self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)


# NOTE(weixin): When there are multiple test functions in an
# `unittest.TestCase`, the functions can affect each other,
# and there is a risk of random failure.
# So they are split into three TestCases: TestJitSaveLoadFunctionCase1,
# TestJitSaveLoadFunctionCase2, TestJitSaveLoadFunctionCase3.
class TestJitSaveLoadFunctionCase1(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_static_function(self):
        @paddle.jit.to_static
        def fun(inputs):
            return paddle.tanh(inputs)

        path = os.path.join(
            self.temp_dir.name, 'test_jit_save_load_function_1/func'
        )
        inps = paddle.rand([3, 6])
        origin = fun(inps)

        paddle.jit.save(fun, path)
        load_func = paddle.jit.load(path)

        load_result = load_func(inps)
        self.assertTrue((load_result - origin).abs().max() < 1e-10)
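
# NOTE: paddle.jit.save also accepts a plain function, not only a Layer;
# the three cases differ in where the InputSpec comes from: nowhere
# (Case1), the to_static decorator (Case2), or the save call itself (Case3).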


class TestJitSaveLoadFunctionCase2(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_function_input_spec(self):
        @paddle.jit.to_static(
            input_spec=[
                InputSpec(shape=[None, 6], dtype='float32', name='x'),
            ]
        )
        def fun(inputs):
            return paddle.nn.functional.relu(inputs)

        path = os.path.join(
            self.temp_dir.name, 'test_jit_save_load_function_2/func'
        )
        inps = paddle.rand([3, 6])
        origin = fun(inps)

        paddle.jit.save(fun, path)
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        self.assertTrue((load_result - origin).abs().max() < 1e-10)


class TestJitSaveLoadFunctionCase3(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_function_function(self):
        def fun(inputs):
            return paddle.tanh(inputs)

        path = os.path.join(
            self.temp_dir.name, 'test_jit_save_load_function_3/func'
        )
        inps = paddle.rand([3, 6])
        origin = fun(inps)

        paddle.jit.save(
            fun,
            path,
            input_spec=[
                InputSpec(shape=[None, 6], dtype='float32', name='x'),
            ],
        )
        load_func = paddle.jit.load(path)

        load_result = load_func(inps)
        self.assertTrue((load_result - origin).abs().max() < 1e-10)


class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_function(self):
        class LinearNet(paddle.nn.Layer):
            def __init__(self):
                super(LinearNet, self).__init__()
                self._linear = paddle.nn.Linear(5, 6)

            def forward(self, x):
                return paddle.tanh(x)

            def anothor_forward(self, x):
                return self._linear(x)

        layer = LinearNet()

        inps = paddle.rand([3, 5])
        origin = layer.anothor_forward(inps)

        func = paddle.jit.to_static(
            layer.anothor_forward, [paddle.static.InputSpec(shape=[-1, 5])]
        )
        path = os.path.join(
            self.temp_dir.name,
            'test_jit_save_load_function_with_params_case1/func',
        )
        paddle.jit.save(func, path)
        load_func = paddle.jit.load(path)

        load_result = load_func(inps)
        np.testing.assert_array_equal(load_result.numpy(), origin.numpy())


class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_function(self):
        class LinearNet(paddle.nn.Layer):
            def __init__(self):
                super(LinearNet, self).__init__()
                self._linear = paddle.nn.Linear(5, 6)

            def forward(self, x):
                return paddle.tanh(x)

            @paddle.jit.to_static(input_spec=[InputSpec(shape=[-1, 5])])
            def another_forward(self, x):
                return self._linear(x)

        layer = LinearNet()

        inps = paddle.rand([3, 5])

        path = os.path.join(
            self.temp_dir.name,
            'test_jit_save_load_function_with_params_case2/func',
        )
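        # a method already decorated with @to_static(input_spec=...) can be
        # saved before it is ever called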
        paddle.jit.save(layer.another_forward, path)
        origin_result = layer.another_forward(inps)
        load_func = paddle.jit.load(path)

        load_result = load_func(inps)

        np.testing.assert_array_equal(
            origin_result.numpy(), load_result.numpy()
        )


class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_save_load_function(self):
        class LinearNet(paddle.nn.Layer):
            def __init__(self):
                super(LinearNet, self).__init__()
                self._linear = paddle.nn.Linear(5, 6)

            def forward(self, x):
                return paddle.tanh(x)

            @paddle.jit.to_static
            def another_forward(self, x):
                return self._linear(x)

        layer = LinearNet()

        inps = paddle.rand([3, 5])
        origin = layer.another_forward(inps)

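        # no input_spec anywhere, so the concrete program traced by the call
        # above is what gets saved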
        path = os.path.join(
            self.temp_dir.name,
            'test_jit_save_load_function_with_params_case3/func',
        )
        paddle.jit.save(layer.another_forward, path)
        load_func = paddle.jit.load(path)

        load_result = load_func(inps)
        np.testing.assert_array_equal(load_result.numpy(), origin.numpy())


class TestJitSaveLoadDataParallel(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def verify_inference_correctness(self, layer, path):
        layer.eval()
        loaded_layer = paddle.jit.load(path)
        loaded_layer.eval()
        # inference & compare
        x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
        pred = layer(x).numpy()
        loaded_pred = loaded_layer(x).numpy()
        np.testing.assert_array_equal(
            pred,
            loaded_pred,
            err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format(
                pred, loaded_pred
            ),
        )

    def test_jit_save_data_parallel_with_inputspec(self):
        layer = LinearNetNotDeclarative(784, 1)
        layer = paddle.DataParallel(layer)
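        # saving through a DataParallel wrapper should behave the same as
        # saving the wrapped layer itself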
        path = os.path.join(
            self.temp_dir.name, "jit_save_data_parallel_with_inputspec/model"
        )
        paddle.jit.save(
            layer=layer, path=path, input_spec=[InputSpec(shape=[None, 784])]
        )

        self.verify_inference_correctness(layer, path)

    def test_jit_save_data_parallel_with_to_static(self):
        layer = LinearNetWithInputSpec(784, 1)
        layer = paddle.DataParallel(layer)

        path = os.path.join(
            self.temp_dir.name, "jit_save_data_parallel_with_to_static/model"
        )
        paddle.jit.save(layer, path)

        self.verify_inference_correctness(layer, path)


class InputSpecLayer(paddle.nn.Layer):
    '''
    A layer with InputSpec to test InputSpec compatibility
    '''

    @paddle.jit.to_static(
        input_spec=[
            InputSpec(shape=[None, 8], dtype='float32', name='x'),
            InputSpec(shape=[None, 1], dtype='float64', name='y'),
        ]
    )
    def forward(self, x, y):
        return x, y


class TestInputSpecCompatibility(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def _assert_input_spec_layer_return(self, expect_layer, test_layer):
        input_x = paddle.uniform([8, 8], dtype='float32')
        input_y = paddle.uniform([8, 1], dtype='float64')
        expected_result = expect_layer(input_x, input_y)
        test_result = test_layer(input_x, input_y)
        np.testing.assert_allclose(
            expected_result[0].numpy(), test_result[0].numpy()
        )
        np.testing.assert_allclose(
            expected_result[1].numpy(), test_result[1].numpy()
        )

    def test_jit_save_compatible_input_spec(self):
        layer = InputSpecLayer()
        save_dir = os.path.join(
            self.temp_dir.name, "jit_save_compatible_input_spec"
        )
        path = save_dir + "/model"

        paddle.jit.save(layer=layer, path=path)
        no_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, no_input_spec_layer)
        shutil.rmtree(save_dir)

        paddle.jit.save(
            layer=layer,
            path=path,
            input_spec=[
                InputSpec(shape=[None, 8], dtype='float32', name='x'),
                InputSpec(shape=[None, 1], dtype='float64', name='y'),
            ],
        )
        same_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, same_input_spec_layer)
        shutil.rmtree(save_dir)

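        # a more concrete spec (fixed batch size, -1 in place of None) still
        # unifies with the declared one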
        paddle.jit.save(
            layer=layer,
            path=path,
            input_spec=[
                InputSpec(shape=[8, 8], dtype='float32'),
                InputSpec(shape=[8, -1], dtype='float64'),
            ],
        )
        compatible_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, compatible_input_spec_layer)
        shutil.rmtree(save_dir)

    def test_jit_save_incompatible_input_spec(self):
        layer = InputSpecLayer()
        save_dir = os.path.join(
            self.temp_dir.name, "jit_save_compatible_input_spec"
        )
        path = save_dir + "/model"

        with self.assertRaises(ValueError):
            # dtype mismatch
            paddle.jit.save(
                layer=layer,
                path=path,
                input_spec=[
                    InputSpec(shape=[None, 8], dtype='float64'),
                    InputSpec(shape=[None, 1], dtype='float64'),
                ],
            )

        with self.assertRaises(ValueError):
            # shape len mismatch
            paddle.jit.save(
                layer=layer,
                path=path,
                input_spec=[
                    InputSpec(shape=[None, 8, 1], dtype='float32'),
                    InputSpec(shape=[None, 1], dtype='float64'),
                ],
            )

        with self.assertRaises(ValueError):
            # shape mismatch
            paddle.jit.save(
                layer=layer,
                path=path,
                input_spec=[
                    InputSpec(shape=[None, 8], dtype='float32'),
                    InputSpec(shape=[None, 2], dtype='float64'),
                ],
            )
        if os.path.exists(save_dir):
            shutil.rmtree(save_dir)


class NotJitForward(paddle.nn.Layer):
    def __init__(self):
        super(NotJitForward, self).__init__()

    def forward(self, x, y):
        return x + y


class TestNotJitForward(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_jit_not_save_forward(self):
        layer = NotJitForward()

        save_dir = os.path.join(self.temp_dir.name, "jit_not_save_forward")
        path = save_dir + "/model"

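        # with skip_forward=True and no other decorated method, nothing is
        # exported, so no model/param files appear and loading must fail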
        paddle.jit.save(layer=layer, path=path, skip_forward=True)

        self.assertTrue(not os.path.exists(path + ".pdmodel"))
        self.assertTrue(not os.path.exists(path + ".pdparam"))

        with self.assertRaises(ValueError):
            paddle.jit.load(path=path)

        shutil.rmtree(save_dir)


if __name__ == '__main__':
    with fluid.framework._test_eager_guard():
        unittest.main()