# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import os
import numpy as np
import shutil
import tempfile

import paddle
from paddle import fluid
from paddle import to_tensor
from paddle.nn import Conv2D, Linear, ReLU, Sequential

from paddle import Model
from paddle.static import InputSpec
from paddle.nn.layer.loss import CrossEntropyLoss
from paddle.metric import Accuracy
from paddle.vision.datasets import MNIST
from paddle.vision.models import LeNet
import paddle.vision.models as models
import paddle.fluid.dygraph.jit as jit
from paddle.io import DistributedBatchSampler, Dataset
from paddle.hapi.model import prepare_distributed_context
from paddle.fluid.dygraph.dygraph_to_static.program_translator import (
    ProgramTranslator,
)


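# LeNet built from paddle.nn layers plus the legacy fluid.dygraph.Pool2D.
# Trained in pure dygraph mode to produce the reference weights and accuracy
# that the Model API tests below compare against.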
class LeNetDygraph(paddle.nn.Layer):
    def __init__(self, num_classes=10):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(
            Conv2D(1, 6, 3, stride=1, padding=1),
            ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
            Conv2D(6, 16, 5, stride=1, padding=0),
            ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
        )

        if num_classes > 0:
            self.fc = Sequential(
                Linear(400, 120), Linear(120, 84), Linear(84, 10)
            )

    def forward(self, inputs):
        x = self.features(inputs)

        if self.num_classes > 0:
            x = fluid.layers.flatten(x, 1)
            x = self.fc(x)
        return x


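# Nested layers whose forward returns an extra non-tensor value, used by
# test_summary_non_tensor to check that paddle.summary handles such outputs.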
class ModelInner(paddle.nn.Layer):
    def __init__(self):
        super(ModelInner, self).__init__()
        self.fc = paddle.nn.Linear(3, 4)

    def forward(self, x):
        y = self.fc(x)
        return y, 0


class ModelOuter(paddle.nn.Layer):
    def __init__(self):
        super(ModelOuter, self).__init__()
        self.module1 = ModelInner()
        self.module2 = paddle.nn.Linear(4, 5)

    def forward(self, x):
        y, dummy = self.module1(x)
        y = self.module2(y)
        return y, 3


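# LeNet variants that take a list / dict of inputs, used to exercise
# paddle.summary with structured inputs.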
class LeNetListInput(paddle.nn.Layer):
    def __init__(self, num_classes=10):
        super(LeNetListInput, self).__init__()
        self.num_classes = num_classes
        self.conv = Conv2D(1, 6, 3, stride=1, padding=1)
        for param in self.conv.parameters():
            param.trainable = False
        self.features = Sequential(
            self.conv,
            ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
            Conv2D(6, 16, 5, stride=1, padding=0),
            ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
        )

        if num_classes > 0:
            self.fc = Sequential(
                Linear(400, 120), Linear(120, 84), Linear(84, 10)
            )

    def forward(self, inputs):
        x = inputs[0]
        x = self.features(x)

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.fc(x + inputs[1])
        return x


class LeNetDictInput(LeNetDygraph):
    def forward(self, inputs):
        x = self.features(inputs['x1'])

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.fc(x + inputs['x2'])
        return x


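# MNIST wrapper that can truncate the sample count and, with
# return_label=False, yield images only (for the predict-only dataset).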
class MnistDataset(MNIST):
    def __init__(self, mode, return_label=True, sample_num=None):
        super(MnistDataset, self).__init__(mode=mode)
        self.return_label = return_label
        if sample_num:
            self.images = self.images[:sample_num]
            self.labels = self.labels[:sample_num]

    def __getitem__(self, idx):
        img, label = self.images[idx], self.labels[idx]
        img = np.reshape(img, [1, 28, 28])
        if self.return_label:
            return img, np.array(label).astype('int64')
        return (img,)

    def __len__(self):
        return len(self.images)


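# Top-1 accuracy of raw predictions against integer labels.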
def compute_acc(pred, label):
    pred = np.argmax(pred, -1)
    label = np.array(label)
    correct = pred[:, np.newaxis] == label
    return np.sum(correct) / correct.shape[0]


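# Reference dygraph training loop: Adam plus summed cross-entropy, matching
# what Model.fit is configured with in the tests below.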
def dynamic_train(model, dataloader):
    optim = fluid.optimizer.Adam(
        learning_rate=0.001, parameter_list=model.parameters()
    )
    model.train()
    for inputs, labels in dataloader:
        outputs = model(inputs)
        loss = CrossEntropyLoss(reduction="sum")(outputs, labels)
        avg_loss = fluid.layers.reduce_sum(loss)
        avg_loss.backward()
        optim.minimize(avg_loss)
        model.clear_gradients()


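# Reference dygraph evaluation: fraction of correct top-1 predictions.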
def dynamic_evaluate(model, dataloader):
    with fluid.dygraph.no_grad():
        model.eval()
        cnt = 0
        for inputs, labels in dataloader:
            outputs = model(inputs)

            cnt += (
                (
                    np.argmax(outputs.numpy(), -1)[:, np.newaxis]
                    == labels.numpy()
                )
                .astype('int')
                .sum()
            )

    return cnt / len(dataloader.dataset)


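# End-to-end fit/evaluate/predict tests for the high-level Model API, run in
# both dygraph and static graph mode against the dygraph reference accuracy.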
@unittest.skipIf(
    not fluid.is_compiled_with_cuda(), 'CPU testing is not supported'
)
class TestModel(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        if not fluid.is_compiled_with_cuda():
            cls().skipTest('module not tested when compiled with ONLY_CPU')
        cls.device = paddle.set_device('gpu')
        fluid.enable_dygraph(cls.device)

        sp_num = 1280
        cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
        cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
        cls.test_dataset = MnistDataset(
            mode='test', return_label=False, sample_num=sp_num
        )

        cls.train_loader = fluid.io.DataLoader(
            cls.train_dataset, places=cls.device, batch_size=64
        )
        cls.val_loader = fluid.io.DataLoader(
            cls.val_dataset, places=cls.device, batch_size=64
        )
        cls.test_loader = fluid.io.DataLoader(
            cls.test_dataset, places=cls.device, batch_size=64
        )

        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        dy_lenet = LeNetDygraph()
        cls.init_param = dy_lenet.state_dict()
        dynamic_train(dy_lenet, cls.train_loader)

        cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)

        cls.inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
        cls.labels = [InputSpec([None, 1], 'int64', 'label')]

        cls.save_dir = os.path.join(tempfile.mkdtemp(), '.cache_test_model')
        if not os.path.exists(cls.save_dir):
            os.makedirs(cls.save_dir)
        cls.weight_path = os.path.join(cls.save_dir, 'lenet')
        fluid.dygraph.save_dygraph(dy_lenet.state_dict(), cls.weight_path)

        fluid.disable_dygraph()

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.save_dir)

    def test_fit_dygraph(self):
        self.fit(True)

    def test_fit_static(self):
        self.fit(False)

    def test_fit_dynamic_with_tuple_input(self):
        self.fit_with_tuple_input(True)

    def test_fit_static_with_tuple_input(self):
        self.fit_with_tuple_input(False)

    def test_fit_dynamic_with_rank(self):
        self.fit(True, 2, 0)

    def test_fit_static_with_rank(self):
        self.fit(False, 2, 0)

    def test_fit_dynamic_with_num_iters(self):
        self.fit(True, num_iters=1)

    def test_fit_static_with_num_iters(self):
        self.fit(False, num_iters=1)

    def test_evaluate_dygraph(self):
        self.evaluate(True)

    def test_evaluate_static(self):
        self.evaluate(False)

    def test_predict_dygraph(self):
        self.predict(True)

    def test_predict_static(self):
        self.predict(False)

    def test_prepare_context(self):
        prepare_distributed_context()

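    # Shared routine: train with Model.fit, check evaluation accuracy against
    # the dygraph reference, then repeat with DistributedBatchSampler loaders.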
    def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None):
        fluid.enable_dygraph(self.device) if dynamic else None
        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        net = LeNet()
        optim_new = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=net.parameters()
        )
        model = Model(net, inputs=self.inputs, labels=self.labels)
        model.prepare(
            optim_new,
            loss=CrossEntropyLoss(reduction="sum"),
            metrics=Accuracy(),
        )
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        model.fit(
            self.train_dataset,
            batch_size=64,
            shuffle=False,
            num_iters=num_iters,
        )

        result = model.evaluate(
            self.val_dataset, batch_size=64, num_iters=num_iters
        )

        train_sampler = DistributedBatchSampler(
            self.train_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank,
        )
        val_sampler = DistributedBatchSampler(
            self.val_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank,
        )

        train_loader = fluid.io.DataLoader(
            self.train_dataset,
            batch_sampler=train_sampler,
            places=self.device,
            return_list=True,
        )

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=val_sampler,
            places=self.device,
            return_list=True,
        )

        model.fit(train_loader, val_loader)
        fluid.disable_dygraph() if dynamic else None

    def fit_with_tuple_input(self, dynamic, num_replicas=None, rank=None):
        fluid.enable_dygraph(self.device) if dynamic else None
        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        net = LeNet()
        optim_new = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=net.parameters()
        )
        model = Model(
            net, inputs=tuple(self.inputs), labels=tuple(self.labels)
        )
        model.prepare(
            optim_new,
            loss=CrossEntropyLoss(reduction="sum"),
            metrics=Accuracy(),
        )
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        train_sampler = DistributedBatchSampler(
            self.train_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank,
        )
        val_sampler = DistributedBatchSampler(
            self.val_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank,
        )

        train_loader = fluid.io.DataLoader(
            self.train_dataset,
            batch_sampler=train_sampler,
            places=self.device,
            return_list=True,
        )

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=val_sampler,
            places=self.device,
            return_list=True,
        )

        model.fit(train_loader, val_loader)
        fluid.disable_dygraph() if dynamic else None

    def evaluate(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = Model(LeNet(), self.inputs, self.labels)
        model.prepare(metrics=Accuracy())
        model.load(self.weight_path)
        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        sampler = DistributedBatchSampler(
            self.val_dataset, batch_size=64, shuffle=False
        )

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=sampler,
            places=self.device,
            return_list=True,
        )

        model.evaluate(val_loader)

        fluid.disable_dygraph() if dynamic else None

    def predict(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = Model(LeNet(), self.inputs)
        model.prepare()
        model.load(self.weight_path)
        output = model.predict(
            self.test_dataset, batch_size=64, stack_outputs=True
        )
        np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))

        acc = compute_acc(output[0], self.val_dataset.labels)
        np.testing.assert_allclose(acc, self.acc1)

        sampler = DistributedBatchSampler(
            self.test_dataset, batch_size=64, shuffle=False
        )

        test_loader = fluid.io.DataLoader(
            self.test_dataset,
            batch_sampler=sampler,
            places=self.device,
            return_list=True,
        )

        model.evaluate(test_loader)

        fluid.disable_dygraph() if dynamic else None

    def test_predict_without_inputs(self):
        fluid.enable_dygraph(self.device)
        model = Model(LeNet())
        model.prepare()
        model.load(self.weight_path)
        model._inputs = None
        output = model.predict(
            self.test_dataset, batch_size=64, stack_outputs=True
        )
        np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))
        fluid.disable_dygraph()

    def test_summary_gpu(self):
        paddle.disable_static(self.device)
        rnn = paddle.nn.LSTM(16, 32, 2)
        params_info = paddle.summary(
            rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))]
        )


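# Minimal single-Linear (20 -> 10) model used by the function-level tests.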
class MyModel(paddle.nn.Layer):
    def __init__(self):
        super(MyModel, self).__init__()
        self._fc = Linear(20, 10)

    def forward(self, x):
        y = self._fc(x)
        return y


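# 40 random samples shaped to match MyModel's input and label specs.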
class MyDataset(Dataset):
    def __getitem__(self, idx):
        return np.random.random(size=(20,)).astype(
            np.float32
        ), np.random.randint(0, 10, size=(1,)).astype(np.int64)

    def __len__(self):
        return 40


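# Unit tests for individual Model methods: train/eval/predict batches,
# save/load round trips, summary, flops and gradient accumulation.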
class TestModelFunction(unittest.TestCase):
    def set_seed(self, seed=1024):
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

    def test_train_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            optim = fluid.optimizer.SGD(
                learning_rate=0.001, parameter_list=m.parameters()
            )
            m.train()
            output = m(to_tensor(data))
            loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label))
            avg_loss = fluid.layers.reduce_sum(loss)
            avg_loss.backward()
            optim.minimize(avg_loss)
            m.clear_gradients()
            fluid.disable_dygraph()
            return avg_loss.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()

            net = MyModel()
            optim2 = fluid.optimizer.SGD(
                learning_rate=0.001, parameter_list=net.parameters()
            )

            inputs = [InputSpec([None, dim], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            model = Model(net, inputs, labels)
            model.prepare(optim2, loss=CrossEntropyLoss(reduction="sum"))
            (loss,) = model.train_batch([data], [label])
            np.testing.assert_allclose(loss.flatten(), ref.flatten())
            fluid.disable_dygraph() if dynamic else None

    def test_test_batch(self):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_tensor(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            net = MyModel()
            inputs = [InputSpec([None, dim], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            (out,) = model.predict_batch([data])

            np.testing.assert_allclose(out, ref, rtol=1e-6)
            fluid.disable_dygraph() if dynamic else None

    def test_save_load(self):
        path = os.path.join(tempfile.mkdtemp(), '.cache_test_save_load')
        if not os.path.exists(path):
            os.makedirs(path)
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            net = MyModel()
            inputs = [InputSpec([None, 20], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            optim = fluid.optimizer.SGD(
                learning_rate=0.001, parameter_list=net.parameters()
            )
            model = Model(net, inputs, labels)
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum")
            )
            model.save(path)
            model.load(path)
            fluid.disable_dygraph() if dynamic else None
        shutil.rmtree(path)

    def test_dynamic_load(self):
        mnist_data = MnistDataset(mode='train')

        path = os.path.join(tempfile.mkdtemp(), '.cache_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)

        for new_optimizer in [True, False]:
            paddle.disable_static()
            net = LeNet()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            if new_optimizer:
                optim = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=net.parameters()
                )
            else:
                optim = fluid.optimizer.Adam(
                    learning_rate=0.001, parameter_list=net.parameters()
                )
            model = Model(net, inputs, labels)
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum")
            )
            model.fit(mnist_data, batch_size=64, verbose=0)
            model.save(path)
            model.load(path)
            paddle.enable_static()
        shutil.rmtree(path)

    def test_dynamic_save_static_load(self):
        path = os.path.join(
            tempfile.mkdtemp(), '.cache_dynamic_save_static_load'
        )
        if not os.path.exists(path):
            os.makedirs(path)
        # dynamic saving
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)
        model = Model(MyModel())
        optim = fluid.optimizer.SGD(
            learning_rate=0.001, parameter_list=model.parameters()
        )
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path)
        fluid.disable_dygraph()

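        # static loading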
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        model = Model(MyModel(), inputs, labels)
        optim = fluid.optimizer.SGD(
            learning_rate=0.001, parameter_list=model.parameters()
        )
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path)
        shutil.rmtree(path)

    def test_static_save_dynamic_load(self):
        path = os.path.join(
            tempfile.mkdtemp(), '.cache_test_static_save_dynamic_load'
        )
        if not os.path.exists(path):
            os.makedirs(path)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(
            learning_rate=0.001, parameter_list=net.parameters()
        )
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path)

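        # dynamic loading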
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(
            learning_rate=0.001, parameter_list=net.parameters()
        )
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path)
        shutil.rmtree(path)
        fluid.disable_dygraph()

    def test_parameters(self):
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            net = MyModel()
            inputs = [InputSpec([None, 20], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            params = model.parameters()
            self.assertTrue(params[0].shape[0] == 20)
            self.assertTrue(params[0].shape[1] == 10)
            fluid.disable_dygraph() if dynamic else None


    def test_summary(self):
        def _get_param_from_state_dict(state_dict):
            params = 0
            for k, v in state_dict.items():
                params += np.prod(v.numpy().shape)
            return params

        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            net = MyModel()
            inputs = [InputSpec([None, 20], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            params_info = model.summary()
            gt_params = _get_param_from_state_dict(net.state_dict())

            np.testing.assert_allclose(params_info['total_params'], gt_params)
            print(params_info)

            model.summary(input_size=(20))
            model.summary(input_size=[(20)])
            model.summary(input_size=(20), dtype='float32')

    def test_summary_non_tensor(self):
        paddle.summary(ModelOuter(), input_size=(-1, 3))

    def test_summary_nlp(self):
        def _get_param_from_state_dict(state_dict):
            params = 0
            for k, v in state_dict.items():
                params += np.prod(v.numpy().shape)
            return params

        nlp_net = paddle.nn.GRU(
            input_size=2, hidden_size=3, num_layers=3, direction="bidirectional"
        )
        paddle.summary(nlp_net, (1, 1, 2))

        rnn = paddle.nn.LSTM(16, 32, 2)
        params_info = paddle.summary(
            rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))]
        )
        gt_params = _get_param_from_state_dict(rnn.state_dict())
        np.testing.assert_allclose(
            params_info['total_params'], gt_params / 2.0
        )

        rnn = paddle.nn.GRU(16, 32, 2, direction='bidirectional')
        params_info = paddle.summary(rnn, (4, 23, 16))
        gt_params = _get_param_from_state_dict(rnn.state_dict())
        np.testing.assert_allclose(
            params_info['total_params'], gt_params / 2.0
        )

        rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')
        params_info = paddle.summary(rnn, (4, 23, 16))
        gt_params = _get_param_from_state_dict(rnn.state_dict())
        np.testing.assert_allclose(
            params_info['total_params'], gt_params / 2.0
        )

    def test_summary_input(self):
        paddle.enable_static()
        mymodel = MyModel()
        input_data = paddle.rand([1, 20])
        paddle.summary(mymodel, input=input_data)
        paddle.disable_static()

        rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')
        input_data = paddle.rand([4, 23, 16])
        paddle.summary(rnn, input=input_data)

        lenet_List_input = LeNetListInput()
        input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
        paddle.summary(lenet_List_input, input=input_data)

        lenet_dict_input = LeNetDictInput()
        input_data = {
            'x1': paddle.rand([1, 1, 28, 28]),
            'x2': paddle.rand([1, 400]),
        }
        paddle.summary(lenet_dict_input, input=input_data)

    def test_summary_dtype(self):
        input_shape = (3, 1)
        net = paddle.nn.Embedding(10, 3, sparse=True)
        paddle.summary(net, input_shape, dtypes='int64')

    def test_summary_error(self):
        with self.assertRaises(TypeError):
            nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
            paddle.summary(nlp_net, (1, 1, '2'))

        with self.assertRaises(ValueError):
            nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
            paddle.summary(nlp_net, (-1, -1))

        paddle.disable_static()
        nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
        paddle.summary(nlp_net, (1, 1, 2))

    def test_static_flops(self):
        if paddle.fluid.framework._in_eager_without_dygraph_check():
            return
        paddle.disable_static()
        net = models.__dict__['mobilenet_v2'](pretrained=False)
        inputs = paddle.randn([1, 3, 224, 224])
        static_program = jit._trace(net, inputs=[inputs])[1]
        paddle.flops(static_program, [1, 3, 224, 224], print_detail=True)

    def test_dynamic_flops(self):
        net = models.__dict__['mobilenet_v2'](pretrained=False)

        def customize_dropout(m, x, y):
            m.total_ops += 0

        paddle.flops(
            net,
            [1, 3, 224, 224],
            custom_ops={paddle.nn.Dropout: customize_dropout},
            print_detail=True,
        )

    def test_dynamic_flops_with_multiple_outputs(self):
        net = paddle.nn.MaxPool2D(
            kernel_size=2, stride=2, padding=0, return_mask=True
        )

        def customize_dropout(m, x, y):
            m.total_ops += 0

        paddle.flops(
            net,
            [1, 2, 32, 32],
            custom_ops={paddle.nn.Dropout: customize_dropout},
            print_detail=True,
        )

    def test_export_deploy_model(self):
        self.set_seed()
        np.random.seed(201)

        save_dir = os.path.join(
            tempfile.mkdtemp(), '.cache_test_export_deploy_model'
        )
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        for dynamic in [True, False]:
            paddle.disable_static() if dynamic else None
            prog_translator = ProgramTranslator()
            prog_translator.enable(False) if not dynamic else None
            net = LeNet()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()

            tensor_img = np.array(
                np.random.random((1, 1, 28, 28)), dtype=np.float32
            )

            model.save(save_dir, training=False)
            ori_results = model.predict_batch(tensor_img)
            fluid.disable_dygraph() if dynamic else None

            place = (
                fluid.CPUPlace()
                if not fluid.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                exe = fluid.Executor(place)
                [
                    inference_program,
                    feed_target_names,
                    fetch_targets,
                ] = paddle.static.io.load_inference_model(
                    path_prefix=save_dir, executor=exe
                )
                results = exe.run(
                    inference_program,
                    feed={feed_target_names[0]: tensor_img},
                    fetch_list=fetch_targets,
                )
                np.testing.assert_allclose(
                    results, ori_results, rtol=1e-5, atol=1e-6
                )

            paddle.enable_static()

        shutil.rmtree(save_dir)

    def test_dygraph_export_deploy_model_about_inputs(self):
        self.set_seed()
        np.random.seed(201)
        mnist_data = MnistDataset(mode='train')
        paddle.disable_static()
        # without inputs
        save_dir = os.path.join(
            tempfile.mkdtemp(), '.cache_test_dygraph_export_deploy'
        )
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        for initial in ["fit", "train_batch", "eval_batch", "predict_batch"]:
            net = LeNet()
            model = Model(net)
            optim = fluid.optimizer.Adam(
                learning_rate=0.001, parameter_list=model.parameters()
            )
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum")
            )
            if initial == "fit":
                model.fit(mnist_data, batch_size=64, verbose=0)
            else:
                img = np.array(
                    np.random.random((1, 1, 28, 28)), dtype=np.float32
                )
                label = np.array(np.random.rand(1, 1), dtype=np.int64)
                if initial == "train_batch":
                    model.train_batch([img], [label])
                elif initial == "eval_batch":
                    model.eval_batch([img], [label])
                else:
                    model.predict_batch([img])

            model.save(save_dir, training=False)
        shutil.rmtree(save_dir)
        # with inputs, and the type of inputs is InputSpec
        save_dir = os.path.join(
            tempfile.mkdtemp(), '.cache_test_dygraph_export_deploy_2'
        )
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        net = LeNet()
        inputs = InputSpec([None, 1, 28, 28], 'float32', 'x')
        model = Model(net, inputs)
        optim = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=model.parameters()
        )
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(save_dir, training=False)
        shutil.rmtree(save_dir)

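    # train_batch(update=False) should accumulate gradients across calls and
    # only apply and clear them when update=True, with and without AMP 'O1'.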
    def test_accumulate(self):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
        net = MyModel()
        optim = fluid.optimizer.SGD(
            learning_rate=0.001, parameter_list=net.parameters()
        )
        inputs = [InputSpec([None, dim], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]

        for amp_cfg in [None, 'O1']:
            model = Model(net, inputs, labels)
            model.prepare(
                optim,
                loss=CrossEntropyLoss(reduction="sum"),
                amp_configs=amp_cfg,
            )
            losses, grads = [], []
            for stat in [False, False, True]:
                (loss,) = model.train_batch([data], [label], update=stat)
                losses.append(loss)
                grads.append([p.grad.numpy() for p in net.parameters()])

            for grad1, grad2, grad3 in zip(*grads):
                np.testing.assert_almost_equal(grad1 * 2, grad2, decimal=4)
                np.testing.assert_almost_equal(
                    grad3, np.zeros_like(grad3), decimal=4
                )

            np.testing.assert_almost_equal(losses[0], losses[1], decimal=4)
            np.testing.assert_almost_equal(losses[0], losses[2], decimal=4)


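# Verifies that warmup + piecewise-decay schedulers reach the expected final
# learning rate when stepped per batch (default) or per epoch via callback.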
class TestModelWithLRScheduler(unittest.TestCase):
    def test_fit_by_step(self):
        base_lr = 1e-3
        boundaries = [5, 8]

        def make_optimizer(parameters=None):
            momentum = 0.9
            weight_decay = 5e-4
            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
            learning_rate = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=boundaries, values=values
            )
            learning_rate = paddle.optimizer.lr.LinearWarmup(
                learning_rate=learning_rate,
                warmup_steps=4,
                start_lr=base_lr / 5.0,
                end_lr=base_lr,
                verbose=True,
            )
            optimizer = paddle.optimizer.Momentum(
                learning_rate=learning_rate,
                weight_decay=weight_decay,
                momentum=momentum,
                parameters=parameters,
            )
            return optimizer

        # dynamic test
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        np.testing.assert_allclose(
            model._optimizer._learning_rate.last_lr,
            base_lr * (0.1 ** len(boundaries)),
        )
        # static test
        paddle.enable_static()

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        np.testing.assert_allclose(
            model._optimizer._learning_rate.last_lr,
            base_lr * (0.1 ** len(boundaries)),
        )

    def test_fit_by_epoch(self):
        base_lr = 1e-3
        boundaries = [5, 8]
        epochs = 10
        warmup_epochs = 4

        def make_optimizer(parameters=None):
            momentum = 0.9
            weight_decay = 5e-4
            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
            learning_rate = paddle.optimizer.lr.PiecewiseDecay(
1036 1037
                boundaries=boundaries, values=values
            )
1038 1039 1040
            learning_rate = paddle.optimizer.lr.LinearWarmup(
                learning_rate=learning_rate,
                warmup_steps=warmup_epochs,
                start_lr=base_lr / 5.0,
                end_lr=base_lr,
                verbose=True,
            )
            optimizer = paddle.optimizer.Momentum(
                learning_rate=learning_rate,
                weight_decay=weight_decay,
                momentum=momentum,
                parameters=parameters,
            )
            return optimizer

        # dynamic test
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()

        lr_scheduler_callback = paddle.callbacks.LRScheduler(
            by_step=False, by_epoch=True
        )

        model.fit(
            dataset,
            dataset,
            batch_size=4,
            epochs=epochs,
            num_workers=0,
            callbacks=lr_scheduler_callback,
        )

        cnt = 0
        for b in boundaries:
            if b + warmup_epochs <= epochs:
                cnt += 1

        np.testing.assert_allclose(
            model._optimizer._learning_rate.last_lr, base_lr * (0.1**cnt)
        )
        # static test
        paddle.enable_static()

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()

        lr_scheduler_callback = paddle.callbacks.LRScheduler(
            by_step=False, by_epoch=True
        )

        model.fit(
            dataset,
            dataset,
            batch_size=4,
            epochs=epochs,
            num_workers=0,
            callbacks=lr_scheduler_callback,
        )

        cnt = 0
        for b in boundaries:
            if b + warmup_epochs <= epochs:
                cnt += 1

        np.testing.assert_allclose(
            model._optimizer._learning_rate.last_lr, base_lr * (0.1**cnt)
        )


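# Error paths: inputs without names, static mode without InputSpec, and
# invalid save() calls should raise.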
class TestRaiseError(unittest.TestCase):
    def test_input_without_name(self):
        net = MyModel()
        inputs = [InputSpec([None, 10], 'float32')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        with self.assertRaises(ValueError):
            model = Model(net, inputs, labels)

    def test_static_without_inputs(self):
        paddle.enable_static()
        net = MyModel()
        with self.assertRaises(TypeError):
            model = Model(net)

    def test_save_infer_model_without_inputs_and_run_in_dygraph(self):
        paddle.disable_static()
        net = MyModel()
        save_dir = os.path.join(tempfile.mkdtemp(), '.cache_test_save_infer')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        with self.assertRaises(RuntimeError):
            model = Model(net)
            model.save(save_dir, training=False)
        paddle.enable_static()
        shutil.rmtree(save_dir)

    def test_save_infer_model_without_file_prefix(self):
        paddle.enable_static()
        net = LeNet()
        inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
        model = Model(net, inputs)
        model.prepare()
        path = ""
        tensor_img = np.array(
            np.random.random((1, 1, 28, 28)), dtype=np.float32
        )
        with self.assertRaises(ValueError):
            model.save(path, training=False)


if __name__ == '__main__':
    unittest.main()