dist_mnist.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division
from __future__ import print_function

import unittest

import numpy as np

from paddle import fluid

from hapi.model import Model, Input, set_device
from hapi.loss import CrossEntropy
from hapi.vision.models import LeNet
from hapi.metrics import Accuracy
from hapi.callbacks import ProgBarLogger
from hapi.datasets import MNIST


class MnistDataset(MNIST):
    """MNIST wrapper that reshapes each image to [1, 28, 28] (CHW) and can
    drop labels, so the same class serves training, evaluation and predict."""

    def __init__(self, mode, return_label=True):
        super(MnistDataset, self).__init__(mode=mode)
        self.return_label = return_label

    def __getitem__(self, idx):
        # Reshape the flat 784-pixel image into a single-channel 28x28 array.
        img = np.reshape(self.images[idx], [1, 28, 28])
        if self.return_label:
            return img, np.array(self.labels[idx]).astype('int64')
        return img,

    def __len__(self):
        return len(self.images)


def compute_accuracy(pred, gt):
    # `pred` holds per-class scores of shape (N, num_classes); the predicted
    # label is the argmax over the last axis. `gt` is expected to be integer
    # labels of shape (N, 1), so the comparison below broadcasts element-wise.
    pred = np.argmax(pred, -1)
    gt = np.array(gt)

    correct = pred[:, np.newaxis] == gt

    return np.sum(correct) / correct.shape[0]
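
# Example (an illustration, assuming (N, num_classes) scores and (N, 1) labels):
#   compute_accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([[1], [1]]))
#   returns 0.5 (the first prediction matches its label, the second does not).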


class TestModel(unittest.TestCase):
    def run_model(self, dynamic):
        # Not named `run` to avoid shadowing unittest.TestCase.run(), which the
        # test framework calls with a TestResult object.
        device = set_device('gpu')
        if dynamic:
            fluid.enable_dygraph(device)

        # NCHW input spec matching the [1, 28, 28] images MnistDataset yields.
        im_shape = (None, 1, 28, 28)
        batch_size = 128

        inputs = [Input(im_shape, 'float32', name='image')]
        labels = [Input([None, 1], 'int64', name='label')]

        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = MnistDataset(mode='test', return_label=False)

        model = LeNet()
        optim = fluid.optimizer.Momentum(
            learning_rate=0.001,
            momentum=.9,
            parameter_list=model.parameters())
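
        # Wire the optimizer, loss and accuracy metric into the model, then
        # train for two epochs with validation, evaluate, and run label-free
        # prediction through the hapi Model API.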
        loss = CrossEntropy()
        model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
        cbk = ProgBarLogger(50)

        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=cbk)

        eval_result = model.evaluate(val_dataset, batch_size=batch_size)

        output = model.predict(
            test_dataset, batch_size=batch_size, stack_outputs=True)

        np.testing.assert_equal(output[0].shape[0], len(test_dataset))

        # The accuracy recomputed from the raw predictions should match the
        # metric reported by evaluate() on the same split.
        acc = compute_accuracy(output[0], val_dataset.labels)

        np.testing.assert_allclose(acc, eval_result['acc'])

    def test_multiple_gpus_static(self):
        self.run_model(False)

    def test_multiple_gpus_dygraph(self):
        self.run_model(True)


if __name__ == '__main__':
    unittest.main()
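

# Usage sketch (an assumption, not part of the original test): given the
# test_multiple_gpus_* cases, this script is presumably run under Paddle's
# distributed launcher rather than invoked directly, e.g. something like
#
#   python -m paddle.distributed.launch --selected_gpus=0,1 dist_mnist.py
#
# The exact launcher module and flags depend on the installed Paddle version.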