#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
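"""Evaluate a metric-learning model: extract embeddings on the test set and report recall@1."""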
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import math
import time
import argparse
import functools
import numpy as np
import paddle
import paddle.fluid as fluid
import models
import reader
from utility import add_arguments, print_arguments, check_cuda
from utility import fmt_time, recall_topk

# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('model',            str,  "ResNet50",   "Name of the network to use.")
add_arg('embedding_size',   int,  0,            "Embedding size.")
add_arg('batch_size',       int,  10,           "Minibatch size.")
add_arg('image_shape',      str,  "3,224,224",  "Input image size (CHW).")
add_arg('use_gpu',          bool, True,         "Whether to use GPU or not.")
add_arg('pretrained_model', str,  None,         "Directory of the pretrained model to load.")
# yapf: enable

model_list = [m for m in dir(models) if "__" not in m]


def eval(args):
    # parameters from arguments
    model_name = args.model
    pretrained_model = args.pretrained_model
    image_shape = [int(m) for m in args.image_shape.split(",")]

    assert model_name in model_list, "{} is not in model list: {}".format(
        model_name, model_list)

    image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

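    # iterable DataLoader that feeds (image, label) batches into the program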
    test_loader = fluid.io.DataLoader.from_generator(
                feed_list=[image, label],
                capacity=64,
                use_double_buffer=True,
                iterable=True)

    # model definition
    model = models.__dict__[model_name]()
    out = model.net(input=image, embedding_size=args.embedding_size)

    test_program = fluid.default_main_program().clone(for_test=True)

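    # select the device, create the executor and run parameter initialization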
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if pretrained_model:

        def if_exist(var):
            return os.path.exists(os.path.join(pretrained_model, var.name))

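        # load the pretrained parameters into the cloned test program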
        fluid.load(program=test_program, model_path=pretrained_model, executor=exe)

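    # bind the test sample generator to the DataLoader; batches are built on `place`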
    test_loader.set_sample_generator(
        reader.test(args),
        batch_size=args.batch_size,
        drop_last=False,
        places=place)

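    # only the embedding output is fetched during evaluation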
    fetch_list = [out.name]

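    # accumulate embeddings (f) and labels (l) over the whole test set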
    f, l = [], []
    for batch_id, data in enumerate(test_loader()):
        t1 = time.time()
        [feas] = exe.run(test_program, fetch_list=fetch_list, feed=data)
        label = np.asarray(data[0]['label'])
        label = np.squeeze(label)
 
        f.append(feas)
        l.append(label)

        t2 = time.time()
        period = t2 - t1
        if batch_id % 20 == 0:
            print("[%s] testbatch %d, time %2.2f sec" % \
                    (fmt_time(), batch_id, period))

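    # stack all batch results and compute recall@1 over the full test set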
    f = np.vstack(f)
    l = np.hstack(l)
    recall = recall_topk(f, l, k=1)
    print("[%s] End test %d, test_recall %.5f" % (fmt_time(), len(f), recall))
    sys.stdout.flush()


def main():
    args = parser.parse_args()
    print_arguments(args)
    check_cuda(args.use_gpu)
    eval(args)


if __name__ == '__main__':
    main()