infer.py
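
# Example usage (the checkpoint path below is illustrative):
#   python infer.py --model=SE_ResNeXt50_32x4d --pretrained_model=path/to/pretrained_weights --use_gpu=True
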
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import time
import sys
import paddle
import paddle.fluid as fluid
import models
import reader
import argparse
import functools
from models.learning_rate import cosine_decay
from utility import add_arguments, print_arguments
import math

parser = argparse.ArgumentParser(description=__doc__)
# yapf: disable
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('use_gpu',          bool, True,                 "Whether to use GPU or not.")
add_arg('class_dim',        int,  1000,                 "Class number.")
add_arg('image_shape',      str,  "3,224,224",          "Input image size.")
add_arg('with_mem_opt',     bool, True,                 "Whether to use memory optimization or not.")
add_arg('pretrained_model', str,  None,                 "Path to the pretrained model directory.")
add_arg('model',            str,  "SE_ResNeXt50_32x4d", "Set the network to use.")
# yapf: enable

model_list = [m for m in dir(models) if "__" not in m]


def infer(args):
    # parameters from arguments
    class_dim = args.class_dim
    model_name = args.model
    pretrained_model = args.pretrained_model
    with_memory_optimization = args.with_mem_opt
    image_shape = [int(m) for m in args.image_shape.split(",")]

    assert model_name in model_list, "{} is not in lists: {}".format(args.model,
                                                                     model_list)

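    # input placeholder for one image in CHW layout (default 3x224x224)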
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')

    # model definition
    model = models.__dict__[model_name]()

    # GoogleNet's net() also returns two auxiliary outputs, which are ignored here
    if model_name == "GoogleNet":
        out, _, _ = model.net(input=image, class_dim=class_dim)
    else:
        out = model.net(input=image, class_dim=class_dim)

    test_program = fluid.default_main_program().clone(for_test=True)

    if with_memory_optimization:
        fluid.memory_optimize(fluid.default_main_program())

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
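    # create and initialize the parameters declared in the startup program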
    exe.run(fluid.default_startup_program())

    if pretrained_model:

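        # only load variables whose parameter files exist in the pretrained_model directory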
        def if_exist(var):
            return os.path.exists(os.path.join(pretrained_model, var.name))

        fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)

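    # build a batch reader over the test set, feeding one image per batch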
    test_batch_size = 1
    test_reader = paddle.batch(reader.test(), batch_size=test_batch_size)
    feeder = fluid.DataFeeder(place=place, feed_list=[image])

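    # fetch only the network output (per-class scores)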
    fetch_list = [out.name]

    TOPK = 1
    for batch_id, data in enumerate(test_reader()):
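        # run inference on one batch and report the TOPK highest-scoring classes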
        result = exe.run(test_program,
                         fetch_list=fetch_list,
                         feed=feeder.feed(data))
        result = result[0][0]
        pred_label = np.argsort(result)[::-1][:TOPK]
        print("Test-{0}-score: {1}, class {2}"
              .format(batch_id, result[pred_label], pred_label))
        sys.stdout.flush()


def main():
    args = parser.parse_args()
    print_arguments(args)
    infer(args)


if __name__ == '__main__':
    main()