提交 547b3918 编写于 作者: D Dang Qingqing

Add eval.py and fix bug.

上级 2b8a3dfb
...@@ -6,3 +6,4 @@ pretrained/ssd_mobilenet_v1_coco ...@@ -6,3 +6,4 @@ pretrained/ssd_mobilenet_v1_coco
pretrained/mobilenet_v1_imagenet.tar.gz pretrained/mobilenet_v1_imagenet.tar.gz
pretrained/mobilenet_v1_imagenet pretrained/mobilenet_v1_imagenet
log* log*
*.log
import os
import time
import numpy as np
import argparse
import functools
import paddle
import paddle.fluid as fluid
import reader
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
# Command-line interface for the evaluation script. `add_arg` is a thin
# partial over the project-wide `add_arguments` helper so every flag is
# declared as (name, type, default, help).
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('dataset',       str,   'pascalvoc', "coco or pascalvoc.")
add_arg('batch_size',    int,   32,          "Minibatch size.")
add_arg('use_gpu',       bool,  True,        "Whether use GPU.")
add_arg('data_dir',      str,   '',          "The root directory of the dataset.")
add_arg('test_list',     str,   '',          "The test image list file.")
add_arg('label_file',    str,   '',          "Label file.")
add_arg('model_dir',     str,   '',          "The path of the model to evaluate.")
add_arg('ap_version',    str,   '11point',   "11point or integral")
add_arg('resize_h',      int,   300,         "resize image size")
add_arg('resize_w',      int,   300,         "resize image size")
add_arg('mean_value_B',  float, 127.5,       "mean value which will be subtracted") #123.68
add_arg('mean_value_G',  float, 127.5,       "mean value which will be subtracted") #116.78
add_arg('mean_value_R',  float, 127.5,       "mean value which will be subtracted") #103.94
# yapf: enable
def eval(args,
         data_args,
         test_list,
         batch_size,
         model_dir=None):
    """Evaluate an SSD MobileNet detector and print its accumulated mAP.

    Builds the inference graph, optionally restores weights from
    ``model_dir``, feeds every batch of the test list through the network,
    and prints the mAP accumulated over the whole pass.

    Args:
        args: parsed command-line flags; only ``use_gpu`` and
            ``ap_version`` are read here.
        data_args: ``reader.Settings`` describing the dataset and
            preprocessing (``dataset``, ``resize_h``, ``resize_w``).
        test_list: path of the test image list, forwarded to ``reader.test``.
        batch_size: minibatch size for the evaluation pass.
        model_dir: directory with saved variables; when None no weights are
            loaded and the network keeps its initial values.
    """
    # CHW input shape derived from the configured resize.
    image_shape = [3, data_args.resize_h, data_args.resize_w]
    # Class counts include the background class.
    # NOTE(review): any other dataset string leaves num_classes unbound and
    # raises NameError at the mobile_net call below.
    if data_args.dataset == 'coco':
        num_classes = 81
    elif data_args.dataset == 'pascalvoc':
        num_classes = 21

    # Graph inputs. lod_level=1 marks variable-length per-image ground
    # truth (a different number of boxes/labels per sample).
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    gt_box = fluid.layers.data(
        name='gt_box', shape=[4], dtype='float32', lod_level=1)
    gt_label = fluid.layers.data(
        name='gt_label', shape=[1], dtype='int32', lod_level=1)
    difficult = fluid.layers.data(
        name='gt_difficult', shape=[1], dtype='int32', lod_level=1)

    # Backbone + SSD heads; NMS post-processing yields the detections that
    # DetectionMAP consumes.
    locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
    nmsed_out = fluid.layers.detection_output(
        locs, confs, box, box_var, nms_threshold=0.45)
    # NOTE(review): the loss is built but never fetched during evaluation;
    # it only adds ops to the default program before the clone below.
    loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
                                 box_var)
    loss = fluid.layers.reduce_sum(loss)

    # Freeze the graph for inference, then attach the mAP evaluator to the
    # cloned test program (ops added under program_guard go into it).
    test_program = fluid.default_main_program().clone(for_test=True)
    with fluid.program_guard(test_program):
        map_eval = fluid.evaluator.DetectionMAP(
            nmsed_out,
            gt_label,
            gt_box,
            difficult,
            num_classes,
            overlap_threshold=0.5,
            evaluate_difficult=False,
            ap_version=args.ap_version)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    #exe.run(fluid.default_startup_program())
    # Restore only the variables that actually exist as files under
    # model_dir; missing ones are silently skipped.
    if model_dir:
        def if_exist(var):
            return os.path.exists(os.path.join(model_dir, var.name))
        fluid.io.load_vars(exe, model_dir, predicate=if_exist)
    #fluid.io.load_persistables(exe, model_dir, main_program=test_program)

    test_reader = paddle.batch(
        reader.test(data_args, test_list), batch_size=batch_size)
    feeder = fluid.DataFeeder(
        place=place, feed_list=[image, gt_box, gt_label, difficult])

    # accum_map accumulates mAP over every batch fed so far; reset before
    # the pass so the value printed after the loop covers the full set.
    _, accum_map = map_eval.get_map_var()
    map_eval.reset(exe)
    for _, data in enumerate(test_reader()):
        test_map = exe.run(test_program,
                           feed=feeder.feed(data),
                           fetch_list=[accum_map])
    print("Test model {0}, map {1}".format(model_dir, test_map[0]))
if __name__ == '__main__':
    # Parse CLI flags and echo them for reproducibility.
    args = parser.parse_args()
    print_arguments(args)

    # Bundle the dataset/preprocessing options the reader expects.
    settings = reader.Settings(
        dataset=args.dataset,
        data_dir=args.data_dir,
        label_file=args.label_file,
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])

    eval(
        args,
        data_args=settings,
        test_list=args.test_list,
        batch_size=args.batch_size,
        model_dir=args.model_dir)
...@@ -216,7 +216,7 @@ def distort_image(img, settings): ...@@ -216,7 +216,7 @@ def distort_image(img, settings):
def expand_image(img, bbox_labels, img_width, img_height, settings): def expand_image(img, bbox_labels, img_width, img_height, settings):
prob = random.uniform(0, 1) prob = random.uniform(0, 1)
if prob < settings._expand_prob: if prob < settings._expand_prob:
if _expand_max_ratio - 1 >= 0.01: if settings._expand_max_ratio - 1 >= 0.01:
expand_ratio = random.uniform(1, settings._expand_max_ratio) expand_ratio = random.uniform(1, settings._expand_max_ratio)
height = int(img_height * expand_ratio) height = int(img_height * expand_ratio)
width = int(img_width * expand_ratio) width = int(img_width * expand_ratio)
......
...@@ -25,8 +25,16 @@ import copy ...@@ -25,8 +25,16 @@ import copy
class Settings(object): class Settings(object):
def __init__(self, dataset, toy, data_dir, label_file, resize_h, resize_w, def __init__(self,
mean_value, apply_distort, apply_expand): dataset=None,
data_dir=None,
label_file=None,
resize_h=300,
resize_w=300,
mean_value=[127.5, 127.5, 127.5],
apply_distort=True,
apply_expand=True,
toy=0):
self._dataset = dataset self._dataset = dataset
self._toy = toy self._toy = toy
self._data_dir = data_dir self._data_dir = data_dir
...@@ -108,7 +116,7 @@ def _reader_creator(settings, file_list, mode, shuffle): ...@@ -108,7 +116,7 @@ def _reader_creator(settings, file_list, mode, shuffle):
category_names = [ category_names = [
item['name'] for item in coco.loadCats(category_ids) item['name'] for item in coco.loadCats(category_ids)
] ]
elif settings.dataset == 'pascalvoc': else:
flist = open(file_list) flist = open(file_list)
images = [line.strip() for line in flist] images = [line.strip() for line in flist]
...@@ -213,7 +221,7 @@ def _reader_creator(settings, file_list, mode, shuffle): ...@@ -213,7 +221,7 @@ def _reader_creator(settings, file_list, mode, shuffle):
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0)) image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
""" random crop """ """ random crop """
sampled_bbox = image_util.generate_batch_samples( sampled_bbox = image_util.generate_batch_samples(
batch_sampler, bbox_labels, img_width, img_height) batch_sampler, bbox_labels)
img = np.array(img) img = np.array(img)
if len(sampled_bbox) > 0: if len(sampled_bbox) > 0:
...@@ -302,7 +310,7 @@ def train(settings, file_list, shuffle=True): ...@@ -302,7 +310,7 @@ def train(settings, file_list, shuffle=True):
sub_dir = "train2017" sub_dir = "train2017"
train_settings.data_dir = os.path.join(settings.data_dir, sub_dir) train_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
return _reader_creator(train_settings, file_list, 'train', shuffle) return _reader_creator(train_settings, file_list, 'train', shuffle)
elif settings.dataset == 'pascalvoc': else:
return _reader_creator(settings, file_list, 'train', shuffle) return _reader_creator(settings, file_list, 'train', shuffle)
...@@ -316,7 +324,7 @@ def test(settings, file_list): ...@@ -316,7 +324,7 @@ def test(settings, file_list):
sub_dir = "val2017" sub_dir = "val2017"
test_settings.data_dir = os.path.join(settings.data_dir, sub_dir) test_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
return _reader_creator(test_settings, file_list, 'test', False) return _reader_creator(test_settings, file_list, 'test', False)
elif settings.dataset == 'pascalvoc': else:
return _reader_creator(settings, file_list, 'test', False) return _reader_creator(settings, file_list, 'test', False)
......
import paddle
import paddle.fluid as fluid
import reader
import load_model as load_model
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
import os import os
import time import time
import numpy as np import numpy as np
import argparse import argparse
import functools import functools
import paddle
import paddle.fluid as fluid
import reader
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__) parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser) add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable # yapf: disable
add_arg('learning_rate', float, 0.001, "Learning rate.") add_arg('learning_rate', float, 0.001, "Learning rate.")
add_arg('batch_size', int, 32, "Minibatch size.") add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('num_passes', int, 25, "Epoch number.") add_arg('num_passes', int, 120, "Epoch number.")
add_arg('parallel', bool, True, "Whether use parallel training.") add_arg('parallel', bool, True, "Whether use parallel training.")
add_arg('use_gpu', bool, True, "Whether use GPU.") add_arg('use_gpu', bool, True, "Whether use GPU.")
add_arg('use_nccl', bool, False, "Whether use NCCL.") add_arg('use_nccl', bool, False, "Whether use NCCL.")
add_arg('dataset', str, 'pascalvoc', "coco or pascalvoc.") add_arg('dataset', str, 'pascalvoc', "coco or pascalvoc.")
add_arg('model_save_dir', str, 'model', "The path to save model.") add_arg('model_save_dir', str, 'model', "The path to save model.")
add_arg('pretrained_model', str, 'pretrained/ssd_mobilenet_v1_coco/', "The init model path.") add_arg('pretrained_model', str, 'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
add_arg('apply_distort', bool, True, "Whether apply distort") add_arg('apply_distort', bool, True, "Whether apply distort")
add_arg('apply_expand', bool, False, "Whether apply expand") add_arg('apply_expand', bool, True, "Whether apply expand")
add_arg('ap_version', str, '11point', "11point or integral")
add_arg('resize_h', int, 300, "resize image size") add_arg('resize_h', int, 300, "resize image size")
add_arg('resize_w', int, 300, "resize image size") add_arg('resize_w', int, 300, "resize image size")
add_arg('mean_value_B', float, 127.5, "mean value which will be subtracted") #123.68 add_arg('mean_value_B', float, 127.5, "mean value which will be subtracted") #123.68
...@@ -94,7 +95,7 @@ def parallel_do(args, ...@@ -94,7 +95,7 @@ def parallel_do(args,
num_classes, num_classes,
overlap_threshold=0.5, overlap_threshold=0.5,
evaluate_difficult=False, evaluate_difficult=False,
ap_version='integral') ap_version=args.ap_version)
if data_args.dataset == 'coco': if data_args.dataset == 'coco':
# learning rate decay in 12, 19 pass, respectively # learning rate decay in 12, 19 pass, respectively
...@@ -202,17 +203,21 @@ def parallel_exe(args, ...@@ -202,17 +203,21 @@ def parallel_exe(args,
num_classes, num_classes,
overlap_threshold=0.5, overlap_threshold=0.5,
evaluate_difficult=False, evaluate_difficult=False,
ap_version='integral') ap_version=args.ap_version)
print('ParallelExecutor, ap_version = ', args.ap_version)
if data_args.dataset == 'coco': if data_args.dataset == 'coco':
# learning rate decay in 12, 19 pass, respectively # learning rate decay in 12, 19 pass, respectively
if '2014' in train_file_list: if '2014' in train_file_list:
boundaries = [82783 / batch_size * 12, 82783 / batch_size * 19] epocs = 82783 / batch_size
boundaries = [epocs * 12, epocs * 19]
elif '2017' in train_file_list: elif '2017' in train_file_list:
boundaries = [118287 / batch_size * 12, 118287 / batch_size * 19] epocs = 118287 / batch_size
boundaries = [epocs * 12, epocs * 19]
elif data_args.dataset == 'pascalvoc': elif data_args.dataset == 'pascalvoc':
boundaries = [40000, 60000] epocs = 19200 / batch_size
values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25] boundaries = [epocs * 40, epocs * 60, epocs * 80, epocs * 100]
values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25, learning_rate * 0.1, learning_rate * 0.01]
optimizer = fluid.optimizer.RMSProp( optimizer = fluid.optimizer.RMSProp(
learning_rate=fluid.layers.piecewise_decay(boundaries, values), learning_rate=fluid.layers.piecewise_decay(boundaries, values),
regularization=fluid.regularizer.L2Decay(0.00005), ) regularization=fluid.regularizer.L2Decay(0.00005), )
...@@ -287,14 +292,14 @@ if __name__ == '__main__': ...@@ -287,14 +292,14 @@ if __name__ == '__main__':
data_args = reader.Settings( data_args = reader.Settings(
dataset=args.dataset, dataset=args.dataset,
toy=args.is_toy,
data_dir=data_dir, data_dir=data_dir,
label_file=label_file, label_file=label_file,
apply_distort=args.apply_distort, apply_distort=args.apply_distort,
apply_expand=args.apply_expand, apply_expand=args.apply_expand,
resize_h=args.resize_h, resize_h=args.resize_h,
resize_w=args.resize_w, resize_w=args.resize_w,
mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R]) mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
toy=args.is_toy)
#method = parallel_do #method = parallel_do
method = parallel_exe method = parallel_exe
method(args, method(args,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册