提交 d76a168a 编写于 作者: Q qingqing01 提交者: GitHub

Add benchmark (fps) in MODEL_ZOO doc (#3263)

* Add benchmark (fps) in MODEL_ZOO doc
上级 20ca4cc3
此差异已折叠。
此差异已折叠。
......@@ -19,6 +19,7 @@ from __future__ import print_function
import logging
import numpy as np
import os
import time
import paddle.fluid as fluid
......@@ -69,6 +70,10 @@ def eval_run(exe, compile_program, pyreader, keys, values, cls):
cls[i].reset(exe)
values.append(accum_map)
images_num = 0
start_time = time.time()
has_bbox = 'bbox' in keys
try:
pyreader.start()
while True:
......@@ -83,10 +88,20 @@ def eval_run(exe, compile_program, pyreader, keys, values, cls):
if iter_id % 100 == 0:
logger.info('Test iter {}'.format(iter_id))
iter_id += 1
images_num += len(res['bbox'][1][0]) if has_bbox else 1
except (StopIteration, fluid.core.EOFException):
pyreader.reset()
logger.info('Test finish iter {}'.format(iter_id))
end_time = time.time()
fps = images_num / (end_time - start_time)
if has_bbox:
logger.info('Total number of images: {}, inference time: {} fps.'.
format(images_num, fps))
else:
logger.info('Total iteration: {}, inference time: {} batch/s.'.format(
images_num, fps))
return results
......@@ -114,9 +129,12 @@ def eval_results(results,
if output_directory:
output = os.path.join(output_directory, 'bbox.json')
box_ap_stats = bbox_eval(results, anno_file, output,
with_background,
is_bbox_normalized=is_bbox_normalized)
box_ap_stats = bbox_eval(
results,
anno_file,
output,
with_background,
is_bbox_normalized=is_bbox_normalized)
if 'mask' in results[0]:
output = 'mask.json'
......@@ -130,7 +148,8 @@ def eval_results(results,
box_ap_stats.append(res * 100.)
elif 'bbox' in results[0]:
box_ap = voc_bbox_eval(
results, num_classes,
results,
num_classes,
is_bbox_normalized=is_bbox_normalized,
map_type=map_type)
box_ap_stats.append(box_ap)
......
......@@ -17,7 +17,6 @@ from __future__ import division
from __future__ import print_function
import os
import multiprocessing
def set_paddle_flags(**kwargs):
......@@ -63,12 +62,6 @@ def main():
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
if cfg.use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
......@@ -100,15 +93,9 @@ def main():
json_eval_results(
eval_feed, cfg.metric, json_directory=FLAGS.output_eval)
return
# compile program for multi-devices
if devices_num <= 1:
compile_program = fluid.compiler.CompiledProgram(eval_prog)
else:
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
compile_program = fluid.compiler.CompiledProgram(
eval_prog).with_data_parallel(build_strategy=build_strategy)
compile_program = fluid.compiler.CompiledProgram(
eval_prog).with_data_parallel()
# load model
exe.run(startup_prog)
......@@ -132,6 +119,7 @@ def main():
is_bbox_normalized = model.is_bbox_normalized()
results = eval_run(exe, compile_program, pyreader, keys, values, cls)
# evaluation
resolution = None
if 'mask' in results[0]:
......
......@@ -18,16 +18,17 @@ from __future__ import print_function
import os
import time
import multiprocessing
import numpy as np
import datetime
from collections import deque
def set_paddle_flags(**kwargs):
    """Export each keyword argument as an environment variable.

    Values are stringified; a flag already present in ``os.environ`` is
    left untouched so externally supplied settings win. Must run before
    ``import paddle`` for the flags to take effect.
    """
    for flag_name, flag_value in kwargs.items():
        # setdefault only writes when the key is absent, matching the
        # original "don't clobber an existing env var" behavior.
        os.environ.setdefault(flag_name, str(flag_value))
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
......@@ -69,8 +70,7 @@ def main():
if cfg.use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
devices_num = int(os.environ.get('CPU_NUM', 1))
if 'train_feed' not in cfg:
train_feed = create(main_arch + 'TrainFeed')
......@@ -133,12 +133,10 @@ def main():
# compile program for multi-devices
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
sync_bn = getattr(model.backbone, 'norm_type', None) == 'sync_bn'
# only enable sync_bn in multi GPU devices
build_strategy.sync_batch_norm = sync_bn and devices_num > 1 \
and cfg.use_gpu
build_strategy.sync_batch_norm = sync_bn and devices_num > 1 and cfg.use_gpu
train_compile_program = fluid.compiler.CompiledProgram(
train_prog).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
......@@ -202,14 +200,16 @@ def main():
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
resolution, is_bbox_normalized, FLAGS.output_eval, map_type)
box_ap_stats = eval_results(
results, eval_feed, cfg.metric, cfg.num_classes, resolution,
is_bbox_normalized, FLAGS.output_eval, map_type)
if box_ap_stats[0] > best_box_ap_list[0]:
best_box_ap_list[0] = box_ap_stats[0]
best_box_ap_list[1] = it
checkpoint.save(exe, train_prog, os.path.join(save_dir,"best_model"))
checkpoint.save(exe, train_prog,
os.path.join(save_dir, "best_model"))
logger.info("Best test box ap: {}, in iter: {}".format(
best_box_ap_list[0],best_box_ap_list[1]))
best_box_ap_list[0], best_box_ap_list[1]))
train_pyreader.reset()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册