Commit d76a168a authored by qingqing01, committed by GitHub

Add benchmark (fps) in MODEL_ZOO doc (#3263)

* Add benchmark (fps) in MODEL_ZOO doc
Parent 20ca4cc3
This diff is collapsed.
This diff is collapsed.
@@ -19,6 +19,7 @@ from __future__ import print_function
 import logging
 import numpy as np
 import os
+import time
 
 import paddle.fluid as fluid
@@ -69,6 +70,10 @@ def eval_run(exe, compile_program, pyreader, keys, values, cls):
             cls[i].reset(exe)
             values.append(accum_map)
 
+    images_num = 0
+    start_time = time.time()
+    has_bbox = 'bbox' in keys
     try:
         pyreader.start()
         while True:
@@ -83,10 +88,20 @@ def eval_run(exe, compile_program, pyreader, keys, values, cls):
             if iter_id % 100 == 0:
                 logger.info('Test iter {}'.format(iter_id))
             iter_id += 1
+            images_num += len(res['bbox'][1][0]) if has_bbox else 1
     except (StopIteration, fluid.core.EOFException):
         pyreader.reset()
     logger.info('Test finish iter {}'.format(iter_id))
+
+    end_time = time.time()
+    fps = images_num / (end_time - start_time)
+    if has_bbox:
+        logger.info('Total number of images: {}, inference time: {} fps.'.
+                    format(images_num, fps))
+    else:
+        logger.info('Total iteration: {}, inference time: {} batch/s.'.format(
+            images_num, fps))
+
     return results
@@ -114,9 +129,12 @@ def eval_results(results,
         if output_directory:
             output = os.path.join(output_directory, 'bbox.json')
-        box_ap_stats = bbox_eval(results, anno_file, output,
-                                 with_background,
-                                 is_bbox_normalized=is_bbox_normalized)
+        box_ap_stats = bbox_eval(
+            results,
+            anno_file,
+            output,
+            with_background,
+            is_bbox_normalized=is_bbox_normalized)
 
     if 'mask' in results[0]:
         output = 'mask.json'
@@ -130,7 +148,8 @@ def eval_results(results,
            box_ap_stats.append(res * 100.)
    elif 'bbox' in results[0]:
        box_ap = voc_bbox_eval(
-            results, num_classes,
+            results,
+            num_classes,
            is_bbox_normalized=is_bbox_normalized,
            map_type=map_type)
        box_ap_stats.append(box_ap)
...
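For reference, the FPS accounting added to eval_run above reduces to a simple pattern: count the images processed inside the evaluation loop and divide by the elapsed wall-clock time. The standalone sketch below isolates that pattern; it is not part of this commit, and the measure_fps helper and the simulated batch sizes are hypothetical.

# A minimal sketch of the timing pattern; not part of this commit.
import time

def measure_fps(batch_image_counts):
    """Sum per-batch image counts and return (images_num, fps)."""
    images_num = 0
    start_time = time.time()
    for count in batch_image_counts:
        time.sleep(0.001)     # stand-in for running one inference batch
        images_num += count   # in eval_run this is len(res['bbox'][1][0])
    end_time = time.time()
    return images_num, images_num / (end_time - start_time)

if __name__ == '__main__':
    images_num, fps = measure_fps([8] * 10)  # pretend: 10 batches of 8 images
    print('Total number of images: {}, inference time: {} fps.'.format(
        images_num, fps))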
@@ -17,7 +17,6 @@ from __future__ import division
 from __future__ import print_function
 
 import os
-import multiprocessing
 
 
 def set_paddle_flags(**kwargs):
@@ -63,12 +62,6 @@ def main():
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
 
-    if cfg.use_gpu:
-        devices_num = fluid.core.get_cuda_device_count()
-    else:
-        devices_num = int(
-            os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
-
     if 'eval_feed' not in cfg:
         eval_feed = create(main_arch + 'EvalFeed')
     else:
@@ -100,15 +93,9 @@ def main():
         json_eval_results(
             eval_feed, cfg.metric, json_directory=FLAGS.output_eval)
         return
 
-    # compile program for multi-devices
-    if devices_num <= 1:
-        compile_program = fluid.compiler.CompiledProgram(eval_prog)
-    else:
-        build_strategy = fluid.BuildStrategy()
-        build_strategy.memory_optimize = False
-        build_strategy.enable_inplace = False
-        compile_program = fluid.compiler.CompiledProgram(
-            eval_prog).with_data_parallel(build_strategy=build_strategy)
+    compile_program = fluid.compiler.CompiledProgram(
+        eval_prog).with_data_parallel()
 
     # load model
     exe.run(startup_prog)
@@ -132,6 +119,7 @@ def main():
     is_bbox_normalized = model.is_bbox_normalized()
 
     results = eval_run(exe, compile_program, pyreader, keys, values, cls)
+
     # evaluation
     resolution = None
     if 'mask' in results[0]:
...
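The eval.py change above removes the device-count branch and always wraps the evaluation program with with_data_parallel(). A minimal, self-contained sketch of that compile step follows; the one-op toy program is invented so the snippet runs on its own and is not taken from PaddleDetection.

# Hedged sketch of the simplified compile path; the toy program is illustrative only.
import os
os.environ.setdefault('CPU_NUM', '1')  # needed if the compiled program is later run on CPU

import paddle.fluid as fluid

eval_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    y = fluid.layers.scale(x, scale=2.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)

# Same shape as the new eval.py: no devices_num check, no BuildStrategy.
compile_program = fluid.compiler.CompiledProgram(
    eval_prog).with_data_parallel()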
@@ -18,16 +18,17 @@ from __future__ import print_function
 import os
 import time
-import multiprocessing
 import numpy as np
 import datetime
 from collections import deque
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
@@ -69,8 +70,7 @@ def main():
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
     else:
-        devices_num = int(
-            os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+        devices_num = int(os.environ.get('CPU_NUM', 1))
 
     if 'train_feed' not in cfg:
         train_feed = create(main_arch + 'TrainFeed')
@@ -133,12 +133,10 @@ def main():
     # compile program for multi-devices
     build_strategy = fluid.BuildStrategy()
-    build_strategy.memory_optimize = False
     build_strategy.enable_inplace = False
     sync_bn = getattr(model.backbone, 'norm_type', None) == 'sync_bn'
     # only enable sync_bn in multi GPU devices
-    build_strategy.sync_batch_norm = sync_bn and devices_num > 1 \
-        and cfg.use_gpu
+    build_strategy.sync_batch_norm = sync_bn and devices_num > 1 and cfg.use_gpu
     train_compile_program = fluid.compiler.CompiledProgram(
         train_prog).with_data_parallel(
             loss_name=loss.name, build_strategy=build_strategy)
@@ -202,14 +200,16 @@ def main():
                 resolution = None
                 if 'mask' in results[0]:
                     resolution = model.mask_head.resolution
-                box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                            resolution, is_bbox_normalized, FLAGS.output_eval, map_type)
+                box_ap_stats = eval_results(
+                    results, eval_feed, cfg.metric, cfg.num_classes, resolution,
+                    is_bbox_normalized, FLAGS.output_eval, map_type)
 
                 if box_ap_stats[0] > best_box_ap_list[0]:
                     best_box_ap_list[0] = box_ap_stats[0]
                     best_box_ap_list[1] = it
-                    checkpoint.save(exe, train_prog, os.path.join(save_dir,"best_model"))
+                    checkpoint.save(exe, train_prog,
+                                    os.path.join(save_dir, "best_model"))
                 logger.info("Best test box ap: {}, in iter: {}".format(
-                    best_box_ap_list[0],best_box_ap_list[1]))
+                    best_box_ap_list[0], best_box_ap_list[1]))
 
     train_pyreader.reset()
...
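One consequence of the train.py changes above: CPU training now defaults to a single device unless CPU_NUM is set, and sync_batch_norm is only enabled when training on more than one GPU. The sketch below just evaluates that condition with stand-in values; use_gpu and sync_bn here are hypothetical placeholders, not values read from a config.

# Hedged sketch of the build-strategy logic with stand-in values.
import os

import paddle.fluid as fluid

use_gpu = False
if use_gpu:
    devices_num = fluid.core.get_cuda_device_count()
else:
    devices_num = int(os.environ.get('CPU_NUM', 1))  # default is now 1, not cpu_count()

sync_bn = True  # in train.py this comes from the backbone's norm_type

build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = False
# sync_batch_norm is only meaningful across multiple GPU devices
build_strategy.sync_batch_norm = sync_bn and devices_num > 1 and use_gpu
print('devices_num={}, sync_batch_norm={}'.format(
    devices_num, build_strategy.sync_batch_norm))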