Commit f7d58cc4 authored by Liufang Sang, committed by Bai Yifan

[PaddleSlim] add infer time (#3688)

* add infer time test=release/1.6

* fix model name in doc  test=release/1.6
Parent 8c2e9593
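The change below times inference with a warm-up-then-average pattern: a few untimed runs first, so that one-time costs such as program preparation and memory allocation do not skew the numbers, then a fixed number of timed repeats on the same feed, reported as milliseconds per sample. A minimal standalone sketch of that pattern follows; `run_once` is a hypothetical stand-in for the `exe.run(...)` call used in the diff, not part of this commit.

```python
import time

def measure_infer_time(run_once, warmup_times=10, repeats_time=30):
    """Return the average wall-clock latency of one inference call, in ms/sample.

    run_once is a hypothetical zero-argument callable wrapping a single
    forward pass (for example, a lambda around exe.run with a fixed feed).
    """
    # Warm-up runs: keep one-time setup cost out of the measurement.
    for _ in range(warmup_times):
        run_once()
    # Timed repeats on the same input, averaged over repeats_time runs.
    start_time = time.time()
    for _ in range(repeats_time):
        run_once()
    return (time.time() - start_time) * 1000 / repeats_time
```

In the diff itself the same idea is inlined in the first loop iteration and then disabled by setting `infer_time = False`, so the measurement runs only once per evaluation.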
@@ -19,6 +19,7 @@ from __future__ import print_function
 import os
 import sys
 import glob
+import time
 import numpy as np
 from PIL import Image
@@ -166,8 +167,29 @@ def main():
     imid2path = reader.imid2path
     keys = ['bbox']
+    infer_time = True
     for iter_id, data in enumerate(reader()):
         feed_data = [[d[0], d[1]] for d in data]
+        # for infer time
+        if infer_time:
+            warmup_times = 10
+            repeats_time = 30
+            feed_data_dict = feeder.feed(feed_data)
+            for i in range(warmup_times):
+                exe.run(infer_prog,
+                        feed=feed_data_dict,
+                        fetch_list=fetch_list,
+                        return_numpy=False)
+            start_time = time.time()
+            for i in range(repeats_time):
+                exe.run(infer_prog,
+                        feed=feed_data_dict,
+                        fetch_list=fetch_list,
+                        return_numpy=False)
+            print("infer time: {} ms/sample".format((time.time() - start_time) * 1000 / repeats_time))
+            infer_time = False
         outs = exe.run(infer_prog,
                        feed=feeder.feed(feed_data),
                        fetch_list=fetch_list,
......
@@ -228,7 +228,7 @@ FP32 models can be loaded for inference with PaddleLite; see the tutorial [Paddle-Lite
 > The results in the current release are not the best achievable after hyperparameter tuning; they are provided only as an example for reference, and we will improve them in later releases.
-### MobileNetV1
+### MobileNetV1-YOLO-V3
 | weight quantization method | activation quantization method | Box AP | Paddle Fluid inference time (ms) | Paddle Lite inference time (ms) |
 |---|---|---|---|---|
@@ -237,8 +237,6 @@ FP32 models can be loaded for inference with PaddleLite; see the tutorial [Paddle-Lite
 |abs_max|moving_average_abs_max|- |- |-|
 |channel_wise_abs_max|abs_max|- |- |-|
-> Training hyperparameters:
 ## FAQ
......
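The table rows above name combinations of weight and activation quantization strategies (abs_max, channel_wise_abs_max, moving_average_abs_max). As a rough illustration of how such a combination is typically written down as a configuration, the dict below is a hedged sketch; the key names are an assumption for illustration and are not taken from this commit.

```python
# Hypothetical quantization config; key names are illustrative only.
quant_config = {
    'weight_quantize_type': 'channel_wise_abs_max',        # per-channel abs_max scale for weights
    'activation_quantize_type': 'moving_average_abs_max',  # running-average abs_max scale for activations
    'weight_bits': 8,
    'activation_bits': 8,
}
```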
@@ -17,6 +17,7 @@ import sys
 import numpy as np
 import argparse
 import functools
+import time
 import paddle
 import paddle.fluid as fluid
@@ -46,9 +47,26 @@ def infer(args):
     test_reader = paddle.batch(reader.test(), batch_size=1)
     feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=test_program)
     results=[]
+    # for infer time; if you do not need it, change infer_time to False
+    infer_time = True
     for batch_id, data in enumerate(test_reader()):
+        # for infer time
+        if infer_time:
+            warmup_times = 10
+            repeats_time = 30
+            feed_data = feeder.feed(data)
+            for i in range(warmup_times):
+                exe.run(test_program,
+                        feed=feed_data,
+                        fetch_list=fetch_targets)
+            start_time = time.time()
+            for i in range(repeats_time):
+                exe.run(test_program,
+                        feed=feed_data,
+                        fetch_list=fetch_targets)
+            print("infer time: {} ms/sample".format((time.time() - start_time) * 1000 / repeats_time))
+            infer_time = False
         # top1_acc, top5_acc
         result = exe.run(test_program,
                          feed=feeder.feed(data),
......