Commit 9e8dd9f6 authored by dongshuilong

fix bugs

Parent 7138b30f
@@ -48,18 +48,21 @@ class ClsPredictor(Predictor):
             import os
             pid = os.getpid()
             self.auto_log = auto_log.AutoLogger(
-                model_name='cls',
+                model_name=config["Global"].get("model_name", "cls"),
                 model_precision='fp16'
                 if config["Global"]["use_fp16"] else 'fp32',
-                batch_size=1,
+                batch_size=config["Global"].get("batch_size", 1),
                 data_shape=[3, 224, 224],
-                save_path="../output/auto_log.lpg",
-                inference_config=None,
+                save_path=config["Global"].get("save_log_path",
+                                               "./auto_log.log"),
+                inference_config=self.config,
                 pids=pid,
                 process_name=None,
                 gpu_ids=None,
-                time_keys=['preprocess_time', 'inference_time'],
-                warmup=10)
+                time_keys=[
+                    'preprocess_time', 'inference_time', 'postprocess_time'
+                ],
+                warmup=2)
 
     def predict(self, images):
         input_names = self.paddle_predictor.get_input_names()
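This hunk stops hard-coding the benchmark logger's settings and reads them from the `Global` config instead, fixing the misspelled `auto_log.lpg` save path and forwarding the real inference config. A minimal standalone sketch of the same key lookups (the `Global` values below are illustrative, not from the repo):

```python
import os

# Hypothetical Global section mirroring the keys the patched code reads.
config = {
    "Global": {
        "model_name": "ResNet50_vd",
        "use_fp16": False,
        "batch_size": 1,
        "save_log_path": "./auto_log.log",
    }
}

global_cfg = config["Global"]
logger_kwargs = dict(
    model_name=global_cfg.get("model_name", "cls"),
    model_precision="fp16" if global_cfg["use_fp16"] else "fp32",
    batch_size=global_cfg.get("batch_size", 1),
    data_shape=[3, 224, 224],
    save_path=global_cfg.get("save_log_path", "./auto_log.log"),
    pids=os.getpid(),
    process_name=None,
    gpu_ids=None,
    time_keys=["preprocess_time", "inference_time", "postprocess_time"],
    warmup=2,
)
# These kwargs would be passed to auto_log.AutoLogger(**logger_kwargs, ...).
print(logger_kwargs)
```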
@@ -99,8 +102,8 @@ def main(config):
         output = cls_predictor.postprocess(output, [image_file])
         if cls_predictor.benchmark:
             cls_predictor.auto_log.times.end(stamp=True)
-            cls_predictor.auto_log.report()
         print(output)
+    cls_predictor.auto_log.report()
     return
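This hunk fixes where the benchmark report is emitted: `report()` used to run inside the per-image branch, firing once per image; it now runs once after the loop. A minimal sketch of the corrected flow, using stand-in timer/logger classes (illustrative, not auto_log's real internals):

```python
class FakeTimes:
    """Stand-in for auto_log's timer; records one stamp per image."""
    def __init__(self):
        self.stamps = 0

    def end(self, stamp=True):
        self.stamps += 1


class FakeAutoLog:
    def __init__(self):
        self.times = FakeTimes()

    def report(self):
        print(f"benchmark report over {self.times.stamps} images")


auto_log_obj = FakeAutoLog()
for image_file in ["a.jpg", "b.jpg", "c.jpg"]:
    # ... preprocess / infer / postprocess ...
    auto_log_obj.times.end(stamp=True)  # per-image timing
auto_log_obj.report()                   # once, after the loop (as in the fix)
```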
python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=True -o Global.use_tensorrt=False -o Global.use_fp16=False -o Global.inference_model_dir=.././test/output/ResNet50_vd_gpus_0,1/inference -o Global.batch_size=1 -o Global.infer_imgs=.././dataset/chain_dataset/val -o Global.save_log_path=.././test/output/ResNet50_vd_infer_gpu_usetrt_True_precision_False_batchsize_1.log -o benchmark=True
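The example command above drives everything through `-o key=value` overrides on top of the YAML config. A minimal sketch of how dotted overrides of this shape can be merged into a nested config dict (this helper is illustrative, not PaddleClas's actual parser):

```python
def apply_override(config: dict, item: str) -> None:
    """Apply one 'a.b.c=value' override onto a nested dict (illustrative)."""
    key_path, _, raw_value = item.partition("=")
    keys = key_path.split(".")
    node = config
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    # Crude literal handling for booleans/ints; a real parser would use YAML.
    value = {"True": True, "False": False}.get(raw_value, raw_value)
    if isinstance(value, str) and value.isdigit():
        value = int(value)
    node[keys[-1]] = value


cfg = {"Global": {"use_gpu": False}}
for ov in ["Global.use_gpu=True", "Global.batch_size=1", "benchmark=True"]:
    apply_override(cfg, ov)
print(cfg)  # {'Global': {'use_gpu': True, 'batch_size': 1}, 'benchmark': True}
```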
@@ -28,7 +28,7 @@ class Predictor(object):
         if args.use_fp16 is True:
             assert args.use_tensorrt is True
         self.args = args
-        self.paddle_predictor = self.create_paddle_predictor(
+        self.paddle_predictor, self.config = self.create_paddle_predictor(
             args, inference_model_dir)
 
     def predict(self, image):
@@ -66,4 +66,4 @@ class Predictor(object):
         config.switch_use_feed_fetch_ops(False)
         predictor = create_predictor(config)
-        return predictor
+        return predictor, config
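With `create_paddle_predictor` now returning the `Config` alongside the predictor, the caller can store it as `self.config` and hand it to `AutoLogger(inference_config=...)`. A minimal sketch of the changed call pattern (the `paddle.inference` names are real; the model path is a placeholder):

```python
from paddle.inference import Config, create_predictor


def build_predictor(model_dir: str):
    """Sketch of the patched helper: return the Config with the predictor."""
    config = Config(f"{model_dir}/inference.pdmodel",
                    f"{model_dir}/inference.pdiparams")
    config.switch_use_feed_fetch_ops(False)
    predictor = create_predictor(config)
    # Returning config too lets ClsPredictor keep it and pass it on to
    # auto_log.AutoLogger(inference_config=...) for the benchmark report.
    return predictor, config


# predictor, cfg = build_predictor("./inference")  # needs real model files
```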
@@ -92,7 +92,7 @@ function func_inference(){
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
                     _save_log_path="${_log_path}/${_model_name}_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
-                    command="${_python} ${_script} -o ${use_gpu_key}=${use_gpu} -o ${use_mkldnn_key}=${use_mkldnn} -o ${cpu_threads_key}=${threads} -o ${infer_model_key}=${_model_dir} -o ${batch_size_key}=${batch_size} -o ${image_dir_key}=${_img_dir} -o ${save_log_key}=${_save_log_path} -o benchmark=True"
+                    command="${_python} ${_script} -o ${use_gpu_key}=${use_gpu} -o ${use_mkldnn_key}=${use_mkldnn} -o ${cpu_threads_key}=${threads} -o ${infer_model_key}=${_model_dir} -o ${batch_size_key}=${batch_size} -o ${image_dir_key}=${_img_dir} -o ${save_log_key}=${_save_log_path} -o benchmark=True -o Global.model_name=${_model_name}"
                     eval $command
                     status_check $? "${command}" "${status_log}"
                 done
@@ -106,7 +106,7 @@ function func_inference(){
             fi
             for batch_size in ${batch_size_list[*]}; do
                 _save_log_path="${_log_path}/${_model_name}_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
-                command="${_python} ${_script} -o ${use_gpu_key}=${use_gpu} -o ${use_trt_key}=${use_trt} -o ${precision_key}=${precision} -o ${infer_model_key}=${_model_dir} -o ${batch_size_key}=${batch_size} -o ${image_dir_key}=${_img_dir} -o ${save_log_key}=${_save_log_path} -o benchmark=True"
+                command="${_python} ${_script} -o ${use_gpu_key}=${use_gpu} -o ${use_trt_key}=${use_trt} -o ${precision_key}=${precision} -o ${infer_model_key}=${_model_dir} -o ${batch_size_key}=${batch_size} -o ${image_dir_key}=${_img_dir} -o ${save_log_key}=${_save_log_path} -o benchmark=True -o Global.model_name=${_model_name}"
                 eval $command
                 status_check $? "${command}" "${status_log}"
             done
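Both test loops gain `-o Global.model_name=${_model_name}` so that each model under test labels its own benchmark log; without it, every run would fall back to the default seen in the first hunk. A tiny illustration of that fallback:

```python
# Without the override, every model would be reported under the default name.
global_cfg = {}                                  # no Global.model_name passed
print(global_cfg.get("model_name", "cls"))       # -> cls

# With -o Global.model_name=ResNet50_vd forwarded by the test script:
global_cfg = {"model_name": "ResNet50_vd"}
print(global_cfg.get("model_name", "cls"))       # -> ResNet50_vd
```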