Commit 6453565c authored by HydrogenSulfate

debug

Parent bd2bd031
@@ -254,8 +254,8 @@ bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_linux_gpu_
The output below indicates that the command ran successfully.
```shell
-Run successfully with command - ./deploy/cpp/build/clas_system -c ./deploy/config/inference_cls.yaml > ./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_gpu.log 2>&1 !
-Run successfully with command - ./deploy/cpp/build/clas_system -c ./deploy/config/inference_cls.yaml > ./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_cpu.log 2>&1 !
+Run successfully with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log 2>&1!
+Run successfully with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cls_cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log 2>&1!
```
The results are printed at the end of the log, as shown below:
@@ -306,6 +306,6 @@ Current total inferen time cost: 5449.39 ms.
Top5: class_id: 265, score: 0.0420, label: toy poodle
```
-Detailed logs can be found in `./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_gpu.log` and `./test_tipc/output/ResNet50/infer_cpp/infer_cpp_use_cpu.log`.
+Detailed logs can be found in `./test_tipc/output/ResNet50/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log` and `./test_tipc/output/ResNet50/cls_cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log`.
If a run fails, the failure log and the corresponding command are also printed to the terminal; that command can be used to analyze the cause of the failure.
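Since the harness now writes one parameterized log per run, a quick way to review them all is a small loop over the output directory. A minimal sketch, assuming the ResNet50 output paths shown above:

```shell
# Skim the tail of every C++ inference log produced by the test run;
# the glob assumes the ./test_tipc/output/ResNet50 paths referenced above.
for log in ./test_tipc/output/ResNet50/cls_cpp_infer_*.log; do
    echo "=== ${log} ==="
    tail -n 5 "${log}"   # the Top-k predictions are printed at the end of each log
done
```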
@@ -66,6 +66,10 @@ def main():
"test_images")
config["IndexProcess"]["index_dir"] = os.path.join(args.data_dir,
"index")
config["IndexProcess"]["image_root"] = os.path.join(args.data_dir,
"gallery")
config["IndexProcess"]["data_file"] = os.path.join(args.data_dir,
"drink_label.txt")
assert args.cls_model_dir
assert args.det_model_dir
config["Global"]["det_inference_model_dir"] = args.det_model_dir
@@ -93,7 +93,7 @@ if [ ${MODE} = "cpp_infer" ]; then
if [[ $cpp_type == "cls" ]]; then
eval "wget -nc $cls_inference_url"
tar xf "${model_name}_infer.tar"
eval "mv ${model_name}_infer $cls_inference_model_dir"
cd dataset
rm -rf ILSVRC2012
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
@@ -105,13 +105,12 @@ if [ ${MODE} = "cpp_infer" ]; then
tar_name=$(func_get_url_file_name "$cls_inference_url")
model_dir=${tar_name%.*}
eval "tar xf ${tar_name}"
eval "mv ${model_dir}_infer ${cls_inference_model_dir}"
eval "wget -nc $det_inference_url"
tar_name=$(func_get_url_file_name "$det_inference_url")
model_dir=${tar_name%.*}
eval "tar xf ${tar_name}"
eval "mv ${model_dir}_infer ${det_inference_model_dir}"
cd dataset
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar
tar -xf drink_dataset_v1.0.tar
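The helper `func_get_url_file_name` used above comes from the TIPC common functions; presumably it returns the last path component of the URL, which `${tar_name%.*}` then strips of its extension to derive the directory name. A minimal sketch of that assumed behavior (the URL is illustrative):

```shell
# Sketch (assumption): return the file-name component of a download URL.
function func_get_url_file_name() {
    local url="$1"
    echo "${url##*/}"   # drop everything up to and including the last '/'
}

tar_name=$(func_get_url_file_name "https://example.com/models/ResNet50_infer.tar")
model_dir=${tar_name%.*}   # "ResNet50_infer.tar" -> "ResNet50_infer"
```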
@@ -61,7 +61,7 @@ function func_shitu_cpp_inference(){
command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
eval $command
eval $transform_index_cmd
command="${_script} 2>&1|tee ${_save_log_path}"
command="${_script} > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${command}" "${status_log}"
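The recurring change in this file replaces the `2>&1|tee` pipeline with a plain redirect. One practical difference: with `tee`, the pipeline's `$?` is `tee`'s exit code, so the inference binary's failure is only visible via `PIPESTATUS[0]`; with a plain redirect, the command's own status is reported directly (and `PIPESTATUS[0]` for a one-command pipeline equals `$?`, so the existing `last_status=${PIPESTATUS[0]}` line keeps working). A standalone sketch of the two behaviors:

```shell
# Sketch: exit-status visibility under the two logging styles.
false 2>&1 | tee /tmp/run.log
echo "tee:      \$?=$?  PIPESTATUS[0]=${PIPESTATUS[0]}"
# -> $? is tee's status (0); only PIPESTATUS[0] retains the failure (1).

false > /tmp/run.log 2>&1
echo "redirect: \$?=$?"
# -> $? is 1, the command's own status; PIPESTATUS[0] matches it.
```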
@@ -85,7 +85,7 @@ function func_shitu_cpp_inference(){
command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
eval $command
eval $transform_index_cmd
command="${_script} 2>&1|tee ${_save_log_path}"
command="${_script} > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${_script}" "${status_log}"
@@ -123,7 +123,7 @@ function func_cls_cpp_inference(){
command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
eval $command
command1="${_script} 2>&1|tee ${_save_log_path}"
command1="${_script} > ${_save_log_path} 2>&1"
eval ${command1}
last_status=${PIPESTATUS[0]}
status_check $last_status "${command1}" "${status_log}"
@@ -146,7 +146,7 @@ function func_cls_cpp_inference(){
_save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
eval $command
command="${_script} 2>&1|tee ${_save_log_path}"
command="${_script} > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${command}" "${status_log}"
@@ -256,9 +256,9 @@ for infer_model in ${cpp_infer_model_dir[*]}; do
#run inference
is_quant=${infer_quant_flag[Count]}
if [[ $cpp_infer_type == "cls" ]]; then
func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
else
func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
fi
Count=$(($Count + 1))
done
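For orientation, the pieces this commit touches are typically exercised in two steps: prepare.sh downloads models and data for the given mode, then test_inference_cpp.sh generates the yaml, runs the binary, and checks the status. The config path below is a placeholder, since the hunk headers above truncate the real file name:

```shell
# Illustrative two-step run; CONFIG is a placeholder, not pinned by this diff.
CONFIG=test_tipc/config/ResNet/<your_cpp_infer_config>.txt
bash test_tipc/prepare.sh ${CONFIG} cpp_infer       # fetch models and datasets
bash test_tipc/test_inference_cpp.sh ${CONFIG}      # run inference and check logs
```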