Unverified commit 033cc4cf, authored by MissPenguin, committed by GitHub

Merge pull request #4294 from LDOUBLEV/dygraph

[full chain] opt params.txt
@@ -40,13 +40,13 @@ infer_quant:False
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
 --enable_mkldnn:True|False
---cpu_threads:6
+--cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
 --precision:fp32|fp16|int8
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
---save_log_path:null
+null:null
 --benchmark:True
 null:null
 ===========================cpp_infer_params===========================
@@ -79,4 +79,20 @@ op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
 pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+===========================kl_quant_params===========================
+infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
+infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
+infer_quant:False
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--enable_mkldnn:True|False
+--cpu_threads:1|6
+--rec_batch_num:1
+--use_tensorrt:False|True
+--precision:fp32|fp16|int8
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+null:null
+null:null
\ No newline at end of file
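Each params.txt line is a key:value pair: a `|` in the value lists alternatives the test harness sweeps over, and null:null marks an unused slot. A minimal bash sketch of that expansion, assuming only the IFS="|" splitting that test.sh itself performs (the variable names here are illustrative):

    # Sketch: expand "--cpu_threads:1|6" into one flag per run.
    entry="--cpu_threads:1|6"
    key=${entry%%:*}        # "--cpu_threads"
    values=${entry#*:}      # "1|6"
    IFS="|"
    for v in ${values}; do
        echo "${key}=${v}"  # prints --cpu_threads=1, then --cpu_threads=6
    done
    unset IFS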
@@ -12,10 +12,10 @@ train_model_name:latest
 train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
 null:null
 ##
-trainer:norm_train|pact_train
-norm_train:tools/train.py -c tests/configs/det_r50_vd_db.yml -o Global.pretrained_model=""
-pact_train:null
-fpgm_train:null
+trainer:norm_train|pact_train|fpgm_export
+norm_train:tools/train.py -c tests/configs/det_r50_vd_db.yml -o
+quant_export:deploy/slim/quantization/export_model.py -c tests/configs/det_r50_vd_db.yml -o
+fpgm_export:deploy/slim/prune/export_prune_model.py -c tests/configs/det_r50_vd_db.yml -o
 distill_train:null
 null:null
 null:null
@@ -34,8 +34,8 @@ distill_export:null
 export1:null
 export2:null
 ##
-infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
-infer_export:null
+train_model:./inference/ch_ppocr_server_v2.0_det_train/best_accuracy
+infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml -o
 infer_quant:False
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
......
===========================train_params===========================
model_name:ocr_system
python:python3.7
gpu_list:null
Global.use_gpu:null
Global.auto_cast:null
Global.epoch_num:null
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:null
Global.pretrained_model:null
train_model_name:null
train_infer_img_dir:null
null:null
##
trainer:
norm_train:null
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:null
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
kl_quant:deploy/slim/quantization/quant_kl.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
--save_log_path:null
--benchmark:True
null:null
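For illustration, one concrete command this infer block could expand to, with --det_model_dir filled from the infer_model line above. The actual command assembly happens inside tests/test.sh, so the --key=value flag spelling and the chosen alternatives are assumptions, not this commit's literal output:

    python3.7 tools/infer/predict_det.py \
        --use_gpu=False \
        --enable_mkldnn=True \
        --cpu_threads=6 \
        --rec_batch_num=1 \
        --use_tensorrt=False \
        --precision=int8 \
        --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
        --image_dir=./inference/ch_det_data_50/all-sum-510/ \
        --benchmark=True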
 #!/bin/bash
 FILENAME=$1
-# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer', 'serving_infer']
+# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer',
+# 'cpp_infer', 'serving_infer', 'klquant_infer']
 MODE=$2
 dataline=$(cat ${FILENAME})
@@ -72,9 +74,9 @@ elif [ ${MODE} = "infer" ];then
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
 cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
 elif [ ${model_name} = "ocr_server_det" ]; then
-wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
+wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
-cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
+cd ./inference && tar xf ch_ppocr_server_v2.0_det_train.tar && tar xf ch_det_data_50.tar && cd ../
 elif [ ${model_name} = "ocr_system_mobile" ]; then
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
@@ -98,6 +100,12 @@ elif [ ${MODE} = "infer" ];then
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
 cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../
 fi
+elif [ ${MODE} = "klquant_infer" ];then
+    if [ ${model_name} = "ocr_det" ]; then
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
+        cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
+    fi
 elif [ ${MODE} = "cpp_infer" ];then
 if [ ${model_name} = "ocr_det" ]; then
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
......
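prepare.sh receives the params file and the mode as positional arguments. A typical invocation for the new mode (the params filename is illustrative, not taken from this diff):

    bash tests/prepare.sh tests/ocr_det_params.txt klquant_infer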
 #!/bin/bash
 FILENAME=$1
-# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer']
+# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer', 'serving_infer', 'klquant_infer']
 MODE=$2
-dataline=$(cat ${FILENAME})
+if [ ${MODE} = "cpp_infer" ]; then
+    dataline=$(awk 'NR==67, NR==81{print}' $FILENAME)
+elif [ ${MODE} = "serving_infer" ]; then
+    dataline=$(awk 'NR==52, NR==66{print}' $FILENAME)
+elif [ ${MODE} = "klquant_infer" ]; then
+    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+else
+    dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
+fi
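`NR==M, NR==N{print}` is an awk range pattern: it prints lines M through N of the input, so each mode parses only its own slice of the params file. A self-contained sanity check using nothing beyond standard awk:

    # Prints the numbers 82 through 98, mirroring the klquant_infer slice.
    seq 100 | awk 'NR==82, NR==98{print}'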
 # parser params
 IFS=$'\n'
@@ -144,61 +151,93 @@ benchmark_key=$(func_parser_key "${lines[49]}")
 benchmark_value=$(func_parser_value "${lines[49]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")
-# parser serving
-trans_model_py=$(func_parser_value "${lines[67]}")
-infer_model_dir_key=$(func_parser_key "${lines[68]}")
-infer_model_dir_value=$(func_parser_value "${lines[68]}")
-model_filename_key=$(func_parser_key "${lines[69]}")
-model_filename_value=$(func_parser_value "${lines[69]}")
-params_filename_key=$(func_parser_key "${lines[70]}")
-params_filename_value=$(func_parser_value "${lines[70]}")
-serving_server_key=$(func_parser_key "${lines[71]}")
-serving_server_value=$(func_parser_value "${lines[71]}")
-serving_client_key=$(func_parser_key "${lines[72]}")
-serving_client_value=$(func_parser_value "${lines[72]}")
-serving_dir_value=$(func_parser_value "${lines[73]}")
-web_service_py=$(func_parser_value "${lines[74]}")
-web_use_gpu_key=$(func_parser_key "${lines[75]}")
-web_use_gpu_list=$(func_parser_value "${lines[75]}")
-web_use_mkldnn_key=$(func_parser_key "${lines[76]}")
-web_use_mkldnn_list=$(func_parser_value "${lines[76]}")
-web_cpu_threads_key=$(func_parser_key "${lines[77]}")
-web_cpu_threads_list=$(func_parser_value "${lines[77]}")
-web_use_trt_key=$(func_parser_key "${lines[78]}")
-web_use_trt_list=$(func_parser_value "${lines[78]}")
-web_precision_key=$(func_parser_key "${lines[79]}")
-web_precision_list=$(func_parser_value "${lines[79]}")
-pipeline_py=$(func_parser_value "${lines[80]}")
+# parser klquant_infer
+if [ ${MODE} = "klquant_infer" ]; then
+    # parser inference model
+    infer_model_dir_list=$(func_parser_value "${lines[1]}")
+    infer_export_list=$(func_parser_value "${lines[2]}")
+    infer_is_quant=$(func_parser_value "${lines[3]}")
+    # parser inference
+    inference_py=$(func_parser_value "${lines[4]}")
+    use_gpu_key=$(func_parser_key "${lines[5]}")
+    use_gpu_list=$(func_parser_value "${lines[5]}")
+    use_mkldnn_key=$(func_parser_key "${lines[6]}")
+    use_mkldnn_list=$(func_parser_value "${lines[6]}")
+    cpu_threads_key=$(func_parser_key "${lines[7]}")
+    cpu_threads_list=$(func_parser_value "${lines[7]}")
+    batch_size_key=$(func_parser_key "${lines[8]}")
+    batch_size_list=$(func_parser_value "${lines[8]}")
+    use_trt_key=$(func_parser_key "${lines[9]}")
+    use_trt_list=$(func_parser_value "${lines[9]}")
+    precision_key=$(func_parser_key "${lines[10]}")
+    precision_list=$(func_parser_value "${lines[10]}")
+    infer_model_key=$(func_parser_key "${lines[11]}")
+    image_dir_key=$(func_parser_key "${lines[12]}")
+    infer_img_dir=$(func_parser_value "${lines[12]}")
+    save_log_key=$(func_parser_key "${lines[13]}")
+    benchmark_key=$(func_parser_key "${lines[14]}")
+    benchmark_value=$(func_parser_value "${lines[14]}")
+    infer_key1=$(func_parser_key "${lines[15]}")
+    infer_value1=$(func_parser_value "${lines[15]}")
+fi
+# parser serving
+if [ ${MODE} = "serving_infer" ]; then
+    trans_model_py=$(func_parser_value "${lines[1]}")
+    infer_model_dir_key=$(func_parser_key "${lines[2]}")
+    infer_model_dir_value=$(func_parser_value "${lines[2]}")
+    model_filename_key=$(func_parser_key "${lines[3]}")
+    model_filename_value=$(func_parser_value "${lines[3]}")
+    params_filename_key=$(func_parser_key "${lines[4]}")
+    params_filename_value=$(func_parser_value "${lines[4]}")
+    serving_server_key=$(func_parser_key "${lines[5]}")
+    serving_server_value=$(func_parser_value "${lines[5]}")
+    serving_client_key=$(func_parser_key "${lines[6]}")
+    serving_client_value=$(func_parser_value "${lines[6]}")
+    serving_dir_value=$(func_parser_value "${lines[7]}")
+    web_service_py=$(func_parser_value "${lines[8]}")
+    web_use_gpu_key=$(func_parser_key "${lines[9]}")
+    web_use_gpu_list=$(func_parser_value "${lines[9]}")
+    web_use_mkldnn_key=$(func_parser_key "${lines[10]}")
+    web_use_mkldnn_list=$(func_parser_value "${lines[10]}")
+    web_cpu_threads_key=$(func_parser_key "${lines[11]}")
+    web_cpu_threads_list=$(func_parser_value "${lines[11]}")
+    web_use_trt_key=$(func_parser_key "${lines[12]}")
+    web_use_trt_list=$(func_parser_value "${lines[12]}")
+    web_precision_key=$(func_parser_key "${lines[13]}")
+    web_precision_list=$(func_parser_value "${lines[13]}")
+    pipeline_py=$(func_parser_value "${lines[14]}")
+fi
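func_parser_key and func_parser_value are defined earlier in tests/test.sh, outside this hunk. A minimal sketch consistent with how they are called here, assuming each params line splits on `:`:

    # Sketch only; the real definitions live near the top of tests/test.sh.
    function func_parser_key(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[0]}
    }
    function func_parser_value(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[1]}
    }
    # func_parser_key "--cpu_threads:1|6"   -> --cpu_threads
    # func_parser_value "--cpu_threads:1|6" -> 1|6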
if [ ${MODE} = "cpp_infer" ]; then if [ ${MODE} = "cpp_infer" ]; then
# parser cpp inference model # parser cpp inference model
cpp_infer_model_dir_list=$(func_parser_value "${lines[53]}") cpp_infer_model_dir_list=$(func_parser_value "${lines[1]}")
cpp_infer_is_quant=$(func_parser_value "${lines[54]}") cpp_infer_is_quant=$(func_parser_value "${lines[2]}")
# parser cpp inference # parser cpp inference
inference_cmd=$(func_parser_value "${lines[55]}") inference_cmd=$(func_parser_value "${lines[3]}")
cpp_use_gpu_key=$(func_parser_key "${lines[56]}") cpp_use_gpu_key=$(func_parser_key "${lines[4]}")
cpp_use_gpu_list=$(func_parser_value "${lines[56]}") cpp_use_gpu_list=$(func_parser_value "${lines[4]}")
cpp_use_mkldnn_key=$(func_parser_key "${lines[57]}") cpp_use_mkldnn_key=$(func_parser_key "${lines[5]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[57]}") cpp_use_mkldnn_list=$(func_parser_value "${lines[5]}")
cpp_cpu_threads_key=$(func_parser_key "${lines[58]}") cpp_cpu_threads_key=$(func_parser_key "${lines[6]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[58]}") cpp_cpu_threads_list=$(func_parser_value "${lines[6]}")
cpp_batch_size_key=$(func_parser_key "${lines[59]}") cpp_batch_size_key=$(func_parser_key "${lines[7]}")
cpp_batch_size_list=$(func_parser_value "${lines[59]}") cpp_batch_size_list=$(func_parser_value "${lines[7]}")
cpp_use_trt_key=$(func_parser_key "${lines[60]}") cpp_use_trt_key=$(func_parser_key "${lines[8]}")
cpp_use_trt_list=$(func_parser_value "${lines[60]}") cpp_use_trt_list=$(func_parser_value "${lines[8]}")
cpp_precision_key=$(func_parser_key "${lines[61]}") cpp_precision_key=$(func_parser_key "${lines[9]}")
cpp_precision_list=$(func_parser_value "${lines[61]}") cpp_precision_list=$(func_parser_value "${lines[9]}")
cpp_infer_model_key=$(func_parser_key "${lines[62]}") cpp_infer_model_key=$(func_parser_key "${lines[10]}")
cpp_image_dir_key=$(func_parser_key "${lines[63]}") cpp_image_dir_key=$(func_parser_key "${lines[11]}")
cpp_infer_img_dir=$(func_parser_value "${lines[63]}") cpp_infer_img_dir=$(func_parser_value "${lines[12]}")
cpp_infer_key1=$(func_parser_key "${lines[64]}") cpp_infer_key1=$(func_parser_key "${lines[13]}")
cpp_infer_value1=$(func_parser_value "${lines[64]}") cpp_infer_value1=$(func_parser_value "${lines[13]}")
cpp_benchmark_key=$(func_parser_key "${lines[65]}") cpp_benchmark_key=$(func_parser_key "${lines[14]}")
cpp_benchmark_value=$(func_parser_value "${lines[65]}") cpp_benchmark_value=$(func_parser_value "${lines[14]}")
fi fi
LOG_PATH="./tests/output" LOG_PATH="./tests/output"
mkdir -p ${LOG_PATH} mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log" status_log="${LOG_PATH}/results.log"
...@@ -414,7 +453,7 @@ function func_cpp_inference(){ ...@@ -414,7 +453,7 @@ function func_cpp_inference(){
done done
} }
if [ ${MODE} = "infer" ]; then if [ ${MODE} = "infer" ] || [ ${MODE} = "klquant_infer" ]; then
GPUID=$3 GPUID=$3
if [ ${#GPUID} -le 0 ];then if [ ${#GPUID} -le 0 ];then
env=" " env=" "
...@@ -447,7 +486,6 @@ if [ ${MODE} = "infer" ]; then ...@@ -447,7 +486,6 @@ if [ ${MODE} = "infer" ]; then
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
Count=$(($Count + 1)) Count=$(($Count + 1))
done done
elif [ ${MODE} = "cpp_infer" ]; then elif [ ${MODE} = "cpp_infer" ]; then
GPUID=$3 GPUID=$3
if [ ${#GPUID} -le 0 ];then if [ ${#GPUID} -le 0 ];then
...@@ -481,6 +519,8 @@ elif [ ${MODE} = "serving_infer" ]; then ...@@ -481,6 +519,8 @@ elif [ ${MODE} = "serving_infer" ]; then
#run serving #run serving
func_serving "${web_service_cmd}" func_serving "${web_service_cmd}"
else else
IFS="|" IFS="|"
export Count=0 export Count=0
......
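Putting the two scripts together, the new mode runs end to end as follows; the params filename and GPU id are illustrative, and the third argument is the optional GPUID read by test.sh:

    bash tests/prepare.sh tests/ocr_det_params.txt klquant_infer
    bash tests/test.sh tests/ocr_det_params.txt klquant_infer '0'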