diff --git a/tests/configs/ppocr_det_mobile_params.txt b/tests/configs/ppocr_det_mobile_params.txt
index 7b65895856684c1921deaf746997536b14c6c46c..63a78fb39f05552651fe02832e6e2622f5cba155 100644
--- a/tests/configs/ppocr_det_mobile_params.txt
+++ b/tests/configs/ppocr_det_mobile_params.txt
@@ -65,6 +65,8 @@ inference:./deploy/cpp_infer/build/ppocr det
 null:null
 --benchmark:True
 ===========================serving_params===========================
+model_name:ocr_det
+python:python3.7
 trans_model:-m paddle_serving_client.convert
 --dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
 --model_filename:inference.pdmodel
diff --git a/tests/configs/ppocr_det_server_params.txt b/tests/configs/ppocr_det_server_params.txt
index b3df1735e50d941b34eeb274c28eb4ce50d79292..bba4ef44f769ed16671ead55a0eba6ee986aaaaa 100644
--- a/tests/configs/ppocr_det_server_params.txt
+++ b/tests/configs/ppocr_det_server_params.txt
@@ -49,4 +49,35 @@ inference:tools/infer/predict_det.py
 --save_log_path:null
 --benchmark:True
 null:null
-
+===========================cpp_infer_params===========================
+use_opencv:True
+infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr det
+--use_gpu:True|False
+--enable_mkldnn:True|False
+--cpu_threads:1|6
+--rec_batch_num:1
+--use_tensorrt:False|True
+--precision:fp32|fp16
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+===========================serving_params===========================
+model_name:ocr_det_server
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_server_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
+op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.use_mkldnn:True|False
+op.det.local_service_conf.thread_num:1|6
+op.det.local_service_conf.use_trt:False|True
+op.det.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_http_client.py --image_dir=../../doc/imgs
diff --git a/tests/configs/ppocr_rec_mobile_params.txt b/tests/configs/ppocr_rec_mobile_params.txt
index f9c407897269d4729b9cab7313c45fe69712c62d..f3f3a54e14e042693d28559e487852a079f77bdd 100644
--- a/tests/configs/ppocr_rec_mobile_params.txt
+++ b/tests/configs/ppocr_rec_mobile_params.txt
@@ -65,6 +65,8 @@ inference:./deploy/cpp_infer/build/ppocr rec
 null:null
 --benchmark:True
 ===========================serving_params===========================
+model_name:ocr_rec
+python:python3.7
 trans_model:-m paddle_serving_client.convert
 --dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
 --model_filename:inference.pdmodel
@@ -78,4 +80,4 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
\ No newline at end of file
+pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
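The `*_params.txt` files above share a line-oriented convention: each line is a `key:value` pair split on the first `:`, the `|` character separates alternative values to sweep over (e.g. `--use_gpu:True|False`), and `====...====` banners delimit sections; the `awk` line-number range in `test_serving.sh` further down selects the `serving_params` block. A minimal sketch of how one such line might be parsed, assuming only the first-colon convention visible in these files — `common_func.sh` itself is not part of this diff, so this is not its real implementation:

```sh
# Hypothetical parser sketch, not the actual func_parser_key/func_parser_value.
line="op.det.local_service_conf.devices:null|0"
key=${line%%:*}    # text before the first ':'  -> op.det.local_service_conf.devices
value=${line#*:}   # text after the first ':'   -> null|0
IFS='|' read -r -a alternatives <<< "${value}"   # sweep values -> (null 0)
echo "${key} sweeps over: ${alternatives[*]}"
```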
diff --git a/tests/configs/ppocr_rec_server_params.txt b/tests/configs/ppocr_rec_server_params.txt
index 7d151fcf0b793bd0bf63ac925c9ef3cf0ff56557..77961e8e651e0d770dae64860cc129aa2d50dcf2 100644
--- a/tests/configs/ppocr_rec_server_params.txt
+++ b/tests/configs/ppocr_rec_server_params.txt
@@ -65,12 +65,14 @@ inference:./deploy/cpp_infer/build/ppocr rec
 null:null
 --benchmark:True
 ===========================serving_params===========================
+model_name:ocr_server_rec
+python:python3.7
 trans_model:-m paddle_serving_client.convert
 --dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
---serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/
---serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
+--serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
 op.rec.local_service_conf.devices:null|0
@@ -78,4 +80,4 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
\ No newline at end of file
+pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
diff --git a/tests/prepare.sh b/tests/prepare.sh
index abb84c881e52ca8076f218e926d41679b6578d09..d842f4f573d0b1bd697bdad9b67a765ebcf6da6c 100644
--- a/tests/prepare.sh
+++ b/tests/prepare.sh
@@ -134,5 +134,5 @@ if [ ${MODE} = "serving_infer" ];then
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
-    cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar cd ../
+    cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../
 fi
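The `prepare.sh` fix above is subtle: in the old line, `cd ../` was not a separate command but two extra arguments to the final `tar`, which interprets them as archive member names to extract and fails with "Not found in archive"; chaining with `&&` runs `cd` as its own command once extraction succeeds. A minimal reproduction of the parsing difference, using a hypothetical `archive.tar`:

```sh
# Hypothetical archive name, just to demonstrate how the shell parses each line.
tar xf archive.tar cd ../     # BROKEN: tar looks for members named "cd" and "../"
tar xf archive.tar && cd ../  # FIXED: extract first, then change directory
```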
diff --git a/tests/test_serving.sh b/tests/test_serving.sh
index dad8bd8e8fe26f86b18fc3135c71a6f19b5d2651..ec79a46c9bf4b51c16b1c0ddfff41b772b13b0ae 100644
--- a/tests/test_serving.sh
+++ b/tests/test_serving.sh
@@ -2,44 +2,44 @@
 source tests/common_func.sh
 
 FILENAME=$1
-dataline=$(awk 'NR==67, NR==81{print}' $FILENAME)
+dataline=$(awk 'NR==67, NR==83{print}' $FILENAME)
 
 # parser params
 IFS=$'\n'
 lines=(${dataline})
 
 # parser serving
-trans_model_py=$(func_parser_value "${lines[1]}")
-infer_model_dir_key=$(func_parser_key "${lines[2]}")
-infer_model_dir_value=$(func_parser_value "${lines[2]}")
-model_filename_key=$(func_parser_key "${lines[3]}")
-model_filename_value=$(func_parser_value "${lines[3]}")
-params_filename_key=$(func_parser_key "${lines[4]}")
-params_filename_value=$(func_parser_value "${lines[4]}")
-serving_server_key=$(func_parser_key "${lines[5]}")
-serving_server_value=$(func_parser_value "${lines[5]}")
-serving_client_key=$(func_parser_key "${lines[6]}")
-serving_client_value=$(func_parser_value "${lines[6]}")
-serving_dir_value=$(func_parser_value "${lines[7]}")
-web_service_py=$(func_parser_value "${lines[8]}")
-web_use_gpu_key=$(func_parser_key "${lines[9]}")
-web_use_gpu_list=$(func_parser_value "${lines[9]}")
-web_use_mkldnn_key=$(func_parser_key "${lines[10]}")
-web_use_mkldnn_list=$(func_parser_value "${lines[10]}")
-web_cpu_threads_key=$(func_parser_key "${lines[11]}")
-web_cpu_threads_list=$(func_parser_value "${lines[11]}")
-web_use_trt_key=$(func_parser_key "${lines[12]}")
-web_use_trt_list=$(func_parser_value "${lines[12]}")
-web_precision_key=$(func_parser_key "${lines[13]}")
-web_precision_list=$(func_parser_value "${lines[13]}")
-pipeline_py=$(func_parser_value "${lines[14]}")
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+trans_model_py=$(func_parser_value "${lines[3]}")
+infer_model_dir_key=$(func_parser_key "${lines[4]}")
+infer_model_dir_value=$(func_parser_value "${lines[4]}")
+model_filename_key=$(func_parser_key "${lines[5]}")
+model_filename_value=$(func_parser_value "${lines[5]}")
+params_filename_key=$(func_parser_key "${lines[6]}")
+params_filename_value=$(func_parser_value "${lines[6]}")
+serving_server_key=$(func_parser_key "${lines[7]}")
+serving_server_value=$(func_parser_value "${lines[7]}")
+serving_client_key=$(func_parser_key "${lines[8]}")
+serving_client_value=$(func_parser_value "${lines[8]}")
+serving_dir_value=$(func_parser_value "${lines[9]}")
+web_service_py=$(func_parser_value "${lines[10]}")
+web_use_gpu_key=$(func_parser_key "${lines[11]}")
+web_use_gpu_list=$(func_parser_value "${lines[11]}")
+web_use_mkldnn_key=$(func_parser_key "${lines[12]}")
+web_use_mkldnn_list=$(func_parser_value "${lines[12]}")
+web_cpu_threads_key=$(func_parser_key "${lines[13]}")
+web_cpu_threads_list=$(func_parser_value "${lines[13]}")
+web_use_trt_key=$(func_parser_key "${lines[14]}")
+web_use_trt_list=$(func_parser_value "${lines[14]}")
+web_precision_key=$(func_parser_key "${lines[15]}")
+web_precision_list=$(func_parser_value "${lines[15]}")
+pipeline_py=$(func_parser_value "${lines[16]}")
 
-
-LOG_PATH="./tests/output"
-mkdir -p ${LOG_PATH}
+LOG_PATH="../../tests/output"
+mkdir -p ./tests/output
 status_log="${LOG_PATH}/results_serving.log"
 
-
 function func_serving(){
     IFS='|'
     _python=$1
@@ -65,12 +65,12 @@ function func_serving(){
                     continue
                 fi
                 for threads in ${web_cpu_threads_list[*]}; do
-                    _save_log_path="${_log_path}/server_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
+                    _save_log_path="${LOG_PATH}/server_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
                     set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
-                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &>${_save_log_path} &"
+                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                     eval $web_service_cmd
                     sleep 2s
-                    pipeline_cmd="${python} ${pipeline_py}"
+                    pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1 "
                     eval $pipeline_cmd
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
@@ -93,13 +93,13 @@ function func_serving(){
                 if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                     continue
                 fi
-                _save_log_path="${_log_path}/server_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
+                _save_log_path="${LOG_PATH}/server_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                 set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
                 set_precision=$(func_set_params "${web_precision_key}" "${precision}")
-                web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} &>${_save_log_path} & "
+                web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
                 eval $web_service_cmd
                 sleep 2s
-                pipeline_cmd="${python} ${pipeline_py}"
+                pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1"
                 eval $pipeline_cmd
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
@@ -129,3 +129,7 @@
 eval $env
 echo "################### run test ###################"
+
+export Count=0
+IFS="|"
+func_serving "${web_service_cmd}"
 
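Two threads tie this patch together. First, the two new keys (`model_name`, `python`) inserted at the top of each `serving_params` block shift every subsequent field down by two lines, which is why the `awk` window grows from `NR==67, NR==81` to `NR==67, NR==83` and every `lines[N]` index in the parser increases by 2 (e.g. `trans_model_py` moves from `lines[1]` to `lines[3]`). Second, the per-run log now captures the pipeline client's output rather than the web service's: the service is launched in the background without redirection, and `pipeline_http_client.py` writes to `${_save_log_path}` via `> ... 2>&1`. A hedged usage sketch — the argument order is inferred from `FILENAME=$1` above and the `${MODE}` check in `prepare.sh`, not confirmed by this diff:

```sh
# Assumed invocation of the serving test harness (paths/order are assumptions).
bash tests/prepare.sh tests/configs/ppocr_det_mobile_params.txt serving_infer
bash tests/test_serving.sh tests/configs/ppocr_det_mobile_params.txt
```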