diff --git a/deploy/pdserving/ocr_cpp_client.py b/deploy/pdserving/ocr_cpp_client.py new file mode 100755 index 0000000000000000000000000000000000000000..2baa7565ac78b9551c788c7b36457bce38828eb5 --- /dev/null +++ b/deploy/pdserving/ocr_cpp_client.py @@ -0,0 +1,56 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + +from paddle_serving_client import Client +import sys +import numpy as np +import base64 +import os +import cv2 +from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor +from paddle_serving_app.reader import Div, Normalize, Transpose +from ocr_reader import OCRReader + +client = Client() +# TODO:load_client need to load more than one client model. +# this need to figure out some details. 
+client.load_client_config(sys.argv[1:]) +client.connect(["127.0.0.1:9293"]) + +import paddle +test_img_dir = "test_img/" + +ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt") + + +def cv2_to_base64(image): + return base64.b64encode(image).decode( + 'utf8') #data.tostring()).decode('utf8') + + +for img_file in os.listdir(test_img_dir): + with open(os.path.join(test_img_dir, img_file), 'rb') as file: + image_data = file.read() + image = cv2_to_base64(image_data) + res_list = [] + #print(image) + fetch_map = client.predict( + feed={"x": image}, fetch=["save_infer_model/scale_0.tmp_1"], batch=True) + print("fetrch map:", fetch_map) + one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True) + for res in one_batch_res: + res_list.append(res[0]) + res = {"res": str(res_list)} + print(res) diff --git a/deploy/pdserving/pipeline_http_client.py b/deploy/pdserving/pipeline_http_client.py index 0befe2f6144d18e24fb3f72ed1d919fd8cd7d5a4..61d13178220118eaf53c51723a9ef65201373ffb 100644 --- a/deploy/pdserving/pipeline_http_client.py +++ b/deploy/pdserving/pipeline_http_client.py @@ -18,13 +18,19 @@ import json import base64 import os +import argparse +parser = argparse.ArgumentParser(description="args for paddleserving") +parser.add_argument("--image_dir", type=str, default="../../doc/imgs/") +args = parser.parse_args() + def cv2_to_base64(image): return base64.b64encode(image).decode('utf8') url = "http://127.0.0.1:9998/ocr/prediction" -test_img_dir = "../../doc/imgs/" +test_img_dir = args.image_dir + for idx, img_file in enumerate(os.listdir(test_img_dir)): with open(os.path.join(test_img_dir, img_file), 'rb') as file: image_data1 = file.read() @@ -36,5 +42,4 @@ for idx, img_file in enumerate(os.listdir(test_img_dir)): r = requests.post(url=url, data=json.dumps(data)) print(r.json()) -test_img_dir = "../../doc/imgs/" print("==> total number of test imgs: ", len(os.listdir(test_img_dir))) diff --git a/deploy/pdserving/pipeline_rpc_client.py 
b/deploy/pdserving/pipeline_rpc_client.py index 79f898faf37f946cdbf4a87d4d62c8b1f9d5c93b..4dcb1ad5f533729e344809e99951b59fb2908537 100644 --- a/deploy/pdserving/pipeline_rpc_client.py +++ b/deploy/pdserving/pipeline_rpc_client.py @@ -30,7 +30,12 @@ def cv2_to_base64(image): return base64.b64encode(image).decode('utf8') -test_img_dir = "imgs/" +import argparse +parser = argparse.ArgumentParser(description="args for paddleserving") +parser.add_argument("--image_dir", type=str, default="../../doc/imgs/") +args = parser.parse_args() +test_img_dir = args.image_dir + for img_file in os.listdir(test_img_dir): with open(os.path.join(test_img_dir, img_file), 'rb') as file: image_data = file.read() diff --git a/test_tipc/configs/mac_ppocr_det_mobile_params.txt b/test_tipc/configs/mac_ppocr_det_mobile_params.txt index b0415c9a1f79837866812d1e545ad8fd09fb681d..0200e2954948e5aeb719aa43e8a88d70c2af506d 100644 --- a/test_tipc/configs/mac_ppocr_det_mobile_params.txt +++ b/test_tipc/configs/mac_ppocr_det_mobile_params.txt @@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs +pipline:pipeline_http_client.py|pipeline_rpc_client.py +--image_dir=../../doc/imgs ===========================kl_quant_params=========================== infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o diff --git a/test_tipc/configs/ppocr_det_mobile_params.txt b/test_tipc/configs/ppocr_det_mobile_params.txt index 3442627613b20b687566ce9e84d7404c4a836e83..a6bbabd15449053fdaa24087c9d0492280ebe4e3 100644 --- a/test_tipc/configs/ppocr_det_mobile_params.txt +++ b/test_tipc/configs/ppocr_det_mobile_params.txt @@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False op.det.local_service_conf.thread_num:1|6 
op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs +pipline:pipeline_rpc_client.py|pipeline_http_client.py +--image_dir:../../doc/imgs ===========================kl_quant_params=========================== infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o diff --git a/test_tipc/configs/ppocr_det_server_params.txt b/test_tipc/configs/ppocr_det_server_params.txt index bba4ef44f769ed16671ead55a0eba6ee986aaaaa..f688fffac8824b0608ea6b6cec0683c70feb659e 100644 --- a/test_tipc/configs/ppocr_det_server_params.txt +++ b/test_tipc/configs/ppocr_det_server_params.txt @@ -80,4 +80,5 @@ op.det.local_service_conf.use_mkldnn:True|False op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs +pipline:pipeline_http_client.py|pipeline_rpc_client.py +--image_dir=../../doc/imgs diff --git a/test_tipc/configs/ppocr_rec_mobile_params.txt b/test_tipc/configs/ppocr_rec_mobile_params.txt index f3f3a54e14e042693d28559e487852a079f77bdd..3177d19cf6cf7759e13e5597492f3bd7fcea78ff 100644 --- a/test_tipc/configs/ppocr_rec_mobile_params.txt +++ b/test_tipc/configs/ppocr_rec_mobile_params.txt @@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False op.rec.local_service_conf.thread_num:1|6 op.rec.local_service_conf.use_trt:False|True op.rec.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en +pipline:pipeline_http_client.py|pipeline_rpc_client.py +--image_dir=../../doc/imgs_words_en diff --git a/test_tipc/configs/ppocr_rec_server_params.txt b/test_tipc/configs/ppocr_rec_server_params.txt index 77961e8e651e0d770dae64860cc129aa2d50dcf2..3bc1dcce2c7103f2180c19551e8f5379e5524476 100644 --- 
a/test_tipc/configs/ppocr_rec_server_params.txt +++ b/test_tipc/configs/ppocr_rec_server_params.txt @@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False op.rec.local_service_conf.thread_num:1|6 op.rec.local_service_conf.use_trt:False|True op.rec.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en +pipline:pipeline_http_client.py|pipeline_rpc_client.py +--image_dir=../../doc/imgs_words_en diff --git a/test_tipc/configs/win_ppocr_det_mobile_params.txt b/test_tipc/configs/win_ppocr_det_mobile_params.txt index 5a532ceb307fe87174dc6b46fbde236405f59ff5..0f4faee4b32925b4d0780ece6838c176238c7000 100644 --- a/test_tipc/configs/win_ppocr_det_mobile_params.txt +++ b/test_tipc/configs/win_ppocr_det_mobile_params.txt @@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs +pipline:pipeline_http_client.py|pipeline_rpc_client.py +--image_dir=../../doc/imgs ===========================kl_quant_params=========================== infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o diff --git a/test_tipc/test_lite.sh b/test_tipc/test_lite.sh index 832003ba302fe86995e20029cdb019e72d9ce162..1fd9d3c7186207922c436e7981622c707a56596f 100644 --- a/test_tipc/test_lite.sh +++ b/test_tipc/test_lite.sh @@ -3,7 +3,7 @@ source ./common_func.sh export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH FILENAME=$1 -dataline=$(awk 'NR==101, NR==110{print}' $FILENAME) +dataline=$(awk 'NR==102, NR==111{print}' $FILENAME) echo $dataline # parser params IFS=$'\n' diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh index be7b594c3848c423937c59336ce3bf686f8f228d..9b1e90ed6116f32e232657e30277a747a70904c7 100644 --- 
a/test_tipc/test_serving.sh +++ b/test_tipc/test_serving.sh @@ -2,7 +2,7 @@ source test_tipc/common_func.sh FILENAME=$1 -dataline=$(awk 'NR==67, NR==83{print}' $FILENAME) +dataline=$(awk 'NR==67, NR==84{print}' $FILENAME) # parser params IFS=$'\n' @@ -35,6 +35,8 @@ web_use_trt_list=$(func_parser_value "${lines[14]}") web_precision_key=$(func_parser_key "${lines[15]}") web_precision_list=$(func_parser_value "${lines[15]}") pipeline_py=$(func_parser_value "${lines[16]}") +image_dir_key=$(func_parser_key "${lines[17]}") +image_dir_value=$(func_parser_value "${lines[17]}") LOG_PATH="../../test_tipc/output" mkdir -p ./test_tipc/output @@ -51,67 +53,98 @@ function func_serving(){ set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}") set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}") + set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}") trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" eval $trans_model_cmd cd ${serving_dir_value} echo $PWD unset https_proxy unset http_proxy - for use_gpu in ${web_use_gpu_list[*]}; do - echo ${ues_gpu} - if [ ${use_gpu} = "null" ]; then - for use_mkldnn in ${web_use_mkldnn_list[*]}; do - if [ ${use_mkldnn} = "False" ]; then - continue - fi - for threads in ${web_cpu_threads_list[*]}; do - _save_log_path="${LOG_PATH}/server_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log" - set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}") - web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &" - eval $web_service_cmd - sleep 2s - pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1 " - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - eval "cat 
${_save_log_path}" - status_check $last_status "${pipeline_cmd}" "${status_log}" - PID=$! - kill $PID - sleep 2s - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - done - done - elif [ ${use_gpu} = "0" ]; then - for use_trt in ${web_use_trt_list[*]}; do - for precision in ${web_precision_list[*]}; do - if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then - continue - fi - if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then - continue - fi - if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then - continue - fi - _save_log_path="${LOG_PATH}/server_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log" - set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}") - set_precision=$(func_set_params "${web_precision_key}" "${precision}") - web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & " + for python in ${python[*]}; do + if [ ${python} = "cpp" ]; then + for use_gpu in ${web_use_gpu_list[*]}; do + if [ ${use_gpu} = "null" ]; then + web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293" eval $web_service_cmd sleep 2s - pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1" + _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" + pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" status_check $last_status "${pipeline_cmd}" "${status_log}" - PID=$!
- kill $PID sleep 2s ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - done + else + web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0" + eval $web_service_cpp_cmd + sleep 2s + _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" + pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" + eval $pipeline_cmd + status_check $last_status "${pipeline_cmd}" "${status_log}" + sleep 2s + ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 + fi done else - echo "Does not support hardware other than CPU and GPU Currently!" + # python serving + for use_gpu in ${web_use_gpu_list[*]}; do + echo ${use_gpu} + if [ ${use_gpu} = "null" ]; then + for use_mkldnn in ${web_use_mkldnn_list[*]}; do + if [ ${use_mkldnn} = "False" ]; then + continue + fi + for threads in ${web_cpu_threads_list[*]}; do + set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}") + web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &" + eval $web_service_cmd + sleep 2s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log" + pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 " + eval $pipeline_cmd + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" + sleep 2s + done + ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 + done + done + elif [ ${use_gpu} = "0" ]; then + for use_trt in ${web_use_trt_list[*]}; do + for precision in ${web_precision_list[*]}; do + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi
+ if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then + continue + fi + set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${web_precision_key}" "${precision}") + web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & " + eval $web_service_cmd + + sleep 2s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log" + pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1" + eval $pipeline_cmd + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" + sleep 2s + done + ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" + fi + done fi done } diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index a9be33ea45cc56b5478e9135451849e25888f8d1..23dfed5c044ffca78bb2cf4eb7e948626e4c4b52 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -90,7 +90,7 @@ infer_value1=$(func_parser_value "${lines[50]}") # parser klquant_infer if [ ${MODE} = "klquant_whole_infer" ]; then - dataline=$(awk 'NR==82, NR==98{print}' $FILENAME) + dataline=$(awk 'NR==85, NR==101{print}' $FILENAME) lines=(${dataline}) # parser inference model infer_model_dir_list=$(func_parser_value "${lines[1]}")