From 7399cbac200187154fede35d122d688b9ea663b8 Mon Sep 17 00:00:00 2001
From: andyjpaddle
Date: Mon, 6 Jun 2022 07:48:40 +0000
Subject: [PATCH] add tipc for cpp infer

---
 deploy/pdserving/ocr_cpp_client.py            | 29 ++++++++++++++-----
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt |  4 +--
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt |  4 +--
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt |  4 +--
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt |  4 +--
 test_tipc/test_serving_infer_cpp.sh           | 25 ++++++++--------
 6 files changed, 41 insertions(+), 29 deletions(-)

diff --git a/deploy/pdserving/ocr_cpp_client.py b/deploy/pdserving/ocr_cpp_client.py
index 92ccb9cd..649dfe04 100755
--- a/deploy/pdserving/ocr_cpp_client.py
+++ b/deploy/pdserving/ocr_cpp_client.py
@@ -22,15 +22,16 @@ import cv2
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from ocr_reader import OCRReader
+import codecs
 
 client = Client()
 # TODO:load_client need to load more than one client model.
 # this need to figure out some details.
 client.load_client_config(sys.argv[1:])
-client.connect(["127.0.0.1:9293"])
+client.connect(["127.0.0.1:8181"])  # 9293
 
 import paddle
-test_img_dir = "../../doc/imgs/1.jpg"
+test_img_dir = "../../doc/imgs/"
 
 ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")
 
@@ -62,9 +63,21 @@ for img_file in test_img_list:
     image = cv2_to_base64(image_data)
     res_list = []
     fetch_map = client.predict(feed={"x": image}, fetch=[], batch=True)
-    print(fetch_map)
-    one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
-    for res in one_batch_res:
-        res_list.append(res[0])
-    res = {"res": str(res_list)}
-    print(res)
+    if fetch_map is None:
+        print('no results')
+    else:
+        if "text" in fetch_map:
+            for x in fetch_map["text"]:
+                x = codecs.encode(x)
+                words = base64.b64decode(x).decode('utf-8')
+                res_list.append(words)
+        else:
+            try:
+                one_batch_res = ocr_reader.postprocess(
+                    fetch_map, with_score=True)
+                for res in one_batch_res:
+                    res_list.append(res[0])
+            except:
+                print('no results')
+        res = {"res": str(res_list)}
+        print(res)
diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
index 645409d3..f0456b5c 100644
--- a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
--device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
index fee9f307..91c57bed 100644
--- a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
--device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
index bc9e77cc..d18e9f11 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
--device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
index 67f4833e..bbfec44d 100644
--- a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
--device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index 9aafc6a8..e0e31b59 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -47,7 +47,8 @@ op_key=$(func_parser_key "${lines[14]}")
 op_value=$(func_parser_value "${lines[14]}")
 port_key=$(func_parser_key "${lines[15]}")
 port_value=$(func_parser_value "${lines[15]}")
-device_value=$(func_parser_value "${lines[16]}")
+gpu_key=$(func_parser_key "${lines[16]}")
+gpu_value=$(func_parser_value "${lines[16]}")
 cpp_client_py=$(func_parser_value "${lines[17]}")
 image_dir_key=$(func_parser_key "${lines[18]}")
 image_dir_value=$(func_parser_value "${lines[18]}")
@@ -108,8 +109,8 @@ function func_serving(){
     # cpp serving
     unset https_proxy
     unset http_proxy
-    for device in ${device_value[*]}; do
-        if [ ${device} = "cpu" ]; then
+    for gpu_id in ${gpu_value[*]}; do
+        if [ ${gpu_id} = "null" ]; then
             if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
                 web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} > serving_log_cpu.log &"
             elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then
@@ -132,16 +133,16 @@ function func_serving(){
             eval $cpp_client_cmd
             last_status=${PIPESTATUS[0]}
             status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
-            sleep 5s
+            # sleep 5s
             ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
-            ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
-        elif [ ${device} = "gpu" ]; then
+            # ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
+        else
             if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
-                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} --gpu_id=0 > serving_log_gpu.log &"
+                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &"
             elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then
-                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${op_key} ${op_value} ${port_key} ${port_value} --gpu_id=0 > serving_log_gpu.log &"
+                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &"
             elif [ ${model_name} = "ch_PP-OCRv2_rec" ] || [ ${model_name} = "ch_PP-OCRv3_rec" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ] || [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then
-                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} --gpu_id=0 > serving_log_gpu.log &"
+                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &"
             fi
             eval $web_service_cpp_cmd
             sleep 5s
@@ -157,11 +158,9 @@ function func_serving(){
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
             status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
-            sleep 5s
+            # sleep 5s
             ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
-            ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
-        else
-            echo "Does not support hardware other than CPU and GPU Currently!"
+            # ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
         fi
     done
 }
--
GitLab