Unverified commit 5ebf5d6e, authored by xiaoting, committed by GitHub

Merge pull request #4544 from tink2123/update_serving

update test serving
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import sys
import numpy as np
import base64
import os
import cv2
from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from ocr_reader import OCRReader
client = Client()
# TODO: load_client needs to support loading more than one client model;
# some details still have to be worked out.
client.load_client_config(sys.argv[1:])
client.connect(["127.0.0.1:9293"])

test_img_dir = "test_img/"
ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")


def cv2_to_base64(image):
    # Encode the raw image bytes as a base64 string for the serving request.
    return base64.b64encode(image).decode('utf8')


for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data = file.read()
    image = cv2_to_base64(image_data)
    res_list = []
    fetch_map = client.predict(
        feed={"x": image},
        fetch=["save_infer_model/scale_0.tmp_1"],
        batch=True)
    print("fetch map:", fetch_map)
    one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
    for res in one_batch_res:
        res_list.append(res[0])
    res = {"res": str(res_list)}
    print(res)
@@ -18,13 +18,19 @@ import json
 import base64
 import os
+import argparse
+
+parser = argparse.ArgumentParser(description="args for paddleserving")
+parser.add_argument("--image_dir", type=str, default="../../doc/imgs/")
+args = parser.parse_args()
 
 def cv2_to_base64(image):
     return base64.b64encode(image).decode('utf8')
 
 url = "http://127.0.0.1:9998/ocr/prediction"
-test_img_dir = "../../doc/imgs/"
+test_img_dir = args.image_dir
 for idx, img_file in enumerate(os.listdir(test_img_dir)):
     with open(os.path.join(test_img_dir, img_file), 'rb') as file:
         image_data1 = file.read()
@@ -36,5 +42,4 @@ for idx, img_file in enumerate(os.listdir(test_img_dir)):
     r = requests.post(url=url, data=json.dumps(data))
     print(r.json())
-test_img_dir = "../../doc/imgs/"
 print("==> total number of test imgs: ", len(os.listdir(test_img_dir)))
@@ -30,7 +30,12 @@ def cv2_to_base64(image):
     return base64.b64encode(image).decode('utf8')
 
-test_img_dir = "imgs/"
+import argparse
+parser = argparse.ArgumentParser(description="args for paddleserving")
+parser.add_argument("--image_dir", type=str, default="../../doc/imgs/")
+args = parser.parse_args()
+test_img_dir = args.image_dir
 for img_file in os.listdir(test_img_dir):
     with open(os.path.join(test_img_dir, img_file), 'rb') as file:
         image_data = file.read()
......
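The two diffs above only swap a hard-coded image directory for an --image_dir flag; the rest of the pipeline HTTP client is unchanged. For readers following along, here is a minimal, self-contained sketch of that client pattern. The flag, the cv2_to_base64 helper, the URL, and the requests.post call are taken from the diffs; the exact JSON payload layout ({"key": ..., "value": ...}) is an assumption and may differ from the real pipeline_http_client.py.

import argparse
import base64
import json
import os

import requests


def cv2_to_base64(image):
    # The serving pipeline expects raw image bytes as a base64 string.
    return base64.b64encode(image).decode('utf8')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="args for paddleserving")
    parser.add_argument("--image_dir", type=str, default="../../doc/imgs/")
    args = parser.parse_args()

    url = "http://127.0.0.1:9998/ocr/prediction"
    for idx, img_file in enumerate(os.listdir(args.image_dir)):
        with open(os.path.join(args.image_dir, img_file), 'rb') as f:
            image = cv2_to_base64(f.read())
        # Assumed payload shape; the real client may name these fields differently.
        data = {"key": ["image"], "value": [image]}
        r = requests.post(url=url, data=json.dumps(data))
        print(r.json())
    print("==> total number of test imgs: ", len(os.listdir(args.image_dir)))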
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
......
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_rpc_client.py|pipeline_http_client.py
+--image_dir:../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
......
@@ -80,4 +80,5 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs
@@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs_words_en
@@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs_words_en
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
......
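The params-file changes above all follow the same pattern: the single pipline entry now lists two '|'-separated client scripts, and the image directory moves to its own line so the test harness can pass it as a flag. As a rough illustration of that "key:value1|value2" format (this is not the project's actual parser, which is the shell helpers func_parser_key/func_parser_value sourced from common_func.sh):

def parse_param_line(line):
    # Split "key:value" at the first colon, then split the value on "|",
    # which separates alternatives the test harness runs one after another.
    key, _, value = line.partition(":")
    return key, value.split("|")


key, clients = parse_param_line(
    "pipline:pipeline_http_client.py|pipeline_rpc_client.py")
print(key)      # "pipline"
print(clients)  # ["pipeline_http_client.py", "pipeline_rpc_client.py"]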
@@ -3,7 +3,7 @@ source ./common_func.sh
 export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
 FILENAME=$1
-dataline=$(awk 'NR==101, NR==110{print}' $FILENAME)
+dataline=$(awk 'NR==102, NR==111{print}' $FILENAME)
 echo $dataline
 # parser params
 IFS=$'\n'
......
@@ -2,7 +2,7 @@
 source test_tipc/common_func.sh
 FILENAME=$1
-dataline=$(awk 'NR==67, NR==83{print}' $FILENAME)
+dataline=$(awk 'NR==67, NR==84{print}' $FILENAME)
 # parser params
 IFS=$'\n'
@@ -35,6 +35,8 @@ web_use_trt_list=$(func_parser_value "${lines[14]}")
 web_precision_key=$(func_parser_key "${lines[15]}")
 web_precision_list=$(func_parser_value "${lines[15]}")
 pipeline_py=$(func_parser_value "${lines[16]}")
+image_dir_key=$(func_parser_key "${lines[17]}")
+image_dir_value=$(func_parser_value "${lines[17]}")
 LOG_PATH="../../test_tipc/output"
 mkdir -p ./test_tipc/output
@@ -51,12 +53,40 @@ function func_serving(){
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
     set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
+    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
     trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
     eval $trans_model_cmd
     cd ${serving_dir_value}
     echo $PWD
     unset https_proxy
     unset http_proxy
+    for python in ${python[*]}; do
+        if [ ${python} = "cpp" ]; then
+            for use_gpu in ${web_use_gpu_list[*]}; do
+                if [ ${use_gpu} = "null" ]; then
+                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
+                    eval $web_service_cpp_cmd
+                    sleep 2s
+                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
+                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
+                    eval $pipeline_cmd
+                    status_check $last_status "${pipeline_cmd}" "${status_log}"
+                    sleep 2s
+                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                else
+                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
+                    eval $web_service_cpp_cmd
+                    sleep 2s
+                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
+                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
+                    eval $pipeline_cmd
+                    status_check $last_status "${pipeline_cmd}" "${status_log}"
+                    sleep 2s
+                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                fi
+            done
+        else
+            # python serving
     for use_gpu in ${web_use_gpu_list[*]}; do
         echo ${ues_gpu}
         if [ ${use_gpu} = "null" ]; then
@@ -65,19 +95,19 @@ function func_serving(){
                 continue
             fi
             for threads in ${web_cpu_threads_list[*]}; do
-                _save_log_path="${LOG_PATH}/server_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
                 set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
                 web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                 eval $web_service_cmd
                 sleep 2s
-                pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1 "
+                for pipeline in ${pipeline_py[*]}; do
+                    _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
+                    pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 "
                 eval $pipeline_cmd
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
                 status_check $last_status "${pipeline_cmd}" "${status_log}"
-                PID=$!
-                kill $PID
                 sleep 2s
+                done
                 ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
             done
         done
@@ -93,20 +123,21 @@ function func_serving(){
            if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                continue
            fi
-            _save_log_path="${LOG_PATH}/server_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
            set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
            set_precision=$(func_set_params "${web_precision_key}" "${precision}")
            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
            eval $web_service_cmd
            sleep 2s
-            pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1"
+            for pipeline in ${pipeline_py[*]}; do
+                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
+                pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1"
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
            status_check $last_status "${pipeline_cmd}" "${status_log}"
-            PID=$!
-            kill $PID
            sleep 2s
+            done
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        done
    done
@@ -114,6 +145,8 @@ function func_serving(){
        echo "Does not support hardware other than CPU and GPU Currently!"
    fi
    done
+    fi
+    done
 }
......
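A side note on the new log-file names in the diff above: the ${pipeline%_client*} expansion trims the trailing "_client*" part of the client script name, so the HTTP and RPC pipeline clients write to separate logs. A rough Python illustration (the surrounding values are placeholders for the shell loop variables, not the script's actual output):

import re

for pipeline in ["pipeline_http_client.py", "pipeline_rpc_client.py"]:
    # Equivalent of ${pipeline%_client*}: drop the "_client..." suffix.
    tag = re.sub(r"_client.*$", "", pipeline)  # -> "pipeline_http", "pipeline_rpc"
    print(f"server_infer_cpu_{tag}_usemkldnn_False_threads_1_batchsize_1.log")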
@@ -90,7 +90,7 @@ infer_value1=$(func_parser_value "${lines[50]}")
 # parser klquant_infer
 if [ ${MODE} = "klquant_whole_infer" ]; then
-    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+    dataline=$(awk 'NR==85, NR==101{print}' $FILENAME)
     lines=(${dataline})
     # parser inference model
     infer_model_dir_list=$(func_parser_value "${lines[1]}")
......
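For reference, awk 'NR==85, NR==101{print}' $FILENAME prints lines 85 through 101 of the params file; the ranges in the two shell scripts above were updated to match the re-numbered params files. A minimal Python equivalent, for illustration only:

def print_line_range(path, start=85, end=101):
    # Print lines start..end (1-based, inclusive), mirroring the awk range.
    with open(path) as f:
        for lineno, line in enumerate(f, start=1):
            if start <= lineno <= end:
                print(line, end="")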