未验证 提交 c2aaed44 编写于 作者: X xiaoting 提交者: GitHub

Merge pull request #5711 from tink2123/fix_tipc_serving

polish tipc serving
...@@ -433,3 +433,54 @@ class OCRReader(object): ...@@ -433,3 +433,54 @@ class OCRReader(object):
text = self.label_ops.decode( text = self.label_ops.decode(
preds_idx, preds_prob, is_remove_duplicate=True) preds_idx, preds_prob, is_remove_duplicate=True)
return text return text
from argparse import ArgumentParser,RawDescriptionHelpFormatter
import yaml
class ArgsParser(ArgumentParser):
    """Argument parser that loads a YAML config file (-c/--config) and
    applies dotted-key overrides given on the command line
    (-o/--opt key.subkey=value)."""

    def __init__(self):
        super(ArgsParser, self).__init__(
            formatter_class=RawDescriptionHelpFormatter)
        self.add_argument("-c", "--config", help="configuration file to use")
        self.add_argument(
            "-o", "--opt", nargs='+', help="set configuration options")

    def parse_args(self, argv=None):
        """Parse argv, load the YAML config, apply -o overrides and
        attach the merged dict as ``args.conf_dict``."""
        args = super(ArgsParser, self).parse_args(argv)
        assert args.config is not None, \
            "Please specify --config=configure_file_path."
        args.conf_dict = self._parse_opt(args.opt, args.config)
        print("args config:", args.conf_dict)
        return args

    def _parse_helper(self, v):
        """Coerce the string override value *v* to int, float or bool
        when it looks like one; otherwise return it unchanged."""
        if v.isnumeric():
            # isnumeric() is False for strings containing '.', so this
            # branch only ever produces ints.
            v = int(v)
        elif v.replace(".", "", 1).isnumeric():
            # BUGFIX: the original tested '.' inside the isnumeric()
            # branch, which is unreachable for floats — values such as
            # "0.5" were silently left as strings.
            v = float(v)
        elif v == "True" or v == "False":
            v = (v == "True")
        return v

    def _parse_opt(self, opts, conf_path):
        """Load the YAML file at *conf_path* and apply each "a.b.c=value"
        override in *opts*, creating intermediate dicts as needed.

        Returns the (mutated) config dict."""
        # BUGFIX: close the config file deterministically.
        with open(conf_path) as f:
            config = yaml.load(f, Loader=yaml.Loader)
        if not opts:
            return config
        for s in opts:
            s = s.strip()
            # maxsplit=1 so override values containing '=' stay intact.
            k, v = s.split('=', 1)
            v = self._parse_helper(v)
            *parents, leaf = k.split(".")
            cur = config
            for kk in parents:
                # Walk/create the nested dicts down to the leaf's parent.
                cur = cur.setdefault(kk, {})
            cur[leaf] = v
        return config
\ No newline at end of file
...@@ -18,63 +18,13 @@ import numpy as np ...@@ -18,63 +18,13 @@ import numpy as np
import cv2 import cv2
import base64 import base64
# from paddle_serving_app.reader import OCRReader # from paddle_serving_app.reader import OCRReader
from ocr_reader import OCRReader, DetResizeForTest from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
from paddle_serving_app.reader import Sequential, ResizeByFactor from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
import yaml
from argparse import ArgumentParser,RawDescriptionHelpFormatter
_LOGGER = logging.getLogger() _LOGGER = logging.getLogger()
class ArgsParser(ArgumentParser):
    """Argument parser that loads a YAML config file (-c/--config) and
    applies dotted-key overrides given on the command line
    (-o/--opt key.subkey=value)."""

    def __init__(self):
        super(ArgsParser, self).__init__(
            formatter_class=RawDescriptionHelpFormatter)
        self.add_argument("-c", "--config", help="configuration file to use")
        self.add_argument(
            "-o", "--opt", nargs='+', help="set configuration options")

    def parse_args(self, argv=None):
        """Parse argv, load the YAML config, apply -o overrides and
        attach the merged dict as ``args.conf_dict``."""
        args = super(ArgsParser, self).parse_args(argv)
        assert args.config is not None, \
            "Please specify --config=configure_file_path."
        args.conf_dict = self._parse_opt(args.opt, args.config)
        return args

    def _parse_helper(self, v):
        """Coerce the string override value *v* to int, float or bool
        when it looks like one; otherwise return it unchanged."""
        if v.isnumeric():
            # isnumeric() is False for strings containing '.', so this
            # branch only ever produces ints.
            v = int(v)
        elif v.replace(".", "", 1).isnumeric():
            # BUGFIX: the original tested '.' inside the isnumeric()
            # branch, which is unreachable for floats — values such as
            # "0.5" were silently left as strings.
            v = float(v)
        elif v == "True" or v == "False":
            v = (v == "True")
        return v

    def _parse_opt(self, opts, conf_path):
        """Load the YAML file at *conf_path* and apply each "a.b.c=value"
        override in *opts*, creating intermediate dicts as needed.

        Returns the (mutated) config dict."""
        # BUGFIX: close the config file deterministically.
        with open(conf_path) as f:
            config = yaml.load(f, Loader=yaml.Loader)
        if not opts:
            return config
        for s in opts:
            s = s.strip()
            # maxsplit=1 so override values containing '=' stay intact.
            k, v = s.split('=', 1)
            v = self._parse_helper(v)
            *parents, leaf = k.split(".")
            cur = config
            for kk in parents:
                # Walk/create the nested dicts down to the leaf's parent.
                cur = cur.setdefault(kk, {})
            cur[leaf] = v
        return config
class DetOp(Op): class DetOp(Op):
def init_op(self): def init_op(self):
......
...@@ -18,10 +18,9 @@ import numpy as np ...@@ -18,10 +18,9 @@ import numpy as np
import cv2 import cv2
import base64 import base64
# from paddle_serving_app.reader import OCRReader # from paddle_serving_app.reader import OCRReader
from ocr_reader import OCRReader, DetResizeForTest from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
from paddle_serving_app.reader import Sequential, ResizeByFactor from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose from paddle_serving_app.reader import Div, Normalize, Transpose
from web_service_det import ArgsParser
_LOGGER = logging.getLogger() _LOGGER = logging.getLogger()
......
...@@ -8,8 +8,8 @@ trans_model:-m paddle_serving_client.convert ...@@ -8,8 +8,8 @@ trans_model:-m paddle_serving_client.convert
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ --serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ --serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1"
op.det.local_service_conf.devices:null|0 op.det.local_service_conf.devices:"0"|null
op.det.local_service_conf.use_mkldnn:True|False op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.use_trt:False|True
......
...@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert ...@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert
--serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/ --serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
serving_dir:./deploy/pdserving serving_dir:./deploy/pdserving
web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
op.rec.local_service_conf.devices:null|0 op.rec.local_service_conf.devices:"0"|null
op.rec.local_service_conf.use_mkldnn:True|False op.rec.local_service_conf.use_mkldnn:True|False
op.rec.local_service_conf.thread_num:1|6 op.rec.local_service_conf.thread_num:1|6
op.rec.local_service_conf.use_trt:False|True op.rec.local_service_conf.use_trt:False|True
......
...@@ -9,10 +9,10 @@ trans_model:-m paddle_serving_client.convert ...@@ -9,10 +9,10 @@ trans_model:-m paddle_serving_client.convert
--serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/ --serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/
serving_dir:./deploy/pdserving serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0 op.det.local_service_conf.devices:"0"|null
op.det.local_service_conf.use_mkldnn:True|False op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8 op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_rpc_client.py|pipeline_http_client.py pipline:pipeline_rpc_client.py|pipeline_http_client.py
--image_dir:../../doc/imgs_words_en --image_dir:../../doc/imgs
\ No newline at end of file \ No newline at end of file
===========================serving_params=========================== ===========================serving_params===========================
model_name:ocr_rec_server model_name:ocr_rec_server
python:python3.7 python:python3.7|cpp
trans_model:-m paddle_serving_client.convert trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ --dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
--model_filename:inference.pdmodel --model_filename:inference.pdmodel
...@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert ...@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert
--serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/ --serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
serving_dir:./deploy/pdserving serving_dir:./deploy/pdserving
web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
op.rec.local_service_conf.devices:null|0 op.rec.local_service_conf.devices:"0"|null
op.rec.local_service_conf.use_mkldnn:True|False op.rec.local_service_conf.use_mkldnn:True|False
op.rec.local_service_conf.thread_num:1|6 op.rec.local_service_conf.thread_num:1|6
op.rec.local_service_conf.use_trt:False|True op.rec.local_service_conf.use_trt:False|True
......
...@@ -308,10 +308,9 @@ if [ ${MODE} = "serving_infer" ];then ...@@ -308,10 +308,9 @@ if [ ${MODE} = "serving_infer" ];then
IFS='|' IFS='|'
array=(${python_name_list}) array=(${python_name_list})
python_name=${array[0]} python_name=${array[0]}
wget -nc https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl ${python_name} -m pip install paddle-serving-server-gpu==0.8.3.post101
${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl ${python_name} -m pip install paddle_serving_client==0.8.3
${python_name} -m pip install paddle_serving_client==0.6.1 ${python_name} -m pip install paddle-serving-app==0.8.3
${python_name} -m pip install paddle-serving-app==0.6.3
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
......
...@@ -58,29 +58,32 @@ function func_serving(){ ...@@ -58,29 +58,32 @@ function func_serving(){
trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval $trans_model_cmd eval $trans_model_cmd
cd ${serving_dir_value} cd ${serving_dir_value}
echo $PWD
unset https_proxy unset https_proxy
unset http_proxy unset http_proxy
for python in ${python_list[*]}; do for python in ${python_list[*]}; do
if [ ${python} = "cpp" ]; then if [ ${python} = "cpp" ]; then
for use_gpu in ${web_use_gpu_list[*]}; do for use_gpu in ${web_use_gpu_list[*]}; do
if [ ${use_gpu} = "null" ]; then if [ ${use_gpu} = "null" ]; then
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293" web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
eval $web_service_cmd eval $web_service_cpp_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}"
sleep 2s sleep 2s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
eval $pipeline_cmd eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${pipeline_cmd}" "${status_log}" status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s sleep 2s
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
else else
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0" web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
eval $web_service_cmd eval $web_service_cpp_cmd
sleep 2s sleep 2s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
eval $pipeline_cmd eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${pipeline_cmd}" "${status_log}" status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s sleep 2s
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
...@@ -89,13 +92,14 @@ function func_serving(){ ...@@ -89,13 +92,14 @@ function func_serving(){
else else
# python serving # python serving
for use_gpu in ${web_use_gpu_list[*]}; do for use_gpu in ${web_use_gpu_list[*]}; do
echo ${ues_gpu}
if [ ${use_gpu} = "null" ]; then if [ ${use_gpu} = "null" ]; then
for use_mkldnn in ${web_use_mkldnn_list[*]}; do for use_mkldnn in ${web_use_mkldnn_list[*]}; do
for threads in ${web_cpu_threads_list[*]}; do for threads in ${web_cpu_threads_list[*]}; do
set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}") set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &" web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
eval $web_service_cmd eval $web_service_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}"
sleep 2s sleep 2s
for pipeline in ${pipeline_py[*]}; do for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log" _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
...@@ -128,6 +132,8 @@ function func_serving(){ ...@@ -128,6 +132,8 @@ function func_serving(){
set_precision=$(func_set_params "${web_precision_key}" "${precision}") set_precision=$(func_set_params "${web_precision_key}" "${precision}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & " web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
eval $web_service_cmd eval $web_service_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}"
sleep 2s sleep 2s
for pipeline in ${pipeline_py[*]}; do for pipeline in ${pipeline_py[*]}; do
...@@ -151,15 +157,15 @@ function func_serving(){ ...@@ -151,15 +157,15 @@ function func_serving(){
} }
# set cuda device #set cuda device
GPUID=$2 GPUID=$2
if [ ${#GPUID} -le 0 ];then if [ ${#GPUID} -le 0 ];then
env=" " env="export CUDA_VISIBLE_DEVICES=0"
else else
env="export CUDA_VISIBLE_DEVICES=${GPUID}" env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi fi
set CUDA_VISIBLE_DEVICES
eval $env eval $env
echo $env
echo "################### run test ###################" echo "################### run test ###################"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册