diff --git a/deploy/pdserving/README.md b/deploy/pdserving/README.md
index 55e03c4c2654f336ed942ae03e61e88b61940006..29c85c0826791bb843a7afd5daee3999fd00c046 100644
--- a/deploy/pdserving/README.md
+++ b/deploy/pdserving/README.md
@@ -217,7 +217,7 @@ The C++ service deployment is the same as python in the environment setup and da
 2. Run the following command to start the service.
     ```
     # Start the service and save the running log in log.txt
-    python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 9293 &>log.txt &
+    python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt &
     ```
     After the service is successfully started, a log similar to the following will be printed in log.txt
     ![](./imgs/start_server.png)
diff --git a/deploy/pdserving/README_CN.md b/deploy/pdserving/README_CN.md
index 0891611db5f39d322473354f7d988b10afa78cbd..92b1ad0c69d57264f67f3a32484e80793ed18de6 100644
--- a/deploy/pdserving/README_CN.md
+++ b/deploy/pdserving/README_CN.md
@@ -230,7 +230,7 @@ cp -rf general_detection_op.cpp Serving/core/general-server/op
 
     ```
     # 启动服务,运行日志保存在log.txt
-    python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 9293 &>log.txt &
+    python3 -m paddle_serving_server.serve --model ppocr_det_v3_serving ppocr_rec_v3_serving --op GeneralDetectionOp GeneralInferOp --port 8181 &>log.txt &
     ```
     成功启动服务后,log.txt中会打印类似如下日志
     ![](./imgs/start_server.png)
diff --git a/deploy/pdserving/ocr_cpp_client.py b/deploy/pdserving/ocr_cpp_client.py
index 7f9333dd858aad5440ff256d501cf1e5d2f5fb1f..3aaf03155953ce2129fd548deef15033e91e9a09 100755
--- a/deploy/pdserving/ocr_cpp_client.py
+++ b/deploy/pdserving/ocr_cpp_client.py
@@ -22,15 +22,16 @@ import cv2
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from ocr_reader import OCRReader
+import codecs
 
 client = Client()
 # TODO:load_client need to load more than one client model.
 # this need to figure out some details.
 client.load_client_config(sys.argv[1:])
-client.connect(["127.0.0.1:9293"])
+client.connect(["127.0.0.1:8181"])
 
 import paddle
-test_img_dir = "../../doc/imgs/"
+test_img_dir = "../../doc/imgs/1.jpg"
 ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")
 
 
@@ -40,14 +41,46 @@ def cv2_to_base64(image):
         'utf8')  #data.tostring()).decode('utf8')
 
 
-for img_file in os.listdir(test_img_dir):
-    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
+def _check_image_file(path):
+    img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif'}
+    return any([path.lower().endswith(e) for e in img_end])
+
+
+test_img_list = []
+if os.path.isfile(test_img_dir) and _check_image_file(test_img_dir):
+    test_img_list.append(test_img_dir)
+elif os.path.isdir(test_img_dir):
+    for single_file in os.listdir(test_img_dir):
+        file_path = os.path.join(test_img_dir, single_file)
+        if os.path.isfile(file_path) and _check_image_file(file_path):
+            test_img_list.append(file_path)
+if len(test_img_list) == 0:
+    raise Exception("no image file found in {}".format(test_img_dir))
+
+for img_file in test_img_list:
+    with open(img_file, 'rb') as file:
         image_data = file.read()
     image = cv2_to_base64(image_data)
     res_list = []
     fetch_map = client.predict(feed={"x": image}, fetch=[], batch=True)
-    one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
-    for res in one_batch_res:
-        res_list.append(res[0])
-    res = {"res": str(res_list)}
-    print(res)
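+    # A sketch of the result handling (assumption about the server's reply
+    # layout): the C++ server may return base64-encoded strings under a "text"
+    # key, or raw tensors that OCRReader.postprocess can decode.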
+    if fetch_map is None:
+        print('no results')
+    else:
+        if "text" in fetch_map:
+            for x in fetch_map["text"]:
+                x = codecs.encode(x)
+                words = base64.b64decode(x).decode('utf-8')
+                res_list.append(words)
+        else:
+            try:
+                one_batch_res = ocr_reader.postprocess(
+                    fetch_map, with_score=True)
+                for res in one_batch_res:
+                    res_list.append(res[0])
+            except Exception:
+                print('no results')
+        res = {"res": str(res_list)}
+        print(res)
 
diff --git a/deploy/pdserving/serving_client_conf.prototxt b/deploy/pdserving/serving_client_conf.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..33960540a2ea1a7a1e37bda07ab62197a54e272d
--- /dev/null
+++ b/deploy/pdserving/serving_client_conf.prototxt
@@ -0,0 +1,18 @@
+feed_var {
+  name: "x"
+  alias_name: "x"
+  is_lod_tensor: false
+  feed_type: 20
+  shape: 1
+}
+fetch_var {
+  name: "save_infer_model/scale_0.tmp_1"
+  alias_name: "save_infer_model/scale_0.tmp_1"
+  is_lod_tensor: false
+  fetch_type: 1
+  shape: 1
+  shape: 640
+  shape: 640
+}
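+# NOTE (assumption): feed_type 20 marks the feed as a raw string/bytes blob,
+# and the 1x640x640 fetch appears to be the detection score map.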
diff --git a/test_tipc/build_server.sh b/test_tipc/build_server.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3173359785290ffa5c6f865efe96705e2b09fae1
--- /dev/null
+++ b/test_tipc/build_server.sh
@@ -0,0 +1,75 @@
+# Image to use:
+#registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82
+
+# Building the Serving server:
+
+# the client and app wheels can be installed directly from the release
+
+# the server has to be recompiled because a custom OP was added
+
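+# Usage sketch (assumption: run from the PaddleOCR repo root inside the image
+# above, so that ../deploy/pdserving resolves after the `cd Serving` below):
+#   bash test_tipc/build_server.sh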
+apt-get update
+apt install -y libcurl4-openssl-dev libbz2-dev
+wget https://paddle-serving.bj.bcebos.com/others/centos_ssl.tar && tar xf centos_ssl.tar && rm -rf centos_ssl.tar && mv libcrypto.so.1.0.2k /usr/lib/libcrypto.so.1.0.2k && mv libssl.so.1.0.2k /usr/lib/libssl.so.1.0.2k && ln -sf /usr/lib/libcrypto.so.1.0.2k /usr/lib/libcrypto.so.10 && ln -sf /usr/lib/libssl.so.1.0.2k /usr/lib/libssl.so.10 && ln -sf /usr/lib/libcrypto.so.10 /usr/lib/libcrypto.so && ln -sf /usr/lib/libssl.so.10 /usr/lib/libssl.so
+
+# Install the Go toolchain and dependencies
+rm -rf /usr/local/go
+wget -qO- https://paddle-ci.cdn.bcebos.com/go1.17.2.linux-amd64.tar.gz | tar -xz -C /usr/local
+export GOROOT=/usr/local/go
+export GOPATH=/root/gopath
+export PATH=$PATH:$GOPATH/bin:$GOROOT/bin
+go env -w GO111MODULE=on
+go env -w GOPROXY=https://goproxy.cn,direct
+go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.15.2
+go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.15.2
+go install github.com/golang/protobuf/protoc-gen-go@v1.4.3
+go install google.golang.org/grpc@v1.33.0
+go env -w GO111MODULE=auto
+
+# Download the OpenCV library
+wget https://paddle-qa.bj.bcebos.com/PaddleServing/opencv3.tar.gz && tar -xvf opencv3.tar.gz && rm -rf opencv3.tar.gz
+export OPENCV_DIR=$PWD/opencv3
+
+# clone Serving
+git clone https://github.com/PaddlePaddle/Serving.git -b develop --depth=1
+cd Serving
+export Serving_repo_path=$PWD
+git submodule update --init --recursive
+python -m pip install -r python/requirements.txt
+
+
+export PYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())")
+export PYTHON_LIBRARIES=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR'))")
+export PYTHON_EXECUTABLE=`which python`
+
+export CUDA_PATH='/usr/local/cuda'
+export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
+export CUDA_CUDART_LIBRARY='/usr/local/cuda/lib64/'
+export TENSORRT_LIBRARY_PATH='/usr/local/TensorRT6-cuda10.1-cudnn7/targets/x86_64-linux-gnu/'
+
+# Copy the custom OP code
+cp -rf ../deploy/pdserving/general_detection_op.cpp ${Serving_repo_path}/core/general-server/op
+
+# Build the server and export SERVING_BIN
+mkdir server-build-gpu-opencv && cd server-build-gpu-opencv
+cmake -DPYTHON_INCLUDE_DIR=$PYTHON_INCLUDE_DIR \
+    -DPYTHON_LIBRARIES=$PYTHON_LIBRARIES \
+    -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
+    -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
+    -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
+    -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
+    -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DWITH_OPENCV=ON \
+    -DSERVER=ON \
+    -DWITH_GPU=ON ..
+make -j32
+
+python -m pip install python/dist/paddle*
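+# Exporting SERVING_BIN below points paddle_serving_server at the binary just
+# built in server-build-gpu-opencv instead of the pre-built one shipped with
+# the wheel (a sketch; the variable is read when the service process starts).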
+export SERVING_BIN=$PWD/core/general-server/serving
+cd ../../
diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f0456b5c351d20222e331df6a5019a51b79b6d28
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv2
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv2_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v2_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v2_client/
+--rec_dirname:./inference/ch_PP-OCRv2_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v2_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v2_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91c57bed1b9e9bbafc6438766b81781433a06aa2
--- /dev/null
+++ b/test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_PP-OCRv3
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_PP-OCRv3_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_v3_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_v3_client/
+--rec_dirname:./inference/ch_PP-OCRv3_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_v3_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_v3_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d18e9f11fdd2ff605cdd8f6c1bcf51ca780eb766
--- /dev/null
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_ppocr_mobile_v2.0
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_mobile_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_mobile_client/
+--rec_dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_mobile_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_mobile_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bbfec44dbab08dcfb932a922797448e541ea385b
--- /dev/null
+++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,19 @@
+===========================serving_params===========================
+model_name:ch_ppocr_server_v2.0
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--det_dirname:./inference/ch_ppocr_server_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--det_serving_server:./deploy/pdserving/ppocr_det_server_serving/
+--det_serving_client:./deploy/pdserving/ppocr_det_server_client/
+--rec_dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
+--rec_serving_server:./deploy/pdserving/ppocr_rec_server_serving/
+--rec_serving_client:./deploy/pdserving/ppocr_rec_server_client/
+serving_dir:./deploy/pdserving
+web_service:-m paddle_serving_server.serve
+--op:GeneralDetectionOp GeneralInferOp
+--port:8181
+--gpu_id:"0"|null
+cpp_client:ocr_cpp_client.py
+--image_dir:../../doc/imgs/1.jpg
diff --git a/test_tipc/docs/test_serving.md b/test_tipc/docs/test_serving.md
index b9acaa8ee15491af624d6b27e6c167e85c0402d4..71f01c0d5ff47004d70baa17b404c10714a6fb64 100644
--- a/test_tipc/docs/test_serving.md
+++ b/test_tipc/docs/test_serving.md
@@ -24,7 +24,7 @@ PaddleServing预测功能测试的主程序为`test_serving_infer_python.sh`和`
 bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
 
 # 用法:
-bash test_tipc/test_serving_infer_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
 ```
 **cpp serving**
 先运行`prepare.sh`准备数据和模型,然后运行`test_serving_infer_cpp.sh`进行测试,最终在```test_tipc/output```目录下生成`serving_infer_cpp*.log`后缀的日志文件。
@@ -33,7 +33,7 @@ bash test_tipc/test_serving_infer_python.sh ./test_tipc/configs/ch_ppocr_mobile_
 bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
 
 # 用法:
-bash test_tipc/test_serving_infer_cpp.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
+bash test_tipc/test_serving_infer_cpp.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt "serving_infer"
 ```
 
 #### 运行结果
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..cad17506e5aa6efa49edef62575feb7f2003b8fc
--- /dev/null
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+
+function func_parser_model_config(){
+    strs=$1
+    IFS="/"
+    array=(${strs})
+    tmp=${array[-1]}
+    echo ${tmp}
+}
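+# e.g. func_parser_model_config "./deploy/pdserving/ppocr_det_v3_serving/"
+# prints "ppocr_det_v3_serving". The *serving_cpp* config txt is then read
+# positionally (lines[1]..lines[18] below), so its lines must not be
+# reordered or padded with comments.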
+
+FILENAME=$1
+dataline=$(awk 'NR==1, NR==19{print}' $FILENAME)
+MODE=$2
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+# parser serving
+model_name=$(func_parser_value "${lines[1]}")
+python_list=$(func_parser_value "${lines[2]}")
+trans_model_py=$(func_parser_value "${lines[3]}")
+det_infer_model_dir_key=$(func_parser_key "${lines[4]}")
+det_infer_model_dir_value=$(func_parser_value "${lines[4]}")
+model_filename_key=$(func_parser_key "${lines[5]}")
+model_filename_value=$(func_parser_value "${lines[5]}")
+params_filename_key=$(func_parser_key "${lines[6]}")
+params_filename_value=$(func_parser_value "${lines[6]}")
+det_serving_server_key=$(func_parser_key "${lines[7]}")
+det_serving_server_value=$(func_parser_value "${lines[7]}")
+det_serving_client_key=$(func_parser_key "${lines[8]}")
+det_serving_client_value=$(func_parser_value "${lines[8]}")
+rec_infer_model_dir_key=$(func_parser_key "${lines[9]}")
+rec_infer_model_dir_value=$(func_parser_value "${lines[9]}")
+rec_serving_server_key=$(func_parser_key "${lines[10]}")
+rec_serving_server_value=$(func_parser_value "${lines[10]}")
+rec_serving_client_key=$(func_parser_key "${lines[11]}")
+rec_serving_client_value=$(func_parser_value "${lines[11]}")
+det_server_value=$(func_parser_model_config "${lines[7]}")
+det_client_value=$(func_parser_model_config "${lines[8]}")
+rec_server_value=$(func_parser_model_config "${lines[10]}")
+rec_client_value=$(func_parser_model_config "${lines[11]}")
+serving_dir_value=$(func_parser_value "${lines[12]}")
+web_service_py=$(func_parser_value "${lines[13]}")
+op_key=$(func_parser_key "${lines[14]}")
+op_value=$(func_parser_value "${lines[14]}")
+port_key=$(func_parser_key "${lines[15]}")
+port_value=$(func_parser_value "${lines[15]}")
+gpu_key=$(func_parser_key "${lines[16]}")
+gpu_value=$(func_parser_value "${lines[16]}")
+cpp_client_py=$(func_parser_value "${lines[17]}")
+image_dir_key=$(func_parser_key "${lines[18]}")
+image_dir_value=$(func_parser_value "${lines[18]}")
+
+LOG_PATH="$(pwd)/test_tipc/output/${model_name}/${MODE}/cpp"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_cpp_serving.log"
+
+function func_serving(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    # pdserving
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
+        # trans det
+        set_dirname=$(func_set_params "--dirname" "${det_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${det_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${det_serving_client_value}")
+        python_list=(${python_list})
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+        eval $trans_model_cmd
+        cp "deploy/pdserving/serving_client_conf.prototxt" ${det_serving_client_value}
+        # trans rec
+        set_dirname=$(func_set_params "--dirname" "${rec_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${rec_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${rec_serving_client_value}")
+        python_list=(${python_list})
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+        eval $trans_model_cmd
+    elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then
+        # trans det
+        set_dirname=$(func_set_params "--dirname" "${det_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${det_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${det_serving_client_value}")
+        python_list=(${python_list})
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+        eval $trans_model_cmd
+        cp "deploy/pdserving/serving_client_conf.prototxt" ${det_serving_client_value}
+    elif [ ${model_name} = "ch_PP-OCRv2_rec" ] || [ ${model_name} = "ch_PP-OCRv3_rec" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ] || [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then
+        # trans rec
+        set_dirname=$(func_set_params "--dirname" "${rec_infer_model_dir_value}")
+        set_serving_server=$(func_set_params "--serving_server" "${rec_serving_server_value}")
+        set_serving_client=$(func_set_params "--serving_client" "${rec_serving_client_value}")
+        python_list=(${python_list})
+        trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+        eval $trans_model_cmd
+    fi
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
+    python_list=(${python_list})
+
+    cd ${serving_dir_value}
+    # cpp serving
+    unset https_proxy
+    unset http_proxy
+    for gpu_id in ${gpu_value[*]}; do
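+        # gpu_value comes from the config as "0"|null; with IFS='|' this loop
+        # runs once per entry, i.e. once on GPU 0 and once ("null") on CPU.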
"ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then + cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${det_client_value} > ${_save_log_path} 2>&1" + elif [ ${model_name} = "ch_PP-OCRv2_rec" ] || [ ${model_name} = "ch_PP-OCRv3_rec" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ] || [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then + cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${rec_client_value} > ${_save_log_path} 2>&1" + fi + eval $cpp_client_cmd + last_status=${PIPESTATUS[0]} + status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}" + # sleep 5s + ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9 + # ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9 + else + if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then + web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &" + elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then + web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &" + elif [ ${model_name} = "ch_PP-OCRv2_rec" ] || [ ${model_name} = "ch_PP-OCRv3_rec" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ] || [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then + web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &" + fi + eval $web_service_cpp_cmd + sleep 5s + _save_log_path="${LOG_PATH}/server_infer_cpp_gpu.log" + if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then + cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${det_client_value} ${rec_client_value} > ${_save_log_path} 2>&1" + elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then + cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${det_client_value} > ${_save_log_path} 2>&1" + elif [ ${model_name} = "ch_PP-OCRv2_rec" ] || [ ${model_name} = "ch_PP-OCRv3_rec" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ] || [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then + cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${rec_client_value} > ${_save_log_path} 2>&1" + fi + eval $cpp_client_cmd + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}" + # sleep 5s + ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9 + # ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9 + fi + done +} + + +#set cuda device +GPUID=$3 +if [ ${#GPUID} -le 0 ];then + env="export CUDA_VISIBLE_DEVICES=0" +else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" +fi +eval $env +echo $env + + +echo "################### run test ###################" + +export Count=0 +IFS="|" +func_serving 
"${web_service_cpp_cmd}" diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh index 4affa417e10ebf642b7b280dc0b4b373a6b341fe..8bdb27bce7b128f3141c6f1ed774ecc709c71968 100644 --- a/test_tipc/test_serving_infer_python.sh +++ b/test_tipc/test_serving_infer_python.sh @@ -11,6 +11,7 @@ function func_parser_model_config(){ FILENAME=$1 dataline=$(awk 'NR==1, NR==23{print}' $FILENAME) +MODE=$2 # parser params IFS=$'\n' @@ -58,7 +59,7 @@ pipeline_py=$(func_parser_value "${lines[21]}") image_dir_key=$(func_parser_key "${lines[22]}") image_dir_value=$(func_parser_value "${lines[22]}") -LOG_PATH="$(pwd)/test_tipc/output/${model_name}/python_serving" +LOG_PATH="$(pwd)/test_tipc/output/${model_name}/${MODE}/python" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results_python_serving.log" @@ -207,7 +208,7 @@ function func_serving(){ #set cuda device -GPUID=$2 +GPUID=$3 if [ ${#GPUID} -le 0 ];then env="export CUDA_VISIBLE_DEVICES=0" else