Commit 8641aed6 authored by H HydrogenSulfate

polish prepare.sh and docs

Parent 5609211f
......@@ -52,6 +52,19 @@ The main program for the Linux GPU/CPU C++ serving deployment test is `test_serving_infer_cpp.sh`
```
- Install the PaddleServing components (serving_client and serving-app), automatically build and install the serving_server package with custom OPs, and automatically download and extract the inference models
```bash
# Install the required dependency packages
python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
# Build and install the serving-server package with custom OPs
pushd ./deploy/paddleserving
source build_server.sh python3.7
popd
# The faiss package is required when testing PP-ShiTu recognition models
python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
# Download models and data
bash test_tipc/prepare.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
```
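Once the dependencies and models are in place, the main program named above runs the actual test. A hypothetical invocation, assuming the config file is the first argument; a GPU id can apparently be passed as the second argument (per `GPUID=$2` in the script changes below), and when omitted the script now defaults to GPU 0:
```bash
# Hypothetical invocation of the C++ serving test; argument order assumed from the script body.
bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
```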
......
......@@ -52,6 +52,15 @@ The main program for the Linux GPU/CPU Python serving deployment test is `test_serving_infer_python.sh`
```
- Install the PaddleServing components (serving-server, serving_client, and serving-app), and automatically download and extract the inference models
```bash
# Install the required dependency packages
python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
python3.7 -m pip install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
# The faiss package is required when testing PP-ShiTu recognition models
python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
# Download models and data
bash test_tipc/prepare.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
```
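As with the C++ variant, a hypothetical run of the Python serving test after preparation (argument order assumed from `GPUID=$2` below; the GPU id is optional and now defaults to 0):
```bash
# Hypothetical invocation of the Python serving test (config file, then optional GPU id).
bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
```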
......
......@@ -200,18 +200,7 @@ fi
if [[ ${MODE} = "serving_infer" ]]; then
# prepare serving env
python_name=$(func_parser_value "${lines[2]}")
${python_name} -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
${python_name} -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
if [[ ${FILENAME} =~ "cpp" ]]; then
pushd ./deploy/paddleserving
source build_server.sh ${python_name}
popd
else
${python_name} -m pip install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
fi
if [[ ${model_name} =~ "ShiTu" ]]; then
${python_name} -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
cls_inference_model_url=$(func_parser_value "${lines[3]}")
cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}")
det_inference_model_url=$(func_parser_value "${lines[4]}")
......
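The prepare step reads the Python interpreter and model URLs out of the config file with helpers from `test_tipc/common_func.sh`. A minimal sketch of what those two helpers do, assuming the usual `key:value` config-line format (the real implementations may differ):
```bash
# Sketch of the parsing helpers used above (assumed implementations).
# func_parser_value: extract the value part of a "key:value" config line.
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}
}

# func_get_url_file_name: return the last path component of a URL,
# e.g. "https://host/path/model.tar" -> "model.tar".
function func_get_url_file_name(){
    strs=$1
    IFS="/"
    array=(${strs})
    echo ${array[${#array[@]}-1]}
}
```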
......@@ -104,6 +104,8 @@ function func_serving_cls(){
if [[ ${use_gpu} = "null" ]]; then
web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
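Each server launch is now followed by an explicit status check: `${PIPESTATUS[0]}` captures the exit code of the eval'd launch command before the subsequent `sleep` can overwrite it, and `status_check` appends a pass/fail record to the status log. A minimal sketch of `status_check`, with the signature assumed from the call sites here:
```bash
# Sketch of status_check (signature assumed from its call sites):
#   $1 = exit code, $2 = command string, $3 = status log path, $4 = model name
function status_check(){
    last_status=$1
    run_command=$2
    run_log=$3
    model_name=$4
    if [ ${last_status} -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
}
```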
......@@ -116,6 +118,8 @@ function func_serving_cls(){
else
web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
sleep 8s
_save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
......@@ -213,6 +217,8 @@ function func_serving_rec(){
det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
......@@ -226,6 +232,8 @@ function func_serving_rec(){
det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
......@@ -243,7 +251,7 @@ function func_serving_rec(){
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
env="export CUDA_VISIBLE_DEVICES=0"
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
......
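With this change the server defaults to GPU 0 when no GPU id is passed, rather than leaving `CUDA_VISIBLE_DEVICES` unset via a blank `env` string. The `env` string is presumably eval'd before the serving functions run, along these lines:
```bash
# Assumed follow-up in the script: apply the device selection built above.
eval ${env}    # with no GPUID argument this now runs: export CUDA_VISIBLE_DEVICES=0
```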
......@@ -98,6 +98,8 @@ function func_serving_cls(){
web_service_cmd="${python_} ${web_service_py} &"
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
......@@ -130,6 +132,8 @@ function func_serving_cls(){
web_service_cmd="${python_} ${web_service_py} & "
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
......@@ -237,6 +241,8 @@ function func_serving_rec(){
web_service_cmd="${python} ${web_service_py} &"
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
......@@ -269,6 +275,8 @@ function func_serving_rec(){
web_service_cmd="${python} ${web_service_py} & "
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
sleep 10s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
......@@ -290,7 +298,7 @@ function func_serving_rec(){
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
env="export CUDA_VISIBLE_DEVICES=0"
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
......