Unverified commit e08c8220, authored by xiaoting, committed by GitHub

update cpp serving doc (#5533)

* update cpp serving doc

* server build shell

* Update serving_cpp.md
Parent 37406905
@@ -11,4 +11,5 @@ serving_dir:./deploy/serving_cpp
 --model:serving_server
 --op:GeneralClasOp
 --port:9997
+--gpu_id:"0"|null
 cpp_client:serving_client.py
\ No newline at end of file
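In the test_tipc config convention, the text before the first `:` is the CLI flag and the text after it is the value, with `|` separating alternative settings to exercise; `--gpu_id:"0"|null` therefore drives two test passes, one on GPU 0 and one on CPU (`null`). A minimal sketch of that expansion, assuming this convention:
```
# Hypothetical illustration of the '|'-separated alternatives; not part of the commit.
gpu_value='"0"|null'
for gpu_id in ${gpu_value//|/ }; do   # turn '|' into spaces, let the shell word-split
    echo "would test with gpu_id=${gpu_id}"
done
# -> would test with gpu_id="0"
# -> would test with gpu_id=null
```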
@@ -30,10 +30,9 @@ op_key=$(func_parser_key "${lines[11]}")
 op_value=$(func_parser_value "${lines[11]}")
 port_key=$(func_parser_key "${lines[12]}")
 port_value=$(func_parser_value "${lines[12]}")
-cpp_client_value=$(func_parser_value "${lines[13]}")
+gpu_key=$(func_parser_key "${lines[13]}")
+gpu_value=$(func_parser_value "${lines[13]}")
+cpp_client_value=$(func_parser_value "${lines[14]}")
 LOG_PATH="./log/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
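`func_parser_key` and `func_parser_value` come from the shared test_tipc helper functions and are not part of this diff; a minimal sketch of their assumed behavior, splitting each config line at the first `:`:
```
# Hypothetical sketch; the real helpers live in the shared test_tipc functions.
function func_parser_key(){
    echo "$1" | cut -d ":" -f 1       # text before the first ':'
}
function func_parser_value(){
    echo "$1" | cut -d ":" -f 2-      # everything after the first ':'
}
func_parser_key "--port:9997"         # prints --port
func_parser_value "--port:9997"       # prints 9997
```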
@@ -63,20 +62,38 @@ function func_serving(){
     unset https_proxy
     unset http_proxy
-    _save_log_path="../../log/${model_name}/${MODE}/servering_infer_cpp_gpu_batchsize_1.log"
     # phase 2: run server
-    cpp_server_cmd="${python} -m paddle_serving_server.serve ${run_model_path_key} ${run_model_path_value} ${op_key} ${op_value} ${port_key} ${port_value} > serving_log.log & "
-    eval $cpp_server_cmd
-    last_status=${PIPESTATUS[0]}
-    status_check $last_status "${cpp_server_cmd}" "${status_log}" "${model_name}"
-    sleep 5s
-    clinet_cmd="${python} ${cpp_client_value} > ${_save_log_path} 2>&1 "
-    eval $clinet_cmd
-    last_status=${PIPESTATUS[0]}
-    status_check $last_status "${clinet_cmd}" "${status_log}" "${model_name}"
-    # eval "cat ${_save_log_path}"
-    cd ../../
-    ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
+    for gpu_id in ${gpu_value[*]}; do
+        echo ${gpu_id}
+        if [ ${gpu_id} = "null" ]; then
+            _save_log_path="../../log/${model_name}/${MODE}/servering_infer_cpp_cpu_batchsize_1.log"
+            cpp_server_cmd="${python} -m paddle_serving_server.serve ${run_model_path_key} ${run_model_path_value} ${op_key} ${op_value} ${port_key} ${port_value} > serving_log.log & "
+            eval $cpp_server_cmd
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${cpp_server_cmd}" "${status_log}" "${model_name}"
+            sleep 5s
+            client_cmd="${python} ${cpp_client_value} > ${_save_log_path} 2>&1 "
+            eval $client_cmd
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${client_cmd}" "${status_log}" "${model_name}"
+            # eval "cat ${_save_log_path}"
+            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
+        else
+            _save_log_path="../../log/${model_name}/${MODE}/servering_infer_cpp_gpu_batchsize_1.log"
+            cpp_server_cmd="${python} -m paddle_serving_server.serve ${run_model_path_key} ${run_model_path_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log.log & "
+            eval $cpp_server_cmd
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${cpp_server_cmd}" "${status_log}" "${model_name}"
+            sleep 5s
+            client_cmd="${python} ${cpp_client_value} > ${_save_log_path} 2>&1 "
+            eval $client_cmd
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${client_cmd}" "${status_log}" "${model_name}"
+            # eval "cat ${_save_log_path}"
+            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
+        fi
+    done
 }
...
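Plugging in the values from the config file at the top of this diff (`--model serving_server`, `--op GeneralClasOp`, `--port 9997`), the GPU branch assembles roughly the following commands; the expansion below is an illustration, not output captured from the script:
```
python -m paddle_serving_server.serve --model serving_server --op GeneralClasOp --port 9997 --gpu_id "0" > serving_log.log &
python serving_client.py > ../../log/${model_name}/${MODE}/servering_infer_cpp_gpu_batchsize_1.log 2>&1
```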
@@ -168,7 +168,7 @@ feed_var {
 }
 ```
 After the code development is complete, copy (`cp`) the relevant code into the Serving source tree and build it following the [build guide](https://github.com/PaddlePaddle/Serving/blob/v0.8.3/doc/Compile_CN.md):
 ```
 cp deploy/serving_cpp/preprocess/general_clas_op.* ${Serving_repo_path}/core/general-server/op
@@ -176,6 +176,17 @@ cp deploy/serving_cpp/preprocess/preprocess_op.* ${Serving_repo_path}/core/predi
 ```
+If the current Docker image is `registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82`, the one-click build script can be used:
+```
+bash template/build_server.sh
+```
+* Note: edit L49-L50 to substitute the file names of your custom OPs (see the sketch below)
+* When using another image, manually adjust the CUDA- and TensorRT-related paths
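For example, for a hypothetical custom OP implemented in `general_seg_op.cpp`/`general_seg_op.h` (an invented name, purely for illustration), the two copy lines at L49-L50 of `build_server.sh` would become:
```
\cp deploy/serving_cpp/preprocess/general_seg_op.* ${Serving_repo_path}/core/general-server/op
\cp deploy/serving_cpp/preprocess/preprocess_op.* ${Serving_repo_path}/core/predictor/tools/pp_shitu_tools
```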
 <a name="2.6"></a>
 ### 2.6 Client Modifications
...
# Image used:
# registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82
# Build the Serving server:
# the client and app packages can use the release builds directly;
# the server must be rebuilt because custom OPs have been added
apt-get update
apt install -y libcurl4-openssl-dev libbz2-dev
wget https://paddle-serving.bj.bcebos.com/others/centos_ssl.tar && tar xf centos_ssl.tar && rm -rf centos_ssl.tar && mv libcrypto.so.1.0.2k /usr/lib/libcrypto.so.1.0.2k && mv libssl.so.1.0.2k /usr/lib/libssl.so.1.0.2k && ln -sf /usr/lib/libcrypto.so.1.0.2k /usr/lib/libcrypto.so.10 && ln -sf /usr/lib/libssl.so.1.0.2k /usr/lib/libssl.so.10 && ln -sf /usr/lib/libcrypto.so.10 /usr/lib/libcrypto.so && ln -sf /usr/lib/libssl.so.10 /usr/lib/libssl.so
# Install Go dependencies
rm -rf /usr/local/go
wget -qO- https://paddle-ci.cdn.bcebos.com/go1.17.2.linux-amd64.tar.gz | tar -xz -C /usr/local
export GOROOT=/usr/local/go
export GOPATH=/root/gopath
export PATH=$PATH:$GOPATH/bin:$GOROOT/bin
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.15.2
go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.15.2
go install github.com/golang/protobuf/protoc-gen-go@v1.4.3
go install google.golang.org/grpc@v1.33.0
go env -w GO111MODULE=auto
# Download the OpenCV library
wget https://paddle-qa.bj.bcebos.com/PaddleServing/opencv3.tar.gz && tar -xvf opencv3.tar.gz && rm -rf opencv3.tar.gz
export OPENCV_DIR=$PWD/opencv3
# clone Serving
git clone https://github.com/PaddlePaddle/Serving.git -b develop --depth=1
cd Serving
export Serving_repo_path=$PWD
git submodule update --init --recursive
python -m pip install -r python/requirements.txt
export PYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())")
export PYTHON_LIBRARIES=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR'))")
export PYTHON_EXECUTABLE=`which python`
export CUDA_PATH='/usr/local/cuda'
export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
export CUDA_CUDART_LIBRARY='/usr/local/cuda/lib64/'
export TENSORRT_LIBRARY_PATH='/usr/local/TensorRT6-cuda10.1-cudnn7/targets/x86_64-linux-gnu/'
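# (Optional editor's sketch, not in the original script: since other images place
# CUDA/cuDNN/TensorRT elsewhere, warn early if any exported path is missing.)
for p in "$CUDA_PATH" "$CUDNN_LIBRARY" "$CUDA_CUDART_LIBRARY" "$TENSORRT_LIBRARY_PATH"; do
    [ -d "$p" ] || echo "WARNING: $p not found; adjust the exports above"
done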
# Copy the custom OP code
\cp deploy/serving_cpp/preprocess/general_clas_op.* ${Serving_repo_path}/core/general-server/op
\cp deploy/serving_cpp/preprocess/preprocess_op.* ${Serving_repo_path}/core/predictor/tools/pp_shitu_tools
# Build the server and export SERVING_BIN
mkdir server-build-gpu-opencv && cd server-build-gpu-opencv
cmake -DPYTHON_INCLUDE_DIR=$PYTHON_INCLUDE_DIR \
-DPYTHON_LIBRARIES=$PYTHON_LIBRARIES \
-DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
-DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
-DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
-DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
-DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \
-DOPENCV_DIR=${OPENCV_DIR} \
-DWITH_OPENCV=ON \
-DSERVER=ON \
-DWITH_GPU=ON ..
make -j32
python -m pip install python/dist/paddle*
export SERVING_BIN=$PWD/core/general-server/serving
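`SERVING_BIN` is how Paddle Serving is pointed at a locally built server binary: with it exported, subsequent `python -m paddle_serving_server.serve` launches use this custom build instead of the pip-installed release. A quick post-build check (a sketch, assuming the build directory above):
```
ls python/dist/paddle*.whl    # the wheel installed in the previous step
ls ${SERVING_BIN}             # the freshly built serving binary
```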