Commit b9dabc4e authored by HydrogenSulfate

debug

Parent 27a59780
# Docker image used:
# registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82
# Build the Serving server:
# the client and app packages can use release builds directly;
# the server must be rebuilt because custom OPs were added.
# At build time ${PWD} defaults to PaddleClas/deploy/paddleserving/
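# Example invocation (the script name here is illustrative; the one optional
# argument selects the Python interpreter and defaults to 'python'):
#   bash build_server.sh python3.7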
python_name=${1:-'python'}
apt-get update
apt install -y libcurl4-openssl-dev libbz2-dev
wget -nc https://paddle-serving.bj.bcebos.com/others/centos_ssl.tar
tar xf centos_ssl.tar
rm -rf centos_ssl.tar
mv libcrypto.so.1.0.2k /usr/lib/libcrypto.so.1.0.2k
mv libssl.so.1.0.2k /usr/lib/libssl.so.1.0.2k
ln -sf /usr/lib/libcrypto.so.1.0.2k /usr/lib/libcrypto.so.10
ln -sf /usr/lib/libssl.so.1.0.2k /usr/lib/libssl.so.10
ln -sf /usr/lib/libcrypto.so.10 /usr/lib/libcrypto.so
ln -sf /usr/lib/libssl.so.10 /usr/lib/libssl.so
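# Note: Serving's prebuilt binaries expect the CentOS-style sonames
# libssl.so.10 / libcrypto.so.10, which this dev image does not ship;
# the renamed copies and symlinks above satisfy that expectation.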
# Install Go dependencies
rm -rf /usr/local/go
@@ -30,11 +38,14 @@ go install google.golang.org/grpc@v1.33.0
go env -w GO111MODULE=auto
# Download the OpenCV library
wget https://paddle-qa.bj.bcebos.com/PaddleServing/opencv3.tar.gz
tar -xvf opencv3.tar.gz
rm -rf opencv3.tar.gz
export OPENCV_DIR=$PWD/opencv3
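# The WITH_OPENCV=ON build below needs a prebuilt OpenCV; OPENCV_DIR tells
# cmake where to find it.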
# clone Serving
git clone https://github.com/PaddlePaddle/Serving.git -b develop --depth=1
cd Serving # PaddleClas/deploy/paddleserving/Serving
export Serving_repo_path=$PWD
git submodule update --init --recursive
@@ -54,21 +65,24 @@ export TENSORRT_LIBRARY_PATH='/usr/local/TensorRT6-cuda10.1-cudnn7/targets/x86_6
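# A minimal sketch of how the PYTHON_* variables consumed by the cmake call
# below are typically derived (these exports are assumptions, not part of
# this commit):
export PYTHON_INCLUDE_DIR=$(${python_name} -c "import sysconfig; print(sysconfig.get_path('include'))")
export PYTHON_LIBRARIES=$(${python_name} -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'))")
export PYTHON_EXECUTABLE=$(which ${python_name})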
\cp ../preprocess/general_clas_op.* ${Serving_repo_path}/core/general-server/op
\cp ../preprocess/preprocess_op.* ${Serving_repo_path}/core/predictor/tools/pp_shitu_tools
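# The two copies above drop the custom preprocessing OPs (general_clas_op for
# classification, preprocess_op for PP-ShiTu) into the Serving source tree,
# which is why the server has to be rebuilt from source at all.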
# Build the server
mkdir server-build-gpu-opencv
cd server-build-gpu-opencv
cmake -DPYTHON_INCLUDE_DIR=$PYTHON_INCLUDE_DIR \
    -DPYTHON_LIBRARIES=$PYTHON_LIBRARIES \
    -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
    -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
    -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
    -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
    -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \
    -DOPENCV_DIR=${OPENCV_DIR} \
    -DWITH_OPENCV=ON \
    -DSERVER=ON \
    -DWITH_GPU=ON ..
make -j32
${python_name} -m pip install python/dist/paddle*
# export SERVING_BIN
export SERVING_BIN=$PWD/core/general-server/serving
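# SERVING_BIN makes the Python launcher use the freshly built C++ server
# binary instead of the one bundled with the installed wheel.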
cd ../../
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from paddle_serving_client import Client
......
@@ -54,7 +54,7 @@ function func_serving_cls(){
for python_ in ${python[*]}; do
if [[ ${python_} =~ "python" ]]; then
trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval ${trans_model_cmd}
break
fi
done
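# For reference, the assembled ${trans_model_cmd} typically expands to a
# paddle_serving_client.convert call like the following (paths illustrative,
# not taken from this commit):
#   python3.7 -m paddle_serving_client.convert --dirname ./inference/ \
#       --model_filename inference.pdmodel --params_filename inference.pdiparams \
#       --serving_server ./serving_server/ --serving_client ./serving_client/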
@@ -144,19 +144,19 @@ function func_serving_cls(){
if [[ ${use_gpu} = "null" ]]; then
device_type_line=24
set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
eval ${set_device_type_cmd}
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
eval ${set_devices_cmd}
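# The two sed edits assume lines 24 and 27 of config.yml carry the device
# settings, roughly:
#   device_type: 0   # 0: CPU, 1: GPU (set to 1 in the GPU branch below)
#   devices: ""      # empty for CPU; a card id such as "0" for GPU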
web_service_cmd="${python_} ${web_service_py} &"
eval ${web_service_cmd}
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
@@ -176,19 +176,19 @@ function func_serving_cls(){
device_type_line=24
set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
eval ${set_device_type_cmd}
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python_} ${web_service_py} & "
eval ${web_service_cmd}
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1"
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
@@ -250,7 +250,7 @@ function func_serving_rec(){
set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval ${cls_trans_model_cmd}
set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
@@ -258,7 +258,7 @@ function func_serving_rec(){
set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval ${det_trans_model_cmd}
if [[ ${FILENAME} =~ "cpp" ]]; then
cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}"
@@ -292,7 +292,7 @@ function func_serving_rec(){
if [ ${use_gpu} = "null" ]; then
det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
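# This single C++ server process hosts both models: PicoDet detection and
# feature extraction for recognition, wired to the custom OPs built earlier
# (GeneralPicodetOp, GeneralFeatureExtractOp) and listening on port 9400.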
eval ${web_service_cpp_cmd}
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
@@ -305,7 +305,7 @@ function func_serving_rec(){
else
det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
eval ${web_service_cpp_cmd}
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
@@ -326,19 +326,19 @@ function func_serving_rec(){
if [[ ${use_gpu} = "null" ]]; then
device_type_line=24
set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml"
eval ${set_device_type_cmd}
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python} ${web_service_py} &"
eval ${web_service_cmd}
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
@@ -358,19 +358,19 @@ function func_serving_rec(){
device_type_line=24
set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml"
eval ${set_device_type_cmd}
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python} ${web_service_py} & "
eval ${web_service_cmd}
sleep 10s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
@@ -393,7 +393,7 @@ else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
set CUDA_VISIBLE_DEVICES
eval ${env}
echo "################### run test ###################"
......