diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index a9fbfff6f166ee40f09fc07d240c27ff1039ef0a..c18fbb2e17534ebec1d10b45ddef9028e3f48f45 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -80,14 +80,6 @@ elif [ ${MODE} = "paddle2onnx_infer" ];then
     ${python} -m pip install install paddle2onnx
     ${python} -m pip install onnxruntime==1.10.0
 elif [ ${MODE} = "serving_infer" ];then
-    git clone https://github.com/PaddlePaddle/Serving
-    cd Serving
-    bash tools/paddle_env_install.sh
-    ${python} -m pip install -r python/requirements.txt
-    cd ..
-    ${python} -m pip install paddle-serving-client -i https://pypi.tuna.tsinghua.edu.cn/simple
-    ${python} -m pip install paddle-serving-app -i https://pypi.tuna.tsinghua.edu.cn/simple
-    ${python} -m pip install paddle-serving-server-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple
     unset https_proxy http_proxy
 else
     # download coco lite data
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index a9cf17ad33b2823e51e3058139bcd328e3f7b4b8..17c3a50ec6c7ba951fd19686a72bc1ad093b833f 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -89,10 +89,6 @@ function func_serving_inference(){
     done
 }
 
-# build paddle_serving_server
-bash deploy/serving/cpp/build_server.sh
-echo "################### build finished! ###################"
-
 # run serving infer
 Count=0
 IFS="|"
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 96bd26ddb65f6ac0870f48be4c988f9b585200f5..be70a0297a6f02badfe040376e50521653c8719e 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -81,9 +81,9 @@ function func_serving_inference(){
 }
 
 # set cuda device
-GPUID=$2
+GPUID=$3
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi