Commit 7515ec6d authored by H HexToString

Merge branch 'develop-p' of github.com:HexToString/Serving into develop-p

@@ -242,6 +242,9 @@ InvalidArgumentError: Device id must be less than GPU count, but received id is:
 **A:** Offline deployment is supported; prepare and install the relevant [dependencies](https://github.com/PaddlePaddle/Serving/blob/develop/doc/COMPILE.md) ahead of time.
+#### Q: When starting the server inside Docker, what is the difference between the IP addresses 127.0.0.1 and 0.0.0.0?
+**A:** You must bind the container's main process to the special 0.0.0.0 "all interfaces" address, or it will be unreachable from outside the container. Inside Docker, 127.0.0.1 means "this container", not "this machine": an outbound connection from the container to 127.0.0.1 loops back into the same container, and a server bound to 127.0.0.1 cannot receive connections from outside.
 ## Prediction issues
 #### Q: The first prediction on GPU is especially slow; how can the RPC timeout be adjusted to avoid timing out?
......
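To make the 127.0.0.1 vs 0.0.0.0 distinction concrete, here is a minimal sketch using plain Python sockets (the port 9292 is hypothetical) of the bind address a containerized server should use:

```python
import socket

# Bind to 0.0.0.0 so a port published with `docker run -p 9292:9292 ...`
# is reachable from the host; 127.0.0.1 would accept only connections
# that originate inside this same container.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("0.0.0.0", 9292))
# server.bind(("127.0.0.1", 9292))  # container-local only
server.listen(1)
print("listening on", server.getsockname())
```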
@@ -42,6 +42,7 @@ class BertService(WebService):
 bert_service = BertService(name="bert")
 bert_service.load()
 bert_service.load_model_config(sys.argv[1])
+bert_service.set_gpus("0")
 bert_service.prepare_server(
     workdir="workdir", port=int(sys.argv[2]), device="gpu")
 bert_service.run_rpc_service()
......
@@ -335,8 +335,8 @@ class Client(object):
         string_feed_names = []
         string_lod_slot_batch = []
         string_shape = []
         fetch_names = []
         counter = 0
         for key in fetch_list:
@@ -346,7 +346,6 @@ class Client(object):
         if len(fetch_names) == 0:
             raise ValueError(
                 "Fetch names should not be empty or out of saved fetch list.")
-            return {}
         feed_i = feed_batch[0]
         for key in feed_i:
......
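A hedged usage sketch of what this hunk pins down: with the unreachable `return {}` removed, an unknown fetch name surfaces as a `ValueError` rather than ever being mistaken for an empty result, so callers can handle it explicitly (the feed key `words` and the endpoint are illustrative assumptions):

```python
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

try:
    # "not_a_real_var" is deliberately absent from the saved fetch list.
    result = client.predict(feed={"words": [1, 2, 3]}, fetch=["not_a_real_var"])
except ValueError as err:
    print("fetch list rejected:", err)
```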
@@ -93,7 +93,7 @@ class WebService(object):
         f = open(file_path_list[0], 'r')
         model_conf = google.protobuf.text_format.Merge(
             str(f.read()), model_conf)
-        self.feed_vars = {var.name: var for var in model_conf.feed_var}
+        self.feed_vars = {var.alias_name: var for var in model_conf.feed_var}
         if len(file_path_list) > 1:
             model_conf = m_config.GeneralModelConfig()
@@ -101,7 +101,7 @@ class WebService(object):
             model_conf = google.protobuf.text_format.Merge(
                 str(f.read()), model_conf)
-        self.fetch_vars = {var.name: var for var in model_conf.fetch_var}
+        self.fetch_vars = {var.alias_name: var for var in model_conf.fetch_var}
         if client_config_path == None:
             self.client_config_path = file_path_list
......
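The switch from `name` to `alias_name` matters because clients feed and fetch tensors by their alias, which can differ from the graph-internal variable name. A minimal sketch of the same parsing step, assuming the proto module path used elsewhere in the package and a config file on disk:

```python
import google.protobuf.text_format
from paddle_serving_server.proto import general_model_config_pb2 as m_config

model_conf = m_config.GeneralModelConfig()
with open("serving_server_conf.prototxt") as f:
    google.protobuf.text_format.Merge(f.read(), model_conf)

# Keyed by alias_name, the dict keys now match what clients actually send,
# e.g. {"image": ...} rather than the internal tensor name.
feed_vars = {var.alias_name: var for var in model_conf.feed_var}
print(list(feed_vars))
```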
@@ -120,31 +120,66 @@ function check() {
     fi
 }
+function check_gpu_memory() {
+    gpu_memory=`nvidia-smi --id=$1 --format=csv,noheader --query-gpu=memory.used | awk '{print $1}'`
+    echo -e "${GREEN_COLOR}-------id-$1 gpu_memory_used: ${gpu_memory}${RES}"
+    if [ ${gpu_memory} -le 100 ]; then
+        echo "-------GPU-$1 is not used"
+        status="GPU-$1 is not used"
+    else
+        echo "-------GPU_memory used is expected"
+    fi
+}
 function check_result() {
     if [ $? == 0 ]; then
         echo -e "${GREEN_COLOR}$1 execute normally${RES}"
         if [ $1 == "server" ]; then
             sleep $2
-            tail ${dir}server_log.txt | tee -a ${log_dir}server_total.txt
+            cat ${dir}server_log.txt | tee -a ${log_dir}server_total.txt
         fi
         if [ $1 == "client" ]; then
-            tail ${dir}client_log.txt | tee -a ${log_dir}client_total.txt
+            cat ${dir}client_log.txt | tee -a ${log_dir}client_total.txt
             grep -E "${error_words}" ${dir}client_log.txt > /dev/null
             if [ $? == 0 ]; then
+                if [ "${status}" != "" ]; then
+                    status="${status}|Failed"
+                else
+                    status="Failed"
+                fi
                 echo -e "${RED_COLOR}$1 error command${RES}\n" | tee -a ${log_dir}server_total.txt ${log_dir}client_total.txt
-                echo -e "--------------pipeline.log:----------------\n"
+                echo "--------------server log:--------------"
+                cat ${dir}server_log.txt
+                echo "--------------client log:--------------"
+                cat ${dir}client_log.txt
+                echo "--------------pipeline.log:----------------"
                 cat PipelineServingLogs/pipeline.log
-                echo -e "-------------------------------------------\n"
+                echo "-------------------------------------------\n"
                 error_log $2
             else
+                if [ "${status}" != "" ]; then
+                    error_log $2
+                fi
                 echo -e "${GREEN_COLOR}$2${RES}\n" | tee -a ${log_dir}server_total.txt ${log_dir}client_total.txt
             fi
         fi
     else
         echo -e "${RED_COLOR}$1 error command${RES}\n" | tee -a ${log_dir}server_total.txt ${log_dir}client_total.txt
-        tail ${dir}client_log.txt | tee -a ${log_dir}client_total.txt
+        echo "--------------server log:--------------"
+        cat ${dir}server_log.txt
+        echo "--------------client log:--------------"
+        cat ${dir}client_log.txt
+        echo "--------------pipeline.log:----------------"
+        cat PipelineServingLogs/pipeline.log
+        echo "-------------------------------------------\n"
+        if [ "${status}" != "" ]; then
+            status="${status}|Failed"
+        else
+            status="Failed"
+        fi
         error_log $2
     fi
+    status=""
 }
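For readers outside the test harness, a rough Python equivalent of the `check_gpu_memory` helper added above (same roughly 100 MiB idle threshold; `nounits` is added so the output parses directly as an integer):

```python
import subprocess

def check_gpu_memory(gpu_id: int, threshold_mib: int = 100) -> bool:
    """Return True if the given GPU shows more than threshold_mib in use."""
    out = subprocess.check_output(
        ["nvidia-smi", f"--id={gpu_id}", "--query-gpu=memory.used",
         "--format=csv,noheader,nounits"],
        text=True)
    used_mib = int(out.strip())
    if used_mib <= threshold_mib:
        print(f"GPU-{gpu_id} is not used ({used_mib} MiB)")
        return False
    print(f"GPU-{gpu_id} memory use is as expected ({used_mib} MiB)")
    return True
```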
 function error_log() {
@@ -163,7 +198,7 @@ function error_log() {
     echo "deployment: ${deployment// /_}" | tee -a ${log_dir}error_models.txt
     echo "py_version: ${py_version}" | tee -a ${log_dir}error_models.txt
     echo "cuda_version: ${cuda_version}" | tee -a ${log_dir}error_models.txt
-    echo "status: Failed" | tee -a ${log_dir}error_models.txt
+    echo "status: ${status}" | tee -a ${log_dir}error_models.txt
     echo -e "-----------------------------\n\n" | tee -a ${log_dir}error_models.txt
     prefix=${arg//\//_}
     for file in ${dir}*
@@ -192,7 +227,7 @@ function link_data() {
 function before_hook() {
     setproxy
     cd ${build_path}/python
-    ${py_version} -m pip install --upgrade pip
+    ${py_version} -m pip install --upgrade pip==21.1.3
     ${py_version} -m pip install requests
     ${py_version} -m pip install -r requirements.txt
     ${py_version} -m pip install numpy==1.16.4
@@ -379,6 +414,7 @@ function bert_rpc_gpu() {
     ls -hlst
     ${py_version} -m paddle_serving_server.serve --model bert_seq128_model/ --port 8860 --gpu_ids 0 > ${dir}server_log.txt 2>&1 &
     check_result server 15
+    check_gpu_memory 0
     nvidia-smi
     head data-c.txt | ${py_version} bert_client.py --model bert_seq128_client/serving_client_conf.prototxt > ${dir}client_log.txt 2>&1
     check_result client "bert_GPU_RPC server test completed"
@@ -429,6 +465,7 @@ function ResNet50_rpc() {
     sed -i 's/9696/8863/g' resnet50_rpc_client.py
     ${py_version} -m paddle_serving_server.serve --model ResNet50_vd_model --port 8863 --gpu_ids 0 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 0
     nvidia-smi
     ${py_version} resnet50_rpc_client.py ResNet50_vd_client_config/serving_client_conf.prototxt > ${dir}client_log.txt 2>&1
     check_result client "ResNet50_GPU_RPC server test completed"
@@ -446,6 +483,7 @@ function ResNet101_rpc() {
     sed -i "22cclient.connect(['127.0.0.1:8864'])" image_rpc_client.py
     ${py_version} -m paddle_serving_server.serve --model ResNet101_vd_model --port 8864 --gpu_ids 0 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 0
     nvidia-smi
     ${py_version} image_rpc_client.py ResNet101_vd_client_config/serving_client_conf.prototxt > ${dir}client_log.txt 2>&1
     check_result client "ResNet101_GPU_RPC server test completed"
@@ -536,10 +574,11 @@ function faster_rcnn_model_rpc() {
     data_dir=${data}detection/faster_rcnn_r50_fpn_1x_coco/
     link_data ${data_dir}
     sed -i 's/9494/8870/g' test_client.py
-    ${py_version} -m paddle_serving_server.serve --model serving_server --port 8870 --gpu_ids 0 --thread 2 > ${dir}server_log.txt 2>&1 &
+    ${py_version} -m paddle_serving_server.serve --model serving_server --port 8870 --gpu_ids 1 --thread 2 > ${dir}server_log.txt 2>&1 &
     echo "faster rcnn running ..."
     nvidia-smi
     check_result server 10
+    check_gpu_memory 1
     ${py_version} test_client.py 000000570688.jpg > ${dir}client_log.txt 2>&1
     nvidia-smi
     check_result client "faster_rcnn_GPU_RPC server test completed"
@@ -556,6 +595,7 @@ function cascade_rcnn_rpc() {
     sed -i "s/9292/8879/g" test_client.py
     ${py_version} -m paddle_serving_server.serve --model serving_server --port 8879 --gpu_ids 0 --thread 2 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 0
     nvidia-smi
     ${py_version} test_client.py > ${dir}client_log.txt 2>&1
     nvidia-smi
@@ -573,6 +613,7 @@ function deeplabv3_rpc() {
     sed -i "s/9494/8880/g" deeplabv3_client.py
     ${py_version} -m paddle_serving_server.serve --model deeplabv3_server --gpu_ids 0 --port 8880 --thread 2 > ${dir}server_log.txt 2>&1 &
     check_result server 10
+    check_gpu_memory 0
     nvidia-smi
     ${py_version} deeplabv3_client.py > ${dir}client_log.txt 2>&1
     nvidia-smi
@@ -590,6 +631,7 @@ function mobilenet_rpc() {
     sed -i "s/9393/8881/g" mobilenet_tutorial.py
     ${py_version} -m paddle_serving_server.serve --model mobilenet_v2_imagenet_model --gpu_ids 0 --port 8881 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 0
     nvidia-smi
     ${py_version} mobilenet_tutorial.py > ${dir}client_log.txt 2>&1
     nvidia-smi
@@ -605,8 +647,9 @@ function unet_rpc() {
     data_dir=${data}unet_for_image_seg/
     link_data ${data_dir}
     sed -i "s/9494/8882/g" seg_client.py
-    ${py_version} -m paddle_serving_server.serve --model unet_model --gpu_ids 0 --port 8882 > ${dir}server_log.txt 2>&1 &
+    ${py_version} -m paddle_serving_server.serve --model unet_model --gpu_ids 1 --port 8882 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 1
     nvidia-smi
     ${py_version} seg_client.py > ${dir}client_log.txt 2>&1
     nvidia-smi
@@ -624,6 +667,7 @@ function resnetv2_rpc() {
     sed -i 's/9393/8883/g' resnet50_v2_tutorial.py
     ${py_version} -m paddle_serving_server.serve --model resnet_v2_50_imagenet_model --gpu_ids 0 --port 8883 > ${dir}server_log.txt 2>&1 &
     check_result server 10
+    check_gpu_memory 0
     nvidia-smi
     ${py_version} resnet50_v2_tutorial.py > ${dir}client_log.txt 2>&1
     nvidia-smi
@@ -671,8 +715,9 @@ function criteo_ctr_rpc_gpu() {
     data_dir=${data}criteo_ctr/
     link_data ${data_dir}
     sed -i "s/8885/8886/g" test_client.py
-    ${py_version} -m paddle_serving_server.serve --model ctr_serving_model/ --port 8886 --gpu_ids 0 > ${dir}server_log.txt 2>&1 &
+    ${py_version} -m paddle_serving_server.serve --model ctr_serving_model/ --port 8886 --gpu_ids 1 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 1
     nvidia-smi
     ${py_version} test_client.py ctr_client_conf/serving_client_conf.prototxt raw_data/part-0 > ${dir}client_log.txt 2>&1
     nvidia-smi
@@ -691,6 +736,7 @@ function yolov4_rpc_gpu() {
     ${py_version} -m paddle_serving_server.serve --model yolov4_model --port 8887 --gpu_ids 0 > ${dir}server_log.txt 2>&1 &
     nvidia-smi
     check_result server 8
+    check_gpu_memory 0
     ${py_version} test_client.py 000000570688.jpg > ${dir}client_log.txt 2>&1
     nvidia-smi
     check_result client "yolov4_GPU_RPC server test completed"
@@ -708,6 +754,7 @@ function senta_rpc_cpu() {
     ${py_version} -m paddle_serving_server.serve --model yolov4_model --port 8887 --gpu_ids 0 > ${dir}server_log.txt 2>&1 &
     nvidia-smi
     check_result server 8
+    check_gpu_memory 0
     ${py_version} test_client.py 000000570688.jpg > ${dir}client_log.txt 2>&1
     nvidia-smi
     check_result client "senta_GPU_RPC server test completed"
@@ -783,13 +830,14 @@ function ResNet50_http() {
     cd ${build_path}/python/examples/imagenet
     ${py_version} resnet50_web_service.py ResNet50_vd_model gpu 8876 > ${dir}server_log.txt 2>&1 &
     check_result server 10
+    check_gpu_memory 0
     curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"image": "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"}], "fetch": ["score"]}' http://127.0.0.1:8876/image/prediction > ${dir}client_log.txt 2>&1
     check_result client "ResNet50_GPU_HTTP server test completed"
     kill_server_process
 }
 function bert_http() {
-    dir=${log_dir}http_model/ResNet50_http/
+    dir=${log_dir}http_model/bert_http/
     check_dir ${dir}
     unsetproxy
     cd ${build_path}/python/examples/bert
@@ -836,6 +884,7 @@ function grpc_yolov4() {
     echo -e "${GREEN_COLOR}grpc_impl_example_yolov4_GPU_gRPC server started${RES}"
     ${py_version} -m paddle_serving_server.serve --model yolov4_model --port 9393 --gpu_ids 0 --use_multilang > ${dir}server_log.txt 2>&1 &
     check_result server 15
+    check_gpu_memory 0
     echo -e "${GREEN_COLOR}grpc_impl_example_yolov4_GPU_gRPC client started${RES}"
     ${py_version} test_client.py 000000570688.jpg > ${dir}client_log.txt 2>&1
     check_result client "grpc_yolov4_GPU_GRPC server test completed"
@@ -857,6 +906,7 @@ function ocr_c++_service() {
     echo -e "${GREEN_COLOR}OCR_C++_Service_GPU_RPC server started${RES}"
     $py_version -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --port 9293 --gpu_id 0 > ${dir}server_log.txt 2>&1 &
     check_result server 8
+    check_gpu_memory 0
     echo -e "${GREEN_COLOR}OCR_C++_Service_GPU_RPC client started${RES}"
     echo "------------------first:"
     $py_version ocr_cpp_client.py ocr_det_client ocr_rec_client
......