Commit 8a9f895d authored by MRXLT

fix ci

Parent 668fd8f7
@@ -17,8 +17,7 @@ from paddle_serving_app.reader import ChineseBertReader
 import sys
 client = Client()
-client.load_client_config(
-    "./bert_multi_fetch_client/serving_client_conf.prototxt")
+client.load_client_config("./bert_seq32_client/serving_client_conf.prototxt")
 client.connect(["127.0.0.1:9292"])
 reader = ChineseBertReader({"max_seq_len": 32})
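For reference, the hunk above shows only the top of the CI test client that this commit repoints at the bert_seq32_client config. The sketch below fills in how such a multi-fetch client script is typically completed, assuming the Client.predict(feed=..., fetch=...) call and the ChineseBertReader.process() helper used in the Paddle Serving BERT examples; the fetch variable names are placeholders, not taken from this diff.

```python
# Minimal, hypothetical version of the full client script (fetch names assumed;
# check the variables listed in serving_client_conf.prototxt).
import sys
from paddle_serving_client import Client
from paddle_serving_app.reader import ChineseBertReader

client = Client()
client.load_client_config("./bert_seq32_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

reader = ChineseBertReader({"max_seq_len": 32})
fetch_names = ["pooled_output", "sequence_output"]  # assumed fetch variables

# Each stdin line is one raw text sample; the reader turns it into a feed dict
# and the client asks the server for several outputs in a single request.
for line in sys.stdin:
    feed_dict = reader.process(line)
    result = client.predict(feed=feed_dict, fetch=fetch_names)
    print(result)
```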
@@ -341,7 +341,7 @@ function python_test_multi_fetch() {
 #download model (max_seq_len=32)
 wget https://paddle-serving.bj.bcebos.com/bert_example/bert_multi_fetch.tar.gz
 tar -xzvf bert_multi_fetch.tar.gz
-check_cmd "python -m paddle_serving_server.serve --model bert_multi_fetch_model --port 9292 &"
+check_cmd "python -m paddle_serving_server.serve --model bert_seq32_model --port 9292 &"
 sleep 5
 check_cmd "head -n 8 data-c.txt | python test_multi_fetch_client.py"
 kill_server_process
@@ -351,7 +351,7 @@ function python_test_multi_fetch() {
 #download model (max_seq_len=32)
 wget https://paddle-serving.bj.bcebos.com/bert_example/bert_multi_fetch.tar.gz
 tar -xzvf bert_multi_fetch.tar.gz
-check_cmd "python -m paddle_serving_server_gpu.serve --model bert_multi_fetch_model --port 9292 --gpu_ids 0 &"
+check_cmd "python -m paddle_serving_server_gpu.serve --model bert_seq32_model --port 9292 --gpu_ids 0 &"
 sleep 5
 check_cmd "head -n 8 data-c.txt | python test_multi_fetch_client.py"
 kill_server_process
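The check_cmd helper used in both hunks is defined elsewhere in the CI script and is not part of this diff. A plausible minimal definition, assuming it simply runs the given command string and fails the job on a non-zero exit status, is:

```bash
# Hypothetical helper, inferred from how it is used above (not shown in this
# commit): run the command and abort the CI run if it returns non-zero.
function check_cmd() {
    eval $@
    if [ $? -ne 0 ]; then
        exit 1
    fi
}
```

Because the serve commands end in "&", check_cmd only verifies that the server process launched; the subsequent "sleep 5" gives the server time to load the renamed bert_seq32_model before the client test runs, and kill_server_process cleans it up afterwards.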