Commit d287744c authored by barrierye

add batch-test in ci

Parent 9c58c6f9
......@@ -40,21 +40,28 @@ def single_func(idx, resource):
if args.batch_size >= 1:
feed_batch = []
for bi in range(args.batch_size):
word_ids, label = imdb_dataset.get_words_and_label(line)
word_ids, label = imdb_dataset.get_words_and_label(dataset[bi])
feed_batch.append({"words": word_ids})
result = client.predict(feed=feed_batch, fetch=["prediction"])
if result is None:
raise ("predict failed.")
else:
print("unsupport batch size {}".format(args.batch_size))
elif args.request == "http":
for fn in filelist:
fin = open(fn)
for line in fin:
word_ids, label = imdb_dataset.get_words_and_label(line)
r = requests.post(
"http://{}/imdb/prediction".format(args.endpoint),
data={"words": word_ids,
"fetch": ["prediction"]})
#TODO: not support yet
raise ("no batch predict for http")
if args.batch_size >= 1:
feed_batch = []
for bi in range(args.batch_size):
word_ids, label = imdb_dataset.get_words_and_label(dataset[bi])
feed_batch.append(word_ids)
r = requests.post(
"http://{}/imdb/prediction".format(args.endpoint),
data={"words": feed_batch,
"fetch": ["prediction"]})
print(r)
end = time.time()
return [[end - start]]
......
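For context: in the PaddleServing benchmark scripts, `single_func` above is not called directly but driven by a multi-process harness. A minimal driver sketch, assuming the usual boilerplate at the bottom of benchmark_batch.py (`MultiThreadRunner` and `benchmark_args` from `paddle_serving_client.utils`):

```python
# Driver sketch, assuming the standard PaddleServing benchmark boilerplate;
# benchmark_args() parses the --thread/--batch_size/--model/--request/--endpoint
# flags that single_func above reads from `args`.
from paddle_serving_client.utils import MultiThreadRunner, benchmark_args

args = benchmark_args()

if __name__ == '__main__':
    multi_thread_runner = MultiThreadRunner()
    # Run single_func in args.thread workers; each worker returns
    # [[end - start]], i.e. its wall-clock time for the whole loop.
    result = multi_thread_runner.run(single_func, args.thread, {})
    print(result)
```

These are the same flags that benchmark_batch.sh sweeps over in the next file.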
......@@ -3,7 +3,7 @@ for thread_num in 1 2 4 8 16
do
for batch_size in 1 2 4 8 16 32 64 128 256 512
do
$PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model imdbo_bow_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
$PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model imdb_bow_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
echo "========================================"
echo "batch size : $batch_size" >> profile_log
$PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
......
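The sweep above runs every `(thread_num, batch_size)` pair and funnels the timings through show_profile.py into profile_log. Purely as an illustration of what those timings are: `single_func` returns `[[end - start]]` per thread, so one sweep point reduces to an average along these lines (the values and the `per_thread` name are hypothetical):

```python
# Illustrative aggregation only; per_thread stands in for the
# [[end - start]] lists that the worker threads return.
per_thread = [[1.92], [2.01], [1.88], [1.97]]  # 4 threads, seconds each
avg_cost = sum(t[0] for t in per_thread) / len(per_thread)
print("threads: {}  avg cost: {:.3f} s".format(len(per_thread), avg_cost))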
......@@ -288,15 +288,6 @@ function python_test_bert() {
pip install paddle_serving_app
check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_chinese_L-12_H-768_A-12_client/serving_client_conf.prototxt"
kill_server_process
# python prepare_model.py 20
# sh get_data.sh
# check_cmd "python -m paddle_serving_server.serve --model bert_seq20_model/ --port 9292 &"
# sleep 5
# pip install paddle_serving_app
# check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_seq20_client/serving_client_conf.prototxt"
# kill_server_process
# ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
# ps -ef | grep "serving" | grep -v grep | awk '{print $2}' | xargs kill
echo "bert RPC inference pass"
;;
GPU)
......@@ -312,14 +303,6 @@ function python_test_bert() {
pip install paddle_serving_app
check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_chinese_L-12_H-768_A-12_client/serving_client_conf.prototxt"
kill_server_process
# python prepare_model.py 20
# sh get_data.sh
# check_cmd "python -m paddle_serving_server_gpu.serve --model bert_seq20_model/ --port 9292 --gpu_ids 0 &"
# sleep 5
# pip install paddle_serving_app
# check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_seq20_client/serving_client_conf.prototxt"
# kill_server_process
# ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
echo "bert RPC inference pass"
;;
*)
......@@ -337,11 +320,11 @@ function python_test_imdb() {
case $TYPE in
CPU)
sh get_data.sh
sleep 5
check_cmd "python -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292 &"
sleep 5
check_cmd "head test_data/part-0 | python test_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab"
echo "imdb CPU RPC inference pass"
ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
kill_server_process
rm -rf work_dir1
sleep 5
......@@ -352,6 +335,13 @@ function python_test_imdb() {
ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
ps -ef | grep "text_classify_service.py" | grep -v grep | awk '{print $2}' | xargs kill
echo "imdb CPU HTTP inference pass"
# test batch predict
check_cmd "python -m paddle_serving_server.serve --model imdb_bow_model --thread 4 --port 9292 &"
sleep 5
check_cmd "python benchmark_batch.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request rpc --endpoint 127.0.0.1:9292"
kill_server_process
echo "imdb CPU rpc batch inference pass"
;;
GPU)
echo "imdb ignore GPU test"
......
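The new CI step is what the commit title means by batch-test: start an `imdb_bow_model` server, drive benchmark_batch.py with `--batch_size 8` over RPC, then shut the server down. Reduced to a hedged standalone sketch (the token ids are illustrative placeholders; the client calls mirror the diff above), the batch path is just a list-of-dicts feed:

```python
# Sketch of the batch feed layout exercised by the new CI step;
# token ids are made up, the config path matches the CI command above.
from paddle_serving_client import Client

client = Client()
client.load_client_config("imdb_bow_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

# One dict per sample; batch size here is 2.
feed_batch = [{"words": [8, 233, 52]}, {"words": [17, 2, 96, 4]}]
result = client.predict(feed=feed_batch, fetch=["prediction"])
print(result)
```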