Unverified commit ab129905, authored by Dong Daxiang, committed by GitHub

Merge pull request #471 from barrierye/add-batch-test

add batch predict UT
@@ -24,7 +24,9 @@ class BertService(WebService):
         self.reader = BertReader(vocab_file="vocab.txt", max_seq_len=128)
 
     def preprocess(self, feed={}, fetch=[]):
-        feed_res = self.reader.process(feed["words"].encode("utf-8"))
+        feed_res = [{
+            "words": self.reader.process(ins["words"].encode("utf-8"))
+        } for ins in feed]
         return feed_res, fetch
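After this change, `preprocess` expects `feed` to be a list of instances rather than a single dict. A minimal client-side sketch of a batched HTTP call under that assumption; the port, service name, and fetch variable name are illustrative guesses, not taken from this commit:

    import requests

    # Hedged sketch: batched request to the Bert web service. The endpoint
    # and the fetch variable name are assumptions for illustration only.
    body = {
        "feed": [{"words": "hello serving"}, {"words": "batch predict"}],
        "fetch": ["pooled_output"],
    }
    r = requests.post("http://127.0.0.1:9292/bert/prediction", json=body)
    print(r.json())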
@@ -23,23 +23,14 @@ from paddle_serving_app import ImageReader
 class ImageService(WebService):
     def preprocess(self, feed={}, fetch=[]):
         reader = ImageReader()
-        if "image" not in feed:
-            raise ("feed data error!")
-        if isinstance(feed["image"], list):
-            feed_batch = []
-            for image in feed["image"]:
-                sample = base64.b64decode(image)
-                img = reader.process_image(sample)
-                res_feed = {}
-                res_feed["image"] = img
-                feed_batch.append(res_feed)
-            return feed_batch, fetch
-        else:
-            sample = base64.b64decode(feed["image"])
+        feed_batch = []
+        for ins in feed:
+            if "image" not in ins:
+                raise ("feed data error!")
+            sample = base64.b64decode(ins["image"])
             img = reader.process_image(sample)
-            res_feed = {}
-            res_feed["image"] = img
-            return res_feed, fetch
+            feed_batch.append({"image": img})
+        return feed_batch, fetch
 
 image_service = ImageService(name="image")
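The rewritten `preprocess` iterates over `feed`, so each instance carries its own base64-encoded image. A hedged client-side sketch; the file name, port, and fetch variable name are assumptions for illustration:

    import base64
    import requests

    # Hedged sketch: two-image batch request. The endpoint port and the
    # fetch name "score" are assumptions for illustration only.
    with open("daisy.jpg", "rb") as f:
        image = base64.b64encode(f.read()).decode("utf-8")

    body = {"feed": [{"image": image}, {"image": image}], "fetch": ["score"]}
    r = requests.post("http://127.0.0.1:9393/image/prediction", json=body)
    print(r.json())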
@@ -23,24 +23,14 @@ from paddle_serving_server_gpu.web_service import WebService
 class ImageService(WebService):
     def preprocess(self, feed={}, fetch=[]):
         reader = ImageReader()
-        if "image" not in feed:
-            raise ("feed data error!")
-        print(type(feed["image"]), isinstance(feed["image"], list))
-        if isinstance(feed["image"], list):
-            feed_batch = []
-            for image in feed["image"]:
-                sample = base64.b64decode(image)
-                img = reader.process_image(sample)
-                res_feed = {}
-                res_feed["image"] = img
-                feed_batch.append(res_feed)
-            return feed_batch, fetch
-        else:
-            sample = base64.b64decode(feed["image"])
+        feed_batch = []
+        for ins in feed:
+            if "image" not in ins:
+                raise ("feed data error!")
+            sample = base64.b64decode(ins["image"])
             img = reader.process_image(sample)
-            res_feed = {}
-            res_feed["image"] = img
-            return res_feed, fetch
+            feed_batch.append({"image": img})
+        return feed_batch, fetch
 
 image_service = ImageService(name="image")
@@ -40,21 +40,29 @@ def single_func(idx, resource):
         if args.batch_size >= 1:
             feed_batch = []
             for bi in range(args.batch_size):
-                word_ids, label = imdb_dataset.get_words_and_label(line)
+                word_ids, label = imdb_dataset.get_words_and_label(dataset[
+                    bi])
                 feed_batch.append({"words": word_ids})
             result = client.predict(feed=feed_batch, fetch=["prediction"])
+            if result is None:
+                raise ("predict failed.")
         else:
             print("unsupport batch size {}".format(args.batch_size))
     elif args.request == "http":
-        for fn in filelist:
-            fin = open(fn)
-            for line in fin:
-                word_ids, label = imdb_dataset.get_words_and_label(line)
-                r = requests.post(
-                    "http://{}/imdb/prediction".format(args.endpoint),
-                    data={"words": word_ids,
-                          "fetch": ["prediction"]})
+        if args.batch_size >= 1:
+            feed_batch = []
+            for bi in range(args.batch_size):
+                feed_batch.append({"words": dataset[bi]})
+            r = requests.post(
+                "http://{}/imdb/prediction".format(args.endpoint),
+                json={"feed": feed_batch,
+                      "fetch": ["prediction"]})
+            if r.status_code != 200:
+                print('HTTP status code -ne 200')
+                raise ("predict failed.")
+        else:
+            print("unsupport batch size {}".format(args.batch_size))
     end = time.time()
     return [[end - start]]
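Note the switch from `data=` to `json=` in the HTTP branch: a batch is a list of dicts, which form encoding cannot express, so the whole `{"feed": ..., "fetch": ...}` document is sent as JSON. A small illustration of the payload for `batch_size=2`; the sample sentences are invented:

    import json

    # Illustration only: the document the HTTP branch sends for batch_size=2,
    # i.e. what requests.post(..., json=body) serializes on the wire.
    feed_batch = [{"words": "i am very sad | 0"}, {"words": "i am very happy | 1"}]
    body = {"feed": feed_batch, "fetch": ["prediction"]}
    print(json.dumps(body))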
@@ -3,7 +3,7 @@ for thread_num in 1 2 4 8 16
 do
     for batch_size in 1 2 4 8 16 32 64 128 256 512
     do
-        $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model imdbo_bow_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
+        $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model imdb_bow_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
         echo "========================================"
         echo "batch size : $batch_size" >> profile_log
         $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
@@ -26,10 +26,9 @@ class IMDBService(WebService):
         self.dataset.load_resource(args["dict_file_path"])
 
     def preprocess(self, feed={}, fetch=[]):
-        if "words" not in feed:
-            exit(-1)
-        res_feed = {}
-        res_feed["words"] = self.dataset.get_words_only(feed["words"])
+        res_feed = [{
+            "words": self.dataset.get_words_only(ins["words"])
+        } for ins in feed]
         return res_feed, fetch
@@ -108,15 +108,15 @@ class LACReader(object):
         partial_word = ""
         for ind, tag in enumerate(tags):
             if partial_word == "":
-                partial_word = words[ind]
+                partial_word = self.id2word_dict[str(words[ind])]
                 tags_out.append(tag.split('-')[0])
                 continue
             if tag.endswith("-B") or (tag == "O" and tag[ind - 1] != "O"):
                 sent_out.append(partial_word)
                 tags_out.append(tag.split('-')[0])
-                partial_word = words[ind]
+                partial_word = self.id2word_dict[str(words[ind])]
                 continue
-            partial_word += words[ind]
+            partial_word += self.id2word_dict[str(words[ind])]
 
         if len(sent_out) < len(tags_out):
             sent_out.append(partial_word)
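The lookup through `self.id2word_dict` means `parse_result` now merges human-readable tokens instead of raw word ids. A self-contained sketch of the same BIO-style merge on toy data, simplified to the `-B` boundary case; the toy dictionary and tags are invented for illustration:

    # Toy re-implementation of the merge loop above; dictionary contents are
    # invented, and the "O"-tag corner case is omitted for brevity.
    id2word_dict = {"1": "北", "2": "京", "3": "欢", "4": "迎"}
    words = [1, 2, 3, 4]
    tags = ["LOC-B", "LOC-I", "v-B", "v-I"]

    sent_out, tags_out, partial_word = [], [], ""
    for ind, tag in enumerate(tags):
        word = id2word_dict[str(words[ind])]
        if partial_word == "":
            partial_word = word
            tags_out.append(tag.split('-')[0])
            continue
        if tag.endswith("-B"):
            sent_out.append(partial_word)  # a new word begins: close the previous one
            tags_out.append(tag.split('-')[0])
            partial_word = word
            continue
        partial_word += word  # extend the current word
    if len(sent_out) < len(tags_out):
        sent_out.append(partial_word)

    print(sent_out)  # ['北京', '欢迎']
    print(tags_out)  # ['LOC', 'v']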
@@ -22,15 +22,24 @@ class LACService(WebService):
         self.reader = LACReader("lac_dict")
 
     def preprocess(self, feed={}, fetch=[]):
-        if "words" not in feed:
-            raise ("feed data error!")
-        feed_data = self.reader.process(feed["words"])
+        feed_batch = []
+        for ins in feed:
+            if "words" not in ins:
+                raise ("feed data error!")
+            feed_data = self.reader.process(ins["words"])
+            feed_batch.append({"words": feed_data})
         fetch = ["crf_decode"]
-        return {"words": feed_data}, fetch
+        return feed_batch, fetch
 
     def postprocess(self, feed={}, fetch=[], fetch_map={}):
-        segs = self.reader.parse_result(feed["words"], fetch_map["crf_decode"])
-        return {"word_seg": "|".join(segs)}
+        batch_ret = []
+        for idx, ins in enumerate(feed):
+            begin = fetch_map['crf_decode.lod'][idx]
+            end = fetch_map['crf_decode.lod'][idx + 1]
+            segs = self.reader.parse_result(ins["words"],
+                                            fetch_map["crf_decode"][begin:end])
+            batch_ret.append({"word_seg": "|".join(segs)})
+        return batch_ret
 
 lac_service = LACService(name="lac")
@@ -39,3 +48,4 @@ lac_service.load_reader()
 lac_service.prepare_server(
     workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
 lac_service.run_server()
+lac_service.run_flask()
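The batched `postprocess` relies on the LoD offsets returned alongside `crf_decode`: offsets `idx` and `idx + 1` bracket the tag rows that belong to instance `idx`. A toy illustration of that slicing; all values are invented:

    # Toy illustration of LoD slicing; values are invented. Two instances
    # of lengths 3 and 2 yield the offset vector [0, 3, 5].
    fetch_map = {
        "crf_decode": ["LOC-B", "LOC-I", "O", "v-B", "v-I"],
        "crf_decode.lod": [0, 3, 5],
    }
    lod = fetch_map["crf_decode.lod"]
    for idx in range(len(lod) - 1):
        begin, end = lod[idx], lod[idx + 1]
        print(idx, fetch_map["crf_decode"][begin:end])
    # 0 ['LOC-B', 'LOC-I', 'O']
    # 1 ['v-B', 'v-I']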
@@ -87,7 +87,7 @@ class SentaService(WebService):
         if self.show:
             print("---- senta reader ----")
             print("feed_data", feed_data)
-        return {"words": feed_data}, fetch
+        return [{"words": feed_data}], fetch
 
 senta_service = SentaService(name="senta")
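With the batched interface, even a single instance must be returned as a one-element list; that is all this change does.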
@@ -157,12 +157,20 @@ function python_test_fit_a_line() {
             check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction"
             # check http code
             http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction`
-            setproxy # recover proxy state
-            kill_server_process
             if [ ${http_code} -ne 200 ]; then
                 echo "HTTP status code -ne 200"
                 exit 1
             fi
+            # test web batch
+            check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}, {\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction"
+            # check http code
+            http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}, {"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction`
+            if [ ${http_code} -ne 200 ]; then
+                echo "HTTP status code -ne 200"
+                exit 1
+            fi
+            setproxy # recover proxy state
+            kill_server_process
             ;;
         GPU)
             export CUDA_VISIBLE_DEVICES=0
@@ -179,12 +187,20 @@ function python_test_fit_a_line() {
             check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction"
             # check http code
             http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction`
-            setproxy # recover proxy state
-            kill_server_process
             if [ ${http_code} -ne 200 ]; then
                 echo "HTTP status code -ne 200"
                 exit 1
             fi
+            # test web batch
+            check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}, {\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction"
+            # check http code
+            http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}, {"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction`
+            if [ ${http_code} -ne 200 ]; then
+                echo "HTTP status code -ne 200"
+                exit 1
+            fi
+            setproxy # recover proxy state
+            kill_server_process
             ;;
         *)
             echo "error type"
@@ -288,15 +304,6 @@ function python_test_bert() {
             pip install paddle_serving_app
             check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_chinese_L-12_H-768_A-12_client/serving_client_conf.prototxt"
             kill_server_process
-            # python prepare_model.py 20
-            # sh get_data.sh
-            # check_cmd "python -m paddle_serving_server.serve --model bert_seq20_model/ --port 9292 &"
-            # sleep 5
-            # pip install paddle_serving_app
-            # check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_seq20_client/serving_client_conf.prototxt"
-            # kill_server_process
-            # ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
-            # ps -ef | grep "serving" | grep -v grep | awk '{print $2}' | xargs kill
             echo "bert RPC inference pass"
             ;;
         GPU)
@@ -312,14 +319,6 @@ function python_test_bert() {
             pip install paddle_serving_app
             check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_chinese_L-12_H-768_A-12_client/serving_client_conf.prototxt"
             kill_server_process
-            # python prepare_model.py 20
-            # sh get_data.sh
-            # check_cmd "python -m paddle_serving_server_gpu.serve --model bert_seq20_model/ --port 9292 --gpu_ids 0 &"
-            # sleep 5
-            # pip install paddle_serving_app
-            # check_cmd "head -n 10 data-c.txt | python bert_client.py --model bert_seq20_client/serving_client_conf.prototxt"
-            # kill_server_process
-            # ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
             echo "bert RPC inference pass"
             ;;
         *)
@@ -340,19 +339,29 @@ function python_test_imdb() {
     case $TYPE in
         CPU)
             sh get_data.sh
-            sleep 5
             check_cmd "python -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292 &"
+            sleep 5
             check_cmd "head test_data/part-0 | python test_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab"
+            # test batch predict
+            check_cmd "python benchmark_batch.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request rpc --endpoint 127.0.0.1:9292"
             echo "imdb CPU RPC inference pass"
-            ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
+            kill_server_process
             rm -rf work_dir1
             sleep 5
-            check_cmd "python text_classify_service.py imdb_cnn_model/workdir/9292 imdb.vocab &"
+            unsetproxy # maybe the proxy is used on iPipe, which makes web-test failed.
+            check_cmd "python text_classify_service.py imdb_cnn_model/ workdir/ 9292 imdb.vocab &"
             sleep 5
             check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"words\": \"i am very sad | 0\"}], \"fetch\":[\"prediction\"]}' http://127.0.0.1:9292/imdb/prediction"
+            http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "i am very sad | 0"}], "fetch":["prediction"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9292/imdb/prediction`
+            if [ ${http_code} -ne 200 ]; then
+                echo "HTTP status code -ne 200"
+                exit 1
+            fi
+            # test batch predict
+            check_cmd "python benchmark_batch.py --thread 4 --batch_size 8 --model imdb_bow_client_conf/serving_client_conf.prototxt --request http --endpoint 127.0.0.1:9292"
+            setproxy # recover proxy state
             kill_server_process
-            ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
             ps -ef | grep "text_classify_service.py" | grep -v grep | awk '{print $2}' | xargs kill
             echo "imdb CPU HTTP inference pass"
             ;;
@@ -379,15 +388,30 @@ function python_test_lac() {
             sh get_data.sh
             check_cmd "python -m paddle_serving_server.serve --model jieba_server_model/ --port 9292 &"
             sleep 5
-            check_cmd "echo "我爱北京天安门" | python lac_client.py jieba_client_conf/serving_client_conf.prototxt lac_dict/"
+            check_cmd "echo \"我爱北京天安门\" | python lac_client.py jieba_client_conf/serving_client_conf.prototxt lac_dict/"
             echo "lac CPU RPC inference pass"
-            ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
+            kill_server_process
+            unsetproxy # maybe the proxy is used on iPipe, which makes web-test failed.
             check_cmd "python lac_web_service.py jieba_server_model/ lac_workdir 9292 &"
             sleep 5
-            check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"words\": \"i am very sad | 0\"}], \"fetch\":[\"prediction\"]}' http://127.0.0.1:9292/imdb/prediction"
+            check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"words\": \"我爱北京天安门\"}], \"fetch\":[\"word_seg\"]}' http://127.0.0.1:9292/lac/prediction"
+            # check http code
+            http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "我爱北京天安门"}], "fetch":["word_seg"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9292/lac/prediction`
+            if [ ${http_code} -ne 200 ]; then
+                echo "HTTP status code -ne 200"
+                exit 1
+            fi
+            # http batch
+            check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"words\": \"我爱北京天安门\"}, {\"words\": \"我爱北京天安门\"}], \"fetch\":[\"word_seg\"]}' http://127.0.0.1:9292/lac/prediction"
+            # check http code
+            http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "我爱北京天安门"}, {"words": "我爱北京天安门"}], "fetch":["word_seg"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9292/lac/prediction`
+            if [ ${http_code} -ne 200 ]; then
+                echo "HTTP status code -ne 200"
+                exit 1
+            fi
+            setproxy # recover proxy state
             kill_server_process
-            ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
             ps -ef | grep "lac_web_service" | grep -v grep | awk '{print $2}' | xargs kill
             echo "lac CPU HTTP inference pass"
             ;;