diff --git a/python/examples/bert/test_multi_fetch_client.py b/python/examples/bert/test_multi_fetch_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..3749e4d384498a858c6cc784c3bfc387d7fd791f
--- /dev/null
+++ b/python/examples/bert/test_multi_fetch_client.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle_serving_client import Client
+from paddle_serving_app.reader import ChineseBertReader
+import sys
+
+client = Client()
+client.load_client_config(
+    "./bert_multi_fetch_client/serving_client_conf.prototxt")
+client.connect(["127.0.0.1:9292"])
+
+reader = ChineseBertReader({"max_seq_len": 32})
+fetch = ["sequence_10", "sequence_12", "pooled_output"]
+# expected shapes for a batch of 4 with max_seq_len 32 and hidden size 768
+expected_shape = {
+    "sequence_10": (4, 32, 768),
+    "sequence_12": (4, 32, 768),
+    "pooled_output": (4, 768)
+}
+batch_size = 4
+feed_batch = []
+
+for line in sys.stdin:
+    feed_batch.append(reader.process(line))
+    # predict once a full batch is collected, then check the fetched shapes
+    if len(feed_batch) == batch_size:
+        fetch_map = client.predict(feed=feed_batch, fetch=fetch)
+        feed_batch = []
+        for var_name in fetch:
+            if fetch_map[var_name].shape != expected_shape[var_name]:
+                print("fetch var {} shape error.".format(var_name))
+                sys.exit(1)
diff --git a/python/examples/fit_a_line/test_multi_process_client.py b/python/examples/fit_a_line/test_multi_process_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..46ba3b60b5ae09b568868531d32234ade50d8556
--- /dev/null
+++ b/python/examples/fit_a_line/test_multi_process_client.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle_serving_client import Client
+from paddle_serving_client.utils import MultiThreadRunner
+import paddle
+
+
+def single_func(idx, resource):
+    # each worker thread builds its own client and connects to both servers
+    client = Client()
+    client.load_client_config(
+        "./uci_housing_client/serving_client_conf.prototxt")
+    client.connect(["127.0.0.1:9293", "127.0.0.1:9292"])
+    test_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.uci_housing.test(), buf_size=500),
+        batch_size=1)
+    for data in test_reader():
+        fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
+    return [[0]]
+
+
+# run single_func concurrently in thread_num threads
+multi_thread_runner = MultiThreadRunner()
+thread_num = 4
+result = multi_thread_runner.run(single_func, thread_num, {})
diff --git a/tools/serving_build.sh b/tools/serving_build.sh
index c5823672739e70c61be4f0f7526e8fe89a742c2a..ffcd41e25b81f9f011112837525420cfcb8f7b93 100644
--- a/tools/serving_build.sh
+++ b/tools/serving_build.sh
@@ -343,7 +343,7 @@ function python_test_bert_multi_fetch() {
             tar -xzvf bert_multi_fetch.tar.gz
             check_cmd "python -m paddle_serving_server.serve --model bert_multi_fetch_model --port 9292 &"
             sleep 5
-            check_cmd "head -n 8 data-c.txt | python bert_multi_fetch_client.py"
+            check_cmd "head -n 8 data-c.txt | python test_multi_fetch_client.py"
             kill_server_process
             echo "bert mutli fetch RPC inference pass"
             ;;
@@ -353,7 +353,7 @@ function python_test_bert_multi_fetch() {
             tar -xzvf bert_multi_fetch.tar.gz
             check_cmd "python -m paddle_serving_server_gpu.serve --model bert_multi_fetch_model --port 9292 --gpu_ids 0 &"
             sleep 5
-            check_cmd "head -n 8 data-c.txt | python bert_multi_fetch_client.py"
+            check_cmd "head -n 8 data-c.txt | python test_multi_fetch_client.py"
             kill_server_process
             echo "bert mutli fetch RPC inference pass"
             ;;
@@ -368,7 +368,35 @@ function python_test_multi_process(){
-
+    # pwd: /Serving/python/examples
+    local TYPE=$1
+    export SERVING_BIN=${SERVING_WORKDIR}/build-server-${TYPE}/core/general-server/serving
+    cd fit_a_line # pwd: /Serving/python/examples/fit_a_line
+    case $TYPE in
+        CPU)
+            check_cmd "python -m paddle_serving_server.serve --model uci_housing_model --port 9292 &"
+            check_cmd "python -m paddle_serving_server.serve --model uci_housing_model --port 9293 &"
+            sleep 5
+            check_cmd "python test_multi_process_client.py"
+            kill_server_process
+            echo "fit_a_line multi process RPC inference pass"
+            ;;
+        GPU)
+            check_cmd "python -m paddle_serving_server_gpu.serve --model uci_housing_model --port 9292 --gpu_ids 0 &"
+            check_cmd "python -m paddle_serving_server_gpu.serve --model uci_housing_model --port 9293 --gpu_ids 0 &"
+            sleep 5
+            check_cmd "python test_multi_process_client.py"
+            kill_server_process
+            echo "fit_a_line multi process RPC inference pass"
+            ;;
+        *)
+            echo "error type"
+            exit 1
+            ;;
+    esac
+    echo "test multi process $TYPE finished as expected."
+    unset SERVING_BIN
+    cd ..
 }
 
 function python_test_imdb() {