diff --git a/doc/GRPC_IMPL_CN.md b/doc/GRPC_IMPL_CN.md index f80e0f52c626ab51385c90abeb337d07d8644f42..a60943e0d5b76aba54b5d3a08ab003f5399ef26d 100644 --- a/doc/GRPC_IMPL_CN.md +++ b/doc/GRPC_IMPL_CN.md @@ -12,7 +12,7 @@ gRPC 接口实现形式类似 Web Service: def load_model_config(self, server_config_paths, client_config_path=None) ``` - 在一些例子中 bRPC Server 端与 bRPC Client 端的配置文件可能是不同的(如 cube local 例子中,Client 端的数据先交给 cube,经过 cube 处理后再交给预测库),所以 gRPC Server 端需要获取 gRPC Client 端的配置;同时为了取消 gRPC Client 端手动加载配置文件的过程,所以设计 gRPC Server 端同时加载两个配置文件。`client_config_path` 默认为 `server_config_path/serving_server_conf.prototxt`。 + 在一些例子中 bRPC Server 端与 bRPC Client 端的配置文件可能是不同的(如 cube local 例子中,Client 端的数据先交给 cube,经过 cube 处理后再交给预测库),所以 gRPC Server 端需要获取 gRPC Client 端的配置;同时为了取消 gRPC Client 端手动加载配置文件的过程,所以设计 gRPC Server 端同时加载两个配置文件。`client_config_path` 默认为 `/serving_server_conf.prototxt`。 2. gRPC Client 端取消 `load_client_config` 步骤: diff --git a/python/examples/grpc_impl_example/fit_a_line/get_data.sh b/python/examples/grpc_impl_example/fit_a_line/get_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..84a3966a0ef323cef4b146d8e9489c70a7a8ae35 --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/get_data.sh @@ -0,0 +1,2 @@ +wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz +tar -xzf uci_housing.tar.gz diff --git a/python/examples/grpc_impl_example/test_asyn_client.py b/python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py similarity index 94% rename from python/examples/grpc_impl_example/test_asyn_client.py rename to python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py index 56fbea01a73eb97bce84cb3484b710751d75eaa9..57ba15cb07078e2f1f6d940023e9f3983e4c22da 100644 --- a/python/examples/grpc_impl_example/test_asyn_client.py +++ b/python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py @@ -13,16 +13,15 @@ # limitations under the License. 
# pylint: disable=doc-string-missing -from paddle_serving_client import MultiLangClient +from paddle_serving_client import MultiLangClient as Client +import paddle import functools -import sys import time import threading -client = MultiLangClient() +client = Client() client.connect(["127.0.0.1:9393"]) -import paddle test_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.uci_housing.test(), buf_size=500), diff --git a/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py b/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec0b77f8bc90f35299b65ad5b976f8cd11e79e3 --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py @@ -0,0 +1,31 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=doc-string-missing + +from paddle_serving_client import MultiLangClient as Client +import paddle + +client = Client() +client.connect(["127.0.0.1:9393"]) + +batch_size = 2 +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=batch_size) + +for data in test_reader(): + batch_feed = [{"x": x[0]} for x in data] + fetch_map = client.predict(feed=batch_feed, fetch=["price"]) + print(fetch_map) diff --git a/python/examples/grpc_impl_example/fit_a_line/test_general_pb_client.py b/python/examples/grpc_impl_example/fit_a_line/test_general_pb_client.py new file mode 100644 index 0000000000000000000000000000000000000000..b5e7fb21904f0bcd5064ad2e024595d7ca8c6c01 --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/test_general_pb_client.py @@ -0,0 +1,30 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=doc-string-missing + +from paddle_serving_client import MultiLangClient as Client +import paddle + +client = Client() +client.connect(["127.0.0.1:9393"]) + +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=1) + +for data in test_reader(): + fetch_map = client.predict( + feed={"x": data[0][0]}, fetch=["price"], is_python=False) + print("{} {}".format(fetch_map["price"][0], data[0][1][0])) diff --git a/python/examples/grpc_impl_example/fit_a_line/test_list_input_client.py b/python/examples/grpc_impl_example/fit_a_line/test_list_input_client.py new file mode 100644 index 0000000000000000000000000000000000000000..ccee596f28e079aef23a33412f13d6f4ac1528dc --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/test_list_input_client.py @@ -0,0 +1,29 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=doc-string-missing + +from paddle_serving_client import MultiLangClient as Client +import paddle + +client = Client() +client.connect(["127.0.0.1:9393"]) + +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=1) + +for data in test_reader(): + fetch_map = client.predict(feed={"x": data[0][0].tolist()}, fetch=["price"]) + print("{} {}".format(fetch_map["price"][0], data[0][1][0])) diff --git a/python/examples/grpc_impl_example/test_server.py b/python/examples/grpc_impl_example/fit_a_line/test_server.py similarity index 94% rename from python/examples/grpc_impl_example/test_server.py rename to python/examples/grpc_impl_example/fit_a_line/test_server.py index 23eb938f0ee1bf6b195509816dea5221bbfa9218..6acc7bfe2e6d00621f32f1f7f437691fc15d20fc 100644 --- a/python/examples/grpc_impl_example/test_server.py +++ b/python/examples/grpc_impl_example/fit_a_line/test_server.py @@ -17,7 +17,7 @@ import os import sys from paddle_serving_server import OpMaker from paddle_serving_server import OpSeqMaker -from paddle_serving_server import MultiLangServer +from paddle_serving_server import MultiLangServer as Server op_maker = OpMaker() read_op = op_maker.create('general_reader') @@ -29,7 +29,7 @@ op_seq_maker.add_op(read_op) op_seq_maker.add_op(general_infer_op) op_seq_maker.add_op(response_op) -server = MultiLangServer() +server = Server() server.set_op_sequence(op_seq_maker.get_op_sequence()) server.load_model_config(sys.argv[1]) server.prepare_server(workdir="work_dir1", port=9393, device="cpu") diff --git a/python/examples/grpc_impl_example/fit_a_line/test_server_gpu.py b/python/examples/grpc_impl_example/fit_a_line/test_server_gpu.py new file mode 100644 index 0000000000000000000000000000000000000000..2b00fe8622c56a1f2012c96a46c51559ff5131fe --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/test_server_gpu.py @@ -0,0 +1,37 @@ +# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + +import os +import sys +from paddle_serving_server_gpu import OpMaker +from paddle_serving_server_gpu import OpSeqMaker +from paddle_serving_server_gpu import MultiLangServer as Server + +op_maker = OpMaker() +read_op = op_maker.create('general_reader') +general_infer_op = op_maker.create('general_infer') +response_op = op_maker.create('general_response') + +op_seq_maker = OpSeqMaker() +op_seq_maker.add_op(read_op) +op_seq_maker.add_op(general_infer_op) +op_seq_maker.add_op(response_op) + +server = Server() +server.set_op_sequence(op_seq_maker.get_op_sequence()) +server.load_model_config(sys.argv[1]) +server.set_gpuid(0) +server.prepare_server(workdir="work_dir1", port=9393, device="gpu") +server.run_server() diff --git a/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py b/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py new file mode 100644 index 0000000000000000000000000000000000000000..df5775472844c338fa7506c780d0a5fee550198e --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py @@ -0,0 +1,29 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing + +from paddle_serving_client import MultiLangClient as Client +import paddle + +client = Client() +client.connect(["127.0.0.1:9393"]) + +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=1) + +for data in test_reader(): + fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"]) + print("{} {}".format(fetch_map["price"][0], data[0][1][0])) diff --git a/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py b/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py new file mode 100644 index 0000000000000000000000000000000000000000..14967681c2682278cec219e09d6d49c4144b4a60 --- /dev/null +++ b/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py @@ -0,0 +1,37 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=doc-string-missing + +from paddle_serving_client import MultiLangClient as Client +import paddle +import grpc + +client = Client() +client.connect(["127.0.0.1:9393"]) +client.set_rpc_timeout_ms(1) + +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=1) + +for data in test_reader(): + try: + fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"]) + except grpc.RpcError as e: + status_code = e.code() + if grpc.StatusCode.DEADLINE_EXCEEDED == status_code: + print('timeout') + else: + print("{} {}".format(fetch_map["price"][0], data[0][1][0])) diff --git a/tools/serving_build.sh b/tools/serving_build.sh index c0e4f41aed02516791e2a0ab2137eca8d524f8a3..503d9632fad1a6747b4322e4d6baff790f45e19f 100644 --- a/tools/serving_build.sh +++ b/tools/serving_build.sh @@ -171,12 +171,6 @@ function python_test_fit_a_line() { fi setproxy # recover proxy state kill_server_process - - # test grpc impl - check_cmd "python -m paddle_serving_server.serve --model uci_housing_model --port 9393 --thread 4 --use_multilang> /dev/null &" - sleep 5 # wait for the server to start - check_cmd "python test_multilang_client.py uci_housing_client/serving_client_conf.prototxt > /dev/null" - kill_server_process ;; GPU) export CUDA_VISIBLE_DEVICES=0 @@ -207,12 +201,6 @@ function python_test_fit_a_line() { fi setproxy # recover proxy state kill_server_process - - # test grpc impl - check_cmd "python -m paddle_serving_server_gpu.serve --model uci_housing_model --port 9393 --thread 4 --gpu_ids 0 --use_multilang> /dev/null &" - sleep 5 # wait for the server to start - check_cmd "python test_multilang_client.py uci_housing_client/serving_client_conf.prototxt > /dev/null" - kill_server_process ;; *) echo "error type" @@ -511,6 +499,80 @@ function python_test_lac() { cd .. 
} +function python_test_grpc_impl() { + # pwd: /Serving/python/examples + cd grpc_impl_example # pwd: /Serving/python/examples/grpc_impl_example + local TYPE=$1 + export SERVING_BIN=${SERVING_WORKDIR}/build-server-${TYPE}/core/general-server/serving + case $TYPE in + CPU) + # test general case + cd fit_a_line # pwd: /Serving/python/examples/grpc_impl_example/fit_a_line + sh get_data.sh + + # one line command start + check_cmd "python -m paddle_serving_server.serve --model uci_housing_model --port 9393 --thread 4 --use_multilang > /dev/null &" + sleep 5 # wait for the server to start + check_cmd "python test_sync_client.py > /dev/null" + check_cmd "python test_asyn_client.py > /dev/null" + check_cmd "python test_general_pb_client.py > /dev/null" + check_cmd "python test_list_input_client.py > /dev/null" + check_cmd "python test_timeout_client.py > /dev/null" + check_cmd "python test_batch_client.py > /dev/null" + kill_server_process + + check_cmd "python test_server.py > /dev/null &" + sleep 5 # wait for the server to start + check_cmd "python test_sync_client.py > /dev/null" + check_cmd "python test_asyn_client.py > /dev/null" + check_cmd "python test_general_pb_client.py > /dev/null" + check_cmd "python test_list_input_client.py > /dev/null" + check_cmd "python test_timeout_client.py > /dev/null" + check_cmd "python test_batch_client.py > /dev/null" + kill_server_process + + cd .. 
# pwd: /Serving/python/examples/grpc_impl_example + ;; + GPU) + export CUDA_VISIBLE_DEVICES=0 + # test general case + cd fit_a_line # pwd: /Serving/python/examples/grpc_impl_example/fit_a_line + sh get_data.sh + + # one line command start + check_cmd "python -m paddle_serving_server_gpu.serve --model uci_housing_model --port 9393 --thread 4 --gpu_ids 0 --use_multilang > /dev/null &" + sleep 5 # wait for the server to start + check_cmd "python test_sync_client.py > /dev/null" + check_cmd "python test_asyn_client.py > /dev/null" + check_cmd "python test_general_pb_client.py > /dev/null" + check_cmd "python test_list_input_client.py > /dev/null" + check_cmd "python test_timeout_client.py > /dev/null" + check_cmd "python test_batch_client.py > /dev/null" + kill_server_process + + check_cmd "python test_server_gpu.py > /dev/null &" + sleep 5 # wait for the server to start + check_cmd "python test_sync_client.py > /dev/null" + check_cmd "python test_asyn_client.py > /dev/null" + check_cmd "python test_general_pb_client.py > /dev/null" + check_cmd "python test_list_input_client.py > /dev/null" + check_cmd "python test_timeout_client.py > /dev/null" + check_cmd "python test_batch_client.py > /dev/null" + kill_server_process + + cd .. # pwd: /Serving/python/examples/grpc_impl_example + ;; + *) + echo "error type" + exit 1 + ;; + esac + echo "test grpc impl $TYPE part finished as expected." + rm -rf image kvdb log uci_housing* work* + unset SERVING_BIN + cd .. # pwd: /Serving/python/examples +} + function python_run_test() { # Using the compiled binary local TYPE=$1 # pwd: /Serving @@ -522,6 +584,7 @@ function python_run_test() { python_test_lac $TYPE # pwd: /Serving/python/examples python_test_multi_process $TYPE # pwd: /Serving/python/examples python_test_multi_fetch $TYPE # pwd: /Serving/python/examples + python_test_grpc_impl $TYPE # pwd: /Serving/python/examples echo "test python $TYPE part finished as expected." cd ../.. # pwd: /Serving }