diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py index c86c3f46b3b3ef83fb5fe630031cf28a95c52649..279e3a895e975473fc5569c4716368c3dda1d9f1 100644 --- a/python/paddle_serving_server/serve.py +++ b/python/paddle_serving_server/serve.py @@ -18,6 +18,7 @@ Usage: python -m paddle_serving_server.serve --model ./serving_server_model --port 9292 """ import argparse +from .web_service import WebService def parse_args(): # pylint: disable=doc-string-missing @@ -28,6 +29,8 @@ def parse_args(): # pylint: disable=doc-string-missing "--model", type=str, default="", help="Model for serving") parser.add_argument( "--port", type=int, default=9292, help="Port the server") + parser.add_argument( + "--name", type=str, default="None", help="Web service name") parser.add_argument( "--workdir", type=str, @@ -71,4 +74,13 @@ def start_standard_model(): # pylint: disable=doc-string-missing if __name__ == "__main__": - start_standard_model() + + args = parse_args() + if args.name == "None": + start_standard_model() + else: + service = WebService(name=args.name) + service.load_model_config(args.model) + service.prepare_server( + workdir=args.workdir, port=args.port, device=args.device) + service.run_server() diff --git a/python/paddle_serving_server/web_serve.py b/python/paddle_serving_server/web_serve.py deleted file mode 100644 index 46437ad5e53288c6ab03b32ea8882e1b3cfa66a3..0000000000000000000000000000000000000000 --- a/python/paddle_serving_server/web_serve.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Usage: - Host a trained paddle model with one line command - Example: - python -m paddle_serving_server.web_serve --model ./serving_server_model --port 9292 -""" -import argparse -from multiprocessing import Pool, Process -from .web_service import WebService - - -def parse_args(): # pylint: disable=doc-string-missing - parser = argparse.ArgumentParser("web_serve") - parser.add_argument( - "--thread", type=int, default=10, help="Concurrency of server") - parser.add_argument( - "--model", type=str, default="", help="Model for serving") - parser.add_argument( - "--port", type=int, default=9292, help="Port the server") - parser.add_argument( - "--workdir", - type=str, - default="workdir", - help="Working dir of current service") - parser.add_argument( - "--device", type=str, default="cpu", help="Type of device") - parser.add_argument( - "--name", type=str, default="default", help="Default service name") - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() - service = WebService(name=args.name) - service.load_model_config(args.model) - service.prepare_server( - workdir=args.workdir, port=args.port, device=args.device) - service.run_server() diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py index 8ee1e137fb8fe282d26bda95e4b4bffa6f670f11..02b55801c35fb5d1ed7e35c249ac07e4d3eb45ab 100644 --- a/python/paddle_serving_server_gpu/__init__.py +++ b/python/paddle_serving_server_gpu/__init__.py @@ -42,7 +42,7 @@ def serve_args(): "--device", type=str, default="gpu", help="Type of 
device") parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids") parser.add_argument( - "--name", type=str, default="default", help="Default service name") + "--name", type=str, default="None", help="Default service name") return parser.parse_args() diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py index cc9b18f6920c46c5d0119e8adfaf8f76ecf2ad26..5d9d96d517d64b21313fda0b44a83b34142b014b 100644 --- a/python/paddle_serving_server_gpu/serve.py +++ b/python/paddle_serving_server_gpu/serve.py @@ -88,4 +88,17 @@ def start_multi_card(args):  # pylint: disable=doc-string-missing if __name__ == "__main__": args = serve_args() -    start_multi_card(args) +    if args.name == "None": +        start_multi_card(args) +    else: +        web_service = WebService(name=args.name) +        web_service.load_model_config(args.model) +        gpu_ids = args.gpu_ids +        if gpu_ids == "" and "CUDA_VISIBLE_DEVICES" in os.environ: +            gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"] +        if len(gpu_ids) > 0: +            gpus = [int(x) for x in gpu_ids.split(",")] +            web_service.set_gpus(gpus) +        web_service.prepare_server( +            workdir=args.workdir, port=args.port, device=args.device) +        web_service.run_server() diff --git a/python/paddle_serving_server_gpu/web_serve.py b/python/paddle_serving_server_gpu/web_serve.py deleted file mode 100644 index 734e6d7b93b4f3ad22f330b1545b63c6ac6f2838..0000000000000000000000000000000000000000 --- a/python/paddle_serving_server_gpu/web_serve.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at - # -#     http://www.apache.org/licenses/LICENSE-2.0 - # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Usage: -    Host a trained paddle model with one line command -    Example: -        python -m paddle_serving_server.web_serve --model ./serving_server_model --port 9292 -""" -import os -from multiprocessing import Pool, Process -from .web_service import WebService -import paddle_serving_server_gpu as serving -from paddle_serving_server_gpu import serve_args - -if __name__ == "__main__": -    args = serve_args() -    web_service = WebService(name=args.name) -    web_service.load_model_config(args.model) -    gpu_ids = [] -    if args.gpu_ids == "": -        if "CUDA_VISIBLE_DEVICES" in os.environ: -            gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"] -    if len(gpu_ids) > 0: -        gpus = [int(x) for x in gpu_ids.split(",")] -        web_service.set_gpus(gpus) -    web_service.prepare_server( -        workdir=args.workdir, port=args.port, device=args.device) -    web_service.run_server() diff --git a/tools/serving_build.sh b/tools/serving_build.sh index 163f4f74429066581aa17cc78b3ab00947ba4d77..b810e3139803bd363c771c6f655cef6595177dc8 100644 --- a/tools/serving_build.sh +++ b/tools/serving_build.sh @@ -83,7 +83,7 @@ function python_test_fit_a_line() { check_cmd "python test_client.py uci_housing_client/serving_client_conf.prototxt > /dev/null" ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill # test web -        check_cmd "python -m paddle_serving_server.web_serve --model uci_housing_model/ --name uci --port 9399 --name uci > /dev/null &" +        check_cmd "python -m paddle_serving_server.serve --model uci_housing_model/ --name uci --port 9399 > /dev/null &" sleep 5 check_cmd "curl -H
\"Content-Type:application/json\" -X POST -d '{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], \"fetch\":[\"price\"]}' http://127.0.0.1:9399/uci/prediction" ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill