Unverified commit e57a67eb, authored by Dong Daxiang, committed by GitHub

Merge pull request #295 from guru4elephant/unify_serve_and_web_serve

Unify serve and web serve
@@ -18,6 +18,7 @@ Usage:
         python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
 """
 import argparse
+from .web_service import WebService
 def parse_args():  # pylint: disable=doc-string-missing
@@ -28,6 +29,8 @@ def parse_args():  # pylint: disable=doc-string-missing
         "--model", type=str, default="", help="Model for serving")
     parser.add_argument(
         "--port", type=int, default=9292, help="Port the server")
+    parser.add_argument(
+        "--name", type=str, default="None", help="Web service name")
     parser.add_argument(
         "--workdir",
         type=str,
@@ -71,4 +74,13 @@ def start_standard_model():  # pylint: disable=doc-string-missing
 if __name__ == "__main__":
-    start_standard_model()
+    args = parse_args()
+    if args.name == "None":
+        start_standard_model()
+    else:
+        service = WebService(name=args.name)
+        service.load_model_config(args.model)
+        service.prepare_server(
+            workdir=args.workdir, port=args.port, device=args.device)
+        service.run_server()
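After this change a single entry point covers both modes: with no --name the plain RPC server is started via start_standard_model(), and with --name set a WebService instance is launched instead. A minimal launch sketch, using the model path from the docstring above and the service name from the CI test further down in this diff (both illustrative):

    # RPC-only server, same behavior as the old serve module
    python -m paddle_serving_server.serve --model ./serving_server_model --port 9292

    # HTTP web service, replacing the separate web_serve module
    python -m paddle_serving_server.serve --model ./serving_server_model --name uci --port 9292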
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with one line command
Example:
python -m paddle_serving_server.web_serve --model ./serving_server_model --port 9292
"""
import argparse
from multiprocessing import Pool, Process
from .web_service import WebService
def parse_args(): # pylint: disable=doc-string-missing
parser = argparse.ArgumentParser("web_serve")
parser.add_argument(
"--thread", type=int, default=10, help="Concurrency of server")
parser.add_argument(
"--model", type=str, default="", help="Model for serving")
parser.add_argument(
"--port", type=int, default=9292, help="Port the server")
parser.add_argument(
"--workdir",
type=str,
default="workdir",
help="Working dir of current service")
parser.add_argument(
"--device", type=str, default="cpu", help="Type of device")
parser.add_argument(
"--name", type=str, default="default", help="Default service name")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
service = WebService(name=args.name)
service.load_model_config(args.model)
service.prepare_server(
workdir=args.workdir, port=args.port, device=args.device)
service.run_server()
@@ -42,7 +42,7 @@ def serve_args():
         "--device", type=str, default="gpu", help="Type of device")
     parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
     parser.add_argument(
-        "--name", type=str, default="default", help="Default service name")
+        "--name", type=str, default="None", help="Default service name")
     return parser.parse_args()
@@ -88,4 +88,18 @@ def start_multi_card(args):  # pylint: disable=doc-string-missing
 if __name__ == "__main__":
     args = serve_args()
-    start_multi_card(args)
+    if args.name == "None":
+        start_multi_card(args)
+    else:
+        web_service = WebService(name=args.name)
+        web_service.load_model_config(args.model)
+        gpu_ids = []
+        if args.gpu_ids == "":
+            if "CUDA_VISIBLE_DEVICES" in os.environ:
+                gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
+        if len(gpu_ids) > 0:
+            gpus = [int(x) for x in gpu_ids.split(",")]
+            web_service.set_gpus(gpus)
+        web_service.prepare_server(
+            workdir=args.workdir, port=args.port, device=args.device)
+        web_service.run_server()
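The GPU entry point follows the same pattern; note that in the web branch the cards are taken from CUDA_VISIBLE_DEVICES only when --gpu_ids is left empty. A launch sketch under the assumption that this file is invoked as paddle_serving_server_gpu.serve (model path and service name are illustrative):

    # HTTP web service on GPUs 0 and 1, cards inherited from the environment
    CUDA_VISIBLE_DEVICES=0,1 python -m paddle_serving_server_gpu.serve --model ./serving_server_model --name uci --port 9292

    # without --name the arguments are handed to start_multi_card(args) instead
    python -m paddle_serving_server_gpu.serve --model ./serving_server_model --port 9292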
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with one line command
Example:
python -m paddle_serving_server.web_serve --model ./serving_server_model --port 9292
"""
import os
from multiprocessing import Pool, Process
from .web_service import WebService
import paddle_serving_server_gpu as serving
from paddle_serving_server_gpu import serve_args
if __name__ == "__main__":
args = serve_args()
web_service = WebService(name=args.name)
web_service.load_model_config(args.model)
gpu_ids = []
if args.gpu_ids == "":
if "CUDA_VISIBLE_DEVICES" in os.environ:
gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
if len(gpu_ids) > 0:
gpus = [int(x) for x in gpu_ids.split(",")]
web_service.set_gpus(gpus)
web_service.prepare_server(
workdir=args.workdir, port=args.port, device=args.device)
web_service.run_server()
@@ -83,7 +83,7 @@ function python_test_fit_a_line() {
     check_cmd "python test_client.py uci_housing_client/serving_client_conf.prototxt > /dev/null"
     ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill
     # test web
-    check_cmd "python -m paddle_serving_server.web_serve --model uci_housing_model/ --name uci --port 9399 --name uci > /dev/null &"
+    check_cmd "python -m paddle_serving_server.serve --model uci_housing_model/ --name uci --port 9399 --name uci > /dev/null &"
     sleep 5
     check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], \"fetch\":[\"price\"]}' http://127.0.0.1:9399/uci/prediction"
     ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill