Commit a8bc5b36 authored by MRXLT, committed by GitHub

Merge pull request #229 from guru4elephant/refine_fit_a_line

add WebService, fix imdb and fit-a-line examples
# Fit-a-line example: prediction through RPC service
Start the RPC service:
``` shell
sh get_data.sh
python test_server.py uci_housing_model/
```
Run prediction with the client:
``` shell
python test_client.py uci_housing_client/serving_client_conf.prototxt
```
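For a one-off request without the dataset reader, the RPC client can also be called directly. A minimal sketch, assuming the model from `get_data.sh` is served on port 9393 and reusing the 13-feature sample from the HTTP example below:
``` python
# Minimal sketch: a single RPC prediction against the uci_housing model.
from paddle_serving_client import Client

client = Client()
client.load_client_config("uci_housing_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

# One normalized 13-feature housing sample (values from the HTTP example below).
x = [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583,
     -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]
fetch_map = client.predict(feed={"x": x}, fetch=["price"])
print(fetch_map["price"])
```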
# Prediction through HTTP service
Start a web service with the default web service hosting module:
``` shell
python -m paddle_serving_server.web_serve --model uci_housing_model/ --thread 10 --port 9393 --name uci
```
Send a prediction request via HTTP POST:
``` shell
curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction
```
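The same request can be issued from Python. A sketch using the `requests` library; the exact response layout is an assumption based on the fetch names:
``` python
# Sketch: HTTP prediction equivalent to the curl command above.
import requests

data = {
    "x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583,
          -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332],
    "fetch": ["price"],  # output variables to return
}
resp = requests.post("http://127.0.0.1:9393/uci/prediction", json=data)
print(resp.json())  # expected to contain a "price" entry
```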
The updated `get_data.sh` downloads the pretrained model and client config:
``` shell
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
tar -xzf uci_housing.tar.gz
```
`test_client.py` — connect to port 9393 and fetch `price` instead of `y`:
``` diff
@@ -4,12 +4,12 @@ import sys
 client = Client()
 client.load_client_config(sys.argv[1])
-client.connect(["127.0.0.1:9292"])
+client.connect(["127.0.0.1:9393"])
 
 test_reader = paddle.batch(paddle.reader.shuffle(
     paddle.dataset.uci_housing.test(), buf_size=500), batch_size=1)
 
 for data in test_reader():
-    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["y"])
-    print("{} {}".format(fetch_map["y"][0], data[0][1][0]))
+    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
+    print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
```
`test_server.py` — the server pipeline gains a `general_response` op (`general_reader` parses the request, `general_infer` runs the model, `general_response` returns the fetch results) and moves to port 9393:
``` diff
@@ -7,13 +7,15 @@ from paddle_serving_server import Server
 op_maker = OpMaker()
 read_op = op_maker.create('general_reader')
 general_infer_op = op_maker.create('general_infer')
+response_op = op_maker.create('general_response')
 
 op_seq_maker = OpSeqMaker()
 op_seq_maker.add_op(read_op)
 op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(response_op)
 
 server = Server()
 server.set_op_sequence(op_seq_maker.get_op_sequence())
 server.load_model_config(sys.argv[1])
-server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
+server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
 server.run_server()
```
IMDB example — download the demo model and training data, then start the text classification service:
``` shell
wget https://paddle-serving.bj.bcebos.com/imdb-demo%2Fimdb_service.tar.gz
tar -xzf imdb_service.tar.gz
wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz
tar -zxvf text_classification_data.tar.gz
python text_classify_service.py serving_server_model/ workdir imdb.vocab
```
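The service registers its route as `/<name>/prediction` (see the `WebService` changes below), so with the name `imdb` and port 9393 it should answer requests like the following. A hedged example; the exact `words` input format depends on `IMDBDataset.get_words_and_label`:
``` shell
# Assumed request shape, mirroring the fit-a-line HTTP example.
curl -H "Content-Type:application/json" -X POST \
    -d '{"words": "this is a wonderful movie", "fetch": ["prediction"]}' \
    http://127.0.0.1:9393/imdb/prediction
```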
The IMDB training script now saves the model and client config under `imdb_model` and `imdb_client_conf`:
``` diff
@@ -65,6 +65,6 @@ if __name__ == "__main__":
             program=fluid.default_main_program(), dataset=dataset, debug=False)
         logger.info("TRAIN --> pass: {}".format(i))
         if i == 5:
-            serving_io.save_model("serving_server_model", "serving_client_conf",
+            serving_io.save_model("imdb_model", "imdb_client_conf",
                                   {"words": data}, {"prediction": prediction},
                                   fluid.default_main_program())
```
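The saved `imdb_client_conf` directory is what a client loads later. A minimal sketch, assuming the conventional `serving_client_conf.prototxt` filename seen in the fit-a-line example:
``` python
# Sketch: consume the saved client config (filename assumed by convention).
from paddle_serving_client import Client

client = Client()
client.load_client_config("imdb_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])
```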
`text_classify_service.py` — the service is rebuilt on the new `WebService` class and uses `IMDBDataset` for preprocessing:
``` diff
@@ -12,28 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #!flask/bin/python
-from plugin_service import PluginService
+from paddle_serving_server.web_service import WebService
+from imdb_reader import IMDBDataset
 import sys
 
-class IMDBService(PluginService):
-    def prepare_service(self, args={}):
+class IMDBService(WebService):
+    def prepare_dict(self, args={}):
         if len(args) == 0:
             exit(-1)
-        self.word_dict = {}
-        with open(args["dict_file_path"]) as fin:
-            idx = 0
-            for line in fin:
-                self.word_dict[idx] = idx
-                idx += 1
+        self.dataset = IMDBDataset()
+        self.dataset.load_resource(args["dict_file_path"])
 
     def preprocess(self, feed={}, fetch=[]):
         if "words" not in feed:
             exit(-1)
         res_feed = {}
-        res_feed["words"] = [self.word_dict[int(x)] for x in feed["words"]]
-        print(res_feed)
+        res_feed["words"] = self.dataset.get_words_and_label(feed["words"])[0]
         return res_feed, fetch
 
-imdb_service = IMDBService(name="imdb", model=sys.argv[1], port=9898)
-imdb_service.prepare_service({"dict_file_path":sys.argv[2]})
-imdb_service.start_service()
+imdb_service = IMDBService(name="imdb")
+imdb_service.load_model_config(sys.argv[1])
+imdb_service.prepare_server(workdir=sys.argv[2], port=9393, device="cpu")
+imdb_service.prepare_dict({"dict_file_path":sys.argv[3]})
+imdb_service.run_server()
```
New file `paddle_serving_server/web_serve.py` — hosts a saved model with one command:
``` python
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
    Host a trained paddle model with one line command
    Example:
        python -m paddle_serving_server.web_serve --model ./serving_server_model --port 9292
"""
import argparse
from multiprocessing import Pool, Process
from .web_service import WebService


def parse_args():
    parser = argparse.ArgumentParser("web_serve")
    parser.add_argument("--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument("--model", type=str, default="", help="Model for serving")
    parser.add_argument("--port", type=int, default=9292, help="Port of the server")
    parser.add_argument("--workdir", type=str, default="workdir", help="Working dir of current service")
    parser.add_argument("--device", type=str, default="cpu", help="Type of device")
    parser.add_argument("--name", type=str, default="default", help="Default service name")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    service = WebService(name=args.name)
    service.load_model_config(args.model)
    service.prepare_server(workdir=args.workdir, port=args.port, device=args.device)
    service.run_server()
```
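For reference, launching the fit-a-line model through this module reproduces the HTTP endpoint used in the README above:
``` shell
python -m paddle_serving_server.web_serve --model uci_housing_model/ --port 9393 --name uci
# POST requests then go to http://127.0.0.1:9393/uci/prediction
```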
`web_service.py` — `PluginService` becomes `WebService`, which launches an RPC backend and a Flask frontend in separate processes:
``` diff
@@ -13,37 +13,69 @@
 # limitations under the License.
 #!flask/bin/python
 from flask import Flask, request, abort
+from multiprocessing import Pool, Process
+from paddle_serving_server import OpMaker, OpSeqMaker, Server
 from paddle_serving_client import Client
 
-class PluginService(object):
-    def __init__(self, name="default_service", model="",
-                 port=9091, concurrency=10):
+class WebService(object):
+    def __init__(self, name="default_service"):
         self.name = name
-        self.port = port
-        self.model = model
-        self.concurrency = concurrency
 
-    def prepare_service(self, args={}):
-        return
+    def load_model_config(self, model_config):
+        self.model_config = model_config
 
-    def start_service(self):
-        app_instance = Flask(__name__)
-        self.client_service = Client()
-        self.client_service.load_client_config(
-            "{}/serving_server_conf.prototxt".format(self.model))
-        self.client_service.connect(["127.0.0.1:9292"])
+    def _launch_rpc_service(self):
+        op_maker = OpMaker()
+        read_op = op_maker.create('general_reader')
+        general_infer_op = op_maker.create('general_infer')
+        general_response_op = op_maker.create('general_response')
+        op_seq_maker = OpSeqMaker()
+        op_seq_maker.add_op(read_op)
+        op_seq_maker.add_op(general_infer_op)
+        op_seq_maker.add_op(general_response_op)
+        server = Server()
+        server.set_op_sequence(op_seq_maker.get_op_sequence())
+        server.set_num_threads(16)
+        server.load_model_config(self.model_config)
+        server.prepare_server(
+            workdir=self.workdir, port=self.port+1, device=self.device)
+        server.run_server()
+
+    def prepare_server(self, workdir="", port=9393, device="cpu"):
+        self.workdir = workdir
+        self.port = port
+        self.device = device
+
+    def _launch_web_service(self):
+        app_instance = Flask(__name__)
+        client_service = Client()
+        client_service.load_client_config(
+            "{}/serving_server_conf.prototxt".format(self.model_config))
+        client_service.connect(["127.0.0.1:{}".format(self.port+1)])
+        service_name = "/" + self.name + "/prediction"
 
-        @app_instance.route('/PaddleServing/v1.0/prediction', methods=['POST'])
+        @app_instance.route(service_name, methods=['POST'])
         def get_prediction():
             if not request.json:
                 abort(400)
             if "fetch" not in request.json:
                 abort(400)
             feed, fetch = self.preprocess(request.json, request.json["fetch"])
-            fetch_map = self.client_service.predict(feed=feed, fetch=fetch)
+            fetch_map = client_service.predict(feed=feed, fetch=fetch)
+            fetch_map = self.postprocess(
+                feed=request.json, fetch=fetch, fetch_map=fetch_map)
             return fetch_map
 
-        app_instance.run(host="127.0.0.1", port=self.port,
-                         threaded=False, processes=1)
+        app_instance.run(host="127.0.0.1", port=self.port, threaded=False, processes=1)
+
+    def run_server(self):
+        import socket
+        localIP = socket.gethostbyname(socket.gethostname())
+        print("web service address:")
+        print("http://{}:{}/{}/prediction".format(localIP, self.port, self.name))
+        p_rpc = Process(target=self._launch_rpc_service)
+        p_web = Process(target=self._launch_web_service)
+        p_rpc.start()
+        p_web.start()
+        p_web.join()
+        p_rpc.join()
 
     def preprocess(self, feed={}, fetch=[]):
         return feed, fetch
```
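`get_prediction` calls `self.postprocess` after prediction, so subclasses can reshape the response; the default (not shown in this hunk) presumably passes `fetch_map` through. A minimal, hypothetical sketch of an override:
``` python
# Hypothetical subclass: annotate the response after prediction.
from paddle_serving_server.web_service import WebService

class SentimentService(WebService):
    def postprocess(self, feed={}, fetch=[], fetch_map={}):
        # Assumption: fetch_map maps fetch variable names to raw model outputs.
        fetch_map["served_by"] = self.name  # tag responses with the service name
        return fetch_map
```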
`setup.py` for `paddle_serving_server` now depends on `paddle_serving_client`:
``` diff
@@ -30,6 +30,7 @@ max_version, mid_version, min_version = python_version()
 REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'protobuf >= 3.1.0','paddlepaddle',
+    'paddle_serving_client'
 ]
 
 packages=['paddle_serving_server',
```
And `setup.py` for `paddle_serving_server_gpu` gets the same dependency:
``` diff
@@ -30,6 +30,7 @@ max_version, mid_version, min_version = python_version()
 REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'protobuf >= 3.1.0','paddlepaddle',
+    'paddle_serving_client'
 ]
 
 packages=['paddle_serving_server_gpu',
```