提交 bd42658a 编写于 作者: M MRXLT 提交者: GitHub

Merge pull request #604 from MRXLT/0.2.2-web-service

fix app && web service && update version
......@@ -26,7 +26,7 @@ serving_io.save_model("serving_model", "client_conf",
{"words": data}, {"prediction": prediction},
fluid.default_main_program())
```
代码示例中,`{"words": data}`和`{"prediction": prediction}`分别指定了模型的输入和输出,`"words"`和`"prediction"`是输入和输出变量的别名,设计别名的目的是为了使开发者能够记忆自己训练模型的输入输出对应的字段。`data`和`prediction`则是Paddle训练过程中的`[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)`,通常代表张量([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor))或变长张量([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor))。调用保存命令后,会按照用户指定的`"serving_model"`和`"client_conf"`生成两个目录,内容如下:
代码示例中,`{"words": data}`和`{"prediction": prediction}`分别指定了模型的输入和输出,`"words"`和`"prediction"`是输入和输出变量的别名,设计别名的目的是为了使开发者能够记忆自己训练模型的输入输出对应的字段。`data`和`prediction`则是Paddle训练过程中的`[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)`,通常代表张量([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor))或变长张量([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor))。调用保存命令后,会按照用户指定的`"serving_model"`和`"client_conf"`生成两个目录,内容如下:
``` shell
.
├── client_conf
......
......@@ -26,7 +26,7 @@ from batching import pad_batch_data
import tokenization
import requests
import json
from bert_reader import BertReader
from paddle_serving_app.reader import ChineseBertReader
args = benchmark_args()
......@@ -37,7 +37,7 @@ def single_func(idx, resource):
for line in fin:
dataset.append(line.strip())
if args.request == "rpc":
reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
reader = ChineseBertReader(vocab_file="vocab.txt", max_seq_len=20)
fetch = ["pooled_output"]
client = Client()
client.load_client_config(args.model)
......
......@@ -25,7 +25,7 @@ from paddlehub.common.logger import logger
import socket
from paddle_serving_client import Client
from paddle_serving_client.utils import benchmark_args
from paddle_serving_app import ChineseBertReader
from paddle_serving_app.reader import ChineseBertReader
args = benchmark_args()
......
......@@ -14,14 +14,14 @@
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_server_gpu.web_service import WebService
from bert_reader import BertReader
from paddle_serving_app.reader import ChineseBertReader
import sys
import os
class BertService(WebService):
def load(self):
self.reader = BertReader(vocab_file="vocab.txt", max_seq_len=128)
self.reader = ChineseBertReader(vocab_file="vocab.txt", max_seq_len=128)
def preprocess(self, feed=[], fetch=[]):
feed_res = [
......@@ -37,5 +37,5 @@ gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
bert_service.set_gpus(gpu_ids)
bert_service.prepare_server(
workdir="workdir", port=int(sys.argv[2]), device="gpu")
bert_service.run_server()
bert_service.run_flask()
bert_service.run_rpc_service()
bert_service.run_web_service()
......@@ -68,5 +68,5 @@ if device == "gpu":
image_service.set_gpus("0,1")
image_service.prepare_server(
workdir="workdir", port=int(sys.argv[3]), device=device)
image_service.run_server()
image_service.run_flask()
image_service.run_rpc_service()
image_service.run_web_service()
......@@ -16,7 +16,7 @@
import sys
import time
import requests
from paddle_serving_app import IMDBDataset
from paddle_serving_app.reader import IMDBDataset
from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
......
......@@ -13,7 +13,7 @@
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
from paddle_serving_app import IMDBDataset
from paddle_serving_app.reader import IMDBDataset
import sys
client = Client()
......
......@@ -14,7 +14,7 @@
# pylint: disable=doc-string-missing
from paddle_serving_server.web_service import WebService
from paddle_serving_app import IMDBDataset
from paddle_serving_app.reader import IMDBDataset
import sys
......@@ -37,5 +37,5 @@ imdb_service.load_model_config(sys.argv[1])
imdb_service.prepare_server(
workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
imdb_service.run_server()
imdb_service.run_flask()
imdb_service.run_rpc_service()
imdb_service.run_web_service()
......@@ -47,5 +47,5 @@ lac_service.load_model_config(sys.argv[1])
lac_service.load_reader()
lac_service.prepare_server(
workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
lac_service.run_server()
lac_service.run_flask()
lac_service.run_rpc_service()
lac_service.run_web_service()
......@@ -14,7 +14,7 @@
from paddle_serving_app.reader import Sequential, File2Image, Resize, CenterCrop
from paddle_serving_app.reader import RGB2BGR, Transpose, Div, Normalize
from paddle_serving_app import Debugger
from paddle_serving_app.local_predict import Debugger
import sys
debugger = Debugger()
......
wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SentimentAnalysis/senta_bilstm.tar.gz --no-check-certificate
tar -xzvf senta_bilstm.tar.gz
wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/lac_model.tar.gz --no-check-certificate
wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/lac.tar.gz --no-check-certificate
tar -xzvf lac_model.tar.gz
wget https://paddle-serving.bj.bcebos.com/reader/lac/lac_dict.tar.gz --no-check-certificate
tar -xzvf lac_dict.tar.gz
......
......@@ -14,13 +14,10 @@
from paddle_serving_server_gpu.web_service import WebService
from paddle_serving_client import Client
from paddle_serving_app import LACReader, SentaReader
import numpy as np
from paddle_serving_app.reader import LACReader, SentaReader
import os
import io
import sys
import subprocess
from multiprocessing import Process, Queue
from multiprocessing import Process
class SentaService(WebService):
......@@ -33,10 +30,6 @@ class SentaService(WebService):
self.lac_client_config_path = lac_model_path + "/serving_server_conf.prototxt"
self.lac_dict_path = lac_dict_path
self.senta_dict_path = senta_dict_path
self.show = False
def show_detail(self, show=False):
self.show = show
def start_lac_service(self):
if not os.path.exists('./lac_serving'):
......@@ -64,34 +57,29 @@ class SentaService(WebService):
self.lac_client.connect(["127.0.0.1:{}".format(self.lac_port)])
def init_lac_reader(self):
self.lac_reader = LACReader(self.lac_dict_path)
self.lac_reader = LACReader()
def init_senta_reader(self):
self.senta_reader = SentaReader(vocab_path=self.senta_dict_path)
self.senta_reader = SentaReader()
def preprocess(self, feed=[], fetch=[]):
feed_data = self.lac_reader.process(feed[0]["words"])
if self.show:
print("---- lac reader ----")
print(feed_data)
lac_result = self.lac_predict(feed_data)
if self.show:
print("---- lac out ----")
print(lac_result)
segs = self.lac_reader.parse_result(feed[0]["words"],
lac_result["crf_decode"])
if self.show:
print("---- lac parse ----")
print(segs)
feed_data = self.senta_reader.process(segs)
if self.show:
print("---- senta reader ----")
print("feed_data", feed_data)
return [{"words": feed_data}], fetch
feed_data = [{
"words": self.lac_reader.process(x["words"])
} for x in feed]
lac_result = self.lac_client.predict(
feed=feed_data, fetch=["crf_decode"])
feed_batch = []
result_lod = lac_result["crf_decode.lod"]
for i in range(len(feed)):
segs = self.lac_reader.parse_result(
feed[i]["words"],
lac_result["crf_decode"][result_lod[i]:result_lod[i + 1]])
feed_data = self.senta_reader.process(segs)
feed_batch.append({"words": feed_data})
return feed_batch, fetch
senta_service = SentaService(name="senta")
#senta_service.show_detail(True)
senta_service.set_config(
lac_model_path="./lac_model",
lac_dict_path="./lac_dict",
......@@ -102,5 +90,5 @@ senta_service.prepare_server(
senta_service.init_lac_reader()
senta_service.init_senta_reader()
senta_service.init_lac_service()
senta_service.run_server()
senta_service.run_flask()
senta_service.run_rpc_service()
senta_service.run_web_service()
......@@ -158,7 +158,7 @@ Therefore, a local prediction tool is built into the paddle_serving_app, which i
Taking [fit_a_line prediction service](../examples/fit_a_line) as an example, the following code can be used to run local prediction.
```python
from paddle_serving_app import Debugger
from paddle_serving_app.local_predict import Debugger
import numpy as np
debugger = Debugger()
......
......@@ -147,7 +147,7 @@ Paddle Serving框架的server预测op使用了Paddle 的预测框架,在部署
[fit_a_line预测服务](../examples/fit_a_line)为例,使用以下代码即可执行本地预测。
```python
from paddle_serving_app import Debugger
from paddle_serving_app.local_predict import Debugger
import numpy as np
debugger = Debugger()
......
......@@ -11,10 +11,4 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .reader.chinese_bert_reader import ChineseBertReader
from .reader.image_reader import ImageReader, File2Image, URL2Image, Sequential, Normalize, CenterCrop, Resize, PadStride
from .reader.lac_reader import LACReader
from .reader.senta_reader import SentaReader
from .reader.imdb_reader import IMDBDataset
from .models import ServingModels
from .local_predict import Debugger
......@@ -37,7 +37,7 @@ class ServingModels(object):
object_detection_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/image/ObjectDetection/"
senta_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SentimentAnalysis/"
semantic_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SemanticRepresentation/"
wordseg_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/ChineseWordSegmentation/"
wordseg_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/"
self.url_dict = {}
......
......@@ -11,4 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .chinese_bert_reader import ChineseBertReader
from .image_reader import ImageReader, File2Image, URL2Image, Sequential, Normalize, CenterCrop, Resize, Transpose, Div, RGB2BGR, BGR2RGB, RCNNPostprocess, SegPostprocess, PadStride
from .lac_reader import LACReader
from .senta_reader import SentaReader
from .imdb_reader import IMDBDataset
......@@ -48,10 +48,16 @@ def load_kv_dict(dict_path,
class LACReader(object):
"""data reader"""
def __init__(self, dict_folder):
def __init__(self, dict_folder=""):
# read dict
#basepath = os.path.abspath(__file__)
#folder = os.path.dirname(basepath)
if dict_folder == "":
dict_folder = "lac_dict"
if not os.path.exists(dict_folder):
r = os.system(
"wget https://paddle-serving.bj.bcebos.com/reader/lac/lac_dict.tar.gz --no-check-certificate && tar -xzvf lac_dict.tar.gz"
)
word_dict_path = os.path.join(dict_folder, "word.dic")
label_dict_path = os.path.join(dict_folder, "tag.dic")
replace_dict_path = os.path.join(dict_folder, "q2b.dic")
......
......@@ -14,10 +14,11 @@
import sys
import io
import os
class SentaReader():
def __init__(self, vocab_path, max_seq_len=20):
def __init__(self, vocab_path="", max_seq_len=20):
self.max_seq_len = max_seq_len
self.word_dict = self.load_vocab(vocab_path)
......@@ -25,6 +26,13 @@ class SentaReader():
"""
load the given vocabulary
"""
if vocab_path == "":
vocab_path = "senta_vocab.txt"
if not os.path.exists(vocab_path):
r = os.system(
" wget https://paddle-serving.bj.bcebos.com/reader/senta/senta_vocab.txt --no-check-certificate"
)
vocab = {}
with io.open(vocab_path, 'r', encoding='utf8') as f:
for line in f:
......
......@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" Paddle Serving App version string """
serving_app_version = "0.0.3"
serving_app_version = "0.1.0"
......@@ -207,8 +207,9 @@ class Client(object):
key))
if type(feed[key]).__module__ == np.__name__ and np.size(feed[
key]) != self.feed_tensor_len[key]:
raise SystemExit("The shape of feed tensor {} not match.".format(
key))
#raise SystemExit("The shape of feed tensor {} not match.".format(
# key))
pass
def predict(self, feed=None, fetch=None, need_variant_tag=False):
self.profile_.record('py_prepro_0')
......
......@@ -33,7 +33,6 @@ def save_model(server_model_folder,
executor = Executor(place=CPUPlace())
feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]
#target_vars = list(fetch_var_dict.values())
target_vars = []
target_var_names = []
for key in sorted(fetch_var_dict.keys()):
......
......@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" Paddle Serving Client version string """
serving_client_version = "0.2.2"
serving_server_version = "0.2.2"
module_proto_version = "0.2.2"
serving_client_version = "0.3.0"
serving_server_version = "0.3.0"
module_proto_version = "0.3.0"
......@@ -103,7 +103,7 @@ if __name__ == "__main__":
service.load_model_config(args.model)
service.prepare_server(
workdir=args.workdir, port=args.port, device=args.device)
service.run_server()
service.run_rpc_service()
app_instance = Flask(__name__)
......
......@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" Paddle Serving Client version string """
serving_client_version = "0.2.2"
serving_server_version = "0.2.2"
module_proto_version = "0.2.2"
serving_client_version = "0.3.0"
serving_server_version = "0.3.0"
module_proto_version = "0.3.0"
......@@ -92,7 +92,7 @@ class WebService(object):
result = {"result": "Request Value Error"}
return result
def run_server(self):
def run_rpc_service(self):
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
......@@ -115,7 +115,7 @@ class WebService(object):
self.app_instance = app_instance
def run_flask(self):
def run_web_service(self):
self.app_instance.run(host="0.0.0.0",
port=self.port,
threaded=False,
......
......@@ -118,7 +118,7 @@ if __name__ == "__main__":
web_service.set_gpus(gpu_ids)
web_service.prepare_server(
workdir=args.workdir, port=args.port, device=args.device)
web_service.run_server()
web_service.run_rpc_service()
app_instance = Flask(__name__)
......
......@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" Paddle Serving Client version string """
serving_client_version = "0.2.2"
serving_server_version = "0.2.2"
module_proto_version = "0.2.2"
serving_client_version = "0.3.0"
serving_server_version = "0.3.0"
module_proto_version = "0.3.0"
......@@ -133,12 +133,11 @@ class WebService(object):
result = self.postprocess(
feed=feed, fetch=fetch, fetch_map=fetch_map)
result = {"result": result}
result = {"result": fetch_map}
except ValueError:
result = {"result": "Request Value Error"}
return result
def run_server(self):
def run_rpc_service(self):
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
......@@ -165,7 +164,7 @@ class WebService(object):
self.app_instance = app_instance
def run_flask(self):
def run_web_service(self):
self.app_instance.run(host="0.0.0.0",
port=self.port,
threaded=False,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册