Unverified commit bf3b7453, authored by 走神的阿圆, committed by GitHub

Update serving docs. (#430)

* update serving_v2 docs
Parent 41f6d967
@@ -44,7 +44,6 @@ $ hub serving start --config config.json
   "modules_info": {
     "yolov3_darknet53_coco2017": {
       "init_args": {
-        "directory": "./my_yolov3",
         "version": "1.0.0"
       },
       "predict_args": {
@@ -54,8 +53,7 @@ $ hub serving start --config config.json
     },
     "lac": {
       "init_args": {
-        "version": "2.1.0",
-        "user_dict": "./dict.txt"
+        "version": "1.1.0"
       },
       "predict_args": {
         "batch_size": 1,
@@ -127,16 +125,20 @@ $ hub serving start -c serving_config.json
 where the content of `serving_config.json` is as follows:
 ```json
 {
-  "modules_info": [
-    {
-      "module": "lac",
-      "version": "1.0.0",
-      "batch_size": 1
-    }
-  ],
-  "use_gpu": false,
+  "modules_info": {
+    "lac": {
+      "init_args": {
+        "version": "1.1.0"
+      },
+      "predict_args": {
+        "batch_size": 1,
+        "use_gpu": false
+      }
+    }
+  },
   "port": 8866,
-  "use_multiprocess": false
+  "use_multiprocess": false,
+  "workers": 2
 }
 ```
 The screen on a successful start-up is shown in the figure:
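For reference, the new-style config groups per-module options into `init_args` (used when the module is loaded) and `predict_args` (defaults merged into each prediction call). The sketch below shows one plausible way such a config is consumed; the loading loop, variable names, and the assumption that `hub.Module` accepts the keys under `init_args` are illustrative, not PaddleHub internals.

```python
import json

import paddlehub as hub

# Minimal sketch of consuming the new-style serving_config.json shown above.
with open("serving_config.json", "r") as fp:
    config = json.load(fp)

modules_info = config["modules_info"]
# The new format is a dict keyed by module name; the old list format is rejected.
assert isinstance(modules_info, dict), "old list-style modules_info is no longer accepted"

loaded = {}
for name, info in modules_info.items():
    # "init_args" configure how the module is loaded (assumed to be constructor kwargs) ...
    module = hub.Module(name=name, **info.get("init_args", {}))
    # ... while "predict_args" are defaults merged into every prediction request later.
    loaded[name] = {"module": module, "predict_args": info.get("predict_args", {})}
```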
@@ -256,7 +258,7 @@ $ PaddleHub Serving will stop.
 Taking lac (2.1.0) as an example, a request made in the old way will return the following prompt:
 ```python
 {
-    "Warnning": "This usage is out of date, please use 'application/json' as content-type to post to /predict/lac. See 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.5/docs/tutorial/serving.md' for more details."
+    "Warnning": "This usage is out of date, please use 'application/json' as content-type to post to /predict/lac. See 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.6/docs/tutorial/serving.md' for more details."
 }
 ```
 For lac (2.1.0), the request is made as follows:
@@ -272,7 +274,7 @@ if __name__ == "__main__":
     # For local deployment, this corresponds to lac.analysis_lexical(data=text)
     data = {"data": text}
     # Specify lac as the prediction method and send the POST request
-    url = "http://127.0.0.1:8866/predict/porn_detection_gru"
+    url = "http://127.0.0.1:8866/predict/lac"
     # Set the POST request headers to application/json
     headers = {"Content-Type": "application/json"}
@@ -304,7 +306,7 @@ if __name__ == "__main__":
     ]
 }
 ```
-For the details and code of this demo, see [LAC Serving_2.1.0](../../demo/serving/module_serving/lexical_analysis_lac)
+For the details and code of this demo, see [LAC Serving_2.1.0](../../demo/serving/module_serving/lexical_analysis_lac/lac_2.1.0_serving_demo.py)
 ## Bert Service
 Besides one-click serving deployment of pre-trained models, PaddleHub Serving also provides `Bert Service`, which supports fast deployment of models such as ernie_tiny and bert and exposes a reliable online embedding service. For details, see [Bert Service](./bert_service.md)
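Pulling the truncated snippet above together, a self-contained version of the new `application/json` request for lac might look like the following; the sample sentence is illustrative.

```python
import json

import requests

if __name__ == "__main__":
    # Sample input; for local deployment this corresponds to
    # lac.analysis_lexical(data=text).
    text = ["今天是个好日子"]
    data = {"data": text}
    # Post to the lac prediction endpoint with a JSON body.
    url = "http://127.0.0.1:8866/predict/lac"
    headers = {"Content-Type": "application/json"}
    r = requests.post(url=url, headers=headers, data=json.dumps(data))
    # Print the lexical analysis result returned by the server.
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```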
@@ -313,6 +313,11 @@ class ServingCommand(BaseCommand):
             with open(self.args.config, "r") as fp:
                 self.args.config = json.load(fp)
             self.modules_info = self.args.config["modules_info"]
+            if isinstance(self.modules_info, list):
+                raise RuntimeError(
+                    "This configuration method is outdated, see 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.6/docs/tutorial/serving.md' for more details."
+                )
+                exit(1)
         else:
             raise RuntimeError("{} not exists.".format(self.args.config))
             exit(1)
......
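The guard added above only inspects the type of `modules_info`. A minimal standalone illustration of which configs it rejects, using values from the doc examples earlier (not PaddleHub code):

```python
# Old list-style config versus the new dict keyed by module name.
old_style = {"modules_info": [{"module": "lac", "version": "1.0.0", "batch_size": 1}]}
new_style = {
    "modules_info": {
        "lac": {
            "init_args": {"version": "1.1.0"},
            "predict_args": {"batch_size": 1, "use_gpu": False},
        }
    }
}

for cfg in (old_style, new_style):
    modules_info = cfg["modules_info"]
    if isinstance(modules_info, list):
        # Mirrors the RuntimeError raised by ServingCommand for outdated configs.
        print("rejected: outdated configuration format")
    else:
        print("accepted modules:", list(modules_info.keys()))
```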
@@ -26,7 +26,7 @@ import glob
 def predict_v2(module_info, input):
     serving_method_name = module_info["method_name"]
     serving_method = getattr(module_info["module"], serving_method_name)
-    predict_args = module_info["predict_args"]
+    predict_args = module_info["predict_args"].copy()
     predict_args.update({"data": input})
     for item in serving_method.__code__.co_varnames:
@@ -39,7 +39,7 @@ def predict_v2(module_info, input):
 def predict_v2_advanced(module_info, input):
     serving_method_name = module_info["method_name"]
     serving_method = getattr(module_info["module"], serving_method_name)
-    predict_args = module_info["predict_args"]
+    predict_args = module_info["predict_args"].copy()
     predict_args.update(input)
     for item in serving_method.__code__.co_varnames:
......
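The `.copy()` change matters because `module_info["predict_args"]` is shared across requests; without the copy, `update()` writes each request's `data` back into that shared dict. A standalone illustration in plain Python, independent of PaddleHub:

```python
# Shared per-module state, as stored by the serving app.
module_info = {"predict_args": {"batch_size": 1, "use_gpu": False}}

def handle_request_buggy(input):
    predict_args = module_info["predict_args"]         # aliases the shared dict
    predict_args.update({"data": input})               # leaks "data" into shared state
    return predict_args

def handle_request_fixed(input):
    predict_args = module_info["predict_args"].copy()  # per-request copy
    predict_args.update({"data": input})
    return predict_args

handle_request_buggy(["first request"])
# The shared defaults now carry the previous request's data:
print(module_info["predict_args"])  # {'batch_size': 1, 'use_gpu': False, 'data': ['first request']}
handle_request_fixed(["second request"])
print(module_info["predict_args"])  # left untouched by the fixed handler
```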