diff --git a/README.md b/README.md index 8f52f5d64412917a6413a29875e4c8c640e2d5c7..a038c4592795c986a2c7cbe788244f5fe7cab776 100644 --- a/README.md +++ b/README.md @@ -66,15 +66,6 @@ For **Windows Users**, please read the document [Paddle Serving for Windows User

Pre-built services with Paddle Serving

-

Latest release

-

- Optical Character Recognition -
- Object Detection -
- Image Segmentation -

-

Chinese Word Segmentation

``` shell @@ -133,7 +124,8 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po | `use_trt` (Only for trt version) | - | - | Run inference with TensorRT | -``` python + +```python # A user can visit rpc service through paddle_serving_client API from paddle_serving_client import Client import numpy as np diff --git a/README_CN.md b/README_CN.md index 365caacb12d50490cc5f9753d95f5bb489693340..571b7b00c1252093887a1b5562e03437f51837c4 100644 --- a/README_CN.md +++ b/README_CN.md @@ -148,7 +148,7 @@ print(fetch_map) 在这里,`client.predict`函数具有两个参数。 `feed`是带有模型输入变量别名和值的`python dict`。 `fetch`被要从服务器返回的预测变量赋值。 在该示例中,在训练过程中保存可服务模型时,被赋值的tensor名为`"x"`和`"price"`。

HTTP服务

-用户也可以将数据格式处理逻辑放在服务器端进行,这样就可以直接用curl去访问服务,参考如下案例,在目录``python/examples/fit_a_line`` +用户也可以将数据格式处理逻辑放在服务器端进行,这样就可以直接用curl去访问服务,参考如下案例,在目录`python/examples/fit_a_line` ```python from paddle_serving_server.web_service import WebService diff --git a/doc/BERT_10_MINS.md b/doc/BERT_10_MINS.md index 53e51768d3eaee6a1faac8d9ae2c62e7f1aa63ee..7b981d7eda467197d1b1b741c35b00c97b74c532 100644 --- a/doc/BERT_10_MINS.md +++ b/doc/BERT_10_MINS.md @@ -56,21 +56,25 @@ the script of client side bert_client.py is as follow: [//file]:#bert_client.py ``` python -import os import sys from paddle_serving_client import Client +from paddle_serving_client.utils import benchmark_args from paddle_serving_app.reader import ChineseBertReader +import numpy as np +args = benchmark_args() -reader = ChineseBertReader() +reader = ChineseBertReader({"max_seq_len": 128}) fetch = ["pooled_output"] -endpoint_list = ["127.0.0.1:9292"] +endpoint_list = ['127.0.0.1:9292'] client = Client() -client.load_client_config("bert_seq20_client/serving_client_conf.prototxt") +client.load_client_config(args.model) client.connect(endpoint_list) for line in sys.stdin: feed_dict = reader.process(line) - result = client.predict(feed=feed_dict, fetch=fetch) + for key in feed_dict.keys(): + feed_dict[key] = np.array(feed_dict[key]).reshape((128, 1)) + result = client.predict(feed=feed_dict, fetch=fetch, batch=False) ``` run diff --git a/doc/BERT_10_MINS_CN.md b/doc/BERT_10_MINS_CN.md index e4904d86b6a056ba74b6ed85b47745575b749279..b0578e8e84de7e36694e879e5a64737d275c505c 100644 --- a/doc/BERT_10_MINS_CN.md +++ b/doc/BERT_10_MINS_CN.md @@ -52,18 +52,23 @@ pip install paddle_serving_app ``` python import sys from paddle_serving_client import Client +from paddle_serving_client.utils import benchmark_args from paddle_serving_app.reader import ChineseBertReader +import numpy as np +args = benchmark_args() -reader = ChineseBertReader() +reader = ChineseBertReader({"max_seq_len": 128}) fetch = ["pooled_output"] -endpoint_list = ["127.0.0.1:9292"] +endpoint_list = ['127.0.0.1:9292'] client = Client() -client.load_client_config("bert_seq20_client/serving_client_conf.prototxt") +client.load_client_config(args.model) client.connect(endpoint_list) for line in sys.stdin: feed_dict = reader.process(line) - result = client.predict(feed=feed_dict, fetch=fetch) + for key in feed_dict.keys(): + feed_dict[key] = np.array(feed_dict[key]).reshape((128, 1)) + result = client.predict(feed=feed_dict, fetch=fetch, batch=False) ``` 执行