diff --git a/README.md b/README.md
index 1818ddd61cc5423c4a590815930d007303f18e81..afbfd208404bef99f51951b25108fbd17d5ae0be 100644
--- a/README.md
+++ b/README.md
@@ -111,9 +111,9 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po
 | `port` | int | `9292` | Exposed port of current service to users|
 | `name` | str | `""` | Service name, can be used to generate HTTP request url |
 | `model` | str | `""` | Path of paddle model directory to be served |
-| `mem_optim` | bool | `False` | Enable memory / graphic memory optimization |
-| `ir_optim` | bool | `False` | Enable analysis and optimization of calculation graph |
-| `use_mkl` (Only for cpu version) | bool | `False` | Run inference with MKL |
+| `mem_optim` | - | - | Enable memory / graphic memory optimization |
+| `ir_optim` | - | - | Enable analysis and optimization of calculation graph |
+| `use_mkl` (Only for cpu version) | - | - | Run inference with MKL |
 
 Here, we use `curl` to send a HTTP POST request to the service we just started. Users can use any python library to send HTTP POST as well, e.g, [requests](https://requests.readthedocs.io/en/master/).
 
diff --git a/README_CN.md b/README_CN.md
index 29cf095248f4c125b3dba7146e67efe8b7abae6c..24104f3c48f65d86845b0937650bf82600383eff 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -115,9 +115,9 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po
 | `port` | int | `9292` | Exposed port of current service to users|
 | `name` | str | `""` | Service name, can be used to generate HTTP request url |
 | `model` | str | `""` | Path of paddle model directory to be served |
-| `mem_optim` | bool | `False` | Enable memory optimization |
-| `ir_optim` | bool | `False` | Enable analysis and optimization of calculation graph |
-| `use_mkl` (Only for cpu version) | bool | `False` | Run inference with MKL |
+| `mem_optim` | - | - | Enable memory optimization |
+| `ir_optim` | - | - | Enable analysis and optimization of calculation graph |
+| `use_mkl` (Only for cpu version) | - | - | Run inference with MKL |
 
 我们使用 `curl` 命令来发送HTTP POST请求给刚刚启动的服务。用户也可以调用python库来发送HTTP POST请求,请参考英文文档 [requests](https://requests.readthedocs.io/en/master/)。
 
diff --git a/doc/BERT_10_MINS.md b/doc/BERT_10_MINS.md
index 71f6f065f4101aae01e077910fc5b6bd6b039b46..53e51768d3eaee6a1faac8d9ae2c62e7f1aa63ee 100644
--- a/doc/BERT_10_MINS.md
+++ b/doc/BERT_10_MINS.md
@@ -59,7 +59,7 @@ the script of client side bert_client.py is as follow:
 import os
 import sys
 from paddle_serving_client import Client
-from paddle_serving_app import ChineseBertReader
+from paddle_serving_app.reader import ChineseBertReader
 
 reader = ChineseBertReader()
 fetch = ["pooled_output"]
diff --git a/doc/BERT_10_MINS_CN.md b/doc/BERT_10_MINS_CN.md
index b7a5180da1bae2dafc431251f2b98c8a2041856a..e4904d86b6a056ba74b6ed85b47745575b749279 100644
--- a/doc/BERT_10_MINS_CN.md
+++ b/doc/BERT_10_MINS_CN.md
@@ -52,7 +52,7 @@ pip install paddle_serving_app
 ``` python
 import sys
 from paddle_serving_client import Client
-from paddle_serving_app import ChineseBertReader
+from paddle_serving_app.reader import ChineseBertReader
 
 reader = ChineseBertReader()
 fetch = ["pooled_output"]
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index 894b0c5b132845cbde589982e1fb471f028e820b..e75240dfafd436e5557a8f11396029e6be5868fe 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -40,10 +40,14 @@ def parse_args():  # pylint: disable=doc-string-missing
     parser.add_argument(
         "--device", type=str, default="cpu", help="Type of device")
     parser.add_argument(
-        "--mem_optim", type=bool, default=False, help="Memory optimize")
+        "--mem_optim",
+        default=False,
+        action="store_true",
+        help="Memory optimize")
     parser.add_argument(
-        "--ir_optim", type=bool, default=False, help="Graph optimize")
-    parser.add_argument("--use_mkl", type=bool, default=False, help="Use MKL")
+        "--ir_optim", default=False, action="store_true", help="Graph optimize")
+    parser.add_argument(
+        "--use_mkl", default=False, action="store_true", help="Use MKL")
     parser.add_argument(
         "--max_body_size",
         type=int,
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index e40c0fa48763eaa66373e9f2149552c4f8693eb7..ace4905536e9c9a71d76d41da6836966986910a2 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -47,9 +47,12 @@ def serve_args():
     parser.add_argument(
         "--name", type=str, default="None", help="Default service name")
     parser.add_argument(
-        "--mem_optim", type=bool, default=False, help="Memory optimize")
+        "--mem_optim",
+        default=False,
+        action="store_true",
+        help="Memory optimize")
     parser.add_argument(
-        "--ir_optim", type=bool, default=False, help="Graph optimize")
+        "--ir_optim", default=False, action="store_true", help="Graph optimize")
     parser.add_argument(
         "--max_body_size",
         type=int,
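Background on why `type=bool` is replaced with `action="store_true"`: argparse applies the `type` callable to the raw string from the command line, and `bool("False")` is `True` because any non-empty string is truthy, so `--mem_optim False` still enabled the option. With `store_true`, the option becomes a proper value-less flag (absent means `False`, present means `True`), which is also why the README tables above drop the `bool` type and `False` default columns. Below is a minimal self-contained sketch of the two behaviors, reusing the `--mem_optim` name from this patch; it is illustrative only, not code from the repository:

``` python
import argparse

# Old behavior: type=bool calls bool() on the raw string argument.
# bool("False") is True (any non-empty string is truthy), so the
# option can never be switched off from the command line.
broken = argparse.ArgumentParser()
broken.add_argument("--mem_optim", type=bool, default=False)
print(broken.parse_args(["--mem_optim", "False"]).mem_optim)  # True

# New behavior: action="store_true" makes a value-less flag.
# Omitting it yields the default (False); passing it stores True.
fixed = argparse.ArgumentParser()
fixed.add_argument("--mem_optim", default=False, action="store_true")
print(fixed.parse_args([]).mem_optim)                 # False
print(fixed.parse_args(["--mem_optim"]).mem_optim)    # True
```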