diff --git a/README.md b/README.md
index f209e58b66cc4c056ff4ab30283213534eac52c0..04634afebfc699708b681a99257eabc0898f7356 100644
--- a/README.md
+++ b/README.md
@@ -61,9 +61,9 @@ Packages of Paddle Serving support Centos 6/7 and Ubuntu 16/18, or you can use H
 ``` shell
-> python -m paddle_serving_app.package -get_model resnet_v2_50_imagenet
+> python -m paddle_serving_app.package --get_model resnet_v2_50_imagenet
 > tar -xzf resnet_v2_50_imagenet.tar.gz
 > python resnet50_imagenet_classify.py resnet50_serving_model &
 > curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"image": "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"}], "fetch": ["score"]}' http://127.0.0.1:9292/image/prediction
@@ -111,9 +111,9 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po
 | `port` | int | `9292` | Exposed port of current service to users|
 | `name` | str | `""` | Service name, can be used to generate HTTP request url |
 | `model` | str | `""` | Path of paddle model directory to be served |
-| `mem_optim` | bool | `False` | Enable memory / graphic memory optimization |
-| `ir_optim` | bool | `False` | Enable analysis and optimization of calculation graph |
-| `use_mkl` (Only for cpu version) | bool | `False` | Run inference with MKL |
+| `mem_optim` | - | - | Enable memory / graphic memory optimization |
+| `ir_optim` | - | - | Enable analysis and optimization of calculation graph |
+| `use_mkl` (Only for cpu version) | - | - | Run inference with MKL |

 Here, we use `curl` to send a HTTP POST request to the service we just started. Users can use any python library to send HTTP POST as well, e.g, [requests](https://requests.readthedocs.io/en/master/).
@@ -170,13 +170,13 @@ Here, `client.predict` function has two arguments. `feed` is a `python dict` wit
 ### About Efficiency
 - [How to profile Paddle Serving latency?](python/examples/util)
-- [How to optimize performance?(Chinese)](doc/PERFORMANCE_OPTIM_CN.md)
+- [How to optimize performance?](doc/PERFORMANCE_OPTIM.md)
 - [Deploy multi-services on one GPU(Chinese)](doc/MULTI_SERVICE_ON_ONE_GPU_CN.md)
 - [CPU Benchmarks(Chinese)](doc/BENCHMARKING.md)
 - [GPU Benchmarks(Chinese)](doc/GPU_BENCHMARKING.md)

 ### FAQ
-- [FAQ(Chinese)](doc/deprecated/FAQ.md)
+- [FAQ(Chinese)](doc/FAQ.md)

 ### Design
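Editorial note, not part of the patch: the README context in this hunk recommends [requests](https://requests.readthedocs.io/en/master/) for sending the same HTTP POST without `curl`. A minimal sketch, assuming the imagenet service started above is listening on port 9292:

```python
# Mirrors the curl line from the hunk above; `json=` sets the
# Content-Type: application/json header that curl passes explicitly.
import requests

payload = {
    "feed": [{"image": "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"}],
    "fetch": ["score"],
}
resp = requests.post("http://127.0.0.1:9292/image/prediction", json=payload)
print(resp.json())
```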
diff --git a/README_CN.md b/README_CN.md
index 05d3ad2100b15830d10c8bc4454a6d319d7b990b..7a42e6cd9c02fa6c51cba7a3228cd0916dd64de2 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -62,9 +62,9 @@ Paddle Serving安装包支持Centos 6/7和Ubuntu 16/18,或者您可以使用HT
 ``` shell
-> python -m paddle_serving_app.package -get_model resnet_v2_50_imagenet
+> python -m paddle_serving_app.package --get_model resnet_v2_50_imagenet
 > tar -xzf resnet_v2_50_imagenet.tar.gz
 > python resnet50_imagenet_classify.py resnet50_serving_model &
 > curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"image": "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"}], "fetch": ["score"]}' http://127.0.0.1:9292/image/prediction
@@ -115,9 +115,9 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po
 | `port` | int | `9292` | Exposed port of current service to users|
 | `name` | str | `""` | Service name, can be used to generate HTTP request url |
 | `model` | str | `""` | Path of paddle model directory to be served |
-| `mem_optim` | bool | `False` | Enable memory optimization |
-| `ir_optim` | bool | `False` | Enable analysis and optimization of calculation graph |
-| `use_mkl` (Only for cpu version) | bool | `False` | Run inference with MKL |
+| `mem_optim` | - | - | Enable memory optimization |
+| `ir_optim` | - | - | Enable analysis and optimization of calculation graph |
+| `use_mkl` (Only for cpu version) | - | - | Run inference with MKL |

 我们使用 `curl` 命令来发送HTTP POST请求给刚刚启动的服务。用户也可以调用python库来发送HTTP POST请求,请参考英文文档 [requests](https://requests.readthedocs.io/en/master/)。
@@ -181,7 +181,7 @@ print(fetch_map)
 - [GPU版Benchmarks](doc/GPU_BENCHMARKING.md)

 ### FAQ
-- [常见问答](doc/deprecated/FAQ.md)
+- [常见问答](doc/FAQ.md)

 ### 设计文档
 - [Paddle Serving设计文档](doc/DESIGN_DOC_CN.md)
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index cb48373e281ba212b51ce5faf2e2c0487c835b96..c72a5cac52ccf1c03a0c132083e3ac43c83fb868 100644
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -130,13 +130,7 @@ function(grpc_protobuf_generate_python SRCS)
       set(FIL_WE "${FIL_DIR}/${FIL_WE}")
     endif()
   endif()
-  list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py")
-  add_custom_command(
-    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py"
-    COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
-    DEPENDS ${ABS_FIL} ${PROTOBUF_PROTOC_EXECUTABLE}
-    COMMENT "Running Python protocol buffer compiler on ${FIL}"
-    VERBATIM )
+  list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2_grpc.py")

   add_custom_command(
     OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2_grpc.py"
diff --git a/doc/BERT_10_MINS.md b/doc/BERT_10_MINS.md
index 71f6f065f4101aae01e077910fc5b6bd6b039b46..53e51768d3eaee6a1faac8d9ae2c62e7f1aa63ee 100644
--- a/doc/BERT_10_MINS.md
+++ b/doc/BERT_10_MINS.md
@@ -59,7 +59,7 @@ the script of client side bert_client.py is as follow:
 import os
 import sys
 from paddle_serving_client import Client
-from paddle_serving_app import ChineseBertReader
+from paddle_serving_app.reader import ChineseBertReader

 reader = ChineseBertReader()
 fetch = ["pooled_output"]
diff --git a/doc/BERT_10_MINS_CN.md b/doc/BERT_10_MINS_CN.md
index b7a5180da1bae2dafc431251f2b98c8a2041856a..e4904d86b6a056ba74b6ed85b47745575b749279 100644
--- a/doc/BERT_10_MINS_CN.md
+++ b/doc/BERT_10_MINS_CN.md
@@ -52,7 +52,7 @@ pip install paddle_serving_app
 ``` python
 import sys
 from paddle_serving_client import Client
-from paddle_serving_app import ChineseBertReader
+from paddle_serving_app.reader import ChineseBertReader

 reader = ChineseBertReader()
 fetch = ["pooled_output"]
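Editorial note, not part of the patch: after this change, `ChineseBertReader` must be imported from `paddle_serving_app.reader`. A rough end-to-end sketch of the bert_client.py flow, assembled from the snippets in the two touched docs; the config path, endpoint, and the `reader.process` call are assumptions taken from those docs, not verified against the repo:

```python
import sys

from paddle_serving_client import Client
from paddle_serving_app.reader import ChineseBertReader  # new import path

reader = ChineseBertReader()
fetch = ["pooled_output"]
client = Client()
client.load_client_config(sys.argv[1])  # e.g. a serving_client_conf.prototxt
client.connect(["127.0.0.1:9292"])

for line in sys.stdin:
    feed_dict = reader.process(line)  # tokenize one sentence into feed vars
    result = client.predict(feed=feed_dict, fetch=fetch)
    print(result)
```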
diff --git a/doc/FAQ.md b/doc/FAQ.md
new file mode 100644
index 0000000000000000000000000000000000000000..3bdd2dfd4739b54bf39b6b3f561c43bab3edabde
--- /dev/null
+++ b/doc/FAQ.md
@@ -0,0 +1,16 @@
+# FAQ
+
+- Q:如何调整RPC服务的等待时间,避免超时?
+
+  A:使用set_rpc_timeout_ms设置更长的等待时间,单位为毫秒,默认时间为20秒。
+
+  示例:
+  ```
+  import sys
+  from paddle_serving_client import Client
+
+  client = Client()
+  client.load_client_config(sys.argv[1])
+  client.set_rpc_timeout_ms(100000)
+  client.connect(["127.0.0.1:9393"])
+  ```
diff --git a/doc/IMDB_GO_CLIENT_CN.md b/doc/IMDB_GO_CLIENT_CN.md
index 86355bd538d0abd995b7c47e34a5062fe1c09406..5067d1ef79218d176aee0c0d7d41506a0b6dc428 100644
--- a/doc/IMDB_GO_CLIENT_CN.md
+++ b/doc/IMDB_GO_CLIENT_CN.md
@@ -99,7 +99,7 @@ func main() {
 ### 基于IMDB测试集的预测
 ```python
-go run imdb_client.go serving_client_conf / serving_client_conf.stream.prototxt test.data> result
+go run imdb_client.go serving_client_conf/serving_client_conf.stream.prototxt test.data> result
 ```

 ### 计算精度
diff --git a/doc/PERFORMANCE_OPTIM.md b/doc/PERFORMANCE_OPTIM.md
index eae128c40c0b5d40c0fc50346ca3f6e6c4c02eb5..651be1c139b5960fa287fc3e981f3039f9f098a2 100644
--- a/doc/PERFORMANCE_OPTIM.md
+++ b/doc/PERFORMANCE_OPTIM.md
@@ -16,5 +16,5 @@ Parameters for performance optimization:
 | Parameters | Type | Default | Description |
 | ---------- | ---- | ------- | ------------------------------------------------------------ |
-| mem_optim | bool | False | Enable memory / graphic memory optimization |
-| ir_optim | bool | Fasle | Enable analysis and optimization of calculation graph,including OP fusion, etc |
+| mem_optim | - | - | Enable memory / graphic memory optimization |
+| ir_optim | - | - | Enable analysis and optimization of calculation graph,including OP fusion, etc |
diff --git a/doc/PERFORMANCE_OPTIM_CN.md b/doc/PERFORMANCE_OPTIM_CN.md
index 1a2c3840942930060a1805bcb999f01b5780cbae..c35ea7a11c40ad2a5752d9add8fd8d9f8ddb2b64 100644
--- a/doc/PERFORMANCE_OPTIM_CN.md
+++ b/doc/PERFORMANCE_OPTIM_CN.md
@@ -16,5 +16,5 @@
 | 参数 | 类型 | 默认值 | 含义 |
 | --------- | ---- | ------ | -------------------------------- |
-| mem_optim | bool | False | 开启内存/显存优化 |
-| ir_optim | bool | Fasle | 开启计算图分析优化,包括OP融合等 |
+| mem_optim | - | - | 开启内存/显存优化 |
+| ir_optim | - | - | 开启计算图分析优化,包括OP融合等 |
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 09a91809f52718eb7f52d234b3e8c5406883f419..9e32926732ef1b396473dab2a748f24f63e19e7a 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -24,6 +24,8 @@ import sys
 import grpc
 from .proto import multi_lang_general_model_service_pb2
+sys.path.append(
+    os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
 from .proto import multi_lang_general_model_service_pb2_grpc

 int_type = 0
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index 3306c040f11dac8834745777d65178624d922bd5..3a5c07011ace961fdfb61ebf3217ab1aab375e82 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -28,6 +28,9 @@ import fcntl
 import numpy as np
 import grpc
 from .proto import multi_lang_general_model_service_pb2
+import sys
+sys.path.append(
+    os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
 from .proto import multi_lang_general_model_service_pb2_grpc
 from multiprocessing import Pool, Process
 from concurrent import futures
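Editorial note, not part of the patch: the `sys.path.append` additions above work around the import style of generated gRPC stubs. grpcio-tools emits `*_pb2_grpc.py` with an absolute import of its sibling module (roughly `import multi_lang_general_model_service_pb2 as ...`), which cannot be resolved when both files live inside the package's `proto/` directory. A sketch of the mechanism, with an idempotent variant of the patched lines:

```python
import os
import sys

# Make the package-internal proto/ directory importable as a top-level
# location, so the absolute sibling import inside *_pb2_grpc.py resolves.
proto_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto')
if proto_dir not in sys.path:
    sys.path.append(proto_dir)
```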
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index 6ddee04fddf7ff056000b5e886fc99de75998393..e67cba7cd2bb89a8126c0a74393bdcec648eee17 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -40,10 +40,14 @@ def parse_args():  # pylint: disable=doc-string-missing
     parser.add_argument(
         "--device", type=str, default="cpu", help="Type of device")
     parser.add_argument(
-        "--mem_optim", type=bool, default=False, help="Memory optimize")
+        "--mem_optim",
+        default=False,
+        action="store_true",
+        help="Memory optimize")
     parser.add_argument(
-        "--ir_optim", type=bool, default=False, help="Graph optimize")
-    parser.add_argument("--use_mkl", type=bool, default=False, help="Use MKL")
+        "--ir_optim", default=False, action="store_true", help="Graph optimize")
+    parser.add_argument(
+        "--use_mkl", default=False, action="store_true", help="Use MKL")
     parser.add_argument(
         "--max_body_size",
         type=int,
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index b26869a2380a3994b18c43c2295c6a40233bfb70..44733b154096255c3ce06e1be29d50d3e662269a 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -30,6 +30,9 @@ import fcntl
 import numpy as np
 import grpc
 from .proto import multi_lang_general_model_service_pb2
+import sys
+sys.path.append(
+    os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
 from .proto import multi_lang_general_model_service_pb2_grpc
 from multiprocessing import Pool, Process
 from concurrent import futures
@@ -54,9 +57,12 @@ def serve_args():
     parser.add_argument(
         "--name", type=str, default="None", help="Default service name")
     parser.add_argument(
-        "--mem_optim", type=bool, default=False, help="Memory optimize")
+        "--mem_optim",
+        default=False,
+        action="store_true",
+        help="Memory optimize")
     parser.add_argument(
-        "--ir_optim", type=bool, default=False, help="Graph optimize")
+        "--ir_optim", default=False, action="store_true", help="Graph optimize")
     parser.add_argument(
         "--max_body_size",
         type=int,
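Editorial note, not part of the patch: the `type=bool` to `action="store_true"` switch is the substantive fix in serve.py and the GPU server, and it is why the doc tables above drop the type/default columns. argparse applies `bool()` to the raw argument string, and any non-empty string is truthy, so `--mem_optim False` used to enable the option anyway. A self-contained sketch of the difference:

```python
import argparse

parser = argparse.ArgumentParser()
# Old style: bool("False") is True, so the flag could never be turned off.
parser.add_argument("--old_mem_optim", type=bool, default=False)
# New style: a bare switch, False unless present on the command line.
parser.add_argument("--mem_optim", default=False, action="store_true")

print(parser.parse_args(["--old_mem_optim", "False"]).old_mem_optim)  # True
print(parser.parse_args([]).mem_optim)                                # False
print(parser.parse_args(["--mem_optim"]).mem_optim)                   # True
```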