Commit ed37933f authored by MRXLT, committed by GitHub

Merge pull request #791 from MRXLT/0.3.2-qa

update code && doc for 0.3.2
......@@ -6,7 +6,7 @@ include(framework/CMakeLists.txt)
include(tools/CMakeLists.txt)
include(src/CMakeLists.txt)
add_definitions(-D__STDC_FORMAT_MACROS)
add_library(pdserving ${pdserving_srcs})
set_source_files_properties(
${pdserving_srcs}
......
......@@ -63,6 +63,9 @@ If Python3 is used, replace `pip` with `pip3`.
## GOPATH Setting
The default GOPATH is `$HOME/go`, which you can set to other values.
```shell
export GOPATH=$HOME/go
......@@ -78,13 +81,17 @@ go get -u github.com/golang/protobuf/protoc-gen-go
go get -u google.golang.org/grpc
```
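
The cmake invocations in the sections below all reference `$PYTHONROOT`. As a minimal sketch (the `/usr` prefix is an assumption; point it at whatever prefix holds your Python headers and library):

``` shell
# Assumption: a Python 2.7 installation under /usr, so that
# $PYTHONROOT/include/python2.7 and $PYTHONROOT/lib/libpython2.7.so exist.
export PYTHONROOT=/usr
```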
## Compile Server
### Integrated CPU version paddle inference library
``` shell
mkdir server-build-cpu && cd server-build-cpu
cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
-DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
-DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
-DSERVER=ON ..
make -j10
```
......@@ -94,7 +101,11 @@ you can execute `make install` to put targets under directory `./output`, you ne
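
As a hedged sketch of that note (`-DCMAKE_INSTALL_PREFIX` is the standard CMake mechanism; the exact prefix is your choice), collecting the server targets under `./output` could look like:

``` shell
# Same cmake flags as above, plus an install prefix, then install.
cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
      -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
      -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
      -DSERVER=ON \
      -DCMAKE_INSTALL_PREFIX=./output ..
make -j10
make install   # targets are placed under ./output
```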
``` shell
mkdir server-build-gpu && cd server-build-gpu
cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
-DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
-DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
-DSERVER=ON \
-DWITH_GPU=ON ..
make -j10
```
......@@ -108,7 +119,10 @@ execute `make install` to put targets under directory `./output`
``` shell
mkdir client-build && cd client-build
cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
-DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
-DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
-DCLIENT=ON ..
make -j10
```
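
Once the server and client builds finish, a sketch of installing the resulting wheels and pointing Serving at the self-compiled binary (the `python/dist` layout and the binary path are assumptions about the build tree; see also the FAQ below on SERVING_BIN):

``` shell
# Wheel names vary with version and platform; adjust the globs as needed.
pip install server-build-cpu/python/dist/paddle_serving_server-*.whl
pip install client-build/python/dist/paddle_serving_client-*.whl
# Use the serving binary from this build instead of the packaged one.
export SERVING_BIN=$PWD/server-build-cpu/core/general-server/serving
```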
......
# FAQ
- Q: How do I increase the waiting time of the RPC service to avoid timeouts?
A: Use set_rpc_timeout_ms to set a longer waiting time; the unit is milliseconds and the default is 20 seconds.
Example:
```
......@@ -15,4 +15,13 @@
```
- Q: How do I run inference with a self-compiled Paddle Serving?
A: Install the whl package you compiled via pip, and set the SERVING_BIN environment variable to the path of the compiled serving binary.
- Q: When running inference on GPU, I get InvalidArgumentError: Device id must be less than GPU count, but received id is: 0. GPU count is: 0.
A: Add the directory containing the libcuda.so that belongs to your GPU driver to the LD_LIBRARY_PATH environment variable.
- Q: When running inference on GPU, I get ExternalError: Cudnn error, CUDNN_STATUS_BAD_PARAM at (/home/scmbuild/workspaces_cluster.dev/baidu.lib.paddlepaddle/baidu/lib/paddlepaddle/Paddle/paddle/fluid/operators/batch_norm_op.cu:198)
A: Add the cuDNN lib64 path to LD_LIBRARY_PATH. The post9 build of Paddle Serving installed from PyPI uses cuDNN 7.3 and the post10 build uses cuDNN 7.5. If you use a self-compiled Paddle Serving, you can find the cuDNN version it uses in the log/serving.INFO log file.
- Q: When running inference on GPU, I get Error: Failed to find dynamic library: libcublas.so
A: Add the CUDA lib64 path to LD_LIBRARY_PATH. The post9 build of Paddle Serving uses CUDA 9.0 and the post10 build uses CUDA 10.0.
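A combined sketch of the three LD_LIBRARY_PATH answers above (every path here is an assumption; substitute the actual locations on your machine):
```
# Driver libcuda.so, for the "GPU count is: 0" error.
export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH
# CUDA lib64 (libcublas.so): CUDA 9.0 for post9, CUDA 10.0 for post10.
export LD_LIBRARY_PATH=/usr/local/cuda-9.0/lib64:$LD_LIBRARY_PATH
# cuDNN lib64: cuDNN 7.3 for post9, cuDNN 7.5 for post10.
export LD_LIBRARY_PATH=/usr/local/cudnn/lib64:$LD_LIBRARY_PATH
```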
......@@ -43,6 +43,9 @@ if (SERVER)
endif()
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/gen_version.py
${CMAKE_CURRENT_BINARY_DIR}/gen_version.py)
set (SERVING_CLIENT_CORE ${PADDLE_SERVING_BINARY_DIR}/core/general-client/*.so)
message("python env: " ${py_env})
......@@ -50,6 +53,7 @@ if (APP)
add_custom_command(
OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_app/ ${PADDLE_SERVING_BINARY_DIR}/python/
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "app"
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
DEPENDS ${SERVING_APP_CORE} general_model_config_py_proto ${PY_FILES})
add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
......@@ -61,6 +65,7 @@ add_custom_command(
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_client/ ${PADDLE_SERVING_BINARY_DIR}/python/
COMMAND ${CMAKE_COMMAND} -E copy ${SERVING_CLIENT_CORE} ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/serving_client.so
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} python_tag.py
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "client"
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
DEPENDS ${SERVING_CLIENT_CORE} sdk_configure_py_proto ${PY_FILES})
add_custom_target(paddle_python ALL DEPENDS serving_client ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
......@@ -71,6 +76,7 @@ if (SERVER)
add_custom_command(
OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server/ ${PADDLE_SERVING_BINARY_DIR}/python/
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "server"
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
......@@ -79,7 +85,8 @@ if (SERVER)
OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
COMMAND cp -r
${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py
"server_gpu" ${CUDA_VERSION_MAJOR}
COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
......
......@@ -30,7 +30,6 @@ client.load_client_config("yolov4_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9393'])
im = preprocess(sys.argv[1])
print(im.shape)
fetch_map = client.predict(
feed={
"image": im,
......
......@@ -15,13 +15,29 @@
import sys
import re
import os
import subprocess
# Rewrite lines starting with `feature` in a version file as: feature = "info".
def update_info(file_name, feature, info):
    new_str = ""
    with open(file_name, "r") as f:
        for line in f.readlines():
            if re.match(feature, line):
                if isinstance(info, str):
                    line = feature + " = \"" + info.strip() + "\"\n"
                else:
                    # subprocess output is bytes under Python 3.
                    line = feature + " = \"" + info.decode('utf-8').strip() + "\"\n"
            new_str = new_str + line
    with open(file_name, "w") as f:
        f.write(new_str)


# For the GPU package, a second argument carries the CUDA major version.
if len(sys.argv) > 2:
    update_info("paddle_serving_server_gpu/version.py", "cuda_version",
                sys.argv[2])

# Stamp the current git commit id into the target package's version.py.
path = "paddle_serving_" + sys.argv[1]
commit_id = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
update_info(path + "/version.py", "commit_id", commit_id)
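
For reference, the CMake rules earlier invoke this script roughly like so (a sketch; it runs from the build's python/ directory, where the paddle_serving_* package sources are copied):

```shell
# Stamp the current git commit id into paddle_serving_server/version.py.
python gen_version.py "server"
# GPU package: additionally rewrite cuda_version (e.g. CUDA major version 10).
python gen_version.py "server_gpu" 10
```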
......@@ -13,3 +13,4 @@
# limitations under the License.
""" Paddle Serving App version string """
serving_app_version = "0.1.2"
commit_id = ""
......@@ -15,3 +15,4 @@
serving_client_version = "0.3.2"
serving_server_version = "0.3.2"
module_proto_version = "0.3.2"
commit_id = ""
......@@ -15,3 +15,4 @@
serving_client_version = "0.3.2"
serving_server_version = "0.3.2"
module_proto_version = "0.3.2"
commit_id = ""
......@@ -16,3 +16,4 @@ serving_client_version = "0.3.2"
serving_server_version = "0.3.2"
module_proto_version = "0.3.2"
cuda_version = "9"
commit_id = ""
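
After installing a wheel built this way, the stamped fields can be checked from Python, e.g. (a sketch; assumes the paddle_serving_server_gpu package is installed):

```shell
python -c "from paddle_serving_server_gpu import version; print(version.commit_id, version.cuda_version)"
```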