diff --git a/core/predictor/CMakeLists.txt b/core/predictor/CMakeLists.txt
index 1b9dc7b29845a2b8c7f958c1d8e836cb57e91d41..6b5013c3edadb4592df40db539fa75fb9364d02f 100644
--- a/core/predictor/CMakeLists.txt
+++ b/core/predictor/CMakeLists.txt
@@ -6,7 +6,7 @@ include(framework/CMakeLists.txt)
 include(tools/CMakeLists.txt)
 include(src/CMakeLists.txt)
 
-
+add_definitions(-D__STDC_FORMAT_MACROS)
 add_library(pdserving ${pdserving_srcs})
 set_source_files_properties(
     ${pdserving_srcs}
diff --git a/doc/COMPILE.md b/doc/COMPILE.md
index 466cef73a5f217cd2322fa5548c518a9004800c2..abb66084ac6f6c57c13c940eb10a87e2aba2daa2 100644
--- a/doc/COMPILE.md
+++ b/doc/COMPILE.md
@@ -63,6 +63,9 @@ If Python3 is used, replace `pip` with `pip3`.
 
 ## GOPATH Setting
 
+
+## Compile Arguments
+
 The default GOPATH is `$HOME/go`, which you can set to other values.
 ```shell
 export GOPATH=$HOME/go
@@ -78,13 +81,17 @@ go get -u github.com/golang/protobuf/protoc-gen-go
 go get -u google.golang.org/grpc
 ```
 
+
 ## Compile Server
 
 ### Integrated CPU version paddle inference library
 
 ``` shell
 mkdir server-build-cpu && cd server-build-cpu
-cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
+cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
+    -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
+    -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
+    -DSERVER=ON ..
 make -j10
 ```
 
@@ -94,7 +101,11 @@ you can execute `make install` to put targets under directory `./output`, you ne
 
 ``` shell
 mkdir server-build-gpu && cd server-build-gpu
-cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON -DWITH_GPU=ON ..
+cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
+    -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
+    -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
+    -DSERVER=ON \
+    -DWITH_GPU=ON ..
 make -j10
 ```
 
@@ -108,7 +119,10 @@ execute `make install` to put targets under directory `./output`
 
 ``` shell
 mkdir client-build && cd client-build
-cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCLIENT=ON ..
+cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
+    -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
+    -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
+    -DCLIENT=ON ..
 make -j10
 ```
diff --git a/doc/FAQ.md b/doc/FAQ.md
index eb4f05a28594effcf59aac880cf4d81846a3a925..119c5a9dbc7237b5dadbddd79fbb4d2340940273 100644
--- a/doc/FAQ.md
+++ b/doc/FAQ.md
@@ -1,8 +1,8 @@
 # FAQ
 
-- Q:How do I adjust the waiting time of the RPC service to avoid timeouts?
+- Q: How do I adjust the waiting time of the RPC service to avoid timeouts?
 
-  A:Use set_rpc_timeout_ms to set a longer waiting time, in milliseconds; the default is 20 seconds.
+  A: Use set_rpc_timeout_ms to set a longer waiting time, in milliseconds; the default is 20 seconds.
 
   Example:
   ```
@@ -15,4 +15,13 @@
 ```
 
 - Q: How do I run prediction with a Paddle Serving I compiled myself?
-  A:Install the whl package you compiled with pip, and set the SERVING_BIN environment variable to the path of the compiled serving binary.
+  A: Install the whl package you compiled with pip, and set the SERVING_BIN environment variable to the path of the compiled serving binary.
+
+- Q: When running GPU prediction, I get InvalidArgumentError: Device id must be less than GPU count, but received id is: 0. GPU count is: 0.
+  A: Add the directory containing the GPU driver's libcuda.so to the LD_LIBRARY_PATH environment variable.
+
+- Q: When running GPU prediction, I get ExternalError: Cudnn error, CUDNN_STATUS_BAD_PARAM at (/home/scmbuild/workspaces_cluster.dev/baidu.lib.paddlepaddle/baidu/lib/paddlepaddle/Paddle/paddle/fluid/operators/batch_norm_op.cu:198)
+  A: Add the cuDNN lib64 path to LD_LIBRARY_PATH. For Paddle Serving installed from PyPI, the post9 build uses cuDNN 7.3 and the post10 build uses cuDNN 7.5. For a self-compiled Paddle Serving, the cuDNN version can be found in the log/serving.INFO log file.
+
+- Q: When running GPU prediction, I get Error: Failed to find dynamic library: libcublas.so
+  A: Add the CUDA lib64 path to LD_LIBRARY_PATH. The post9 build of Paddle Serving uses CUDA 9.0 and the post10 build uses CUDA 10.0.
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 4d6b3ce35aac3bc288b869b23498a19269de3169..4b20cb2001ebb595601f22fa6e4aab8dd5df18f4 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -43,6 +43,9 @@ if (SERVER)
   endif()
 endif()
 
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/gen_version.py
+    ${CMAKE_CURRENT_BINARY_DIR}/gen_version.py)
+
 set (SERVING_CLIENT_CORE ${PADDLE_SERVING_BINARY_DIR}/core/general-client/*.so)
 
 message("python env: " ${py_env})
@@ -50,6 +53,7 @@ if (APP)
 add_custom_command(
     OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
     COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_app/ ${PADDLE_SERVING_BINARY_DIR}/python/
+    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "app"
    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
     DEPENDS ${SERVING_APP_CORE} general_model_config_py_proto ${PY_FILES})
 add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
@@ -61,6 +65,7 @@ add_custom_command(
     COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_client/ ${PADDLE_SERVING_BINARY_DIR}/python/
     COMMAND ${CMAKE_COMMAND} -E copy ${SERVING_CLIENT_CORE} ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_client/serving_client.so
     COMMAND env ${py_env} ${PYTHON_EXECUTABLE} python_tag.py
+    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "client"
     COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
     DEPENDS ${SERVING_CLIENT_CORE} sdk_configure_py_proto ${PY_FILES})
 add_custom_target(paddle_python ALL DEPENDS serving_client ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
@@ -71,6 +76,7 @@ if (SERVER)
 add_custom_command(
     OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
     COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server/ ${PADDLE_SERVING_BINARY_DIR}/python/
+    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py "server"
     COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
     DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
 add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
@@ -79,7 +85,8 @@ if (SERVER)
     OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
     COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/
-    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} paddle_serving_server_gpu/gen_cuda_version.py ${CUDA_VERSION_MAJOR}
+    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py
+    "server_gpu" ${CUDA_VERSION_MAJOR}
     COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
     DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
 add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
diff --git a/python/examples/yolov4/test_client.py b/python/examples/yolov4/test_client.py
index 92dcd06552ca1fdd3f2d54060e9de501f052e349..2616e55766192fca676e58efc4f0a2a3d634f1d3 100644
--- a/python/examples/yolov4/test_client.py
+++ b/python/examples/yolov4/test_client.py
@@ -30,7 +30,6 @@ client.load_client_config("yolov4_client/serving_client_conf.prototxt")
 client.connect(['127.0.0.1:9393'])
 
 im = preprocess(sys.argv[1])
-print(im.shape)
 fetch_map = client.predict(
     feed={
         "image": im,
diff --git a/python/gen_version.py b/python/gen_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..258905f5815f6af01398479732b907c80cb9d739
--- /dev/null
+++ b/python/gen_version.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import re
+import os
+import subprocess
+
+
+def update_info(file_name, feature, info):
+    new_str = ""
+    with open(file_name, "r") as f:
+        for line in f.readlines():
+            if re.match(feature, line):
+                if isinstance(info, str):
+                    line = feature + " = \"" + info.strip() + "\"\n"
+                else:
+                    line = feature + " = \"" + info.decode('utf-8').strip(
+                    ) + "\"\n"
+            new_str = new_str + line
+
+    with open(file_name, "w") as f:
+        f.write(new_str)
+
+
+if len(sys.argv) > 2:
+    update_info("paddle_serving_server_gpu/version.py", "cuda_version",
+                sys.argv[2])
+
+path = "paddle_serving_" + sys.argv[1]
+commit_id = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
+update_info(path + "/version.py", "commit_id", commit_id)
diff --git a/python/paddle_serving_app/version.py b/python/paddle_serving_app/version.py
index 332cba98dd692c4e33da68d4de7763e83e3729b5..554162f4f29a6c28e328c735a71512cd48e59962 100644
--- a/python/paddle_serving_app/version.py
+++ b/python/paddle_serving_app/version.py
@@ -13,3 +13,4 @@
 # limitations under the License.
 """ Paddle Serving App version string """
 serving_app_version = "0.1.2"
+commit_id = ""
diff --git a/python/paddle_serving_client/version.py b/python/paddle_serving_client/version.py
index f7fc14b2a7f0c25b471e8d3bb44e9d6db6839d01..015a73dca73360da228877cf5b41188dd396933c 100644
--- a/python/paddle_serving_client/version.py
+++ b/python/paddle_serving_client/version.py
@@ -15,3 +15,4 @@
 serving_client_version = "0.3.2"
 serving_server_version = "0.3.2"
 module_proto_version = "0.3.2"
+commit_id = ""
diff --git a/python/paddle_serving_server/version.py b/python/paddle_serving_server/version.py
index f7fc14b2a7f0c25b471e8d3bb44e9d6db6839d01..015a73dca73360da228877cf5b41188dd396933c 100644
--- a/python/paddle_serving_server/version.py
+++ b/python/paddle_serving_server/version.py
@@ -15,3 +15,4 @@
 serving_client_version = "0.3.2"
 serving_server_version = "0.3.2"
 module_proto_version = "0.3.2"
+commit_id = ""
diff --git a/python/paddle_serving_server_gpu/gen_cuda_version.py b/python/paddle_serving_server_gpu/gen_cuda_version.py
deleted file mode 100644
index 4a320a0e4dd9f9145a2c7682d5eecb7f582862b5..0000000000000000000000000000000000000000
--- a/python/paddle_serving_server_gpu/gen_cuda_version.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import re
-import os
-
-new_str = ""
-with open("paddle_serving_server_gpu/version.py", "r") as f:
-    for line in f.readlines():
-        if re.match("cuda_version", line):
-            line = re.sub(r"\d+", sys.argv[1], line)
-        new_str = new_str + line
-
-with open("paddle_serving_server_gpu/version.py", "w") as f:
-    f.write(new_str)
diff --git a/python/paddle_serving_server_gpu/version.py b/python/paddle_serving_server_gpu/version.py
index 2272c3aa91f999697ea8ef3e2cdb585b01db8bed..3952f6e4058589e45de0618e5fc38e3d0aaf0c52 100644
--- a/python/paddle_serving_server_gpu/version.py
+++ b/python/paddle_serving_server_gpu/version.py
@@ -16,3 +16,4 @@ serving_client_version = "0.3.2"
 serving_server_version = "0.3.2"
 module_proto_version = "0.3.2"
 cuda_version = "9"
+commit_id = ""
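
The new `python/gen_version.py` stamps the build's git commit into the `commit_id` field that this patch adds to each package's `version.py`. As a rough sketch (not part of the patch), the stamped values could be checked after installing a wheel built from this tree; `paddle_serving_server` below stands in for whichever package was built:

```python
# Inspect the version metadata written by gen_version.py at build time.
# The field names come from the version.py files touched in this patch;
# an unstamped source checkout will just show the empty default "".
from paddle_serving_server import version

print(version.serving_server_version)  # package version, e.g. 0.3.2
print(version.commit_id)               # git rev-parse HEAD recorded at build time
```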