From e78bcbde4fa2937e7e9f9f4abbf0fd9f43983478 Mon Sep 17 00:00:00 2001
From: Jiawei Wang
Date: Mon, 14 Dec 2020 19:17:40 +0800
Subject: [PATCH] fix cherry-pick conflict 920

---
 doc/COMPILE.md          | 20 +++++++++++++++++---
 doc/COMPILE_CN.md       | 17 ++++++++++++++++-
 doc/DOCKER_IMAGES.md    |  1 +
 doc/DOCKER_IMAGES_CN.md |  1 +
 4 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/doc/COMPILE.md b/doc/COMPILE.md
index cf0bfdf2..84254f4f 100644
--- a/doc/COMPILE.md
+++ b/doc/COMPILE.md
@@ -100,14 +100,21 @@ make -j10
 you can execute `make install` to put targets under directory `./output`, you need to add`-DCMAKE_INSTALL_PREFIX=./output`to specify output path to cmake command shown above.
 ### Integrated GPU version paddle inference library
-
+### CUDA_PATH is the CUDA installation path; use the command `whereis cuda` to check it. It is usually /usr/local/cuda.
+### CUDNN_LIBRARY and CUDA_CUDART_LIBRARY are the CUDA library paths, usually /usr/local/cuda/lib64/.
+
 ``` shell
+export CUDA_PATH='/usr/local/cuda'
+export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
+export CUDA_CUDART_LIBRARY="/usr/local/cuda/lib64/"
+
 mkdir server-build-gpu && cd server-build-gpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
     -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
     -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
     -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
-    -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
+    -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
+    -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
     -DSERVER=ON \
     -DWITH_GPU=ON ..
 make -j10
@@ -116,6 +123,10 @@ make -j10
 ### Integrated TRT version paddle inference library
 
 ```
+export CUDA_PATH='/usr/local/cuda'
+export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
+export CUDA_CUDART_LIBRARY="/usr/local/cuda/lib64/"
+
 mkdir server-build-trt && cd server-build-trt
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
     -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
@@ -123,6 +134,7 @@ cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
     -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \
     -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
     -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
+    -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
     -DSERVER=ON \
     -DWITH_GPU=ON \
     -DWITH_TRT=ON ..
@@ -166,12 +178,14 @@ make
 ## Install wheel package
 
 Regardless of the client, server or App part, after compiling, install the whl package in `python/dist/` in the temporary directory(`server-build-cpu`, `server-build-gpu`, `client-build`,`app-build`) of the compilation process.
-
+For example: `cd server-build-cpu/python/dist && pip install -U xxxxx.whl`
 
 ## Note
 
 When running the python server, it will check the `SERVING_BIN` environment variable. If you want to use your own compiled binary file, set the environment variable to the path of the corresponding binary file, usually`export SERVING_BIN=${BUILD_DIR}/core/general-server/serving`.
+BUILD_DIR is the absolute path of server-build-cpu or server-build-gpu.
+For example: `cd server-build-cpu && export SERVING_BIN=${PWD}/core/general-server/serving`

diff --git a/doc/COMPILE_CN.md b/doc/COMPILE_CN.md
index b3619d9a..0a31cb1b 100644
--- a/doc/COMPILE_CN.md
+++ b/doc/COMPILE_CN.md
@@ -97,14 +97,20 @@ make -j10
 You can run `make install` to place the build outputs under `./output`; add the `-DCMAKE_INSTALL_PREFIX=./output` option at the cmake stage to specify the output path.
 ### Integrate the GPU version of the Paddle Inference Library
-
+### CUDA_PATH is the CUDA installation path; use the command `whereis cuda` to confirm it. It is usually /usr/local/cuda.
+### CUDNN_LIBRARY and CUDA_CUDART_LIBRARY are the CUDA library paths, usually /usr/local/cuda/lib64/.
 ``` shell
+export CUDA_PATH='/usr/local/cuda'
+export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
+export CUDA_CUDART_LIBRARY="/usr/local/cuda/lib64/"
+
 mkdir server-build-gpu && cd server-build-gpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
     -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
     -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python \
     -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
     -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
+    -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
     -DSERVER=ON \
     -DWITH_GPU=ON ..
 make -j10
@@ -113,6 +119,10 @@ make -j10
 ### Integrate the TensorRT version of the Paddle Inference Library
 
 ```
+export CUDA_PATH='/usr/local/cuda'
+export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
+export CUDA_CUDART_LIBRARY="/usr/local/cuda/lib64/"
+
 mkdir server-build-trt && cd server-build-trt
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
     -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so \
@@ -120,6 +130,7 @@ cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ \
     -DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \
     -DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \
     -DCUDNN_LIBRARY=${CUDNN_LIBRARY} \
+    -DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \
     -DSERVER=ON \
     -DWITH_GPU=ON \
     -DWITH_TRT=ON ..
@@ -162,12 +173,16 @@ make
 ## Install the wheel package
 
 Whether for the Client, the Server, or the App part, after compilation, install the whl package under `python/dist/` in the temporary build directory (`server-build-cpu`, `server-build-gpu`, `client-build`, `app-build`).
+For example: `cd server-build-cpu/python/dist && pip install -U xxxxx.whl`
+
 ## Notes
 
 When running the Python Server, the `SERVING_BIN` environment variable is checked. If you want to use your own compiled binary, set this variable to the path of the corresponding binary, usually `export SERVING_BIN=${BUILD_DIR}/core/general-server/serving`.
+Here BUILD_DIR is the absolute path of server-build-cpu or server-build-gpu.
+For example, change into server-build-cpu and run `export SERVING_BIN=${PWD}/core/general-server/serving`.

diff --git a/doc/DOCKER_IMAGES.md b/doc/DOCKER_IMAGES.md
index dcaa34b1..582945be 100644
--- a/doc/DOCKER_IMAGES.md
+++ b/doc/DOCKER_IMAGES.md
@@ -28,6 +28,7 @@ You can get images in two ways:
 ## Image description
 
 Runtime images cannot be used for compilation.
+If you want to customize Serving based on the source code, use an image whose tag carries the -devel suffix.
 
 | Description | OS | TAG | Dockerfile |
 | :----------------------------------------------------------: | :-----: | :--------------------------: | :----------------------------------------------------------: |

diff --git a/doc/DOCKER_IMAGES_CN.md b/doc/DOCKER_IMAGES_CN.md
index 6865eb77..30aca584 100644
--- a/doc/DOCKER_IMAGES_CN.md
+++ b/doc/DOCKER_IMAGES_CN.md
@@ -28,6 +28,7 @@
 ## Image description
 
 Runtime images cannot be used for development or compilation.
+If you need to do secondary development and compilation based on the source code, use the version with the -devel suffix.
 
 | Description | OS | TAG | Dockerfile |
 | -------------------------------------------------- | -------- | ---------------------------- | ------------------------------------------------------------ |
--
GitLab
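As a companion to the exports this patch adds to COMPILE.md, here is a minimal shell sketch for verifying the CUDA paths before invoking cmake. It is only a sketch under the assumption of a standard /usr/local/cuda layout, as the new notes describe; adjust the paths if `whereis cuda` reports a different location on your machine.

``` shell
# Sketch: verify the CUDA locations the COMPILE.md notes assume.
# Assumption: a standard /usr/local/cuda install; adjust paths otherwise.
whereis cuda

export CUDA_PATH='/usr/local/cuda'
export CUDNN_LIBRARY='/usr/local/cuda/lib64/'
export CUDA_CUDART_LIBRARY='/usr/local/cuda/lib64/'

# The CUDA runtime library must exist for -DCUDA_CUDART_LIBRARY to resolve.
ls "${CUDA_CUDART_LIBRARY}"/libcudart.so* \
  || echo "libcudart not found; check CUDA_CUDART_LIBRARY"

# cuDNN is installed separately and may live elsewhere
# (e.g. /usr/lib/x86_64-linux-gnu on some distros).
ls "${CUDNN_LIBRARY}"/libcudnn.so* \
  || echo "libcudnn not found; check CUDNN_LIBRARY"
```

Running this before the cmake invocations above surfaces path problems early, instead of partway through a long build.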