diff --git a/deploy/cpp/scripts/build.sh b/deploy/cpp/scripts/build.sh
index b0588ed3017d97621e87b8f74e64ae6ecf870e84..4c89d79e8ac4d664e7edfdf4f57043f88d759cb8 100644
--- a/deploy/cpp/scripts/build.sh
+++ b/deploy/cpp/scripts/build.sh
@@ -1,5 +1,7 @@
 # Whether to use the GPU (i.e. whether to use CUDA)
 WITH_GPU=OFF
+# Use MKL or openblas
+WITH_MKL=ON
 # Whether to integrate TensorRT (effective only when WITH_GPU=ON)
 WITH_TENSORRT=OFF
 # Path to the TensorRT lib directory
@@ -24,6 +26,7 @@ mkdir -p build
 cd build
 cmake .. \
     -DWITH_GPU=${WITH_GPU} \
+    -DWITH_MKL=${WITH_MKL} \
     -DWITH_TENSORRT=${WITH_TENSORRT} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp
index 4031a9d8c41921b90d2e2a9782fb67d1642a2f5c..b925683191bbe57e59081a1e6800beff4c6143ef 100644
--- a/deploy/cpp/src/paddlex.cpp
+++ b/deploy/cpp/src/paddlex.cpp
@@ -255,7 +255,6 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
   auto im_tensor = predictor_->GetInputTensor("image");
   im_tensor->Reshape({1, 3, h, w});
   im_tensor->copy_from_cpu(inputs_.im_data_.data());
-  std::cout << "input image: " << h << " " << w << std::endl;
 
   // Run prediction with the loaded model
   predictor_->ZeroCopyRun();
diff --git a/docs/deploy/deploy_cpp_linux.md b/docs/deploy/deploy_cpp_linux.md
index 28df77e7c9c0cf560ab65ba3957332c570125605..4b3a6e8159b5bcff313cfc2e752201a28a2058d1 100644
--- a/docs/deploy/deploy_cpp_linux.md
+++ b/docs/deploy/deploy_cpp_linux.md
@@ -19,16 +19,18 @@
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-PaddlePaddle provides prebuilt versions of its C++ inference library for different `CPU` and `CUDA` configurations, with or without TensorRT support:
+PaddlePaddle provides prebuilt versions of its C++ inference library for different `CPU` and `CUDA` configurations, with or without TensorRT support. PaddleX currently depends on Paddle 1.7; several versions of the Paddle inference library are listed below:
 
 | Version | Inference library (1.7.2) |
 | ---- | ---- |
 | ubuntu14.04_cpu_avx_mkl | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.7.2-cpu-avx-mkl/fluid_inference.tgz) |
+| ubuntu14.04_cpu_avx_openblas | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.7.2-cpu-avx-openblas/fluid_inference.tgz) |
+| ubuntu14.04_cpu_noavx_openblas | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.7.2-cpu-noavx-openblas/fluid_inference.tgz) |
 | ubuntu14.04_cuda9.0_cudnn7_avx_mkl | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.7.2-gpu-cuda9-cudnn7-avx-mkl/fluid_inference.tgz) |
 | ubuntu14.04_cuda10.0_cudnn7_avx_mkl | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.7.2-gpu-cuda10-cudnn7-avx-mkl/fluid_inference.tgz) |
 | ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6 | [fluid_inference.tgz](https://paddle-inference-lib.bj.bcebos.com/1.7.2-gpu-cuda10.1-cudnn7.6-avx-mkl-trt6%2Ffluid_inference.tgz) |
 
 For more and newer versions, download as appropriate: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html#id1)
 
 After downloading and extracting, the `/root/projects/fluid_inference` directory contains:
 ```
@@ -47,9 +49,10 @@ fluid_inference
 
 The `cmake` build command is in `scripts/build.sh`; modify the main parameters as needed. Its main contents are explained below:
 ```
-
 # Whether to use the GPU (i.e. whether to use CUDA)
 WITH_GPU=OFF
+# Use MKL or openblas
+WITH_MKL=ON
 # Whether to integrate TensorRT (effective only when WITH_GPU=ON)
 WITH_TENSORRT=OFF
 # Path to the TensorRT lib directory
@@ -74,6 +77,7 @@ mkdir -p build
 cd build
 cmake .. \
     -DWITH_GPU=${WITH_GPU} \
+    -DWITH_MKL=${WITH_MKL} \
     -DWITH_TENSORRT=${WITH_TENSORRT} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
diff --git a/docs/deploy/deploy_cpp_win_vs2019.md b/docs/deploy/deploy_cpp_win_vs2019.md
index 4618d0c839e1913141d221509ff29207c79b3a0b..836a2abfdcfbe2252a651ccf7bf22cda4922e932 100644
--- a/docs/deploy/deploy_cpp_win_vs2019.md
+++ b/docs/deploy/deploy_cpp_win_vs2019.md
@@ -27,7 +27,18 @@ git clone https://github.com/PaddlePaddle/PaddleX.git
 
 ### Step2: Download the PaddlePaddle C++ inference library fluid_inference
 
-PaddlePaddle provides prebuilt versions of its C++ inference library for different `CPU` and `CUDA` versions; download as appropriate: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html)
+PaddlePaddle provides prebuilt versions of its C++ inference library for different `CPU` and `CUDA` configurations, with or without TensorRT support. PaddleX currently depends on Paddle 1.7; several versions of the Paddle inference library are listed below:
+
+| Version | Inference library (1.7.2) | Compiler | Build tool | cuDNN | CUDA |
+| ---- | ---- | ---- | ---- | ---- | ---- |
+| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.2/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | - | - |
+| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.2/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | - | - |
+| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.2/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
+| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.2/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
+| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.2/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
+
+
+For more and newer versions, download as appropriate: [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html#id1)
 
 After extraction, the `D:\projects\fluid_inference*\` directory mainly contains:
 ```
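A minimal usage sketch of the new `WITH_MKL` switch, for reference while reviewing. It assumes the Linux layout described in `docs/deploy/deploy_cpp_linux.md` (library extracted to `/root/projects/fluid_inference`); the download URL comes from the openblas row added to the table above, and the exact paths and invocation may differ per environment:

```sh
# Hypothetical walkthrough; paths are illustrative, not mandated by the docs.
# 1. Fetch and extract a CPU openblas build of the 1.7.2 inference library
#    (URL taken from the table added in this change):
mkdir -p /root/projects
wget https://paddle-inference-lib.bj.bcebos.com/1.7.2-cpu-avx-openblas/fluid_inference.tgz
tar -xzf fluid_inference.tgz -C /root/projects

# 2. In deploy/cpp/scripts/build.sh, align the switches with that library:
#      WITH_GPU=OFF
#      WITH_MKL=OFF                               # openblas library, so MKL off
#      PADDLE_DIR=/root/projects/fluid_inference  # extracted library root

# 3. Build; the script runs the cmake invocation shown in the diff.
cd PaddleX/deploy/cpp
sh ./scripts/build.sh
```

The key point is that `WITH_MKL` must match the downloaded library variant: `ON` for an `*_mkl` package, `OFF` for an `*_openblas` package.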