diff --git a/deploy/cpp/CMakeLists.txt b/deploy/cpp/CMakeLists.txt
index a57d4c084d739981ef042d3283f34d9624e4a4b2..e860efd689625992a14a275e83db3104c153f8b9 100644
--- a/deploy/cpp/CMakeLists.txt
+++ b/deploy/cpp/CMakeLists.txt
@@ -244,8 +244,19 @@ if (WIN32 AND WITH_MKL)
   )
 endif()
 
+if (WIN32 AND NOT WITH_MKL)
+  add_custom_command(TARGET main POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/openblas/lib/openblas.dll ./openblas.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/openblas/lib/openblas.dll ./release/openblas.dll
+  )
+endif()
+
 if (WIN32)
   add_custom_command(TARGET main POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/onnxruntime/lib/onnxruntime.dll ./onnxruntime.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/paddle2onnx/lib/paddle2onnx.dll ./paddle2onnx.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/onnxruntime/lib/onnxruntime.dll ./release/onnxruntime.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/paddle2onnx/lib/paddle2onnx.dll ./release/paddle2onnx.dll
     COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
   )
 endif()
diff --git a/deploy/cpp/docs/windows_vs2019_build.md b/deploy/cpp/docs/windows_vs2019_build.md
index e1ae374f7dc6e42a09522efe5a19b2ec91d8a330..9fbfcf2048916635ac2ed98a73849db97df36aa3 100755
--- a/deploy/cpp/docs/windows_vs2019_build.md
+++ b/deploy/cpp/docs/windows_vs2019_build.md
@@ -72,7 +72,12 @@ cd D:\projects\PaddleDetection\deploy\cpp
 | PADDLE_DIR | Path to the Paddle inference library |
 | PADDLE_LIB_NAME | Name of the Paddle inference library |
 
-**Note:** 1. When using the `CPU` version of the inference library, uncheck `WITH_GPU` 2. When using the `openblas` version, uncheck `WITH_MKL` 3. If the keypoint model is not needed, uncheck `WITH_KEYPOINT`
+**Note:**
+
+1. If building for a CPU-only environment, download the `CPU` version of the inference library and uncheck `WITH_GPU`
+2. If using the `openblas` version, uncheck `WITH_MKL`
+3. If the keypoint model is not needed, uncheck `WITH_KEYPOINT`
+4. On Windows, `PADDLE_LIB_NAME` must be set to `paddle_inference`
 
 Run the following commands to generate the project files:
 ```
@@ -84,6 +89,8 @@ cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=O
 cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\paddle_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6 -DWITH_KEYPOINT=ON
 ```
 
+
+
 3. Build
 Open `PaddleObjectDetector.sln` under the `cpp` folder with `Visual Studio 16 2019`, set the build configuration to `Release`, and click `Build` -> `Build All`.
 
@@ -147,6 +154,5 @@ cd D:\projects\PaddleDetection\deploy\cpp\out\build\x64-Release
 .\main --model_dir=D:\\models\\yolov3_darknet --model_dir_keypoint=D:\\models\\hrnet_w32_256x192 --image_file=D:\\images\\test.jpeg --device=GPU
 ```
 
-
 ## Benchmark
 For benchmark results, please refer to [BENCHMARK_INFER](../../BENCHMARK_INFER.md)
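
For reference, the guide above only shows GPU + MKL configure commands; notes 1-2 describe the CPU/`openblas` case, which is also what the new `if (WIN32 AND NOT WITH_MKL)` copy rule in `CMakeLists.txt` targets. A minimal sketch of the corresponding configure command is given below; it is not part of this patch, and the package paths are placeholders borrowed from the GPU example, so adjust them to your own layout:

```
cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=OFF -DWITH_MKL=OFF -DCMAKE_BUILD_TYPE=Release -DPADDLE_DIR=D:\projects\packages\paddle_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6
```

With `WITH_MKL=OFF`, the new POST_BUILD rule copies `openblas.dll` (and, on all Windows builds, `onnxruntime.dll` and `paddle2onnx.dll`) next to the built `main.exe`, so manually copying these DLLs should no longer be necessary.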