Commit 45c1e7bb authored by silingtong123, committed by liuwei1031

add prediction demo and script on windows (#21248)

Parent 4b429c19
......@@ -69,6 +69,12 @@ if(WITH_GPU)
endif(NOT WIN32)
endif()
if (USE_TENSORRT AND WITH_GPU)
set(TENSORRT_ROOT ${PADDLE_LIB_THIRD_PARTY_PATH}tensorrt)
set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
endif()
if (NOT WIN32)
if (USE_TENSORRT AND WITH_GPU)
include_directories("${TENSORRT_INCLUDE_DIR}")
......@@ -108,9 +114,12 @@ if(WITH_MKL)
endif(WIN32)
endif()
else()
set(MATH_LIB ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
if(WIN32)
set(MATH_DLL ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/openblas${CMAKE_SHARED_LIBRARY_SUFFIX})
# Note: the OpenBLAS static library does not work on Windows, so use the import library instead.
set(MATH_LIB ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(MATH_LIB ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
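# Not part of this commit: a minimal sketch (assuming the demo target is named
# ${DEMO_NAME}, as passed in by the build script below) of copying the OpenBLAS
# DLL next to the built executable, since the import library above resolves
# against the DLL at runtime.
if(WIN32 AND NOT WITH_MKL)
  add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy_if_different
            ${MATH_DLL} $<TARGET_FILE_DIR:${DEMO_NAME}>)
endif()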
......
@echo off
setlocal
set source_path=%~dp0
set build_path=%~dp0\build
setlocal enabledelayedexpansion
rem set gpu_inference
SET /P gpu_inference="Use GPU inference library or not (Y/N), default: N =======>"
IF /i "%gpu_inference%"=="y" (
SET gpu_inference=Y
) else (
SET gpu_inference=N
)
SET /P use_mkl="Use MKL or not (Y/N), default: Y =======>"
if /i "%use_mkl%"=="N" (
set use_mkl=N
) else (
set use_mkl=Y
)
:set_paddle_inference_lib
SET /P paddle_inference_lib="Please input the path of the Paddle inference library, such as D:\fluid_inference_install_dir =======>"
set tmp_var=!paddle_inference_lib!
call:remove_space
set paddle_inference_lib=!tmp_var!
IF NOT EXIST "%paddle_inference_lib%" (
echo "------------%paddle_inference_lib% does not exist------------"
goto set_paddle_inference_lib
)
IF "%use_mkl%"=="N" (
IF NOT EXIST "%paddle_infernece_lib%\third_party\install\openblas" (
echo "------------It's not a OpenBlas inference library------------"
goto:eof
)
) else (
IF NOT EXIST "%paddle_infernece_lib%\third_party\install\mklml" (
echo "------------It's not a MKL inference library------------"
goto:eof
)
)
:set_path_cuda
if /i "!gpu_inference!"=="Y" (
SET /P cuda_lib_dir="Please input the path of cuda libraries, such as D:\cuda\lib\x64 =======>"
set tmp_var=!cuda_lib_dir!
call:remove_space
set cuda_lib_dir=!tmp_var!
IF NOT EXIST "!cuda_lib_dir!" (
echo "------------!cuda_lib_dir!not exist------------"
goto set_path_cuda
)
)
rem set_use_gpu
if /i "!gpu_inference!"=="Y" (
SET /P use_gpu="Use GPU or not (Y/N), default: N =======>"
)
if /i "%use_gpu%"=="Y" (
set use_gpu=Y
) else (
set use_gpu=N
)
rem set_path_vs_command_prompt
:set_vcvarsall_dir
SET /P vcvarsall_dir="Please input the path of the Visual Studio command prompt script, such as C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat =======>"
set tmp_var=!vcvarsall_dir!
call:remove_space
set vcvarsall_dir=!tmp_var!
IF NOT EXIST "%vcvarsall_dir%" (
echo "------------%vcvarsall_dir% not exist------------"
goto set_vcvarsall_dir
)
rem set_demo_name
:set_demo_name
SET /P demo_name="Please input the demo name, default: windows_mobilenet =======>"
if "%demo_name%"=="" set demo_name=windows_mobilenet
IF NOT EXIST "%source_path%\%demo_name%.cc" (
echo "------------%source_path%\%demo_name%.cc not exist------------"
goto set_demo_name
)
if "%demo_name%"=="windows_mobilenet" set model_name=mobilenet
if "%demo_name%"=="vis_demo" set model_name=mobilenet
if "%demo_name%"=="simple_on_word2vec" set model_name=word2vec.inference.model
if "%demo_name%"=="trt_mobilenet_demo" (
echo "The trt_mobilenet_demo need tensorRT inference library"
if NOT exist "%paddle_infernece_lib%\third_party\install\tensorrt" (
echo "------------It's not a tensorRT inference library------------"
goto:eof
)
set model_name=mobilenet
)
rem download model
if NOT EXIST "%source_path%\%model_name%.tar.gz" (
if "%model_name%"=="mobilenet" (
call:download_model_mobilenet
)
if "%model_name%"=="word2vec.inference.model" (
call:download_model_word2vec
)
)
if EXIST "%source_path%\%model_name%.tar.gz" (
if NOT EXIST "%source_path%\%model_name%" (
SET /P python_path="Please input the path of python.exe, such as C:\Python35\python.exe, C:\Python35\python3.exe =======>"
set tmp_var=!python_path!
call:remove_space
set python_path=!tmp_var!
if "!python_path!"=="" (
set python_path=python.exe
) else (
if NOT exist "!python_path!" (
echo "------------!python_path! not exist------------"
goto:eof
)
)
md %source_path%\%model_name%
!python_path! %source_path%\untar_model.py %source_path%\%model_name%.tar.gz %source_path%\%model_name%
SET error_code=N
if "%model_name%"=="mobilenet" (
if NOT EXIST "%source_path%\%model_name%\model" set error_code=Y
) else (
if NOT EXIST "%source_path%\%model_name%\%model_name%" set error_code=Y
)
if "!error_code!"=="Y" (
echo "========= Unzip %model_name%.tar.gz failed ======="
del /f /s /q "%source_path%\%model_name%\*.*" >nul 2>&1
rd /s /q "%source_path%\%model_name%" >nul 2>&1
goto:eof
)
)
)
echo "=================================================================="
echo.
echo "use_gpu_inference=%gpu_inference%"
echo.
echo "use_mkl=%use_mkl%"
echo.
echo "use_gpu=%use_gpu%"
echo.
echo "paddle_infernece_lib=%paddle_infernece_lib%"
echo.
IF /i "%gpu_inference%"=="y" (
echo "cuda_lib_dir=%cuda_lib_dir%"
echo.
)
echo "vs_vcvarsall_dir=%vcvarsall_dir%"
echo.
echo "demo_name=%demo_name%"
echo.
if NOT "!python_path!"=="" (
echo "python_path=!python_path!"
echo.
)
echo "===================================================================="
pause
rem compile and run demo
if NOT EXIST "%build_path%" (
md %build_path%
cd %build_path%
) else (
del /f /s /q "%build_path%\*.*" >nul 2>&1
rd /s /q "%build_path%" >nul 2>&1
md %build_path%
cd %build_path%
)
if /i "%use_mkl%"=="N" (
set use_mkl=OFF
) else (
set use_mkl=ON
)
if /i "%gpu_inference%"=="Y" (
if "%demo_name%"=="trt_mobilenet_demo" (
cmake .. -G "Visual Studio 14 2015 Win64" -T host=x64 -DWITH_GPU=ON ^
-DWITH_MKL=%use_mkl% -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=%demo_name% ^
-DPADDLE_LIB="%paddle_infernece_lib%" -DMSVC_STATIC_CRT=ON -DCUDA_LIB="%cuda_lib_dir%" -DUSE_TENSORRT=ON
) else (
cmake .. -G "Visual Studio 14 2015 Win64" -T host=x64 -DWITH_GPU=ON ^
-DWITH_MKL=%use_mkl% -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=%demo_name% ^
-DPADDLE_LIB="%paddle_infernece_lib%" -DMSVC_STATIC_CRT=ON -DCUDA_LIB="%cuda_lib_dir%"
)
) else (
cmake .. -G "Visual Studio 14 2015 Win64" -T host=x64 -DWITH_GPU=OFF ^
-DWITH_MKL=%use_mkl% -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=%demo_name% ^
-DPADDLE_LIB="%paddle_infernece_lib%" -DMSVC_STATIC_CRT=ON
)
call "%vcvarsall_dir%" amd64
msbuild /m /p:Configuration=Release %demo_name%.vcxproj
if /i "%use_gpu%"=="Y" (
SET use_gpu=true
) else (
SET use_gpu=false
)
if exist "%build_path%\Release\%demo_name%.exe" (
cd %build_path%\Release
set GLOG_v=4
if "%demo_name%"=="simple_on_word2vec" (
%demo_name%.exe --dirname="%source_path%\%model_name%\%model_name%" --use_gpu="%use_gpu%"
) else (
if "%demo_name%"=="windows_mobilenet" (
%demo_name%.exe --modeldir="%source_path%\%model_name%\model" --use_gpu="%use_gpu%"
) else (
if "%demo_name%"=="trt_mobilenet_demo" (
%demo_name%.exe --modeldir="%source_path%\%model_name%\model" --data=%source_path%\%model_name%\data.txt ^
--refer=%source_path%\%model_name%\result.txt
) else (
%demo_name%.exe --modeldir="%source_path%\%model_name%\model" --data=%source_path%\%model_name%\data.txt ^
--refer=%source_path%\%model_name%\result.txt --use_gpu="%use_gpu%"
)
)
)
) else (
echo "=========compilation fails!!=========="
)
echo.&pause&goto:eof
:download_model_mobilenet
powershell.exe (new-object System.Net.WebClient).DownloadFile('http://paddlemodels.bj.bcebos.com//inference-vis-demos/mobilenet.tar.gz', ^
'%source_path%\mobilenet.tar.gz')
goto:eof
:download_model_word2vec
powershell.exe (new-object System.Net.WebClient).DownloadFile('http://paddle-inference-dist.bj.bcebos.com/word2vec.inference.model.tar.gz', ^
'%source_path%\word2vec.inference.model.tar.gz')
goto:eof
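rem Subroutine: trims leading and trailing spaces from the value held in tmp_var.
rem Callers load the value into tmp_var, call:remove_space, then read tmp_var back.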
:remove_space
:remove_left_space
if "%tmp_var:~0,1%"==" " (
set "tmp_var=%tmp_var:~1%"
goto remove_left_space
)
:remove_right_space
if "%tmp_var:~-1%"==" " (
set "tmp_var=%tmp_var:~0,-1%"
goto remove_right_space
)
goto:eof
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tarfile


def untar(fname, dirs):
    """
    Extract a .tar.gz archive.
    :param fname: path of the .tar.gz file
    :param dirs: directory to extract the archive into
    :return: bool, True on success
    """
    try:
        with tarfile.open(name=fname, mode='r:gz') as t:
            t.extractall(path=dirs)
        return True
    except Exception as e:
        print(e)
        return False


if __name__ == '__main__':
    untar(sys.argv[1], sys.argv[2])
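
# Usage sketch (paths are illustrative; the batch script above invokes it this way):
#     python untar_model.py mobilenet.tar.gz mobilenet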
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#include "paddle/include/paddle_inference_api.h"
DEFINE_string(modeldir, "", "Directory of the inference model.");
DEFINE_bool(use_gpu, false, "Whether use gpu.");
namespace paddle {
namespace demo {
void RunAnalysis() {
// 1. create AnalysisConfig
AnalysisConfig config;
if (FLAGS_modeldir.empty()) {
LOG(INFO) << "Usage: path\\mobilenet --modeldir=path/to/your/model";
exit(1);
}
// CreateConfig(&config);
if (FLAGS_use_gpu) {
config.EnableUseGpu(100, 0);
}
config.SetModel(FLAGS_modeldir + "/__model__",
FLAGS_modeldir + "/__params__");
// To use ZeroCopyTensor, feed/fetch ops must be switched off.
config.SwitchUseFeedFetchOps(false);
// 2. create predictor, prepare input data
std::unique_ptr<PaddlePredictor> predictor = CreatePaddlePredictor(config);
int batch_size = 1;
int channels = 3;
int height = 300;
int width = 300;
int nums = batch_size * channels * height * width;
// Zero-filled dummy input; a real application would fill in image data here.
std::vector<float> input(nums, 0.f);
// 3. create input tensor, use ZeroCopyTensor
auto input_names = predictor->GetInputNames();
auto input_t = predictor->GetInputTensor(input_names[0]);
input_t->Reshape({batch_size, channels, height, width});
input_t->copy_from_cpu(input.data());
// 4. run predictor
predictor->ZeroCopyRun();
// 5. get the output
std::vector<float> out_data;
auto output_names = predictor->GetOutputNames();
auto output_t = predictor->GetOutputTensor(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
out_data.resize(out_num);
output_t->copy_to_cpu(out_data.data());
}
} // namespace demo
} // namespace paddle
int main(int argc, char** argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
paddle::demo::RunAnalysis();
std::cout << "=========================Runs successfully===================="
<< std::endl;
return 0;
}
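// Usage sketch (paths are illustrative; mirrors the invocation in the batch script above):
//   windows_mobilenet.exe --modeldir=mobilenet\model --use_gpu=false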