Commit e66f92d1 in BaiXuePrincess/Paddle (forked from PaddlePaddle/Paddle)
Authored Dec 30, 2019 by zhouwei25; committed by Tao Luo on Dec 30, 2019
Modify demo_ci to support Windows, prepare for PR_Windows_Inference (#21873)
Parent: c3527f55
Showing 2 changed files with 130 additions and 74 deletions.
paddle/fluid/inference/api/demo_ci/CMakeLists.txt  (+29, -19)
paddle/fluid/inference/api/demo_ci/run.sh  (+101, -55)
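In short, the previous run.sh only knew the Linux/macOS flow (cmake plus make); this commit adds a Windows branch that configures with the Visual Studio generator, builds with msbuild, and runs the resulting Release/*.exe binaries, while CMakeLists.txt learns Windows library names and copies the required DLLs next to the executables. A minimal sketch of the two flows, condensed from the diff below (option values such as WITH_MKL/WITH_GPU and the PADDLE_LIB and DATA_DIR paths are illustrative placeholders, not the script's exact arguments):

# Condensed sketch, not the full run.sh logic; PADDLE_LIB and DATA_DIR stand in for the
# fluid_inference_install_dir and dataset directory that run.sh computes/receives.
if [ "$(uname | grep Win)" != "" ]; then
    # Windows: Visual Studio generator + msbuild, then run the Release executable
    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB=$PADDLE_LIB \
        -DWITH_MKL=ON -DDEMO_NAME=simple_on_word2vec -DWITH_GPU=OFF -DWITH_STATIC_LIB=ON
    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
    Release/simple_on_word2vec.exe --dirname=$DATA_DIR/word2vec/word2vec.inference.model --use_gpu=False
else
    # Linux/macOS: default generator + make, then run the binary from the build directory
    cmake .. -DPADDLE_LIB=$PADDLE_LIB \
        -DWITH_MKL=ON -DDEMO_NAME=simple_on_word2vec -DWITH_GPU=OFF -DWITH_STATIC_LIB=ON
    make -j$(nproc)
    ./simple_on_word2vec --dirname=$DATA_DIR/word2vec/word2vec.inference.model --use_gpu=False
fi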
paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -4,6 +4,7 @@ option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL.
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(USE_TENSORRT "Compile demo with TensorRT." OFF)
if(NOT WITH_STATIC_LIB)
  add_definitions("-DPADDLE_WITH_SHARED_LIB")
endif()
@@ -44,6 +45,7 @@ link_directories("${PADDLE_LIB}/paddle/lib")
if(WIN32)
  add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
  option(MSVC_STATIC_CRT "use static C Runtime library by default" ON)
  if(MSVC_STATIC_CRT)
    set(CMAKE_C_FLAGS_DEBUG   "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
    set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
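Because MSVC_STATIC_CRT is an ordinary CMake option, the CRT flavor can be chosen at configure time: when it is ON the flags above force the static CRT (/MT, /MTd), and when it is OFF CMake's default dynamic CRT flags are left untouched. A hedged example of overriding it (the install path is a placeholder):

# Illustrative configure line only; D:/fluid_inference_install_dir is a placeholder path.
cmake .. -G "Visual Studio 14 2015" -A x64 \
    -DPADDLE_LIB=D:/fluid_inference_install_dir \
    -DWITH_MKL=ON -DWITH_GPU=OFF -DWITH_STATIC_LIB=ON \
    -DMSVC_STATIC_CRT=OFF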
@@ -96,10 +98,8 @@ if(WITH_MKL)
  set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
  include_directories("${MATH_LIB_PATH}/include")
  if(WIN32)
    set(MATH_DLL ${MATH_LIB_PATH}/lib/mklml${CMAKE_SHARED_LIBRARY_SUFFIX}
                 ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_SHARED_LIBRARY_SUFFIX}
                 ${MATH_LIB_PATH}/lib/msvcr120${CMAKE_SHARED_LIBRARY_SUFFIX})
    set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX}
                 ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
    set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
                 ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
@@ -114,20 +114,23 @@ if(WITH_MKL)
  endif(WIN32)
endif()
else()
  set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
  include_directories("${OPENBLAS_LIB_PATH}/include/openblas")
  if(WIN32)
    set(MATH_DLL ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/openblas${CMAKE_SHARED_LIBRARY_SUFFIX})
    # Note: fix the openblas static library not work on windows, and change the static library to import library.
-   set(MATH_LIB ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+   set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
-   set(MATH_LIB ${PADDLE_LIB_THIRD_PARTY_PATH}openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+   set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
endif()
# Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
if(WITH_STATIC_LIB)
  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
- set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+ if(WIN32)
+   set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+   set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
endif()
if(NOT WIN32)
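With this hunk, WITH_STATIC_LIB=OFF still links libpaddle_fluid.so on Linux/macOS, but on Windows it links the paddle_fluid import library (resolved through CMAKE_STATIC_LIBRARY_SUFFIX, i.e. paddle_fluid.lib) and relies on paddle_fluid.dll being copied next to the executable by the post-build step further down. A sketch of configuring the shared-library variant (the install path is a placeholder):

# Illustrative only; the install dir is a placeholder.
cmake .. -DPADDLE_LIB=/path/to/fluid_inference_install_dir \
    -DWITH_MKL=ON -DDEMO_NAME=vis_demo -DWITH_GPU=OFF \
    -DWITH_STATIC_LIB=OFF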
@@ -151,7 +154,7 @@ if(WITH_GPU)
  endif()
  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
- if(USE_TENSORRT)
+ if(USE_TENSORRT)
    set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
    set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
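USE_TENSORRT, TENSORRT_INCLUDE_DIR and TENSORRT_LIB_DIR are plain cache variables, so a TensorRT-enabled build of the trt_mobilenet_demo target is configured essentially the way run.sh does further below. A minimal sketch (the TensorRT paths are the defaults documented in run.sh and may differ on your machine):

# Illustrative configure-and-build sequence; adjust the paths to your installation.
cmake .. -DPADDLE_LIB=/path/to/fluid_inference_install_dir \
    -DWITH_MKL=ON -DDEMO_NAME=trt_mobilenet_demo \
    -DWITH_GPU=ON -DWITH_STATIC_LIB=ON \
    -DUSE_TENSORRT=ON \
    -DTENSORRT_INCLUDE_DIR=/usr/local/TensorRT/include \
    -DTENSORRT_LIB_DIR=/usr/local/TensorRT/lib
make -j$(nproc)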
@@ -170,16 +173,23 @@ if(WIN32)
          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
    )
  )
  endif()
  if(WITH_MKL)
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_DLL} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
    )
  else()
      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/msvcr120.dll ${CMAKE_BINARY_DIR}/Release
      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
    )
  else()
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_DLL} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
    )
  endif()
      COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
    )
  endif()
  if(NOT WITH_STATIC_LIB)
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_LIB}/paddle/lib/paddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/Release
    )
  endif()
endif()
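The add_custom_command(... POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ...) calls above exist because, on Windows, the demo executable needs its runtime DLLs (mkldnn.dll, the MKL/OpenBLAS DLLs collected in MATH_DLL, and paddle_fluid.dll for non-static builds) sitting next to the .exe. The same copies can also be run by hand with CMake's command-line mode; a small sketch with placeholder paths:

# Manual equivalent of one post-build copy step; source paths are placeholders.
cmake -E copy /path/to/mklml/lib/mklml.dll Release/mklml.dll
cmake -E copy /path/to/fluid_inference_install_dir/paddle/lib/paddle_fluid.dll Release/paddle_fluid.dll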
paddle/fluid/inference/api/demo_ci/run.sh
@@ -6,6 +6,7 @@ TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
DATA_DIR=$4 # dataset
TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, default to /usr/local/TensorRT/include
TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
inference_install_dir=${PADDLE_ROOT}/build/fluid_inference_install_dir
cd `dirname $0`
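For reference, the positional parameters visible in this hunk are $3 (TEST_GPU_CPU), $4 (DATA_DIR), $5 (TENSORRT_INCLUDE_DIR) and $6 (TENSORRT_LIB_DIR); the first two parameters are not shown in this excerpt. A hedged invocation sketch using only what the comments document:

# Illustrative invocation; <arg1> and <arg2> stand for the parameters not shown in this hunk,
# and the remaining values are example choices for the documented parameters.
bash run.sh <arg1> <arg2> ON /path/to/demo_data /usr/local/TensorRT/include /usr/local/TensorRT/lib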
@@ -35,7 +36,7 @@ function download() {
    mkdir -p $dir_name
    cd $dir_name
    if [[ -e "${PREFIX}${dir_name}.tar.gz" ]]; then
-       echo "${PREFIX}{dir_name}.tar.gz has been downloaded."
+       echo "${PREFIX}${dir_name}.tar.gz has been downloaded."
    else
        wget -q ${URL_ROOT}$dir_name.tar.gz
        tar xzf *.tar.gz
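The single changed line in this hunk fixes a shell parameter-expansion typo: without the $, {dir_name} is literal text rather than an expansion of the variable. A quick self-contained illustration (variable values are arbitrary):

# Demonstration of the typo fixed above; PREFIX and dir_name values are arbitrary examples.
PREFIX=demo- ; dir_name=word2vec
echo "${PREFIX}{dir_name}.tar.gz"    # prints: demo-{dir_name}.tar.gz  (braces stay literal)
echo "${PREFIX}${dir_name}.tar.gz"   # prints: demo-word2vec.tar.gz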
@@ -49,78 +50,123 @@ for vis_demo_name in $vis_demo_list; do
    download $vis_demo_name
done
# download word2vec data
mkdir -p word2vec
cd word2vec
if [[ -e "word2vec.inference.model.tar.gz" ]]; then
    echo "word2vec.inference.model.tar.gz has been downloaded."
else
    wget -q http://paddle-inference-dist.bj.bcebos.com/word2vec.inference.model.tar.gz
    tar xzf *.tar.gz
fi
# compile and test the demo
cd $current_dir
mkdir -p build
cd build
rm -rf *
-for WITH_STATIC_LIB in ON OFF; do
-  # TODO(Superjomn) reopen this
-  # something wrong with the TensorArray reset.
-  :<<D
-  # -----simple_on_word2vec-----
-  rm -rf *
-  cmake .. -DPADDLE_LIB=${inference_install_dir} \
+if [ $(echo `uname` | grep "Win") != "" ]; then
+    # -----simple_on_word2vec on windows-----
+    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=simple_on_word2vec \
    -DWITH_GPU=$TEST_GPU_CPU \
-   -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-  make -j
-  word2vec_model=$DATA_DIR'/word2vec/word2vec.inference.model'
-  if [ -d $word2vec_model ]; then
-    for use_gpu in $use_gpu_list; do
-      ./simple_on_word2vec \
-        --dirname=$word2vec_model \
-        --use_gpu=$use_gpu
-      if [ $? -ne 0 ]; then
-        echo "simple_on_word2vec demo runs fail."
-        exit 1
-      fi
-    done
+   -DWITH_STATIC_LIB=ON
+    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
+    Release/simple_on_word2vec.exe \
+        --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
+        --use_gpu=False
+    if [ $? -ne 0 ]; then
+        echo "simple_on_word2vec demo runs fail."
+        exit 1
    fi
-D
-  # ---------vis_demo---------
+    # -----vis_demo on windows-----
    rm -rf *
-  cmake .. -DPADDLE_LIB=${inference_install_dir} \
+    cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=vis_demo \
    -DWITH_GPU=$TEST_GPU_CPU \
-   -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-  make -j
-  for use_gpu in $use_gpu_list; do
-    for vis_demo_name in $vis_demo_list; do
-      ./vis_demo \
-        --modeldir=$DATA_DIR/$vis_demo_name/model \
-        --data=$DATA_DIR/$vis_demo_name/data.txt \
-        --refer=$DATA_DIR/$vis_demo_name/result.txt \
-        --use_gpu=$use_gpu
-      if [ $? -ne 0 ]; then
-        echo "vis demo $vis_demo_name runs fail."
-        exit 1
-      fi
-    done
+   -DWITH_STATIC_LIB=ON
+    msbuild /maxcpucount /property:Configuration=Release cpp_inference_demo.sln
+    for vis_demo_name in $vis_demo_list; do
+        Release/vis_demo.exe \
+            --modeldir=$DATA_DIR/$vis_demo_name/model \
+            --data=$DATA_DIR/$vis_demo_name/data.txt \
+            --refer=$DATA_DIR/$vis_demo_name/result.txt \
+            --use_gpu=False
+        if [ $? -ne 0 ]; then
+            echo "vis demo $vis_demo_name runs fail."
+            exit 1
+        fi
+    done
+else
+    for WITH_STATIC_LIB in ON OFF; do
+        # -----simple_on_word2vec on linux/mac-----
+        rm -rf *
+        cmake .. -DPADDLE_LIB=${inference_install_dir} \
+            -DWITH_MKL=$TURN_ON_MKL \
+            -DDEMO_NAME=simple_on_word2vec \
+            -DWITH_GPU=$TEST_GPU_CPU \
+            -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+        make -j$(nproc)
+        word2vec_model=$DATA_DIR'/word2vec/word2vec.inference.model'
+        if [ -d $word2vec_model ]; then
+            for use_gpu in $use_gpu_list; do
+                ./simple_on_word2vec \
+                    --dirname=$DATA_DIR/word2vec/word2vec.inference.model \
+                    --use_gpu=$use_gpu
+                if [ $? -ne 0 ]; then
+                    echo "simple_on_word2vec demo runs fail."
+                    exit 1
+                fi
+            done
+        fi
-  # --------tensorrt mobilenet------
-  if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
+        # ---------vis_demo on linux/mac---------
        rm -rf *
        cmake .. -DPADDLE_LIB=${inference_install_dir} \
            -DWITH_MKL=$TURN_ON_MKL \
-           -DDEMO_NAME=trt_mobilenet_demo \
+           -DDEMO_NAME=vis_demo \
            -DWITH_GPU=$TEST_GPU_CPU \
-           -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
-           -DUSE_TENSORRT=$USE_TENSORRT \
-           -DTENSORRT_INCLUDE_DIR=$TENSORRT_INCLUDE_DIR \
-           -DTENSORRT_LIB_DIR=$TENSORRT_LIB_DIR
-    make -j
-    ./trt_mobilenet_demo \
-      --modeldir=$DATA_DIR/mobilenet/model \
-      --data=$DATA_DIR/mobilenet/data.txt \
-      --refer=$DATA_DIR/mobilenet/result.txt
-    if [ $? -ne 0 ]; then
-      echo "trt demo trt_mobilenet_demo runs fail."
-      exit 1
+           -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+        make -j$(nproc)
+        for use_gpu in $use_gpu_list; do
+            for vis_demo_name in $vis_demo_list; do
+                ./vis_demo \
+                    --modeldir=$DATA_DIR/$vis_demo_name/model \
+                    --data=$DATA_DIR/$vis_demo_name/data.txt \
+                    --refer=$DATA_DIR/$vis_demo_name/result.txt \
+                    --use_gpu=$use_gpu
+                if [ $? -ne 0 ]; then
+                    echo "vis demo $vis_demo_name runs fail."
+                    exit 1
+                fi
+            done
+        done
+        # --------tensorrt mobilenet on linux/mac------
+        if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
+            rm -rf *
+            cmake .. -DPADDLE_LIB=${inference_install_dir} \
+                -DWITH_MKL=$TURN_ON_MKL \
+                -DDEMO_NAME=trt_mobilenet_demo \
+                -DWITH_GPU=$TEST_GPU_CPU \
+                -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
+                -DUSE_TENSORRT=$USE_TENSORRT \
+                -DTENSORRT_INCLUDE_DIR=$TENSORRT_INCLUDE_DIR \
+                -DTENSORRT_LIB_DIR=$TENSORRT_LIB_DIR
+            make -j$(nproc)
+            ./trt_mobilenet_demo \
+                --modeldir=$DATA_DIR/mobilenet/model \
+                --data=$DATA_DIR/mobilenet/data.txt \
+                --refer=$DATA_DIR/mobilenet/result.txt
+            if [ $? -ne 0 ]; then
+                echo "trt demo trt_mobilenet_demo runs fail."
+                exit 1
            fi
+        fi
-  fi
-done
+    done
+fi
set +x