PaddlePaddle / Paddle, commit 34ed7d13 (unverified)
Authored by Tao Luo on Oct 18, 2018; committed via GitHub on Oct 18, 2018.

    Merge pull request #13924 from luotao1/clean_inference_lib

    Clean inference lib

Parents: 078223b3, 6ea9d1b5
Showing 11 changed files with 73 additions and 43 deletions (+73 -43).
Files changed:

    CMakeLists.txt                                             +3   -0
    cmake/inference_lib.cmake                                  +37  -17
    paddle/fluid/inference/api/demo_ci/CMakeLists.txt          +3   -3
    paddle/fluid/inference/api/demo_ci/run.sh                  +5   -4
    paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc   +3   -5
    paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc   +1   -1
    paddle/fluid/inference/api/demo_ci/utils.h                 +1   -1
    paddle/fluid/inference/api/demo_ci/vis_demo.cc             +12  -7
    paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc   +1   -2
    paddle/fluid/inference/tests/api/tester_helper.h           +1   -2
    paddle/scripts/paddle_build.sh                             +6   -1
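Two themes run through the diffs below: demo and test code now consumes the flattened fluid_inference_install_dir layout (paddle/include for the header, paddle/lib for libpaddle_fluid), and the predictor factory drops its second template parameter, so the engine kind is deduced from the config type. For orientation, the recurring C++ change looks like this (a minimal sketch; the surrounding setup is elided):

    // Before this commit: the engine kind had to be named explicitly.
    auto predictor =
        CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

    // After this commit: the engine is deduced from the config type.
    auto predictor = CreatePaddlePredictor<NativeConfig>(config);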
CMakeLists.txt

@@ -127,6 +127,9 @@ set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
 set(FLUID_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_install_dir" CACHE STRING
   "A path setting fluid shared and static libraries")
 
+set(FLUID_INFERENCE_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_install_dir" CACHE STRING
+  "A path setting fluid inference shared and static libraries")
+
 if(WITH_C_API AND WITH_PYTHON)
   message(WARNING "It is suggest not embedded a python interpreter in Paddle "
     "when using C-API. It will give an unpredictable behavior when using a "
cmake/inference_lib.cmake

@@ -150,16 +150,16 @@ if (WITH_ANAKIN AND WITH_MKL)
         SRCS ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api
              ${ANAKIN_INSTALL_DIR} # anakin release
-        DSTS ${dst_dir}/inference/anakin ${FLUID_INSTALL_DIR}/third_party/install/anakin)
+        DSTS ${FLUID_INSTALL_DIR}/third_party/install/anakin ${FLUID_INSTALL_DIR}/third_party/install/anakin)
     list(APPEND inference_deps anakin_inference_lib)
 endif()
 
 set(module "inference")
 copy(inference_lib DEPS ${inference_deps}
   SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*
-       ${src_dir}/${module}/api/paddle_inference_api.h ${src_dir}/${module}/api/demo_ci
+       ${src_dir}/${module}/api/paddle_inference_api.h
        ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h
-  DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
+  DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
 )
 
 set(module "platform")

@@ -188,18 +188,38 @@ copy(cmake_cache
 # This command generates a complete fluid library for both train and inference
 add_custom_target(fluid_lib_dist DEPENDS ${fluid_lib_dist_dep})
 
+# Following commands generate a inference-only fluid library
+# third_party, version.txt and CMakeCache.txt are the same position with ${FLUID_INSTALL_DIR}
+copy(third_party DEPS fluid_lib_dist
+  SRCS ${FLUID_INSTALL_DIR}/third_party ${FLUID_INSTALL_DIR}/CMakeCache.txt
+  DSTS ${FLUID_INFERENCE_INSTALL_DIR} ${FLUID_INFERENCE_INSTALL_DIR}
+)
+
+# only need libpaddle_fluid.so/a and paddle_inference_api.h for inference-only library
+copy(inference_api_lib DEPS fluid_lib_dist
+  SRCS ${FLUID_INSTALL_DIR}/paddle/fluid/inference/libpaddle_fluid.*
+       ${FLUID_INSTALL_DIR}/paddle/fluid/inference/paddle_inference_api.h
+  DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include
+)
+
+add_custom_target(inference_lib_dist DEPENDS third_party inference_api_lib)
+
 # paddle fluid version
-execute_process(
-  COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
-  WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
-  OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
-set(version_file ${FLUID_INSTALL_DIR}/version.txt)
-file(WRITE ${version_file}
-  "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n"
-  "WITH_MKL: ${WITH_MKL}\n"
-  "WITH_GPU: ${WITH_GPU}\n")
-if(WITH_GPU)
-  file(APPEND ${version_file}
-    "CUDA version: ${CUDA_VERSION}\n"
-    "CUDNN version: v${CUDNN_MAJOR_VERSION}\n")
-endif()
+function(version version_file)
+  execute_process(
+    COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
+    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
+    OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
+  file(WRITE ${version_file}
+    "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n"
+    "WITH_MKL: ${WITH_MKL}\n"
+    "WITH_MKLDNN: ${WITH_MKLDNN}\n"
+    "WITH_GPU: ${WITH_GPU}\n")
+  if(WITH_GPU)
+    file(APPEND ${version_file}
+      "CUDA version: ${CUDA_VERSION}\n"
+      "CUDNN version: v${CUDNN_MAJOR_VERSION}\n")
+  endif()
+endfunction()
+version(${FLUID_INSTALL_DIR}/version.txt)
+version(${FLUID_INFERENCE_INSTALL_DIR}/version.txt)
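Taken together, the two copy() rules and the new inference_lib_dist target should produce an install tree roughly like this (a sketch inferred from the SRCS/DSTS pairs above, not an exhaustive listing):

    fluid_inference_install_dir/
        CMakeCache.txt
        version.txt        <- written by version(), now recording WITH_MKLDNN too
        paddle/
            include/paddle_inference_api.h
            lib/libpaddle_fluid.so (and .a)
        third_party/       <- same layout as under ${FLUID_INSTALL_DIR}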
paddle/fluid/inference/api/demo_ci/CMakeLists.txt

@@ -77,7 +77,7 @@ endif(NOT WIN32)
 link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
 link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
 link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
-link_directories("${PADDLE_LIB}/paddle/fluid/inference")
+link_directories("${PADDLE_LIB}/paddle/lib")
 
 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)

@@ -97,10 +97,10 @@ endif()
 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
 if (NOT WIN32)
paddle/fluid/inference/api/demo_ci/run.sh

@@ -5,12 +5,13 @@ TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
 DATA_DIR=$4 # dataset
 TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, defalut to /usr/local/TensorRT/include
 TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
+inference_install_dir=${PADDLE_ROOT}/build/fluid_inference_install_dir
 
 cd `dirname $0`
 current_dir=`pwd`
 if [ $2 == ON ]; then
   # You can export yourself if move the install path
-  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
   export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
 fi
 if [ $3 == ON ]; then

@@ -55,7 +56,7 @@ cd build
 for WITH_STATIC_LIB in ON OFF; do
   # -----simple_on_word2vec-----
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
     -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=simple_on_word2vec \
     -DWITH_GPU=$TEST_GPU_CPU \

@@ -75,7 +76,7 @@ for WITH_STATIC_LIB in ON OFF; do
   fi
   # ---------vis_demo---------
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
     -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=vis_demo \
     -DWITH_GPU=$TEST_GPU_CPU \

@@ -98,7 +99,7 @@ for WITH_STATIC_LIB in ON OFF; do
   # --------tensorrt mobilenet------
   if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
     rm -rf *
-    cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    cmake .. -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=trt_mobilenet_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc

@@ -23,7 +23,7 @@ limitations under the License. */
 #include <memory>
 #include <thread>  //NOLINT
 
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
 DEFINE_bool(use_gpu, false, "Whether use gpu.");

@@ -42,8 +42,7 @@ void Main(bool use_gpu) {
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;
-  auto predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  auto predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   for (int batch_id = 0; batch_id < 3; batch_id++) {
     //# 2. Prepare input.

@@ -85,8 +84,7 @@ void MainThreads(int num_threads, bool use_gpu) {
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;
-  auto main_predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
   std::vector<std::thread> threads;
   for (int tid = 0; tid < num_threads; ++tid) {
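With the two edits above, a standalone demo includes the header from the flattened tree and no longer names the engine kind. A minimal end-to-end sketch under those assumptions (the model path and single-feed input are hypothetical; the real word2vec demo feeds four word-id slots and loops over batches):

    #include <cstdint>
    #include <vector>
    
    // Header now lives in the flattened install tree; pass the install root
    // (fluid_inference_install_dir) as an include directory so this resolves.
    #include "paddle/include/paddle_inference_api.h"
    
    int main() {
      paddle::NativeConfig config;
      config.model_dir = "assets/word2vec.inference.model";  // hypothetical path
      config.use_gpu = false;
    
      // Engine kind is deduced from the config type after this commit.
      auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
    
      // A single int64 word id as input (illustrative only).
      std::vector<int64_t> ids = {1};
      paddle::PaddleTensor input;
      input.shape = {1, 1};
      input.data = paddle::PaddleBuf(ids.data(), ids.size() * sizeof(int64_t));
      input.dtype = paddle::PaddleDType::INT64;
    
      std::vector<paddle::PaddleTensor> output;
      predictor->Run({input}, &output, 1);
      return output.empty() ? 1 : 0;
    }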
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc

@@ -18,7 +18,7 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
 
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT
 
 DECLARE_double(fraction_of_gpu_memory_to_use);
 DEFINE_string(modeldir, "", "Directory of the inference model.");
paddle/fluid/inference/api/demo_ci/utils.h

@@ -18,7 +18,7 @@
 #include <iostream>
 #include <string>
 #include <vector>
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"
 
 namespace paddle {
 namespace demo {
paddle/fluid/inference/api/demo_ci/vis_demo.cc

@@ -18,7 +18,7 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);

@@ -34,12 +34,13 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
 namespace paddle {
 namespace demo {
 
+using contrib::AnalysisConfig;
 /*
- * Use the native fluid engine to inference the demo.
+ * Use the native and analysis fluid engine to inference the demo.
  */
 void Main(bool use_gpu) {
-  std::unique_ptr<PaddlePredictor> predictor;
-  NativeConfig config;
+  std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
+  AnalysisConfig config;
   config.param_file = FLAGS_modeldir + "/__params__";
   config.prog_file = FLAGS_modeldir + "/__model__";
   config.use_gpu = use_gpu;

@@ -49,8 +50,8 @@ void Main(bool use_gpu) {
   }
   VLOG(3) << "init predictor";
-  predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  predictor = CreatePaddlePredictor<NativeConfig>(config);
+  analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   VLOG(3) << "begin to process data";
   // Just a single batch of data.

@@ -68,7 +69,7 @@ void Main(bool use_gpu) {
   input.dtype = PaddleDType::FLOAT32;
 
   VLOG(3) << "run executor";
-  std::vector<PaddleTensor> output;
+  std::vector<PaddleTensor> output, analysis_output;
   predictor->Run({input}, &output, 1);
 
   VLOG(3) << "output.size " << output.size();

@@ -77,6 +78,10 @@ void Main(bool use_gpu) {
     // compare with reference result
     CheckOutput(FLAGS_refer, tensor);
+
+    // the analysis_output has some diff with native_output,
+    // TODO(luotao): add CheckOutput for analysis_output later.
+    analysis_predictor->Run({input}, &analysis_output, 1);
 }
 
 }  // namespace demo
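Because contrib::AnalysisConfig derives from NativeConfig, one config object can drive both factories, which is how vis_demo now compares the two engines. A condensed sketch of that pattern (the helper name RunBoth is ours, not the demo's):

    #include <vector>
    
    #include "paddle/include/paddle_inference_api.h"
    
    // Build both predictors from a single AnalysisConfig; it binds to
    // CreatePaddlePredictor<NativeConfig> through its NativeConfig base.
    void RunBoth(const paddle::contrib::AnalysisConfig& config,
                 const paddle::PaddleTensor& input) {
      auto native = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
      auto analysis =
          paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);
    
      std::vector<paddle::PaddleTensor> native_out, analysis_out;
      native->Run({input}, &native_out, 1);
      analysis->Run({input}, &analysis_out, 1);
      // Per the commit's TODO, the analysis output still differs slightly from
      // the native output, so it is run but not yet checked against a reference.
    }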
paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc

@@ -311,8 +311,7 @@ TEST(Analyzer_rnn1, ZeroCopy) {
   auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
   config.use_feed_fetch_ops = true;
-  auto native_predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  auto native_predictor = CreatePaddlePredictor<NativeConfig>(config);
   config.use_feed_fetch_ops = true;  // the analysis predictor needs feed/fetch.
   auto analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
paddle/fluid/inference/tests/api/tester_helper.h

@@ -79,8 +79,7 @@ std::unique_ptr<PaddlePredictor> CreateTestPredictor(
   if (use_analysis) {
     return CreatePaddlePredictor<contrib::AnalysisConfig>(config);
   } else {
-    return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
-        config);
+    return CreatePaddlePredictor<NativeConfig>(config);
   }
 }
paddle/scripts/paddle_build.sh

@@ -661,6 +661,7 @@ function gen_fluid_lib() {
 EOF
       cmake .. -DWITH_DISTRIBUTE=OFF
       make -j `nproc` fluid_lib_dist
+      make -j `nproc` inference_lib_dist
     fi
 }

@@ -674,6 +675,8 @@ EOF
       cd ${PADDLE_ROOT}/build
       cp -r fluid_install_dir fluid
       tar -czf fluid.tgz fluid
+      cp -r fluid_inference_install_dir fluid_inference
+      tar -czf fluid_inference.tgz fluid_inference
     fi
 }

@@ -685,7 +688,9 @@ function test_fluid_lib() {
 ========================================
 EOF
     cd ${PADDLE_ROOT}/paddle/fluid/inference/api/demo_ci
-    ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
+    ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} \
+             ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} \
+             ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
     ./clean.sh
     fi
 }