Commit 13603774 — PaddlePaddle / Paddle

Merge pull request #12049 from luotao1/demo

add independent inference demo on teamcity

Authored by Tao Luo on Jul 11, 2018; committed via GitHub on Jul 11, 2018.
Parents: be2d9dc2, 57b30c2b

Showing 5 changed files with 164 additions and 26 deletions (+164 −26):
paddle/contrib/inference/demo/CMakeLists.txt             +0   −2
paddle/contrib/inference/demo_ci/CMakeLists.txt          +77  −0
paddle/contrib/inference/demo_ci/run.sh                  +34  −0
paddle/contrib/inference/demo_ci/simple_on_word2vec.cc   +39  −23
paddle/scripts/paddle_build.sh                           +14  −1
paddle/contrib/inference/demo/CMakeLists.txt

@@ -13,8 +13,6 @@
 # limitations under the License.
 #
-inference_api_test(simple_on_word2vec ARGS test_word2vec)
-
 option(WITH_INFERENCE_DEMO "Compile with Inference demo" OFF)
 if(NOT WITH_INFERENCE_DEMO)
   return()
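The WITH_INFERENCE_DEMO gate kept here still guards the demos that remain in this directory; only the word2vec registration moves to demo_ci, where it is built and run outside the gtest harness. As a one-line sketch, opting back in to the remaining demos from an already-configured Paddle build directory would look like this (the flag name comes from the file above; everything else about the build is assumed):

    cmake .. -DWITH_INFERENCE_DEMO=ON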
paddle/contrib/inference/demo_ci/CMakeLists.txt  (new file, mode 100644, +77 −0)

cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

if(NOT DEFINED PADDLE_LIB)
  message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED DEMO_NAME)
  message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
endif()

option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)

if(WITH_GPU)
  set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
endif()

include_directories("${PADDLE_LIB}")
include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/snappy/include")
include_directories("${PADDLE_LIB}/third_party/install/snappystream/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")

link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")

add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)

if(WITH_MKL)
  include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so
               ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5.so)
  set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
  if(EXISTS ${MKLDNN_PATH})
    include_directories("${MKLDNN_PATH}/include")
    set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
  endif()
else()
  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
endif()

if(WITH_STATIC_LIB)
  set(DEPS
      "-Wl,--whole-archive"
      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
      "-Wl,--no-whole-archive"
      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
else()
  # Note: libpaddle_inference_api.so must put before libpaddle_fluid.so
  set(DEPS
      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so
      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
endif()

set(EXTERNAL_LIB "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
    ${MATH_LIB} ${MKLDNN_LIB}
    glog gflags protobuf snappystream snappy z
    ${EXTERNAL_LIB})
if(WITH_GPU)
  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart.so)
endif()

target_link_libraries(${DEMO_NAME} ${DEPS})
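Because WITH_STATIC_LIB defaults to ON, libpaddle_fluid.a is wrapped in -Wl,--whole-archive so the linker keeps object files whose operators are registered only through static initializers and would otherwise be dropped. As a rough sketch, configuring and building a demo out of tree with this file looks like the following (the PADDLE_LIB path is illustrative; the -D flags are the ones defined above):

    mkdir -p build && cd build
    # Point PADDLE_LIB at a fluid_install_dir produced by the Paddle build.
    cmake .. -DPADDLE_LIB=/path/to/fluid_install_dir \
             -DDEMO_NAME=simple_on_word2vec \
             -DWITH_MKL=ON -DWITH_GPU=OFF -DWITH_STATIC_LIB=ON
    make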
paddle/contrib/inference/demo_ci/run.sh  (new file, mode 100755, +34 −0)

set -x
PADDLE_ROOT=$1
WITH_MKL=$2
WITH_GPU=$3
if [ $3 == "ON" ]; then
  use_gpu_list='true false'
else
  use_gpu_list='false'
fi

mkdir -p build
cd build

for WITH_STATIC_LIB in false; do
  rm -rf *
  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
    -DWITH_MKL=$WITH_MKL \
    -DDEMO_NAME=simple_on_word2vec \
    -DWITH_GPU=$WITH_GPU \
    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
  make
  for use_gpu in $use_gpu_list; do
    ./simple_on_word2vec \
      --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
      --use_gpu=$use_gpu
  done
done
if [ $? -eq 0 ]; then
  exit 0
else
  echo "inference demo runs fail."
  exit 1
fi
set +x
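The script takes three positional arguments — PADDLE_ROOT, WITH_MKL, and WITH_GPU — and adds a GPU pass to use_gpu_list only when the third argument is ON. A sketch of a manual invocation (checkout path illustrative), matching how paddle_build.sh calls it below:

    sh run.sh /path/to/Paddle ON OFF   # PADDLE_ROOT WITH_MKL WITH_GPU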
paddle/contrib/inference/demo/simple_on_word2vec.cc → paddle/contrib/inference/demo_ci/simple_on_word2vec.cc  (+39 −23)

@@ -16,21 +16,27 @@ limitations under the License. */
  * This file contains a simple demo for how to take a model for inference.
  */
 #include <gflags/gflags.h>
 #include <glog/logging.h>
-#include <gtest/gtest.h>
 #include <memory>
 #include <thread>
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/platform/enforce.h"
+
+DEFINE_string(dirname, "", "Directory of the inference model.");
+DEFINE_bool(use_gpu, false, "Whether use gpu.");

 namespace paddle {
 namespace demo {

-DEFINE_string(dirname, "", "Directory of the inference model.");
-
 void Main(bool use_gpu) {
   //# 1. Create PaddlePredictor with a config.
   NativeConfig config;
-  config.model_dir = FLAGS_dirname + "word2vec.inference.model";
+  if (FLAGS_dirname.empty()) {
+    LOG(INFO) << "Usage: ./simple_on_word2vec --dirname=path/to/your/model";
+    exit(1);
+  }
+  config.model_dir = FLAGS_dirname;
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;

@@ -54,12 +60,16 @@ void Main(bool use_gpu) {
   CHECK(predictor->Run(slots, &outputs));

   //# 4. Get output.
-  ASSERT_EQ(outputs.size(), 1UL);
-  LOG(INFO) << "output buffer size: " << outputs.front().data.length();
+  PADDLE_ENFORCE(outputs.size(), 1UL);
+  // Check the output buffer size and result of each tid.
+  PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+  float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
   const size_t num_elements = outputs.front().data.length() / sizeof(float);
   // The outputs' buffers are in CPU memory.
   for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-    LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+    PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i], result[i]);
   }
 }

@@ -68,7 +78,7 @@ void MainThreads(int num_threads, bool use_gpu) {
   // Multi-threads only support on CPU
   // 0. Create PaddlePredictor with a config.
   NativeConfig config;
-  config.model_dir = FLAGS_dirname + "word2vec.inference.model";
+  config.model_dir = FLAGS_dirname;
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;

@@ -94,14 +104,17 @@ void MainThreads(int num_threads, bool use_gpu) {
       CHECK(predictor->Run(inputs, &outputs));

       // 4. Get output.
-      ASSERT_EQ(outputs.size(), 1UL);
-      LOG(INFO) << "TID: " << tid << ", "
-                << "output buffer size: " << outputs.front().data.length();
+      PADDLE_ENFORCE(outputs.size(), 1UL);
+      // Check the output buffer size and result of each tid.
+      PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+      float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
       const size_t num_elements = outputs.front().data.length() / sizeof(float);
       // The outputs' buffers are in CPU memory.
       for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-        LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+        PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i], result[i]);
       }
     }
   });

@@ -111,15 +124,18 @@
   }
 }

-TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1, false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4, false /*use_gpu*/); }
-
-#ifdef PADDLE_WITH_CUDA
-TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_gpu_1) { MainThreads(1, true /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_gpu_4) { MainThreads(4, true /*use_gpu*/); }
-#endif
-
 }  // namespace demo
 }  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main(false /* use_gpu*/);
+  paddle::demo::MainThreads(1, false /* use_gpu*/);
+  paddle::demo::MainThreads(4, false /* use_gpu*/);
+  if (FLAGS_use_gpu) {
+    paddle::demo::Main(true /*use_gpu*/);
+    paddle::demo::MainThreads(1, true /*use_gpu*/);
+    paddle::demo::MainThreads(4, true /*use_gpu*/);
+  }
+  return 0;
+}
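With the gtest cases replaced by a plain main(), the demo now runs as a standalone binary driven by the two gflags defined at the top of the file. A sketch of invoking it directly (model path illustrative; run.sh points --dirname at the word2vec model generated under build/python/paddle/fluid/tests/book):

    ./simple_on_word2vec \
        --dirname=/path/to/word2vec.inference.model \
        --use_gpu=false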
paddle/scripts/paddle_build.sh  (+14 −1)

@@ -510,11 +510,23 @@ function gen_fluid_inference_lib() {
 EOF
         make -j `nproc` inference_lib_dist
         cd ${PADDLE_ROOT}/build
-        mv fluid_install_dir fluid
+        cp -r fluid_install_dir fluid
         tar -cf fluid.tgz fluid
     fi
 }

+function test_fluid_inference_lib() {
+    if [ ${WITH_C_API:-OFF} == "OFF" ] ; then
+        cat <<EOF
+    ========================================
+    Testing fluid inference library ...
+    ========================================
+EOF
+        cd ${PADDLE_ROOT}/paddle/contrib/inference/demo_ci
+        sh run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
+    fi
+}
+
 function main() {
     set -e
     local CMD=$1

@@ -568,6 +580,7 @@ function main() {
         run_test
         gen_capi_package
         gen_fluid_inference_lib
+        test_fluid_inference_lib
         ;;
     *)
         print_usage
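test_fluid_inference_lib relies on Bash default expansion, so CI falls back to WITH_MKL=ON and WITH_GPU=OFF when those variables are unset. A quick illustration of the ${VAR:-default} idiom it uses:

    unset WITH_GPU
    echo "${WITH_GPU:-OFF}"   # prints OFF; WITH_GPU itself remains unset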