Commit f8a74ccc
Authored on Jul 10, 2018 by Luo Tao

add shared library test

Parent commit: 2d9bd762
Showing 3 changed files with 45 additions and 32 deletions (+45 -32):

paddle/contrib/inference/demo_ci/CMakeLists.txt          +16 -12
paddle/contrib/inference/demo_ci/run.sh                  +17 -14
paddle/contrib/inference/demo_ci/simple_on_word2vec.cc   +12 -6
paddle/contrib/inference/demo_ci/CMakeLists.txt
@@ -11,9 +11,9 @@ if(NOT DEFINED DEMO_NAME)
   message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
 endif()

-option(WITH_MKLDNN "Compile PaddlePaddle with MKLDNN" OFF)
-option(WITH_MKL "Compile PaddlePaddle with MKL support, default use openblas." ON)
-option(WITH_GPU "Compile PaddlePaddle with GPU, default use CPU." OFF)
+option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
+option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
+option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)

 if(WITH_GPU)
   set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
@@ -52,17 +52,21 @@ else()
   set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
 endif()

-set(ARCHIVE_START "-Wl,--whole-archive")
-set(ARCHIVE_END "-Wl,--no-whole-archive")
+if(WITH_STATIC_LIB)
+  set(DEPS
+      "-Wl,--whole-archive" ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
+      "-Wl,--no-whole-archive" ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
+else()
+  set(DEPS
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so)
+endif()
 set(EXTERNAL_LIB "-lrt -ldl -lpthread")

-set(DEPS ${ARCHIVE_START}
-    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
-    ${ARCHIVE_END}
-    ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
-    ${MATH_LIB}
-    ${MKLDNN_LIB}
+set(DEPS ${DEPS}
+    ${MATH_LIB} ${MKLDNN_LIB}
     glog gflags protobuf snappystream snappy z
     ${EXTERNAL_LIB})

 if(WITH_GPU)
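Taken together, the new WITH_STATIC_LIB option makes the demo link either the static archives (wrapped in -Wl,--whole-archive) or the libpaddle_fluid.so / libpaddle_inference_api.so shared objects. A rough sketch of configuring the demo by hand against the shared libraries, assuming a Paddle build has already produced build/fluid_install_dir (the /path/to/Paddle placeholder is not from this commit):

# Sketch only: build the demo against the shared libraries.
# /path/to/Paddle is a placeholder; the -D flags mirror the options defined above.
mkdir -p build && cd build
cmake .. -DPADDLE_LIB=/path/to/Paddle/build/fluid_install_dir/ \
  -DDEMO_NAME=simple_on_word2vec \
  -DWITH_MKL=ON \
  -DWITH_GPU=OFF \
  -DWITH_STATIC_LIB=OFF
make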
paddle/contrib/inference/demo_ci/run.sh
@@ -2,25 +2,28 @@ set -x
 PADDLE_ROOT=$1
 WITH_MKL=$2
 WITH_GPU=$3
-mkdir -p build
-cd build
-rm -rf *
-cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
-  -DWITH_MKL=$WITH_MKL \
-  -DDEMO_NAME=simple_on_word2vec \
-  -DWITH_GPU=$WITH_GPU
-make
 if [ $3 == "ON" ]; then
   use_gpu_list='true false'
 else
   use_gpu_list='false'
 fi
-for use_gpu in $use_gpu_list; do
-  ./simple_on_word2vec \
-    --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
-    --use_gpu=$use_gpu
+mkdir -p build
+cd build
+for WITH_STATIC_LIB in true false; do
+  rm -rf *
+  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    -DWITH_MKL=$WITH_MKL \
+    -DDEMO_NAME=simple_on_word2vec \
+    -DWITH_GPU=$WITH_GPU \
+    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+  make
+  for use_gpu in $use_gpu_list; do
+    ./simple_on_word2vec \
+      --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
+      --use_gpu=$use_gpu
+  done
 done
 if [ $? -eq 0 ]; then
   exit 0
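With this change run.sh rebuilds and reruns the demo once per WITH_STATIC_LIB value, and within each build it still loops over use_gpu. The script reads its settings positionally ($1 = PADDLE_ROOT, $2 = WITH_MKL, $3 = WITH_GPU), so a manual invocation would look roughly like the sketch below; the Paddle checkout path is a placeholder, not something this commit defines:

# Sketch only: arguments are PADDLE_ROOT, WITH_MKL, WITH_GPU, in that order.
bash paddle/contrib/inference/demo_ci/run.sh /path/to/Paddle ON OFF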
paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
@@ -61,11 +61,15 @@ void Main(bool use_gpu) {
     //# 4. Get output.
     PADDLE_ENFORCE(outputs.size(), 1UL);
-    LOG(INFO) << "output buffer size: " << outputs.front().data.length();
+    // Check the output buffer size and result of each tid.
+    PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+    float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
+                       0.000932706};
     const size_t num_elements = outputs.front().data.length() / sizeof(float);
     // The outputs' buffers are in CPU memory.
     for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-      LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+      PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
+                     result[i]);
     }
   }
 }
@@ -101,13 +105,16 @@ void MainThreads(int num_threads, bool use_gpu) {
       // 4. Get output.
       PADDLE_ENFORCE(outputs.size(), 1UL);
-      LOG(INFO) << "TID: " << tid << ", "
-                << "output buffer size: " << outputs.front().data.length();
+      // Check the output buffer size and result of each tid.
+      PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+      float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
+                         0.000932706};
       const size_t num_elements = outputs.front().data.length() / sizeof(float);
       // The outputs' buffers are in CPU memory.
       for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-        LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+        PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
+                       result[i]);
       }
     }
   });
@@ -126,7 +133,6 @@ int main(int argc, char** argv) {
   paddle::demo::MainThreads(1, false /* use_gpu*/);
   paddle::demo::MainThreads(4, false /* use_gpu*/);
   if (FLAGS_use_gpu) {
-    LOG(INFO) << "use_gpu=true";
     paddle::demo::Main(true /*use_gpu*/);
     paddle::demo::MainThreads(1, true /*use_gpu*/);
     paddle::demo::MainThreads(4, true /*use_gpu*/);
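Since the point of the commit is to exercise the shared-library build alongside the static one, a generic way to confirm which variant a compiled demo actually linked (not part of this commit) is to inspect the binary with ldd:

# Sketch only, not part of this commit: list the demo's Paddle shared-object
# dependencies; an empty match suggests the WITH_STATIC_LIB=ON build.
ldd build/simple_on_word2vec | grep -E 'libpaddle_fluid|libpaddle_inference_api' \
  || echo "no Paddle shared objects listed; likely the static-library build"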