机器未来 / Paddle (forked from PaddlePaddle / Paddle) · Commit 746a62eb
Commit 746a62eb
Authored Jun 07, 2018 by tensor-tang

add gpu tests

Parent: 35e820dc
2 changed files with 39 additions and 10 deletions (+39, -10):

paddle/contrib/inference/demo/simple_on_word2vec.cc         +6  -4
paddle/contrib/inference/test_paddle_inference_api_impl.cc  +33 -6
paddle/contrib/inference/demo/simple_on_word2vec.cc

@@ -66,12 +66,12 @@ void Main(bool use_gpu) {
   }
 }

-void MainThreads(int num_threads) {
+void MainThreads(int num_threads, bool use_gpu) {
   // Multi-threads only support on CPU
   // 0. Create PaddlePredictor with a config.
   NativeConfig config;
   config.model_dir = FLAGS_dirname + "word2vec.inference.model";
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
@@ -113,11 +113,13 @@ void MainThreads(int num_threads) {
 }

 TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1); }
-TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4); }
+TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1, false /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4, false /*use_gpu*/); }

 #ifdef PADDLE_WITH_CUDA
 TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_1) { MainThreads(1, true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_4) { MainThreads(4, true /*use_gpu*/); }
 #endif

 }  // namespace demo
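For readers following along, the pattern the demo adopts here is to parameterise each test body on a single use_gpu flag and to compile the GPU variants only when CUDA support is built in. Below is a minimal, self-contained sketch of that pattern; it assumes only GoogleTest and C++11 threads, and RunDemoThreads is a hypothetical stand-in for the demo's MainThreads(num_threads, use_gpu) that does not call the real Paddle inference API.

// Sketch only: mirrors the CPU/GPU test split above without the Paddle API.
#include <gtest/gtest.h>

#include <thread>
#include <vector>

// Hypothetical stand-in for the demo's MainThreads(num_threads, use_gpu).
static void RunDemoThreads(int num_threads, bool use_gpu) {
  std::vector<std::thread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back([use_gpu] {
      // In the real demo each thread builds a NativeConfig with
      // config.use_gpu = use_gpu, creates a predictor, runs it, and
      // checks the outputs; here the flag is only recorded.
      (void)use_gpu;
    });
  }
  for (auto& t : threads) t.join();
  EXPECT_EQ(static_cast<int>(threads.size()), num_threads);
}

TEST(demo_sketch, cpu_1) { RunDemoThreads(1, false /*use_gpu*/); }
TEST(demo_sketch, cpu_4) { RunDemoThreads(4, false /*use_gpu*/); }

// GPU cases are compiled only when CUDA is available, as in the commit.
#ifdef PADDLE_WITH_CUDA
TEST(demo_sketch, gpu_1) { RunDemoThreads(1, true /*use_gpu*/); }
TEST(demo_sketch, gpu_4) { RunDemoThreads(4, true /*use_gpu*/); }
#endif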
paddle/contrib/inference/test_paddle_inference_api_impl.cc

@@ -56,9 +56,10 @@ NativeConfig GetConfig() {
   return config;
 }

-TEST(paddle_inference_api_impl, word2vec) {
+void MainWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
+  config.use_gpu = use_gpu;

   framework::LoDTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -106,11 +107,12 @@ TEST(paddle_inference_api_impl, word2vec) {
   free(outputs[0].data.data);
 }

-TEST(paddle_inference_api_impl, image_classification) {
+void MainImageClassification(bool use_gpu) {
   int batch_size = 2;
   bool use_mkldnn = false;
   bool repeat = false;
   NativeConfig config = GetConfig();
+  config.use_gpu = use_gpu;
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
@@ -155,9 +157,9 @@ TEST(paddle_inference_api_impl, image_classification) {
   free(data);
 }

-TEST(paddle_inference_api_native_multithreads, word2vec) {
+void MainThreadsWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);

   // prepare inputs data and reference results
@@ -216,11 +218,11 @@ TEST(paddle_inference_api_native_multithreads, word2vec) {
   }
 }

-TEST(paddle_inference_api_native_multithreads, image_classification) {
+void MainThreadsImageClassification(bool use_gpu) {
   constexpr int num_jobs = 4;  // each job run 1 batch
   constexpr int batch_size = 1;
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
@@ -269,4 +271,29 @@ TEST(paddle_inference_api_native_multithreads, image_classification) {
   }
 }

+TEST(inference_api_native, word2vec_cpu) { MainWord2Vec(false /*use_gpu*/); }
+TEST(inference_api_native, word2vec_cpu_threads) {
+  MainThreadsWord2Vec(false /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_cpu) {
+  MainThreadsImageClassification(false /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_cpu_threads) {
+  MainThreadsImageClassification(false /*use_gpu*/);
+}
+
+#ifdef PADDLE_WITH_CUDA
+TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); }
+TEST(inference_api_native, word2vec_gpu_threads) {
+  MainThreadsWord2Vec(true /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_gpu) {
+  MainThreadsImageClassification(true /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_gpu_threads) {
+  MainThreadsImageClassification(true /*use_gpu*/);
+}
+#endif
+
 }  // namespace paddle
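Once built, a test binary containing both CPU and GPU cases can be narrowed to just the newly added GPU cases with GoogleTest's name filter. A small sketch of the programmatic form follows; it assumes a plain GoogleTest main(), whereas the actual Paddle test targets may wire up their own main() and flag parsing, and passing --gtest_filter='*gpu*' on the command line is equivalent.

// Sketch only: run just the *_gpu* test cases from a GoogleTest binary.
#include <gtest/gtest.h>

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Equivalent to passing --gtest_filter=*gpu* on the command line.
  ::testing::GTEST_FLAG(filter) = "*gpu*";
  return RUN_ALL_TESTS();
}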