BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 6b72a3e1
Authored on Jun 22, 2019 by tensor-tang

enable 4 models speed test and threads test

Parent: 7cf536f0
Showing 5 changed files with 100 additions and 16 deletions (+100 −16)
paddle/fluid/lite/api/inceptionv4_test.cc  +16 −4
paddle/fluid/lite/api/mobilenetv1_test.cc  +16 −4
paddle/fluid/lite/api/mobilenetv2_test.cc  +16 −4
paddle/fluid/lite/api/resnet50_test.cc     +16 −4
paddle/fluid/lite/api/test_helper.h        +36 −0
paddle/fluid/lite/api/inceptionv4_test.cc
@@ -16,20 +16,19 @@
 #include <gtest/gtest.h>
 #include <vector>
 #include "paddle/fluid/lite/api/cxx_api.h"
+#include "paddle/fluid/lite/api/test_helper.h"
 #include "paddle/fluid/lite/core/mir/use_passes.h"
 #include "paddle/fluid/lite/core/op_registry.h"
 #include "paddle/fluid/lite/kernels/use_kernels.h"
 #include "paddle/fluid/lite/operators/use_ops.h"
 
-// for eval
-DEFINE_string(model_dir, "", "");
-
 namespace paddle {
 namespace lite {
 
 #ifdef LITE_WITH_ARM
 TEST(InceptionV4, test) {
   DeviceInfo::Init();
+  DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads);
   lite::Predictor predictor;
   std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                    Place{TARGET(kARM), PRECISION(kFloat)}});
@@ -44,7 +43,20 @@ TEST(InceptionV4, test) {
     data[i] = 1;
   }
 
-  predictor.Run();
+  for (int i = 0; i < FLAGS_warmup; ++i) {
+    predictor.Run();
+  }
+
+  auto start = GetCurrentUS();
+  for (int i = 0; i < FLAGS_repeats; ++i) {
+    predictor.Run();
+  }
+
+  LOG(INFO) << "================== Speed Report ===================";
+  LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
+            << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
+            << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
+            << " ms in average.";
 
   auto* out = predictor.GetOutput(0);
   std::vector<float> results({0.00078033, 0.00083865, 0.00060029, 0.00057083,
...
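All four test diffs apply the same pattern: an untimed warmup loop of FLAGS_warmup runs, a timed loop of FLAGS_repeats runs, and an averaged latency report, with the thread count fed to DeviceInfo::Global().SetRunMode(). The sketch below is not part of the commit; it only isolates that pattern as a reusable helper on top of the GetCurrentUS() and gflags added in test_helper.h at the end of this commit. The BenchmarkMs name and the std::function workload are illustrative assumptions; the real tests inline this code around predictor.Run().

// Sketch only (not in the commit): the warmup/repeat timing pattern shared by
// the four tests, factored into a helper. BenchmarkMs and the std::function
// workload are hypothetical stand-ins for predictor.Run().
#include <functional>

#include "paddle/fluid/lite/api/test_helper.h"

namespace paddle {
namespace lite {

inline double BenchmarkMs(const std::function<void()>& run_once) {
  for (int i = 0; i < FLAGS_warmup; ++i) {
    run_once();  // untimed warmup iterations
  }
  auto start = GetCurrentUS();
  for (int i = 0; i < FLAGS_repeats; ++i) {
    run_once();  // timed iterations
  }
  // Average latency in milliseconds, matching the "Speed Report" log above.
  return (GetCurrentUS() - start) / FLAGS_repeats / 1000.0;
}

}  // namespace lite
}  // namespace paddle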
paddle/fluid/lite/api/mobilenetv1_test.cc
@@ -16,20 +16,19 @@
 #include <gtest/gtest.h>
 #include <vector>
 #include "paddle/fluid/lite/api/cxx_api.h"
+#include "paddle/fluid/lite/api/test_helper.h"
 #include "paddle/fluid/lite/core/mir/use_passes.h"
 #include "paddle/fluid/lite/core/op_registry.h"
 #include "paddle/fluid/lite/kernels/use_kernels.h"
 #include "paddle/fluid/lite/operators/use_ops.h"
 
-// for eval
-DEFINE_string(model_dir, "", "");
-
 namespace paddle {
 namespace lite {
 
 #ifdef LITE_WITH_ARM
 TEST(MobileNetV1, test) {
   DeviceInfo::Init();
+  DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads);
   lite::Predictor predictor;
   std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                    Place{TARGET(kARM), PRECISION(kFloat)}});
@@ -44,7 +43,20 @@ TEST(MobileNetV1, test) {
     data[i] = 1;
   }
 
-  predictor.Run();
+  for (int i = 0; i < FLAGS_warmup; ++i) {
+    predictor.Run();
+  }
+
+  auto start = GetCurrentUS();
+  for (int i = 0; i < FLAGS_repeats; ++i) {
+    predictor.Run();
+  }
+
+  LOG(INFO) << "================== Speed Report ===================";
+  LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
+            << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
+            << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
+            << " ms in average.";
 
   auto* out = predictor.GetOutput(0);
   std::vector<float> results({1.91308980e-04, 5.92055148e-04, 1.12303176e-04,
...
paddle/fluid/lite/api/mobilenetv2_test.cc
@@ -16,20 +16,19 @@
 #include <gtest/gtest.h>
 #include <vector>
 #include "paddle/fluid/lite/api/cxx_api.h"
+#include "paddle/fluid/lite/api/test_helper.h"
 #include "paddle/fluid/lite/core/mir/use_passes.h"
 #include "paddle/fluid/lite/core/op_registry.h"
 #include "paddle/fluid/lite/kernels/use_kernels.h"
 #include "paddle/fluid/lite/operators/use_ops.h"
 
-// for eval
-DEFINE_string(model_dir, "", "");
-
 namespace paddle {
 namespace lite {
 
 #ifdef LITE_WITH_ARM
 TEST(MobileNetV2, test) {
   DeviceInfo::Init();
+  DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads);
   lite::Predictor predictor;
   std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                    Place{TARGET(kARM), PRECISION(kFloat)}});
@@ -44,7 +43,20 @@ TEST(MobileNetV2, test) {
     data[i] = 1;
   }
 
-  predictor.Run();
+  for (int i = 0; i < FLAGS_warmup; ++i) {
+    predictor.Run();
+  }
+
+  auto start = GetCurrentUS();
+  for (int i = 0; i < FLAGS_repeats; ++i) {
+    predictor.Run();
+  }
+
+  LOG(INFO) << "================== Speed Report ===================";
+  LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
+            << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
+            << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
+            << " ms in average.";
 
   auto* out = predictor.GetOutput(0);
   std::vector<float> results({0.00097802, 0.00099822, 0.00103093, 0.00100121,
...
paddle/fluid/lite/api/resnet50_test.cc
@@ -16,20 +16,19 @@
 #include <gtest/gtest.h>
 #include <vector>
 #include "paddle/fluid/lite/api/cxx_api.h"
+#include "paddle/fluid/lite/api/test_helper.h"
 #include "paddle/fluid/lite/core/mir/use_passes.h"
 #include "paddle/fluid/lite/core/op_registry.h"
 #include "paddle/fluid/lite/kernels/use_kernels.h"
 #include "paddle/fluid/lite/operators/use_ops.h"
 
-// for eval
-DEFINE_string(model_dir, "", "");
-
 namespace paddle {
 namespace lite {
 
 #ifdef LITE_WITH_ARM
 TEST(ResNet50, test) {
   DeviceInfo::Init();
+  DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads);
   lite::Predictor predictor;
   std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                    Place{TARGET(kARM), PRECISION(kFloat)}});
@@ -44,7 +43,20 @@ TEST(ResNet50, test) {
     data[i] = 1;
   }
 
-  predictor.Run();
+  for (int i = 0; i < FLAGS_warmup; ++i) {
+    predictor.Run();
+  }
+
+  auto start = GetCurrentUS();
+  for (int i = 0; i < FLAGS_repeats; ++i) {
+    predictor.Run();
+  }
+
+  LOG(INFO) << "================== Speed Report ===================";
+  LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
+            << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats
+            << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0
+            << " ms in average.";
 
   auto* out = predictor.GetOutput(0);
   std::vector<float> results({2.41399175e-04, 4.13724629e-04, 2.64324830e-04,
...
paddle/fluid/lite/api/test_helper.h (new file, 0 → 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <gflags/gflags.h>
#include <sys/time.h>  // declares gettimeofday; assumed lost from the scraped page
#include <time.h>

// for eval
DEFINE_string(model_dir, "", "model dir");
DEFINE_int32(warmup, 0, "warmup times");
DEFINE_int32(repeats, 1, "repeats times");
DEFINE_int32(threads, 1, "threads num");

namespace paddle {
namespace lite {

inline double GetCurrentUS() {
  struct timeval time;
  gettimeofday(&time, NULL);
  return 1e+6 * time.tv_sec + time.tv_usec;
}

}  // namespace lite
}  // namespace paddle
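Because test_helper.h defines its options with gflags, --model_dir, --warmup, --repeats, and --threads become command-line flags of any binary that includes it. The standalone sketch below is not part of the commit; it only illustrates the flag parsing and the GetCurrentUS() helper. The real tests are gtest binaries, so this main() and the trivial stand-in workload are assumptions.

// Sketch only (not in the commit): parse the flags from test_helper.h and time
// a trivial stand-in workload; the real tests call predictor.Run() in gtest cases.
#include <gflags/gflags.h>

#include <cstdio>

#include "paddle/fluid/lite/api/test_helper.h"

int main(int argc, char** argv) {
  // e.g. --model_dir=/path/to/model --warmup=10 --repeats=100 --threads=4
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  std::printf("model_dir=%s warmup=%d repeats=%d threads=%d\n",
              FLAGS_model_dir.c_str(), FLAGS_warmup, FLAGS_repeats, FLAGS_threads);

  double start = paddle::lite::GetCurrentUS();
  volatile double sink = 0.0;
  for (int i = 0; i < FLAGS_repeats; ++i) {
    sink = sink + i * 0.5;  // trivial stand-in for a model's Run()
  }
  std::printf("average: %.6f ms\n",
              (paddle::lite::GetCurrentUS() - start) / FLAGS_repeats / 1000.0);
  return 0;
}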