Commit e043ea96 (unverified), authored on Jan 25, 2019 by tensor-tang, committed by GitHub on Jan 25, 2019
Merge pull request #15515 from tensor-tang/jit/benchmark
jit benchmark use tensor with alignment
Parents: c5855506, b67584a6
Showing 2 changed files with 71 additions and 38 deletions (+71 −38)
paddle/fluid/operators/jit/CMakeLists.txt (+1 −1)
paddle/fluid/operators/jit/benchmark.cc (+70 −37)
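Every hunk in benchmark.cc below makes the same substitution: a std::vector<T> buffer becomes a paddle::framework::Tensor whose storage is obtained through Resize() followed by mutable_data<T>(PlaceType()), so the kernels are timed against buffers handed out by the framework allocator (which is also why the cc_binary target in CMakeLists.txt gains the tensor dependency). A minimal sketch of the two allocation styles, using only names that appear in the diff; the wrapper function itself is illustrative and not part of the commit:

#include <vector>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/place.h"

// Illustrative only: contrast the old and new buffer allocation styles.
template <typename T>
void AllocationStyles(int d) {
  // Old style: std::vector owns the memory; data() gives no alignment
  // guarantee beyond what the default allocator happens to provide.
  std::vector<T> x_old(d);
  T* old_ptr = x_old.data();

  // New style: a framework Tensor is shaped with Resize() and materialised
  // with mutable_data<T>(place), which allocates through Paddle's allocator.
  paddle::framework::Tensor x_new;
  x_new.Resize({d});
  T* new_ptr = x_new.mutable_data<T>(paddle::platform::CPUPlace());

  (void)old_ptr;
  (void)new_ptr;
}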
paddle/fluid/operators/jit/CMakeLists.txt

@@ -21,5 +21,5 @@ endif()
 cc_library(jit_kernel_helper SRCS ${jit_kernel_cc_srcs} DEPS ${JIT_KERNEL_DEPS})
 cc_test(jit_kernel_test SRCS test.cc DEPS jit_kernel_helper)
 if(NOT WIN32)
-  cc_binary(jit_kernel_benchmark SRCS benchmark.cc DEPS jit_kernel_helper device_tracer)
+  cc_binary(jit_kernel_benchmark SRCS benchmark.cc DEPS jit_kernel_helper device_tracer tensor)
 endif()
paddle/fluid/operators/jit/benchmark.cc

@@ -18,6 +18,7 @@
 #include <vector>
 #include "gflags/gflags.h"
 #include "glog/logging.h"
+#include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/operators/jit/kernels.h"
 #include "paddle/fluid/platform/device_tracer.h"
 #include "paddle/fluid/platform/place.h"
@@ -155,14 +156,22 @@ void BenchAllImpls(const typename KernelTuples::attr_type& attr, Args... args) {
   LOG(INFO) << loginfos.str();
 }
 
+using Tensor = paddle::framework::Tensor;
+
 template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchXYZNKernel() {
   for (int d : TestSizes()) {
-    std::vector<T> x(d), y(d), z(d);
-    RandomVec<T>(d, x.data());
-    RandomVec<T>(d, y.data());
-    BenchAllImpls<KT, jit::XYZNTuples<T>, PlaceType>(d, x.data(), y.data(),
-                                                     z.data(), d);
+    Tensor x, y, z;
+    x.Resize({d});
+    y.Resize({d});
+    z.Resize({d});
+    T* x_data = x.mutable_data<T>(PlaceType());
+    T* y_data = y.mutable_data<T>(PlaceType());
+    T* z_data = z.mutable_data<T>(PlaceType());
+    RandomVec<T>(d, x_data);
+    RandomVec<T>(d, y_data);
+    BenchAllImpls<KT, jit::XYZNTuples<T>, PlaceType>(d, x.data<T>(), y.data<T>(),
+                                                     z_data, d);
   }
 }
 
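The reason the buffers matter here is that the JIT kernels issue vector loads and stores, and the measured throughput is sensitive to the alignment of the base pointers the benchmark passes in. If one wanted to confirm what the allocator returns, a check along the following lines would do; the helper and the 32-byte figure (roughly one AVX register) are assumptions for illustration, not something stated in the commit:

#include <cstddef>
#include <cstdint>

// Hypothetical check, not part of the commit: true if p is aligned to
// `alignment` bytes (e.g. 32 for 256-bit AVX loads).
inline bool IsAlignedTo(const void* p, std::size_t alignment) {
  return reinterpret_cast<std::uintptr_t>(p) % alignment == 0;
}

// Usage sketch, after T* x_data = x.mutable_data<T>(PlaceType()):
//   bool ok = IsAlignedTo(x_data, 32);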
@@ -170,9 +179,13 @@ template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchAXYNKernel() {
   for (int d : TestSizes()) {
     const T a = static_cast<T>(3);
-    std::vector<T> x(d), y(d);
-    RandomVec<T>(d, x.data());
-    BenchAllImpls<KT, jit::AXYNTuples<T>, PlaceType>(d, &a, x.data(), y.data(),
-                                                     d);
+    Tensor x, y;
+    x.Resize({d});
+    y.Resize({d});
+    T* x_data = x.mutable_data<T>(PlaceType());
+    T* y_data = y.mutable_data<T>(PlaceType());
+    RandomVec<T>(d, x_data);
+    BenchAllImpls<KT, jit::AXYNTuples<T>, PlaceType>(d, &a, x.data<T>(), y_data,
+                                                     d);
   }
 }
@@ -180,9 +193,13 @@ void BenchAXYNKernel() {
 template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchXYNKernel() {
   for (int d : TestSizes()) {
-    std::vector<T> x(d), y(d);
-    RandomVec<T>(d, x.data());
-    BenchAllImpls<KT, jit::XYNTuples<T>, PlaceType>(d, x.data(), y.data(), d);
+    Tensor x, y;
+    x.Resize({d});
+    y.Resize({d});
+    T* x_data = x.mutable_data<T>(PlaceType());
+    T* y_data = y.mutable_data<T>(PlaceType());
+    RandomVec<T>(d, x_data);
+    BenchAllImpls<KT, jit::XYNTuples<T>, PlaceType>(d, x.data<T>(), y_data, d);
   }
 }
 
@@ -192,16 +209,23 @@ void BenchLSTMKernel() {
   for (int d : TestSizes()) {
     const jit::lstm_attr_t attr(d, jit::kVSigmoid, jit::kVTanh, jit::kVTanh,
                                 use_peephole);
-    std::vector<T> x(4 * d), ct_1(d), ct(d), ht(d), wp(3 * d), checked(2 * d);
-    RandomVec<T>(4 * d, x.data(), -2.f, 2.f);
-    RandomVec<T>(3 * d, wp.data(), -2.f, 2.f);
-    RandomVec<T>(d, ct_1.data(), -2.f, 2.f);
-    const T* ct_1_data = ct_1.data();
-    const T* wp_data = wp.data();
-    T* x_data = x.data();
-    T* checked_data = checked.data();
-    T* ct_data = ct.data();
-    T* ht_data = ht.data();
+    Tensor x, ct_1, ct, ht, wp, checked;
+    x.Resize({4 * d});
+    ct_1.Resize({d});
+    ct.Resize({d});
+    ht.Resize({d});
+    wp.Resize({3 * d});
+    checked.Resize({2 * d});
+    auto place = PlaceType();
+    RandomVec<T>(x.numel(), x.mutable_data<T>(place), -2.f, 2.f);
+    RandomVec<T>(wp.numel(), wp.mutable_data<T>(place), -2.f, 2.f);
+    RandomVec<T>(ct_1.numel(), ct_1.mutable_data<T>(place), -2.f, 2.f);
+    const T* ct_1_data = ct_1.data<T>();
+    const T* wp_data = wp.data<T>();
+    T* x_data = x.mutable_data<T>(place);
+    T* checked_data = checked.mutable_data<T>(place);
+    T* ct_data = ct.mutable_data<T>(place);
+    T* ht_data = ht.mutable_data<T>(place);
     jit::lstm_t step;
     step.gates = x_data;
     step.ct_1 = ct_1_data;
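The LSTM hunk above and the GRU, SeqPool and MatMul hunks below repeat the same Resize / mutable_data / RandomVec sequence for each operand. Purely as a design note, that repetition could be folded into one helper; the sketch below is hypothetical and not in the commit, while Tensor, RandomVec and the -2.f..2.f range are taken from benchmark.cc:

// Hypothetical helper, not part of the commit: shape a Tensor, obtain an
// aligned buffer from the framework allocator, and fill it with random values.
template <typename T, typename PlaceType>
T* PrepareRandomTensor(Tensor* t, int n, T lo = static_cast<T>(-2),
                       T hi = static_cast<T>(2)) {
  t->Resize({n});
  T* data = t->mutable_data<T>(PlaceType());  // aligned allocation
  RandomVec<T>(n, data, lo, hi);              // same filler used in benchmark.cc
  return data;
}

// e.g. in BenchLSTMKernel: T* x_data = PrepareRandomTensor<T, PlaceType>(&x, 4 * d);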
@@ -220,12 +244,16 @@ template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchGRUKernel() {
   for (int d : TestSizes()) {
     const jit::gru_attr_t attr(d, jit::kVSigmoid, jit::kVTanh);
-    std::vector<T> x(3 * d), ht_1(d), ht(d);
-    RandomVec<T>(3 * d, x.data(), -2.f, 2.f);
-    RandomVec<T>(d, ht_1.data(), -2.f, 2.f);
-    const T* ht_1_data = ht_1.data();
-    T* x_data = x.data();
-    T* ht_data = ht.data();
+    auto place = PlaceType();
+    Tensor x, ht_1, ht;
+    x.Resize({3 * d});
+    ht_1.Resize({d});
+    ht.Resize({d});
+    RandomVec<T>(3 * d, x.mutable_data<T>(place), -2.f, 2.f);
+    RandomVec<T>(d, ht_1.mutable_data<T>(place), -2.f, 2.f);
+    const T* ht_1_data = ht_1.data<T>();
+    T* x_data = x.mutable_data<T>(place);
+    T* ht_data = ht.mutable_data<T>(place);
     jit::gru_t step;
     step.gates = x_data;
     step.ht_1 = ht_1_data;
@@ -243,10 +271,12 @@ void BenchSeqPoolKernel() {
     jit::seq_pool_attr_t attr(w, type);
     for (int h : TestSizes()) {
       attr.h = h;
-      std::vector<T> x(h * w), y(w);
-      RandomVec<T>(h * w, x.data(), -2.f, 2.f);
-      const T* x_data = x.data();
-      T* y_data = y.data();
+      Tensor x, y;
+      x.Resize({h * w});
+      y.Resize({w});
+      RandomVec<T>(h * w, x.mutable_data<T>(PlaceType()), -2.f, 2.f);
+      const T* x_data = x.data<T>();
+      T* y_data = y.mutable_data<T>(PlaceType());
       BenchAllImpls<KT, jit::SeqPoolTuples<T>, PlaceType>(attr, x_data,
                                                           y_data, &attr);
     }
@@ -259,12 +289,15 @@ void BenchMatMulKernel() {
   for (int m : {1, 2, 3, 4}) {
     for (int n : TestSizes()) {
       for (int k : TestSizes()) {
-        std::vector<T> a(m * k), b(k * n), c(m * n);
-        RandomVec<T>(m * k, a.data(), -2.f, 2.f);
-        RandomVec<T>(k * n, b.data(), -2.f, 2.f);
-        const T* a_data = a.data();
-        const T* b_data = b.data();
-        T* c_data = c.data();
+        Tensor a, b, c;
+        a.Resize({m * k});
+        b.Resize({k * n});
+        c.Resize({m * n});
+        RandomVec<T>(m * k, a.mutable_data<T>(PlaceType()), -2.f, 2.f);
+        RandomVec<T>(k * n, b.mutable_data<T>(PlaceType()), -2.f, 2.f);
+        const T* a_data = a.data<T>();
+        const T* b_data = b.data<T>();
+        T* c_data = c.mutable_data<T>(PlaceType());
         BenchAllImpls<KT, jit::MatMulTuples<T>, PlaceType>(k, a_data, b_data,
                                                            c_data, m, n, k);
       }