Commit e043ea96 (unverified)
Authored on Jan 25, 2019 by tensor-tang; committed via GitHub on Jan 25, 2019.
Merge pull request #15515 from tensor-tang/jit/benchmark
jit benchmark use tensor with alignment
Parents: c5855506, b67584a6
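The point of the change, per the commit title, is alignment: a std::vector<T> only guarantees the natural alignment of T, whereas buffers obtained through paddle::framework::Tensor::mutable_data<T>(place) come from Paddle's allocator, so the JIT kernels are benchmarked against aligned data. Below is a minimal sketch of the allocation pattern the diff switches to; the function name and the use of CPUPlace/float are illustrative stand-ins for the benchmark's PlaceType and T template parameters, and the exact alignment guarantee depends on the allocator.

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/place.h"

// Hypothetical helper showing the Tensor-backed buffer pattern used in this commit.
void AlignedBufferSketch(int d) {
  using Tensor = paddle::framework::Tensor;
  paddle::platform::CPUPlace place;

  Tensor x;
  x.Resize({d});                                 // set the shape first
  float* x_data = x.mutable_data<float>(place);  // allocate on `place`, get a writable pointer
  const float* x_ro = x.data<float>();           // read-only view of the same buffer

  // A std::vector<float> would only be aligned to alignof(float); the Tensor
  // buffer comes from Paddle's allocator instead, which is what the JIT
  // benchmark is meant to exercise.
  (void)x_data;
  (void)x_ro;
}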
Showing 2 changed files with 71 additions and 38 deletions (+71 −38):

paddle/fluid/operators/jit/CMakeLists.txt   +1 −1
paddle/fluid/operators/jit/benchmark.cc     +70 −37
paddle/fluid/operators/jit/CMakeLists.txt
@@ -21,5 +21,5 @@ endif()
 cc_library(jit_kernel_helper SRCS ${jit_kernel_cc_srcs} DEPS ${JIT_KERNEL_DEPS})
 cc_test(jit_kernel_test SRCS test.cc DEPS jit_kernel_helper)
 if(NOT WIN32)
-    cc_binary(jit_kernel_benchmark SRCS benchmark.cc DEPS jit_kernel_helper device_tracer)
+    cc_binary(jit_kernel_benchmark SRCS benchmark.cc DEPS jit_kernel_helper device_tracer tensor)
 endif()
paddle/fluid/operators/jit/benchmark.cc
@@ -18,6 +18,7 @@
 #include <vector>
 #include "gflags/gflags.h"
 #include "glog/logging.h"
+#include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/operators/jit/kernels.h"
 #include "paddle/fluid/platform/device_tracer.h"
 #include "paddle/fluid/platform/place.h"
@@ -155,14 +156,22 @@ void BenchAllImpls(const typename KernelTuples::attr_type& attr, Args... args) {
   LOG(INFO) << loginfos.str();
 }

+using Tensor = paddle::framework::Tensor;
+
 template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchXYZNKernel() {
   for (int d : TestSizes()) {
-    std::vector<T> x(d), y(d), z(d);
-    RandomVec<T>(d, x.data());
-    RandomVec<T>(d, y.data());
-    BenchAllImpls<KT, jit::XYZNTuples<T>, PlaceType>(d, x.data(), y.data(),
-                                                     z.data(), d);
+    Tensor x, y, z;
+    x.Resize({d});
+    y.Resize({d});
+    z.Resize({d});
+    T* x_data = x.mutable_data<T>(PlaceType());
+    T* y_data = y.mutable_data<T>(PlaceType());
+    T* z_data = z.mutable_data<T>(PlaceType());
+    RandomVec<T>(d, x_data);
+    RandomVec<T>(d, y_data);
+    BenchAllImpls<KT, jit::XYZNTuples<T>, PlaceType>(d, x.data<T>(), y.data<T>(),
+                                                     z_data, d);
   }
 }
@@ -170,9 +179,13 @@ template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchAXYNKernel() {
   for (int d : TestSizes()) {
     const T a = static_cast<T>(3);
-    std::vector<T> x(d), y(d);
-    RandomVec<T>(d, x.data());
-    BenchAllImpls<KT, jit::AXYNTuples<T>, PlaceType>(d, &a, x.data(), y.data(),
+    Tensor x, y;
+    x.Resize({d});
+    y.Resize({d});
+    T* x_data = x.mutable_data<T>(PlaceType());
+    T* y_data = y.mutable_data<T>(PlaceType());
+    RandomVec<T>(d, x_data);
+    BenchAllImpls<KT, jit::AXYNTuples<T>, PlaceType>(d, &a, x.data<T>(), y_data,
                                                      d);
   }
 }
@@ -180,9 +193,13 @@ void BenchAXYNKernel() {
 template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchXYNKernel() {
   for (int d : TestSizes()) {
-    std::vector<T> x(d), y(d);
-    RandomVec<T>(d, x.data());
-    BenchAllImpls<KT, jit::XYNTuples<T>, PlaceType>(d, x.data(), y.data(), d);
+    Tensor x, y;
+    x.Resize({d});
+    y.Resize({d});
+    T* x_data = x.mutable_data<T>(PlaceType());
+    T* y_data = y.mutable_data<T>(PlaceType());
+    RandomVec<T>(d, x_data);
+    BenchAllImpls<KT, jit::XYNTuples<T>, PlaceType>(d, x.data<T>(), y_data, d);
   }
 }
@@ -192,16 +209,23 @@ void BenchLSTMKernel() {
   for (int d : TestSizes()) {
     const jit::lstm_attr_t attr(d, jit::kVSigmoid, jit::kVTanh, jit::kVTanh,
                                 use_peephole);
-    std::vector<T> x(4 * d), ct_1(d), ct(d), ht(d), wp(3 * d), checked(2 * d);
-    RandomVec<T>(4 * d, x.data(), -2.f, 2.f);
-    RandomVec<T>(3 * d, wp.data(), -2.f, 2.f);
-    RandomVec<T>(d, ct_1.data(), -2.f, 2.f);
-    const T* ct_1_data = ct_1.data();
-    const T* wp_data = wp.data();
-    T* x_data = x.data();
-    T* checked_data = checked.data();
-    T* ct_data = ct.data();
-    T* ht_data = ht.data();
+    Tensor x, ct_1, ct, ht, wp, checked;
+    x.Resize({4 * d});
+    ct_1.Resize({d});
+    ct.Resize({d});
+    ht.Resize({d});
+    wp.Resize({3 * d});
+    checked.Resize({2 * d});
+    auto place = PlaceType();
+    RandomVec<T>(x.numel(), x.mutable_data<T>(place), -2.f, 2.f);
+    RandomVec<T>(wp.numel(), wp.mutable_data<T>(place), -2.f, 2.f);
+    RandomVec<T>(ct_1.numel(), ct_1.mutable_data<T>(place), -2.f, 2.f);
+    const T* ct_1_data = ct_1.data<T>();
+    const T* wp_data = wp.data<T>();
+    T* x_data = x.mutable_data<T>(place);
+    T* checked_data = checked.mutable_data<T>(place);
+    T* ct_data = ct.mutable_data<T>(place);
+    T* ht_data = ht.mutable_data<T>(place);
     jit::lstm_t step;
     step.gates = x_data;
     step.ct_1 = ct_1_data;
@@ -220,12 +244,16 @@ template <paddle::operators::jit::KernelType KT, typename T, typename PlaceType>
 void BenchGRUKernel() {
   for (int d : TestSizes()) {
     const jit::gru_attr_t attr(d, jit::kVSigmoid, jit::kVTanh);
-    std::vector<T> x(3 * d), ht_1(d), ht(d);
-    RandomVec<T>(3 * d, x.data(), -2.f, 2.f);
-    RandomVec<T>(d, ht_1.data(), -2.f, 2.f);
-    const T* ht_1_data = ht_1.data();
-    T* x_data = x.data();
-    T* ht_data = ht.data();
+    auto place = PlaceType();
+    Tensor x, ht_1, ht;
+    x.Resize({3 * d});
+    ht_1.Resize({d});
+    ht.Resize({d});
+    RandomVec<T>(3 * d, x.mutable_data<T>(place), -2.f, 2.f);
+    RandomVec<T>(d, ht_1.mutable_data<T>(place), -2.f, 2.f);
+    const T* ht_1_data = ht_1.data<T>();
+    T* x_data = x.mutable_data<T>(place);
+    T* ht_data = ht.mutable_data<T>(place);
     jit::gru_t step;
     step.gates = x_data;
     step.ht_1 = ht_1_data;
@@ -243,10 +271,12 @@ void BenchSeqPoolKernel() {
       jit::seq_pool_attr_t attr(w, type);
       for (int h : TestSizes()) {
         attr.h = h;
-        std::vector<T> x(h * w), y(w);
-        RandomVec<T>(h * w, x.data(), -2.f, 2.f);
-        const T* x_data = x.data();
-        T* y_data = y.data();
+        Tensor x, y;
+        x.Resize({h * w});
+        y.Resize({w});
+        RandomVec<T>(h * w, x.mutable_data<T>(PlaceType()), -2.f, 2.f);
+        const T* x_data = x.data<T>();
+        T* y_data = y.mutable_data<T>(PlaceType());
         BenchAllImpls<KT, jit::SeqPoolTuples<T>, PlaceType>(attr, x_data,
                                                             y_data, &attr);
       }
@@ -259,12 +289,15 @@ void BenchMatMulKernel() {
   for (int m : {1, 2, 3, 4}) {
     for (int n : TestSizes()) {
       for (int k : TestSizes()) {
-        std::vector<T> a(m * k), b(k * n), c(m * n);
-        RandomVec<T>(m * k, a.data(), -2.f, 2.f);
-        RandomVec<T>(k * n, b.data(), -2.f, 2.f);
-        const T* a_data = a.data();
-        const T* b_data = b.data();
-        T* c_data = c.data();
+        Tensor a, b, c;
+        a.Resize({m * k});
+        b.Resize({k * n});
+        c.Resize({m * n});
+        RandomVec<T>(m * k, a.mutable_data<T>(PlaceType()), -2.f, 2.f);
+        RandomVec<T>(k * n, b.mutable_data<T>(PlaceType()), -2.f, 2.f);
+        const T* a_data = a.data<T>();
+        const T* b_data = b.data<T>();
+        T* c_data = c.mutable_data<T>(PlaceType());
         BenchAllImpls<KT, jit::MatMulTuples<T>, PlaceType>(k, a_data, b_data,
                                                            c_data, m, n, k);
       }