Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 0596cd88
Authored Dec 22, 2017 by tensor-tang

refine test recurrent layer

Parent: 290edd8f
Showing 1 changed file with 52 additions and 67 deletions.

paddle/gserver/tests/test_RecurrentLayer.cpp (+52, -67)
@@ -222,6 +222,7 @@ TEST(Layer, RecurrentLayer) {
 #define protected public
 #include "paddle/gserver/layers/GatedRecurrentLayer.h"
 #include "paddle/gserver/layers/LstmLayer.h"
+#include "paddle/gserver/layers/RecurrentLayer.h"
 template <class T>
 class TestRecurrentLayer {
 public:
@@ -422,6 +423,8 @@ TEST(Layer, LstmLayer) {
 #ifdef PADDLE_WITH_MKLML
 #include "paddle/gserver/layers/MKLPackedRecurrentLayer.h"
 
 LayerPtr initMKLPackedLayer(LayerConfig layerConfig,
                             bool reversed,
                             int layerSize,
@@ -453,7 +456,31 @@ LayerPtr initMKLPackedLayer(LayerConfig layerConfig,
   return testLayer;
 }
 
-void checkMKLPackedLayer(LayerPtr testLayer1, LayerPtr testLayer2) {
+void checkMKLPackedLayer(LayerConfig layerConfig1,
+                         LayerConfig layerConfig2,
+                         bool reversed,
+                         int layerSize,
+                         int batchSize,
+                         bool useBatch1,
+                         bool useBatch2) {
+  LayerPtr dataLayer;
+  ParameterPtr para, bias;
+
+  if (layerConfig1.type() == "recurrent") {
+    dataLayer = creatDataLayer("layer_0", batchSize, layerSize, false);
+    para = creatParameter("para_0", 0, layerSize * layerSize, false);
+    bias = nullptr;
+  } else if (layerConfig1.type() == "gated_recurrent") {
+    dataLayer = creatDataLayer("layer_0", batchSize, layerSize * 3, false);
+    para = creatParameter("para_0", 0, layerSize * layerSize * 3, false);
+    bias = creatParameterBias("bias_0", 1, layerSize * 3, false);
+  }
+
+  LayerPtr testLayer1 = initMKLPackedLayer(
+      layerConfig1, reversed, layerSize, dataLayer, para, bias);
+  LayerPtr testLayer2 = initMKLPackedLayer(
+      layerConfig2, reversed, layerSize, dataLayer, para, bias);
+
   const VectorPtr& weightGrad =
       (testLayer1->getParameters()[0])->getBuf(PARAMETER_GRADIENT);
   const MatrixPtr& inputGrad = testLayer1->getPrev(0)->getOutputGrad();
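Note (outside the diff): after this change, checkMKLPackedLayer builds both layers internally from the two LayerConfigs, so a call site only has to describe what to compare. A minimal sketch of such a call, assuming the usual generated protobuf setters on LayerConfig; the "mkl_packed_recurrent" type string is an assumption, not confirmed by this page:

// Sketch only: setter names follow the generated protobuf API for
// LayerConfig; the "mkl_packed_recurrent" type string is an assumption.
LayerConfig layerConfig1;
layerConfig1.set_type("recurrent");             // reference implementation
layerConfig1.set_active_type("tanh");

LayerConfig layerConfig2;
layerConfig2.set_type("mkl_packed_recurrent");  // MKL-packed variant under test
layerConfig2.set_active_type("tanh");

// Compare the two implementations with independent rnn_use_batch settings.
checkMKLPackedLayer(layerConfig1,
                    layerConfig2,
                    /*reversed=*/false,
                    /*layerSize=*/32,
                    /*batchSize=*/5,
                    /*useBatch1=*/true,
                    /*useBatch2=*/false);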
@@ -462,78 +489,34 @@ void checkMKLPackedLayer(LayerPtr testLayer1, LayerPtr testLayer2) {
   CpuMatrix input_grad1(inputGrad->getHeight(), inputGrad->getWidth());
   CpuMatrix input_grad2(inputGrad->getHeight(), inputGrad->getWidth());
-  CpuMatrix outputGrad(inputGrad->getHeight(), inputGrad->getWidth());
-  outputGrad.randomizeUniform();
 
   for (int i = 0; i < 2; i++) {
-    FLAGS_rnn_use_batch = true;
+    FLAGS_rnn_use_batch = useBatch1;
     testLayer1->forward(PASS_GC);
-    testLayer1->getOutputGrad()->copyFrom(outputGrad);
-    weightGrad->zero();
-    inputGrad->zero();
-    testLayer1->backward(nullptr);
-    wgt_grad1.copyFrom(*weightGrad);
-    input_grad1.copyFrom(*inputGrad);
 
-    FLAGS_rnn_use_batch = true;
+    FLAGS_rnn_use_batch = useBatch2;
     testLayer2->forward(PASS_GC);
-    testLayer2->getOutputGrad()->copyFrom(outputGrad);
-    weightGrad->zero();
-    inputGrad->zero();
-    testLayer2->backward(nullptr);
-    wgt_grad2.copyFrom(*weightGrad);
-    input_grad2.copyFrom(*inputGrad);
-
-    checkError(*testLayer1->getOutputValue(), *testLayer2->getOutputValue());
-    checkError(wgt_grad1, wgt_grad2);
-    checkError(input_grad1, input_grad2);
-  }
-
-  for (int i = 0; i < 2; i++) {
-    CpuMatrix outputValue(testLayer2->getOutputValue()->getHeight(),
-                          testLayer2->getOutputValue()->getWidth());
-
-    FLAGS_rnn_use_batch = true;
-    testLayer2->forward(PASS_GC);
-    outputValue.copyFrom(*testLayer2->getOutputValue());
-
-    testLayer2->getOutputGrad()->copyFrom(outputGrad);
+    testLayer1->getOutputGrad()->randomizeUniform();
+    testLayer2->getOutputGrad()->copyFrom(*testLayer1->getOutputGrad());
+
     weightGrad->zero();
     inputGrad->zero();
-    testLayer2->backward(nullptr);
+    FLAGS_rnn_use_batch = useBatch1;
+    testLayer1->backward(nullptr);
+
     wgt_grad1.copyFrom(*weightGrad);
     input_grad1.copyFrom(*inputGrad);
 
-    FLAGS_rnn_use_batch = false;
-    testLayer2->getOutputValue()->zero();
-    testLayer2->forward(PASS_GC);
-    testLayer2->getOutputGrad()->copyFrom(outputGrad);
     weightGrad->zero();
     inputGrad->zero();
+    FLAGS_rnn_use_batch = useBatch2;
     testLayer2->backward(nullptr);
+
     wgt_grad2.copyFrom(*weightGrad);
     input_grad2.copyFrom(*inputGrad);
 
-    checkError(outputValue, *testLayer2->getOutputValue());
+    checkError(*testLayer1->getOutputValue(), *testLayer2->getOutputValue());
     checkError(wgt_grad1, wgt_grad2);
     checkError(input_grad1, input_grad2);
   }
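Aside (not in the diff): the loop above leans on this file's checkError helpers to compare outputs and gradients element-wise. As a self-contained illustration of that comparison pattern, using plain buffers instead of Paddle's Matrix/Vector types and an assumed tolerance, not Paddle's actual implementation:

#include <cmath>
#include <cstddef>
#include <gtest/gtest.h>

// Generic stand-in for checkError: count elements that differ by more than
// eps and expect none, so a failure reports how widespread the divergence
// is instead of stopping at the first mismatch.
void checkBuffersClose(const float* a, const float* b, size_t n,
                       float eps = 1e-4f) {
  size_t mismatches = 0;
  for (size_t i = 0; i < n; ++i) {
    if (std::fabs(a[i] - b[i]) > eps) ++mismatches;
  }
  EXPECT_EQ(mismatches, 0u) << "buffers diverge beyond eps=" << eps;
}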
@@ -556,20 +539,22 @@ TEST(MKLPackedLayer, RecurrentLayer) {
   for (auto layerSize : {32, 64, 128, 256, 512}) {
     for (auto batchSize : {1, 5, 100, 500}) {
       for (auto reversed : {true, false}) {
-        LOG(INFO) << " layerSize=" << layerSize << " batchSize=" << batchSize
-                  << " reversed=" << reversed;
-
-        LayerPtr dataLayer =
-            creatDataLayer("layer_0", batchSize, layerSize, false);
-        ParameterPtr para =
-            creatParameter("para_0", 0, layerSize * layerSize, false);
-
-        LayerPtr testLayer1 = initMKLPackedLayer(
-            layerConfig1, reversed, layerSize, dataLayer, para);
-        LayerPtr testLayer2 = initMKLPackedLayer(
-            layerConfig2, reversed, layerSize, dataLayer, para);
-
-        checkMKLPackedLayer(testLayer1, testLayer2);
+        for (auto paddle_use_batch : {true, false}) {
+          for (auto MKLPacked_use_batch : {true, false}) {
+            LOG(INFO) << " layerSize=" << layerSize
+                      << " batchSize=" << batchSize
+                      << " reversed=" << reversed
+                      << " paddle_use_batch=" << paddle_use_batch
+                      << " MKLPacked_use_batch=" << MKLPacked_use_batch;
+            checkMKLPackedLayer(layerConfig1,
+                                layerConfig2,
+                                reversed,
+                                layerSize,
+                                batchSize,
+                                paddle_use_batch,
+                                MKLPacked_use_batch);
+          }
+        }
       }
     }
   }
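Aside (not in the diff): FLAGS_rnn_use_batch is a gflags-style global that selects between the sequence and batch RNN calculation paths, which is why the test can flip it between forward and backward calls. A sketch of the generic shape of such a flag; the flag's real definition lives elsewhere in Paddle, and runForwardBackward is a hypothetical helper for illustration:

#include <gflags/gflags.h>

// Sketch: in Paddle the flag is defined once in library code; the test then
// assigns FLAGS_rnn_use_batch directly to pick the execution path per call.
DEFINE_bool(rnn_use_batch, false, "Use the batch path for RNN calculation.");

void runForwardBackward(LayerPtr layer, bool useBatch) {
  FLAGS_rnn_use_batch = useBatch;  // toggle before each forward/backward pair
  layer->forward(PASS_GC);
  layer->backward(nullptr);
}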