s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit ad6b5319
Authored Oct 24, 2017 by tensor-tang

add unit test for mkldnn_batch_norm layer

Parent: 64eaeba1
Showing 3 changed files with 84 additions and 9 deletions (+84 -9):

paddle/gserver/tests/MKLDNNTester.cpp   +20 -9
paddle/gserver/tests/MKLDNNTester.h     +4  -0
paddle/gserver/tests/test_MKLDNN.cpp    +60 -0
paddle/gserver/tests/MKLDNNTester.cpp

```diff
@@ -91,10 +91,16 @@ void MKLDNNTester::setInputImgSize() {
 // init randome parameters of ref, and copy to mkldnn
 void MKLDNNTester::randomWgtDatas() {
   EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
+  const bool isBN = refLayer_->getType() == "batch_norm";
   for (size_t i = 0; i < parameters_[REF].size(); ++i) {
     const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
     const VectorPtr& refValue = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
     parameters_[REF][i]->randomize();
+    if (isBN && i == 2) {
+      // this param is moving average in batch norm, which must larger than 0
+      real offset = fabs(refValue->getMin()) + 1.0;
+      refValue->add(offset);
+    }
     dnnValue->copyFrom(*refValue);
     VLOG(MKLDNN_TESTS) << "Random weight " << parameters_[DNN][i]->getName();
@@ -132,8 +138,7 @@ void MKLDNNTester::checkForward() {
 void MKLDNNTester::checkBackwardData() {
   VLOG(MKLDNN_TESTS) << "Check Backward Data";
-  // TODO(TJ): uncomment me when batch norm ready
-  // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
+  const bool isBN = refLayer_->getType() == "batch_norm";
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
     const MatrixPtr& refDiff = dataLayers_[REF][i]->getOutputGrad();
@@ -144,11 +149,11 @@ void MKLDNNTester::checkBackwardData() {
     double delta = compareMatrix(dnnDiff, refDiff);
     EXPECT_LE(fabs(delta), eps_);
-    // TODO(TJ): uncomment me when batch norm ready
-    // if (isBN) {
-    //   // the other two inputs in batch norm are for moving mean and var
-    //   break;
-    // }
+    if (isBN) {
+      // the other two inputs in batch norm are for moving mean and var
+      // do not have grad to compare
+      break;
+    }
   }
 }
@@ -308,10 +313,14 @@ double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
 void MKLDNNTester::runOnce() {
   // test forward
   randomBotDatas();
-  dnnLayer_->forward(PASS_TRAIN);
-  refLayer_->forward(PASS_TRAIN);
+  dnnLayer_->forward(passType_);
+  refLayer_->forward(passType_);
   checkForward();

+  if (passType_ == PASS_TEST) {
+    return;
+  }
+
   // test backward
   // simple updater
   UpdateCallback updateCallback = [](Parameter* para) {
@@ -343,6 +352,7 @@ void MKLDNNTester::run(const TestConfig& dnn,
                        size_t batchSize,
                        size_t inputImgH,
                        size_t inputImgW,
+                       PassType passType,
                        bool printDetails,
                        size_t iter,
                        float epsilon) {
@@ -361,6 +371,7 @@ void MKLDNNTester::run(const TestConfig& dnn,
   ih_ = inputImgH;
   iw_ = inputImgW;
+  passType_ = passType;
   log_ = printDetails;
   iter_ = iter;
   eps_ = epsilon;
```
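The key change in randomWgtDatas() is the special handling of the third batch-norm parameter, which (as the in-code comment notes) holds the moving average/variance and must stay above zero: shifting a randomly initialized vector by fabs(min) + 1.0 makes every element strictly positive. A minimal standalone sketch of that shift, using a plain std::vector in place of Paddle's VectorPtr (an assumption for illustration only):

```cpp
// Standalone sketch (not Paddle code) of the positivity trick used in
// randomWgtDatas(): shift a random vector by fabs(min) + 1.0 so that
// every element becomes strictly positive.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

int main() {
  std::mt19937 gen(0);
  std::uniform_real_distribution<float> dist(-1.0f, 1.0f);

  std::vector<float> param(8);
  for (float& v : param) v = dist(gen);  // random values in [-1, 1]

  // Same idea as refValue->add(fabs(refValue->getMin()) + 1.0).
  float offset =
      std::fabs(*std::min_element(param.begin(), param.end())) + 1.0f;
  for (float& v : param) v += offset;

  for (float v : param) {
    assert(v > 0.0f);  // every element is now strictly positive
    std::printf("%.3f ", v);
  }
  std::printf("\n");
  return 0;
}
```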
paddle/gserver/tests/MKLDNNTester.h

```diff
@@ -62,12 +62,15 @@ protected:
   float eps_;
   /// input image size, default 1
   size_t ih_, iw_;
+  /// passType, PASS_TRAIN, PASS_TEST or PASS_GC (Gradient Check pass)
+  PassType passType_;

 public:
   explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
     iter_ = iter;
     eps_ = epsilon;
     log_ = false;
+    passType_ = PASS_TRAIN;
   }

   ~MKLDNNTester() {}
@@ -78,6 +81,7 @@ public:
           size_t batchSize,
           size_t inputImgH = 1,
           size_t inputImgW = 1,
+          PassType passType = PASS_TRAIN,
           bool printDetails = false,
           size_t iter = 3,
           float epsilon = 1e-4);
```
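The header now carries a passType_ member, and run() gains a PassType parameter that defaults to PASS_TRAIN, so existing call sites keep compiling while batch-norm tests can also exercise the inference pass. A usage fragment (not a standalone program) mirroring the calls made in test_MKLDNN.cpp below, where dnnConfig and refConfig are TestConfig objects for the mkldnn_batch_norm layer and the reference batch_norm layer:

```cpp
// Usage fragment only; dnnConfig/refConfig are prepared as in test_MKLDNN.cpp.
MKLDNNTester tester;  // defaults: iter = 3, epsilon = 1e-4, passType_ = PASS_TRAIN
// Train-phase comparison: forward outputs plus backward data/weight checks.
tester.run(dnnConfig, refConfig, /*batchSize=*/4, /*inputImgH=*/6, /*inputImgW=*/6,
           PASS_TRAIN);
// Inference-phase comparison: runOnce() returns right after checkForward().
tester.run(dnnConfig, refConfig, /*batchSize=*/1, /*inputImgH=*/6, /*inputImgW=*/6,
           PASS_TEST);
```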
paddle/gserver/tests/test_MKLDNN.cpp

```diff
@@ -212,6 +212,66 @@ TEST(MKLDNNLayer, PoolLayer) {
   testPoolLayer({2, 8, 56, 56, 29, 29, 3, 3, 1, 1, 2, 2});
 }

+struct testBatchNormDesc {
+  int bs;
+  int ic;
+  int ih, iw;
+};
+
+static void getMKLDNNBatchNormConfig(TestConfig& cfg,
+                                     const testBatchNormDesc& pm) {
+  cfg.layerConfig.set_size(pm.ic * pm.ih * pm.iw);
+  cfg.layerConfig.set_type("mkldnn_batch_norm");
+  cfg.biasSize = pm.ic;
+  cfg.inputDefs.push_back(
+      {INPUT_DATA,
+       "layer_0",
+       /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
+       /* size of weight= */ size_t(pm.ic)});
+  cfg.inputDefs.push_back(
+      {INPUT_DATA, "layer_1_moving_mean", 1, size_t(pm.ic)});
+  cfg.inputDefs.back().isStatic = true;
+  cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)});
+  cfg.inputDefs.back().isStatic = true;
+  LayerInputConfig* input = cfg.layerConfig.add_inputs();
+  // TODO(TJ): uncomment me when refine and support comparing all zeroes vector
+  // cfg.layerConfig.set_active_type("relu");
+  cfg.layerConfig.add_inputs();
+  cfg.layerConfig.add_inputs();
+  ImageConfig* img_conf = input->mutable_image_conf();
+  img_conf->set_channels(pm.ic);
+  img_conf->set_img_size_y(pm.ih);
+  img_conf->set_img_size(pm.iw);
+}
+
+void testBatchNormLayer(const testBatchNormDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNBatchNormConfig(dnnConfig, pm);
+  TestConfig refConfig = dnnConfig;
+  refConfig.layerConfig.set_type("batch_norm");
+  // for PASS_TRAIN, use_global_stats always should be false, and batchsize != 1
+  VLOG(MKLDNN_TESTS) << "check train phase";
+  dnnConfig.layerConfig.set_use_global_stats(false);
+  refConfig.layerConfig.set_use_global_stats(false);
+  MKLDNNTester tester;
+  tester.run(dnnConfig, refConfig, pm.bs, pm.ih, pm.iw, PASS_TRAIN);
+  // for PASS_TEST, check use_global_stats true and false, and batchsize 1
+  VLOG(MKLDNN_TESTS) << "check test phase";
+  for (auto useGS : {false, true}) {
+    dnnConfig.layerConfig.set_use_global_stats(useGS);
+    refConfig.layerConfig.set_use_global_stats(useGS);
+    MKLDNNTester tester;
+    for (auto bs : {pm.bs, 1}) {
+      tester.run(dnnConfig, refConfig, bs, pm.ih, pm.iw, PASS_TEST);
+    }
+  }
+}
+
+TEST(MKLDNNLayer, BatchNormLayer) {
+  testBatchNormLayer({4, 10, 6, 6});
+  testBatchNormLayer({16, 32, 16, 16});
+}
+
 struct testActDesc {
   int bs, ic, ih, iw;
 };
```
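testBatchNormLayer() compares mkldnn_batch_norm against the reference batch_norm layer in the train phase (use_global_stats false, batch size > 1) and in the test phase with both use_global_stats settings and batch size 1. For readers unfamiliar with the flag, here is a minimal standalone sketch, not the Paddle implementation, of what use_global_stats selects in batch-norm inference: normalization with the stored moving mean/variance versus the current batch statistics.

```cpp
// Standalone sketch (not Paddle code): batch-norm normalization of one
// channel, using either the stored moving statistics (use_global_stats=true)
// or the statistics of the current batch (use_global_stats=false).
#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

std::vector<float> batchNorm(const std::vector<float>& x,
                             float movingMean, float movingVar,
                             float scale, float bias,
                             bool useGlobalStats, float eps = 1e-5f) {
  float mean = movingMean;
  float var = movingVar;
  if (!useGlobalStats) {
    // Compute batch statistics, as in the training phase.
    mean = std::accumulate(x.begin(), x.end(), 0.0f) / x.size();
    var = 0.0f;
    for (float v : x) var += (v - mean) * (v - mean);
    var /= x.size();
  }
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = scale * (x[i] - mean) / std::sqrt(var + eps) + bias;
  }
  return y;
}

int main() {
  std::vector<float> x = {0.5f, -1.0f, 2.0f, 0.0f};
  for (bool useGS : {false, true}) {
    auto y = batchNorm(x, /*movingMean=*/0.2f, /*movingVar=*/1.5f,
                       /*scale=*/1.0f, /*bias=*/0.0f, useGS);
    std::printf("use_global_stats=%d:", useGS);
    for (float v : y) std::printf(" %.3f", v);
    std::printf("\n");
  }
  return 0;
}
```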