Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit 44002846

Merge pull request #4224 from tensor-tang/act

some addition for mkldnn_act

Authored by Tao Luo on Sep 20, 2017; committed via GitHub on Sep 20, 2017.
Parents: 6e358758, eb26fdce

Showing 6 changed files with 77 additions and 75 deletions (+77 −75):
- paddle/gserver/activations/MKLDNNActivation.h (+3 −2)
- paddle/gserver/layers/MKLDNNConvLayer.cpp (+2 −1)
- paddle/gserver/layers/MKLDNNFcLayer.cpp (+1 −0)
- paddle/gserver/layers/MKLDNNLayer.h (+5 −7)
- paddle/gserver/tests/test_MKLDNN.cpp (+62 −65)
- python/paddle/trainer/config_parser.py (+4 −0)
paddle/gserver/activations/MKLDNNActivation.h

```diff
@@ -131,8 +131,9 @@ public:
     fwdPD_.reset(new eltwise_fwd::primitive_desc(fwdDesc, eng));
     // use inplace for forward but save input value before submit
     inVal_ = val_;
-
-    if (act.grad) {
+    copyInVal_ = nullptr;  // only copy when need do backward
+    if (act.grad && algo == mkldnn::algorithm::eltwise_tanh) {
+      // tanh need save src input for backward
       inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc());
       copyInVal_ = std::make_shared<mkldnn::reorder>(*val_, *inVal_);
       CHECK(copyInVal_) << "should not be emptry";
```
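The guarded branch is the substance of this file's change: the eltwise forward primitive runs in place (the output overwrites the input), so the source value survives only if it is explicitly copied aside, and the commit narrows that copy to tanh, the one activation whose backward path here consumes the forward source (per the new comments). A minimal sketch of why relu can skip the copy while tanh cannot, in plain C++ rather than MKL-DNN primitives (function names here are illustrative, not Paddle's):

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// relu's gradient mask can be recovered from the in-place output alone:
// relu(x) > 0 exactly where x > 0, so no saved copy of x is needed.
void reluBackwardFromOutput(const std::vector<float>& y,  // in-place output
                            const std::vector<float>& dy,
                            std::vector<float>& dx) {
  for (std::size_t i = 0; i < y.size(); ++i) dx[i] = y[i] > 0 ? dy[i] : 0.f;
}

// The tanh backward path here consumes the forward *input*, which the
// in-place forward has already overwritten -- hence the copyInVal_ reorder.
void tanhBackwardFromSavedInput(const std::vector<float>& savedX,  // inVal_
                                const std::vector<float>& dy,
                                std::vector<float>& dx) {
  for (std::size_t i = 0; i < savedX.size(); ++i) {
    const float t = std::tanh(savedX[i]);
    dx[i] = dy[i] * (1.f - t * t);
  }
}
```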
paddle/gserver/layers/MKLDNNConvLayer.cpp

```diff
@@ -449,13 +449,14 @@ void MKLDNNConvLayer::resetOutGrad(
   cvtOutGrad_ = nullptr;
   if (!outputIsOnlyMKLDNN()) {
     const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
-    outMat->setData(cpuOut->getData());
     // same PrimitiveDesc with cpuInVal_
     CHECK(cpuOutVal_);
     cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc());
     if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) {
+      outMat->setData(cpuOut->getData());
       out = cpuOutGrad_;
     } else {
+      out = MKLDNNMatrix::create(nullptr, wgtPD->diff_dst_primitive_desc());
       cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
       CHECK(cvtOutGrad_);
     }
```
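The conv change tightens the CPU/MKL-DNN gradient handoff: aliasing the CPU gradient buffer (outMat->setData) is now done only when its primitive descriptor matches what the backward primitive expects; otherwise the gradient gets its own buffer in the expected layout and a reorder converts into it each pass. A schematic of that decision with hypothetical minimal types (Format and Buffer are stand-ins, not Paddle or MKL-DNN classes):

```cpp
// Hypothetical stand-ins for a memory format and a tensor buffer.
enum class Format { nchw, nChw8c };

struct Buffer {
  Format fmt;
  float* data;
};

// Either alias the external buffer (zero-copy) or mark that a reorder
// into our own storage is required on every backward pass.
void bindOutputGrad(Buffer& internal, const Buffer& external,
                    bool& needReorder) {
  if (internal.fmt == external.fmt) {
    internal.data = external.data;  // like outMat->setData(cpuOut->getData())
    needReorder = false;
  } else {
    needReorder = true;  // like MKLDNNMatrix::createReorder(cpuOutGrad_, out)
  }
}
```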
paddle/gserver/layers/MKLDNNFcLayer.cpp

```diff
@@ -232,6 +232,7 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
 void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
   // TODO(TJ): merge outgrad
   int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
+  output_.grad->setData(getOutput(device).grad->getData());
   // for MKLDNN device:
   // can not directly cast outputgrad to mkldnnmatrix,
   // since each layer can not write the inputgrad to mkldnn inputgrad.
```
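The single added line re-points the layer's gradient matrix at whichever device slot (MKL-DNN or CPU) actually holds the output gradient, so both views share one buffer instead of drifting apart. The mechanism is plain pointer aliasing; a toy sketch (this Matrix is a stand-in, not Paddle's):

```cpp
#include <cassert>
#include <vector>

struct Matrix {
  float* data = nullptr;
  void setData(float* d) { data = d; }  // alias external storage, no copy
};

int main() {
  std::vector<float> deviceGrad(16, 1.f);  // like getOutput(device).grad
  Matrix outputGrad;                       // like output_.grad
  outputGrad.setData(deviceGrad.data());
  assert(outputGrad.data == deviceGrad.data());
  return 0;
}
```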
paddle/gserver/layers/MKLDNNLayer.h

```diff
@@ -141,18 +141,16 @@ public:
   }
 
   void backward(const UpdateCallback& callback) override {
-    if (needResetBwd_) {
-      resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
-      needResetBwd_ = false;
-    }
-
-    {
+    /* Do derivation */ {
       REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
       backwardActivation();
     }
 
     {
       REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
+      if (needResetBwd_) {
+        resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
+        needResetBwd_ = false;
+      }
       stream_->submit(pipelineBwd_);
     }
```
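Two things move here: activation backward now runs first under its own timer, and the needResetBwd_ rebuild is deferred into the MKL-DNN timer block, right before the pipeline is submitted, so the (re)build happens after activation gradients exist and its cost is attributed to the MKL-DNN timer. The lazy-rebuild shape of this, stripped of the Paddle specifics (everything below is an illustrative stand-in):

```cpp
#include <functional>
#include <iostream>
#include <vector>

struct ToyLayer {
  bool needResetBwd = true;
  std::vector<std::function<void()>> pipelineBwd;

  void backwardActivation() { std::cout << "activation grad\n"; }

  void backward() {
    backwardActivation();  // grads exist before the pipeline is (re)built
    if (needResetBwd) {    // rebuild only when something changed
      pipelineBwd = {[] { std::cout << "mkldnn bwd primitive\n"; }};
      needResetBwd = false;
    }
    for (auto& op : pipelineBwd) op();  // stands in for stream_->submit()
  }
};

int main() {
  ToyLayer layer;
  layer.backward();  // builds, then submits
  layer.backward();  // submits the cached pipeline only
  return 0;
}
```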
paddle/gserver/tests/test_MKLDNN.cpp

```diff
@@ -26,17 +26,26 @@ DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_bool(use_gpu);
 DECLARE_bool(use_mkldnn);
 
-struct testFCDesc {
+#define RUN_MKLDNN_TEST(DNN_CONFIG, REF_CONFIG, DESC)         \
+  MKLDNNTester tester;                                        \
+  for (auto bs : {DESC.bs, 1}) {                              \
+    tester.run(DNN_CONFIG, REF_CONFIG, bs, DESC.ih, DESC.iw); \
+  }
+
+#define RUN_MKLDNN_TEST_LAYER(DNN_CONFIG, REF_TYPE, DESC) \
+  TestConfig ref = DNN_CONFIG;                             \
+  ref.layerConfig.set_type(REF_TYPE);                      \
+  RUN_MKLDNN_TEST(DNN_CONFIG, ref, DESC)
+
+struct testFcDesc {
   int bs;
   int ic;
-  int oc;
   int ih, iw;  // oh == ow == 1
+  int oc;
 };
 
-void testFcLayer(const testFCDesc& pm) {
-  const std::string compareTypes[] = {"mkldnn_fc", "fc"};
-  TestConfig cfg;
-  cfg.layerConfig.set_type(compareTypes[0]);
+static void getMKLDNNFcConfig(TestConfig& cfg, const testFcDesc& pm) {
+  cfg.layerConfig.set_type("mkldnn_fc");
   cfg.layerConfig.set_size(pm.oc);
   cfg.inputDefs.push_back(
       {INPUT_DATA,
@@ -44,25 +53,25 @@ void testFcLayer(const testFCDesc& pm) {
        /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
        /* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
   cfg.layerConfig.add_inputs();
+}
 
-  MKLDNNTester tester;
+void testFcLayer(const testFcDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNFcConfig(dnnConfig, pm);
   for (auto biasSize : {pm.oc, 0}) {
-    cfg.biasSize = biasSize;
-    TestConfig ref = cfg;
-    ref.layerConfig.set_type(compareTypes[1]);
-    for (auto bs : {pm.bs, 1}) {
-      tester.run(cfg, ref, bs, pm.ih, pm.iw);
-    }
+    dnnConfig.biasSize = biasSize;
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm)
   }
 }
 
 TEST(MKLDNNLayer, FcLayer) {
-  testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
-  testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
-  testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
-  testFcLayer({/*bs*/ 4, /*ic*/ 12, /*oc*/ 18, /*ih*/ 13, /*iw*/ 11});
-  testFcLayer({/*bs*/ 2, /*ic*/ 64, /*oc*/ 32, /*ih*/ 16, /*iw*/ 16});
-  testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16});
+  /* bs, ic, ih, iw, oc */
+  testFcLayer({2, 2, 1, 1, 3});
+  testFcLayer({3, 7, 1, 1, 19});
+  testFcLayer({8, 16, 13, 13, 32});
+  testFcLayer({4, 12, 13, 13, 18});
+  testFcLayer({2, 64, 16, 16, 32});
+  testFcLayer({15, 3, 16, 16, 6});
 }
@@ -75,13 +84,10 @@ struct testConvDesc {
   int dh, dw;
 };
 
-void testConvLayer(const testConvDesc& pm) {
-  const std::string compareTypes[] = {"mkldnn_conv", "exconv"};
-  TestConfig cfg;
-  cfg.layerConfig.set_type(compareTypes[0]);
+static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) {
+  cfg.layerConfig.set_type("mkldnn_conv");
   cfg.layerConfig.set_num_filters(pm.oc);
   cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
-  // cfg.layerConfig.set_partial_sum(1); // TODO: check it
   cfg.layerConfig.set_shared_biases(true);
   cfg.inputDefs.push_back(
       {INPUT_DATA,
@@ -115,15 +121,14 @@ void testConvLayer(const testConvDesc& pm) {
   int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
   CHECK_EQ(ow, pm.ow) << "output size check failed";
   CHECK_EQ(oh, pm.oh) << "output size check failed";
+}
 
-  MKLDNNTester tester;
+void testConvLayer(const testConvDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNConvConfig(dnnConfig, pm);
   for (auto biasSize : {pm.oc, 0}) {
-    cfg.biasSize = biasSize;
-    TestConfig ref = cfg;
-    ref.layerConfig.set_type(compareTypes[1]);
-    for (auto bs : {pm.bs, 1}) {
-      tester.run(cfg, ref, bs, pm.ih, pm.iw);
-    }
+    dnnConfig.biasSize = biasSize;
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "exconv", pm)
   }
 }
@@ -143,7 +148,7 @@ TEST(MKLDNNLayer, ConvLayer) {
 }
 
 struct testPoolDesc {
-  int bs, ch;  // input channel and output channel are the same
+  int bs, ic;  // input channel and output channel are the same
   int ih, iw;
   int oh, ow;
   int fh, fw;
@@ -151,19 +156,18 @@ struct testPoolDesc {
   int sh, sw;
 };
 
-void testPoolLayer(const testPoolDesc& pm) {
-  const std::string compareTypes[] = {"mkldnn_pool", "pool"};
-  TestConfig cfg;
-  cfg.layerConfig.set_type(compareTypes[0]);
-  cfg.layerConfig.set_size(pm.ch * pm.oh * pm.ow);
+static void getMKLDNNPoolConfig(TestConfig& cfg, const testPoolDesc& pm) {
+  cfg.layerConfig.set_type("mkldnn_pool");
+  cfg.layerConfig.set_size(pm.ic * pm.oh * pm.ow);
   cfg.inputDefs.push_back(
       {INPUT_DATA,
        "layer_0",
-       /* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
+       /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
        0});
   LayerInputConfig* input = cfg.layerConfig.add_inputs();
   PoolConfig* pool = input->mutable_pool_conf();
-  pool->set_channels(pm.ch);
+  pool->set_pool_type("avg-projection");
+  pool->set_channels(pm.ic);
   pool->set_img_size(pm.iw);
   pool->set_img_size_y(pm.ih);
   pool->set_output_x(pm.ow);
@@ -179,20 +183,21 @@ void testPoolLayer(const testPoolDesc& pm) {
   int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false);
   CHECK_EQ(ow, pm.ow) << "output size check failed";
   CHECK_EQ(oh, pm.oh) << "output size check failed";
+}
 
-  MKLDNNTester tester;
+void testPoolLayer(const testPoolDesc& pm) {
+  TestConfig dnnConfig;
+  getMKLDNNPoolConfig(dnnConfig, pm);
+  LayerInputConfig* input = dnnConfig.layerConfig.mutable_inputs(0);
+  PoolConfig* pool = input->mutable_pool_conf();
   for (auto type : {"max-projection", "avg-projection"}) {
     pool->set_pool_type(type);
-    TestConfig ref = cfg;
-    ref.layerConfig.set_type(compareTypes[1]);
-    for (auto bs : {pm.bs, 1}) {
-      tester.run(cfg, ref, bs, pm.ih, pm.iw);
-    }
+    RUN_MKLDNN_TEST_LAYER(dnnConfig, "pool", pm)
   }
 }
 
 TEST(MKLDNNLayer, PoolLayer) {
   /* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw*/
   testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2});
   testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2});
   testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2});
@@ -204,44 +209,36 @@ TEST(MKLDNNLayer, PoolLayer) {
 }
 
 struct testActDesc {
-  int bs, ch;
-  int ih, iw;
+  int bs, ic, ih, iw;
 };
 
 static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
   cfg.biasSize = 0;
   cfg.layerConfig.set_type("addto");
-  cfg.layerConfig.set_size(pm.ch * pm.ih * pm.iw);
-  cfg.inputDefs.push_back(
-      {INPUT_DATA,
-       "layer_0",
-       /* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
-       0});
+  size_t layerSize = pm.ic * pm.ih * pm.iw;
+  cfg.layerConfig.set_size(layerSize);
+  cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0});
   cfg.layerConfig.add_inputs();
 }
 
-void testActivation(std::string& type, const testActDesc& pm) {
-  const std::string compareTypes[] = {type, type.erase(0, 7)};
+void testActivation(std::string& actType, const testActDesc& pm) {
+  // TODO(TJ): mkldnn_softmax not implemented, paddle do not have elu activation
+  if (actType == "mkldnn_softmax" || actType == "mkldnn_elu") {
+    return;
+  }
+  const std::string compareTypes[] = {actType, actType.erase(0, 7)};
   TestConfig cfg;
   getAddtoConfig(cfg, pm);
 
   TestConfig ref = cfg;
   cfg.layerConfig.set_active_type(compareTypes[0]);
   ref.layerConfig.set_active_type(compareTypes[1]);
-  MKLDNNTester tester;
-  for (auto bs : {pm.bs, 1}) {
-    tester.run(cfg, ref, bs, pm.ih, pm.iw);
-  }
+  RUN_MKLDNN_TEST(cfg, ref, pm)
 }
 
 TEST(MKLDNNActivation, Activations) {
   auto types = MKLDNNActivation::getAllRegisteredTypes();
-  // TODO(TJ): mkldnn_softmax not implemented, paddle do not have elu activation
-  std::set<string> excluded{"mkldnn_softmax", "mkldnn_elu"};
   for (auto type : types) {
-    if (excluded.count(type)) {
-      continue;
-    }
+    /* bs, c, h, w*/
    testActivation(type, {16, 64, 32, 32});
   }
 }
```
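The two macros added at the top are what let every test body shrink to a config builder plus one line. Given their definitions in the first hunk, a call such as RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm) expands to roughly the following (hand-expanded for illustration; TestConfig and MKLDNNTester are the test fixtures used above):

```cpp
// Hand expansion of RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm):
TestConfig ref = dnnConfig;      // clone the MKL-DNN config...
ref.layerConfig.set_type("fc");  // ...and retype it as the CPU reference
MKLDNNTester tester;
for (auto bs : {pm.bs, 1}) {     // every case is also run with batch size 1
  tester.run(dnnConfig, ref, bs, pm.ih, pm.iw);
}
```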
python/paddle/trainer/config_parser.py

```diff
@@ -1565,6 +1565,10 @@ class LayerBase(object):
         self.config = g_config.model_config.layers.add()
         assert isinstance(self.config, LayerConfig)
+        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
+        mkldnn_acts = ['relu', 'tanh']
+        if use_mkldnn and active_type in mkldnn_acts:
+            active_type = "mkldnn_" + active_type
         self.config.name = name
         self.config.type = type
         self.config.active_type = active_type
```
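This Python hunk is the glue that makes the C++ work reachable from user configs: when the trainer runs with use_mkldnn and a layer's activation is one of the two types that now have MKL-DNN implementations, the parser silently rewrites the activation type. The same selection rule as a standalone sketch (rendered in C++ to match the rest of this page; resolveActiveType is an illustrative name, not a Paddle symbol):

```cpp
#include <iostream>
#include <set>
#include <string>

// Mirrors the added config_parser.py logic: prefix the activation type with
// "mkldnn_" only when use_mkldnn is on and an MKL-DNN version exists.
std::string resolveActiveType(std::string activeType, bool useMkldnn) {
  static const std::set<std::string> mkldnnActs = {"relu", "tanh"};
  if (useMkldnn && mkldnnActs.count(activeType)) {
    return "mkldnn_" + activeType;
  }
  return activeType;
}

int main() {
  std::cout << resolveActiveType("tanh", true) << '\n';     // mkldnn_tanh
  std::cout << resolveActiveType("sigmoid", true) << '\n';  // sigmoid
  std::cout << resolveActiveType("relu", false) << '\n';    // relu
  return 0;
}
```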