PaddlePaddle / PaddleDetection

Commit bc0d2557
Authored on Nov 20, 2017 by tensor-tang
make MKLDNNLayer input value as a vector
Parent commit: 3117d977
Showing 14 changed files with 129 additions and 150 deletions.
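At a glance: the commit moves the per-input MKLDNN buffers into the MKLDNNLayer base class and changes resetFwd so that every derived layer receives all of its input values as a vector. Single-input layers (batch norm, conv, fc, pool) now read inputs[0]; multi-input layers (addto, concat) consume the whole vector. The interface change, excerpted from the MKLDNNLayer.h diff below:

// before
virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
                      MKLDNNMatrixPtr& in,
                      MKLDNNMatrixPtr& out) = 0;
// after
virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
                      std::vector<MKLDNNMatrixPtr>& inputs,
                      MKLDNNMatrixPtr& out) = 0;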
paddle/gserver/layers/MKLDNNAddtoLayer.cpp      +5   -6
paddle/gserver/layers/MKLDNNAddtoLayer.h        +1   -14
paddle/gserver/layers/MKLDNNBatchNormLayer.cpp  +10  -9
paddle/gserver/layers/MKLDNNBatchNormLayer.h    +1   -1
paddle/gserver/layers/MKLDNNConcatLayer.cpp     +4   -10
paddle/gserver/layers/MKLDNNConcatLayer.h       +1   -15
paddle/gserver/layers/MKLDNNConvLayer.cpp       +13  -13
paddle/gserver/layers/MKLDNNConvLayer.h         +1   -1
paddle/gserver/layers/MKLDNNFcLayer.cpp         +18  -17
paddle/gserver/layers/MKLDNNFcLayer.h           +1   -1
paddle/gserver/layers/MKLDNNLayer.cpp           +31  -37
paddle/gserver/layers/MKLDNNLayer.h             +36  -19
paddle/gserver/layers/MKLDNNPoolLayer.cpp       +6   -6
paddle/gserver/layers/MKLDNNPoolLayer.h         +1   -1
paddle/gserver/layers/MKLDNNAddtoLayer.cpp

@@ -57,16 +57,15 @@ void MKLDNNAddtoLayer::reshape(
 }
 
 void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
-                                MKLDNNMatrixPtr& in,
+                                std::vector<MKLDNNMatrixPtr>& inputs,
                                 MKLDNNMatrixPtr& out) {
-  resetFwdBuffers(inVals_, biasVal_, out);
-  in = inVals_[0];
+  resetFwdBuffers(inputs, biasVal_, out);
 
   std::shared_ptr<sum::primitive_desc> fwdPD;
   std::shared_ptr<sum::primitive_desc> biasPD;
-  resetFwdPD(fwdPD, biasPD, inVals_, biasVal_, out);
+  resetFwdPD(fwdPD, biasPD, inputs, biasVal_, out);
 
-  resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, biasVal_, out);
+  resetFwdPipeline(pipeline, fwdPD, biasPD, inputs, biasVal_, out);
 }
 
 void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,

@@ -206,7 +205,7 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   inputs.resize(inputLayers_.size());
   for (size_t i = 0; i < inputs.size(); i++) {
-    resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
+    resetInGrad(inputs[i], inVals_[i]->getPrimitiveDesc(), i);
     CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
   }
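With the vector interface the addto layer no longer needs the old `in = inVals_[0];` back-assignment: the base class owns inVals_ and reads it directly. For orientation, a minimal sketch of how a multi-input layer can now populate its buffers per input index; the real resetFwdBuffers body is outside this diff, so treat the loop as illustrative only:

void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
                                       MKLDNNMatrixPtr& bias,
                                       MKLDNNMatrixPtr& out) {
  inputs.resize(inputLayers_.size());
  for (size_t i = 0; i < inputs.size(); i++) {
    // resetInValue(in, intPD, idx) records inVals_[idx], extInVals_[idx] and
    // cvtInVals_[idx] in the MKLDNNLayer base (see MKLDNNLayer.cpp below)
    resetInValue(inputs[i], nullptr, i);
  }
  // bias and output buffer setup omitted in this sketch
}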
paddle/gserver/layers/MKLDNNAddtoLayer.h

@@ -26,7 +26,6 @@ namespace paddle {
  */
 class MKLDNNAddtoLayer : public MKLDNNLayer {
 protected:
-  std::vector<MKLDNNMatrixPtr> inVals_;
   std::vector<MKLDNNMatrixPtr> inGrads_;
 
   // layer size == ic * ih * iw == oc * oh *ow, and can not be changed

@@ -53,7 +52,7 @@ public:
       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
 
   void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                MKLDNNMatrixPtr& in,
+                std::vector<MKLDNNMatrixPtr>& inputs,
                 MKLDNNMatrixPtr& out) override;
 
   void resetBwd(std::vector<mkldnn::primitive>& pipeline,

@@ -62,18 +61,6 @@ public:
   void updateWeights(const UpdateCallback& callback) override;
 
-  void printValueFormat() override {
-    for (size_t i = 0; i < inVals_.size(); ++i) {
-      VLOG(MKLDNN_FMTS) << i << " input: " << inVals_[i]->getFormat()
-                        << " >>>";
-    }
-    if (outVal_) {
-      VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> ";
-    }
-    if (extOutVal_) {
-      VLOG(MKLDNN_FMTS) << extOutVal_->getFormat();
-    }
-  }
-
   void printGradFormat() override {
     if (extOutGrad_) {
       VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat();
paddle/gserver/layers/MKLDNNBatchNormLayer.cpp

@@ -128,7 +128,7 @@ void MKLDNNBatchNormLayer::reshape(
 }
 
 void MKLDNNBatchNormLayer::resetFwd(std::vector<primitive>& pipeline,
-                                    MKLDNNMatrixPtr& in,
+                                    std::vector<MKLDNNMatrixPtr>& inputs,
                                     MKLDNNMatrixPtr& out) {
   // In training phase, it will always calculate mean and var,
   // so useGlobalStats must be false.

@@ -138,11 +138,11 @@ void MKLDNNBatchNormLayer::resetFwd(std::vector<primitive>& pipeline,
     useGlobalStats_ = false;
   }
 
-  resetFwdBuffers(in, wgtVal_, out);
+  resetFwdBuffers(inputs[0], wgtVal_, out);
 
-  resetFwdPD(fwdPD_, in, wgtVal_, out);
+  resetFwdPD(fwdPD_, inputs[0], wgtVal_, out);
 
-  resetFwdPipeline(pipeline, fwdPD_, in, wgtVal_, out);
+  resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, out);
 }
 
 void MKLDNNBatchNormLayer::resetBwd(std::vector<primitive>& pipeline,

@@ -256,9 +256,9 @@ void MKLDNNBatchNormLayer::resetFwdPipeline(
 void MKLDNNBatchNormLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
                                            MKLDNNMatrixPtr& wgt,
                                            MKLDNNMatrixPtr& out) {
-  CHECK(inVal_ && outVal_);
+  CHECK(inVals_[0] && outVal_);
   resetOutGrad(out, outVal_->getPrimitiveDesc());
-  resetInGrad(in, inVal_->getPrimitiveDesc());
+  resetInGrad(in, inVals_[0]->getPrimitiveDesc());
   if (gradScaleShift_) {
     CHECK(wgtVal_);
     resetWithMatrix(wgt, gradScaleShift_, wgtVal_->getPrimitiveDesc());

@@ -293,11 +293,12 @@ void MKLDNNBatchNormLayer::resetBwdPipeline(
   if (pd == nullptr) {
     return;
   }
-  CHECK(inVal_);
+  CHECK(inVals_[0]);
   bwdData_.reset(
       wgt && wgtVal_
-          ? new bn_bwd(*pd, *inVal_, *mean_, *var_, *out, *wgtVal_, *in, *wgt)
-          : new bn_bwd(*pd, *inVal_, *mean_, *var_, *out, *in));
+          ? new bn_bwd(
+                *pd, *inVals_[0], *mean_, *var_, *out, *wgtVal_, *in, *wgt)
+          : new bn_bwd(*pd, *inVals_[0], *mean_, *var_, *out, *in));
   pipeline.push_back(*bwdData_);
 }
paddle/gserver/layers/MKLDNNBatchNormLayer.h

@@ -76,7 +76,7 @@ public:
       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
 
   void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                MKLDNNMatrixPtr& in,
+                std::vector<MKLDNNMatrixPtr>& inputs,
                 MKLDNNMatrixPtr& out) override;
 
   void resetBwd(std::vector<mkldnn::primitive>& pipeline,
paddle/gserver/layers/MKLDNNConcatLayer.cpp

@@ -59,15 +59,14 @@ void MKLDNNConcatLayer::reshape(
 }
 
 void MKLDNNConcatLayer::resetFwd(std::vector<primitive>& pipeline,
-                                 MKLDNNMatrixPtr& in,
+                                 std::vector<MKLDNNMatrixPtr>& inputs,
                                  MKLDNNMatrixPtr& out) {
-  resetFwdBuffers(inVals_, out);
-  in = inVals_[0];
+  resetFwdBuffers(inputs, out);
 
   std::shared_ptr<concat::primitive_desc> fwdPD;
-  resetFwdPD(fwdPD, inVals_, out);
+  resetFwdPD(fwdPD, inputs, out);
 
-  resetFwdPipeline(pipeline, fwdPD, inVals_, out);
+  resetFwdPipeline(pipeline, fwdPD, inputs, out);
 }
 
 void MKLDNNConcatLayer::resetBwd(std::vector<primitive>& pipeline,

@@ -157,14 +156,9 @@ void MKLDNNConcatLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   inputs.resize(inputLayers_.size());
   for (size_t i = 0; i < inputs.size(); i++) {
     CHECK(inVals_[i]);
-    // resetInGrad will use inVal_
-    // TODO(TJ): change move inVals_ to MKLDNNLayer ans remove inVal_
-    inVal_ = inVals_[i];
     resetInGrad(inputs[i], inVals_[i]->getPrimitiveDesc(), i);
     CHECK_PRIMITIVE_DESC_EQ(inputs[i], inVals_[i]->getPrimitiveDesc());
   }
-  // change back, inVal_ always save the input 0
-  inVal_ = inVals_[0];
 }
 
 void MKLDNNConcatLayer::resetBwdPipeline(
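The deleted lines in resetBwdBuffers close out the old TODO ("change move inVals_ to MKLDNNLayer ans remove inVal_"): inVals_ now lives in the base class and resetInGrad takes the input index explicitly, so the temporary `inVal_ = inVals_[i]` swap and the restore to `inVals_[0]` disappear. The surviving loop body is simply:

for (size_t i = 0; i < inputs.size(); i++) {
  CHECK(inVals_[i]);
  // the explicit index replaces the old inVal_ swap
  resetInGrad(inputs[i], inVals_[i]->getPrimitiveDesc(), i);
  CHECK_PRIMITIVE_DESC_EQ(inputs[i], inVals_[i]->getPrimitiveDesc());
}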
paddle/gserver/layers/MKLDNNConcatLayer.h

@@ -26,7 +26,6 @@ namespace paddle {
  */
 class MKLDNNConcatLayer : public MKLDNNLayer {
 protected:
-  std::vector<MKLDNNMatrixPtr> inVals_;
   std::vector<MKLDNNMatrixPtr> inGrads_;
   std::vector<std::shared_ptr<mkldnn::primitive>> bwds_;
   // input channel numbers

@@ -50,7 +49,7 @@ public:
       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
 
   void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                MKLDNNMatrixPtr& in,
+                std::vector<MKLDNNMatrixPtr>& inputs,
                 MKLDNNMatrixPtr& out) override;
 
   void resetBwd(std::vector<mkldnn::primitive>& pipeline,

@@ -68,19 +67,6 @@ public:
         << ", " << ow_;
   }
 
-  void printValueFormat() override {
-    for (size_t i = 0; i < inVals_.size(); ++i) {
-      VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
-                        << ": " << inVals_[i]->getFormat() << " >>>";
-    }
-    if (outVal_) {
-      VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> ";
-    }
-    if (extOutVal_) {
-      VLOG(MKLDNN_FMTS) << extOutVal_->getFormat();
-    }
-  }
-
   void printGradFormat() override {
     if (extOutGrad_) {
       VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat();
paddle/gserver/layers/MKLDNNConvLayer.cpp

@@ -105,13 +105,13 @@ void MKLDNNConvLayer::reshape(
 }
 
 void MKLDNNConvLayer::resetFwd(std::vector<primitive>& pipeline,
-                               MKLDNNMatrixPtr& in,
+                               std::vector<MKLDNNMatrixPtr>& inputs,
                                MKLDNNMatrixPtr& out) {
   resetFwdPD(fwdPD_);
 
-  resetFwdBuffers(fwdPD_, in, wgtVal_, biasVal_, out);
+  resetFwdBuffers(fwdPD_, inputs[0], wgtVal_, biasVal_, out);
 
-  resetFwdPipeline(pipeline, fwdPD_, in, wgtVal_, biasVal_, out);
+  resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, biasVal_, out);
 }
 
 void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,

@@ -232,14 +232,14 @@ void MKLDNNConvLayer::resetBwdWgtPD(
   loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
 
   // create backward weight using input, output and weight value memory desc
-  CHECK(inVal_) << "Should have internal input value";
+  CHECK(inVals_[0]) << "Should have internal input value";
   CHECK(outVal_) << "Should have internal output value";
   CHECK(wgtVal_) << "Should have weight value";
   algorithm algo = algorithm::convolution_direct;
   padding_kind padKind = padding_kind::zero;
   auto bwdWgtDesc = biasVal_ != nullptr
                         ? conv_bwdWgt::desc(algo,
-                                            inVal_->getMemoryDesc(),
+                                            inVals_[0]->getMemoryDesc(),
                                             wgtVal_->getMemoryDesc(),
                                             biasVal_->getMemoryDesc(),
                                             outVal_->getMemoryDesc(),

@@ -248,7 +248,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(
                                             padR,
                                             padKind)
                         : conv_bwdWgt::desc(algo,
-                                            inVal_->getMemoryDesc(),
+                                            inVals_[0]->getMemoryDesc(),
                                             wgtVal_->getMemoryDesc(),
                                             outVal_->getMemoryDesc(),
                                             strides,

@@ -256,7 +256,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(
                                             padR,
                                             padKind);
   pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
-  CHECK_PRIMITIVE_DESC_EQ(inVal_, pd->src_primitive_desc());
+  CHECK_PRIMITIVE_DESC_EQ(inVals_[0], pd->src_primitive_desc());
   CHECK_PRIMITIVE_DESC_EQ(
       outVal_,
       pd->diff_dst_primitive_desc(),

@@ -276,12 +276,12 @@ void MKLDNNConvLayer::resetBwdDataPD(
   memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
   loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
 
-  CHECK(inVal_) << "Should have internal input value";
+  CHECK(inVals_[0]) << "Should have internal input value";
   CHECK(outVal_) << "Should have internal output value";
   // create backward data using input and output value memory desc
   // but using weight memory desc with any format
   auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct,
-                                        inVal_->getMemoryDesc(),
+                                        inVals_[0]->getMemoryDesc(),
                                         MKLDNNMatrix::createMemoryDesc(wgtDims),
                                         outVal_->getMemoryDesc(),
                                         strides,

@@ -290,7 +290,7 @@ void MKLDNNConvLayer::resetBwdDataPD(
                                        padding_kind::zero);
   pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
   CHECK_PRIMITIVE_DESC_EQ(
-      inVal_,
+      inVals_[0],
       pd->diff_src_primitive_desc(),
       "primitive desc of in value and grad should be equal");
   CHECK_PRIMITIVE_DESC_EQ(

@@ -342,12 +342,12 @@ void MKLDNNConvLayer::resetBwdPipeline(
     MKLDNNMatrixPtr& wgt,
     MKLDNNMatrixPtr& bias,
     MKLDNNMatrixPtr& out) {
-  CHECK(inVal_);
+  CHECK(inVals_[0]);
   // add bwdWgt handle
   if (bias) {
-    bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias));
+    bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVals_[0], *out, *wgt, *bias));
   } else {
-    bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt));
+    bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVals_[0], *out, *wgt));
   }
   pipeline.push_back(*bwdWgt_);
paddle/gserver/layers/MKLDNNConvLayer.h

@@ -72,7 +72,7 @@ public:
       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
 
   void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                MKLDNNMatrixPtr& in,
+                std::vector<MKLDNNMatrixPtr>& inputs,
                 MKLDNNMatrixPtr& out) override;
 
   void resetBwd(std::vector<mkldnn::primitive>& pipeline,
paddle/gserver/layers/MKLDNNFcLayer.cpp

@@ -87,13 +87,13 @@ void MKLDNNFcLayer::reshape(
 }
 
 void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
-                             MKLDNNMatrixPtr& in,
+                             std::vector<MKLDNNMatrixPtr>& inputs,
                              MKLDNNMatrixPtr& out) {
-  resetFwdBuffers(in, wgtVal_, biasVal_, out);
+  resetFwdBuffers(inputs[0], wgtVal_, biasVal_, out);
 
-  resetFwdPD(fwdPD_, in, wgtVal_, biasVal_, out);
+  resetFwdPD(fwdPD_, inputs[0], wgtVal_, biasVal_, out);
 
-  resetFwdPipeline(pipeline, fwdPD_, in, wgtVal_, biasVal_, out);
+  resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, biasVal_, out);
 }
 
 void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,

@@ -189,9 +189,9 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
                                     MKLDNNMatrixPtr& wgt,
                                     MKLDNNMatrixPtr& bias,
                                     MKLDNNMatrixPtr& out) {
-  CHECK(inVal_ && outVal_);
+  CHECK(inVals_[0] && outVal_);
   resetOutGrad(out, outVal_->getPrimitiveDesc());
-  resetInGrad(in, inVal_->getPrimitiveDesc());
+  resetInGrad(in, inVals_[0]->getPrimitiveDesc());
 
   CHECK(wgtVal_);
   resetWithMatrix(wgt, weight_->getWGrad(), wgtVal_->getPrimitiveDesc());

@@ -208,14 +208,15 @@ void MKLDNNFcLayer::resetBwdWgtPD(
     MKLDNNMatrixPtr& wgt,
     MKLDNNMatrixPtr& bias,
     MKLDNNMatrixPtr& out) {
-  CHECK(inVal_);
-  fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
-                                                      wgt->getMemoryDesc(),
-                                                      bias->getMemoryDesc(),
-                                                      out->getMemoryDesc())
-                                    : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
-                                                      wgt->getMemoryDesc(),
-                                                      out->getMemoryDesc());
+  CHECK(inVals_[0]);
+  fc_bwdWgt::desc bwdWgtDesc =
+      bias ? fc_bwdWgt::desc(inVals_[0]->getMemoryDesc(),
+                             wgt->getMemoryDesc(),
+                             bias->getMemoryDesc(),
+                             out->getMemoryDesc())
+           : fc_bwdWgt::desc(inVals_[0]->getMemoryDesc(),
+                             wgt->getMemoryDesc(),
+                             out->getMemoryDesc());
   pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
 }

@@ -241,11 +242,11 @@ void MKLDNNFcLayer::resetBwdPipeline(
     MKLDNNMatrixPtr& wgt,
     MKLDNNMatrixPtr& bias,
     MKLDNNMatrixPtr& out) {
-  CHECK(inVal_);
+  CHECK(inVals_[0]);
   if (bias) {
-    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
+    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVals_[0], *out, *wgt, *bias));
   } else {
-    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
+    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVals_[0], *out, *wgt));
   }
   pipeline.push_back(*bwdWgt_);
paddle/gserver/layers/MKLDNNFcLayer.h

@@ -55,7 +55,7 @@ public:
       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
 
   void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                MKLDNNMatrixPtr& in,
+                std::vector<MKLDNNMatrixPtr>& inputs,
                 MKLDNNMatrixPtr& out) override;
 
   void resetBwd(std::vector<mkldnn::primitive>& pipeline,
paddle/gserver/layers/MKLDNNLayer.cpp

@@ -53,25 +53,17 @@ void MKLDNNLayer::forward(PassType passType) {
     VLOG(MKLDNN_BASE) << getName() << " reset mkldnn forward";
     // reset when input total sizes changed, not only the batchsize
     inputElemenCnt_ = elemenCnt;
-    pipelineFwd_.clear();
     reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
     printSizeInfo();
-    // all cpu device output grad or value share output's
+    // the output_.value and output_.grad are shared with CPU device
     shareCPUDevice();
-    resetFwd(pipelineFwd_, inVal_, outVal_);
-    // MKLDNNLayer output value should be MKLDNNMatrix
-    // so external output value is necessary.
-    // Then external input value is not necessary,
-    // since input may be mkldnn internal buffer.
-    CHECK(extOutVal_) << "external output value is necessary";
-    output_.value = std::dynamic_pointer_cast<Matrix>(extOutVal_);
-    CHECK(inVal_ && outVal_) << "internal memories are necessary";
-    if (cvtInVal_) {
-      pipelineFwd_.insert(pipelineFwd_.begin(), *cvtInVal_);
-    }
-    if (cvtOutVal_) {
-      pipelineFwd_.push_back(*cvtOutVal_);
-    }
+    pipelineFwd_.clear();
+    inVals_.resize(inputLayers_.size(), nullptr);
+    extInVals_.resize(inputLayers_.size(), nullptr);
+    cvtInVals_.resize(inputLayers_.size(), nullptr);
+    resetFwd(pipelineFwd_, inVals_, outVal_);
+    prepareValueConversions(pipelineFwd_);
     convertWeightsFromPaddle();
     printValueFormat();
     needResetBwd_ = true;

@@ -80,8 +72,8 @@ void MKLDNNLayer::forward(PassType passType) {
   if (inputLayers_[0]->getType() == "data" && inputLayers_.size() == 1) {
     // Update input value data when input layer is "data" type,
     // since the input value data address might be changed.
-    CHECK(extInVal_);
-    extInVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+    CHECK(extInVals_[0]);
+    extInVals_[0]->setData(getInputValue(0, CPU_DEVICE)->getData());
   }
 
   if (!outputOnlyMKLDNN_) {

@@ -141,8 +133,8 @@ void MKLDNNLayer::backward(const UpdateCallback& callback) {
 void MKLDNNLayer::reshapeInput(int& batchsize,
                                int& height,
                                int& width,
-                               size_t inputIdx) {
-  const Argument& input = inputLayers_[inputIdx]->getOutput();
+                               size_t idx) {
+  const Argument& input = inputLayers_[idx]->getOutput();
   batchsize = input.getBatchSize();
   int h = input.getFrameHeight();
   int w = input.getFrameWidth();

@@ -176,29 +168,30 @@ void MKLDNNLayer::resetWithMatrix(MKLDNNMatrixPtr& dnn,
 void MKLDNNLayer::resetInValue(
     MKLDNNMatrixPtr& in,
     const std::shared_ptr<memory::primitive_desc>& intPD,
-    size_t inputIdx,
+    size_t idx,
     int inputChannel) {
-  cvtInVal_ = nullptr;
-  extInVal_ = nullptr;
+  cvtInVals_[idx] = nullptr;
+  extInVals_[idx] = nullptr;
   in = nullptr;
   inputChannel = inputChannel == 0 ? ic_ : inputChannel;
   CHECK_GT(bs_ * inputChannel * ih_ * iw_, 0);
   auto extPD = MKLDNNMatrix::createPrimitiveDesc(
       {bs_, inputChannel, ih_, iw_}, format::nchw, engine_);
-  const MatrixPtr& inMat = inputLayers_[inputIdx]->getOutputValue();
-  extInVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
-  CHECK_EQ(inputIsOnlyMKLDNN(), extInVal_ != nullptr);
-  if (extInVal_ == nullptr || extInVal_->getFormat() == format::nc) {
-    extInVal_ = MKLDNNMatrix::create(extPD, inMat);
+  const MatrixPtr& inMat = inputLayers_[idx]->getOutputValue();
+  extInVals_[idx] = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
+  CHECK_EQ(inputIsOnlyMKLDNN(), extInVals_[idx] != nullptr);
+  if (extInVals_[idx] == nullptr ||
+      extInVals_[idx]->getFormat() == format::nc) {
+    extInVals_[idx] = MKLDNNMatrix::create(extPD, inMat);
   }
-  in = extInVal_;
+  in = extInVals_[idx];
   if (nullptr == intPD || in->getPrimitiveDesc() == *intPD) {
     return;
   }
   // need create reorder
   in = MKLDNNMatrix::create(*intPD);
-  cvtInVal_ = MKLDNNMatrix::createReorder(extInVal_, in);
-  CHECK(cvtInVal_) << "should not be emptry";
+  cvtInVals_[idx] = MKLDNNMatrix::createReorder(extInVals_[idx], in);
+  CHECK(cvtInVals_[idx]) << "should not be emptry";
 }
 
 void MKLDNNLayer::resetOutValue(MKLDNNMatrixPtr& out,

@@ -220,11 +213,11 @@ void MKLDNNLayer::resetOutValue(MKLDNNMatrixPtr& out,
 void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
                               memory::primitive_desc intPD,
-                              size_t inputIdx) {
+                              size_t idx) {
   cvtInGrad_ = nullptr;
   extInGrad_ = nullptr;
   in = nullptr;
-  LayerPtr& input = inputLayers_[inputIdx];
+  LayerPtr& input = inputLayers_[idx];
   if (input->getOutputGrad() == nullptr) {
     // no need input grad
     return;

@@ -239,7 +232,7 @@ void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
   in = MKLDNNMatrix::create(intPD, inMat);
   Argument& arg = input->getOutput(this->getName());
   arg.grad = std::dynamic_pointer_cast<Matrix>(in);
-  CHECK_PRIMITIVE_DESC_EQ(inVal_, intPD);
+  CHECK_PRIMITIVE_DESC_EQ(inVals_[idx], intPD);
   if (inputIsOnlyMKLDNN()) {
     return;
   }

@@ -249,10 +242,11 @@ void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
     return;
   }
   // need create reorder
-  CHECK(extInVal_ != nullptr && isPaddleFormat(extInVal_->getFormat()))
+  CHECK(extInVals_[idx] != nullptr &&
+        isPaddleFormat(extInVals_[idx]->getFormat()))
       << "should have external input value and the format must be nchw(nc)";
-  extInGrad_ = MKLDNNMatrix::create(extInVal_->getPrimitiveDesc(), inMat);
-  CHECK_PRIMITIVE_DESC_EQ(inVal_, intPD);
+  extInGrad_ = MKLDNNMatrix::create(extInVals_[idx]->getPrimitiveDesc(), inMat);
+  CHECK_PRIMITIVE_DESC_EQ(inVals_[idx], intPD);
   in = MKLDNNMatrix::create(intPD);
   cvtInGrad_ = MKLDNNMatrix::createReorder(in, extInGrad_);
   CHECK(cvtInGrad_);
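Taken together, the forward() hunk above gives the new per-pass flow; condensed from the diff for reference (not new code):

pipelineFwd_.clear();
inVals_.resize(inputLayers_.size(), nullptr);     // internal values, one per input
extInVals_.resize(inputLayers_.size(), nullptr);  // external nchw/nc views
cvtInVals_.resize(inputLayers_.size(), nullptr);  // external->internal reorders
resetFwd(pipelineFwd_, inVals_, outVal_);         // derived layer fills the slots
prepareValueConversions(pipelineFwd_);            // splice reorders into the pipeline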
paddle/gserver/layers/MKLDNNLayer.h

@@ -68,17 +68,17 @@ protected:
    * When all layers are mkldnn layers, they could save internal data.
    */
   // below MKLDNNMatrix buffers are all internal buffers
-  MKLDNNMatrixPtr inVal_;
+  std::vector<MKLDNNMatrixPtr> inVals_;
   MKLDNNMatrixPtr inGrad_;
   MKLDNNMatrixPtr outVal_;
   MKLDNNMatrixPtr outGrad_;
   // below are external value and grad
-  MKLDNNMatrixPtr extInVal_;
+  std::vector<MKLDNNMatrixPtr> extInVals_;
   MKLDNNMatrixPtr extInGrad_;
   MKLDNNMatrixPtr extOutVal_;
   MKLDNNMatrixPtr extOutGrad_;
   // convert handle between external and internal buffers
-  std::shared_ptr<mkldnn::reorder> cvtInVal_;
+  std::vector<std::shared_ptr<mkldnn::reorder>> cvtInVals_;
   std::shared_ptr<mkldnn::reorder> cvtInGrad_;
   std::shared_ptr<mkldnn::reorder> cvtOutVal_;
   std::shared_ptr<mkldnn::reorder> cvtOutGrad_;

@@ -138,7 +138,7 @@ public:
    * weight and bias buffers should be coverd by child class itself
    */
   virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                        MKLDNNMatrixPtr& in,
+                        std::vector<MKLDNNMatrixPtr>& inputs,
                         MKLDNNMatrixPtr& out) = 0;
 
   /**

@@ -176,10 +176,7 @@ protected:
   /**
    * reshape the input image sizes and input batchsize
    */
-  void reshapeInput(int& batchsize,
-                    int& height,
-                    int& width,
-                    size_t inputIdx = 0);
+  void reshapeInput(int& batchsize, int& height, int& width, size_t idx = 0);
 
   /**
    * reshape output image sizes

@@ -202,7 +199,7 @@ protected:
   void resetInValue(
       MKLDNNMatrixPtr& in,
       const std::shared_ptr<mkldnn::memory::primitive_desc>& intPD = nullptr,
-      size_t inputIdx = 0,
+      size_t idx = 0,
       int inputChannel = 0);
 
   /**

@@ -218,7 +215,7 @@ protected:
    */
   void resetInGrad(MKLDNNMatrixPtr& in,
                    mkldnn::memory::primitive_desc intPD,
-                   size_t inputIdx = 0);
+                   size_t idx = 0);
 
   /**
    * reset output grad from internal primitive desc.

@@ -296,17 +293,19 @@ protected:
    * print the mkldnn memory format of value
    */
   virtual void printValueFormat() {
-    if (extInVal_) {
-      VLOG(MKLDNN_FMTS) << extInVal_->getFormat() << " >>> ";
-    }
-    if (inVal_) {
-      VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>>";
-    }
+    for (size_t i = 0; i < inVals_.size(); ++i) {
+      if (!inVals_[i]) {
+        continue;
+      }
+      VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
+                        << ": " << (extInVals_[i] ? extInVals_[i]->getFormat()
+                                                  : inVals_[i]->getFormat())
+                        << " >>> " << inVals_[i]->getFormat() << " >>>";
+    }
     if (outVal_) {
-      VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> ";
-    }
-    if (extOutVal_) {
-      VLOG(MKLDNN_FMTS) << extOutVal_->getFormat();
+      VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> "
+                        << (extOutVal_ ? extOutVal_->getFormat()
+                                       : outVal_->getFormat());
     }
     if (wgtVal_) {
       VLOG(MKLDNN_FMTS) << "Weight value format: " << wgtVal_->getFormat();

@@ -437,6 +436,24 @@ private:
       outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
     }
   }
+
+  void prepareValueConversions(std::vector<mkldnn::primitive>& pipeline) {
+    // MKLDNNLayer output value should be MKLDNNMatrix
+    // so external output value is necessary.
+    // Then external input value is not necessary,
+    // since input may be mkldnn internal buffer.
+    CHECK(extOutVal_) << "external output value is necessary";
+    output_.value = std::dynamic_pointer_cast<Matrix>(extOutVal_);
+    CHECK(inVals_[0] && outVal_) << "internal memories are necessary";
+    for (size_t i = 0; i < cvtInVals_.size(); ++i) {
+      if (cvtInVals_[i]) {
+        pipeline.insert(pipeline.begin(), *cvtInVals_[i]);
+      }
+    }
+    if (cvtOutVal_) {
+      pipeline.push_back(*cvtOutVal_);
+    }
+  }
 };
 
 }  // namespace paddle
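The three vectors introduced in this header are parallel arrays indexed by input position. A hedged summary of the invariant they maintain after resetFwd (the loop and CHECK are illustrative, not code from this commit):

// inVals_[i]    : internal MKLDNN buffer the primitives compute on
// extInVals_[i] : external paddle-format (nchw/nc) view, when one exists
// cvtInVals_[i] : reorder from extInVals_[i] to inVals_[i]; null when the
//                 external buffer already carries the internal format
for (size_t i = 0; i < inVals_.size(); ++i) {
  if (cvtInVals_[i]) {
    CHECK(extInVals_[i] && inVals_[i]);  // a reorder needs both endpoints
  }
}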
paddle/gserver/layers/MKLDNNPoolLayer.cpp

@@ -74,13 +74,13 @@ void MKLDNNPoolLayer::reshape(
 }
 
 void MKLDNNPoolLayer::resetFwd(std::vector<primitive>& pipeline,
-                               MKLDNNMatrixPtr& in,
+                               std::vector<MKLDNNMatrixPtr>& inputs,
                                MKLDNNMatrixPtr& out) {
-  resetFwdBuffers(in, out);
+  resetFwdBuffers(inputs[0], out);
 
-  resetFwdPD(fwdPD_, in, out);
+  resetFwdPD(fwdPD_, inputs[0], out);
 
-  resetFwdPipeline(pipeline, fwdPD_, in, out);
+  resetFwdPipeline(pipeline, fwdPD_, inputs[0], out);
 }
 
 void MKLDNNPoolLayer::resetBwd(std::vector<primitive>& pipeline,

@@ -147,9 +147,9 @@ void MKLDNNPoolLayer::resetFwdPipeline(
 void MKLDNNPoolLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
                                       MKLDNNMatrixPtr& out) {
-  CHECK(inVal_ && outVal_);
+  CHECK(inVals_[0] && outVal_);
   resetOutGrad(out, outVal_->getPrimitiveDesc());
-  resetInGrad(in, inVal_->getPrimitiveDesc());
+  resetInGrad(in, inVals_[0]->getPrimitiveDesc());
 }
 
 void MKLDNNPoolLayer::resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
paddle/gserver/layers/MKLDNNPoolLayer.h

@@ -56,7 +56,7 @@ public:
       int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
 
   void resetFwd(std::vector<mkldnn::primitive>& pipeline,
-                MKLDNNMatrixPtr& in,
+                std::vector<MKLDNNMatrixPtr>& inputs,
                 MKLDNNMatrixPtr& out) override;
 
   void resetBwd(std::vector<mkldnn::primitive>& pipeline,