机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 5c892db6
Authored on Oct 20, 2017 by tensor-tang

remove unused code
refine comments and bias fix typo and todo

Parent: 4f41eaf7
Showing 4 changed files with 37 additions and 38 deletions (+37, -38)
paddle/gserver/layers/MKLDNNConvLayer.cpp   +4   -4
paddle/gserver/layers/MKLDNNFcLayer.cpp     +9   -12
paddle/gserver/layers/MKLDNNLayer.cpp       +4   -3
paddle/gserver/layers/MKLDNNLayer.h         +20  -19
paddle/gserver/layers/MKLDNNConvLayer.cpp

@@ -210,11 +210,11 @@ void MKLDNNConvLayer::resetFwdBuffers(
   resetWithMatrix(wgt, weight_->getW(), pd->weights_primitive_desc());
 
-  bias = nullptr;
-  if (biases_ == nullptr || biases_->getW() == nullptr) {
-    return;
+  if (biases_ && biases_->getW()) {
+    resetWithMatrix(bias, biases_->getW(), pd->bias_primitive_desc());
+  } else {
+    bias = nullptr;
   }
-  resetWithMatrix(bias, biases_->getW(), pd->bias_primitive_desc());
 }
 
 void MKLDNNConvLayer::resetFwdPipeline(
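The hunk trades the early-return guard for an explicit if/else, so bias is written on every path through resetFwdBuffers and the shape matches the FC layer below. A minimal standalone sketch of the resulting pattern, with simplified stand-in types (Buffer, Params and resetBias are illustrative, not the actual Paddle classes):

#include <iostream>
#include <memory>

using Buffer = std::shared_ptr<int>;   // stand-in for MKLDNNMatrixPtr
struct Params { Buffer w; };           // stand-in for the biases_ parameter

// Every call either fills `bias` or clears it, so a value left over from an
// earlier reset can never leak through.
void resetBias(Buffer& bias, const std::shared_ptr<Params>& biases) {
  if (biases && biases->w) {
    bias = biases->w;    // reset from the parameter, as resetWithMatrix does
  } else {
    bias = nullptr;      // no bias parameter: clear the output argument
  }
}

int main() {
  Buffer bias = std::make_shared<int>(42);   // stale value from a previous reset
  resetBias(bias, nullptr);                  // layer configured without a bias
  std::cout << (bias == nullptr) << "\n";    // prints 1: the stale value is cleared
  return 0;
}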
paddle/gserver/layers/MKLDNNFcLayer.cpp

@@ -134,10 +134,6 @@ void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
   CHECK(in);
   in->downSpatial();
 
-  // if (extInVal_) {
-  //   extInVal_->downSpatial();
-  // }
-
   auto outPD =
       MKLDNNMatrix::createPrimitiveDesc({bs_, oc_}, format::nc, engine_);
   resetOutValue(out, outPD);

@@ -153,11 +149,12 @@ void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
   resetWithMatrix(wgt, weight_->getW(), wgtPD);
   wgt->downSpatial();
 
-  if (biases_ == nullptr || biases_->getW() == nullptr) {
-    return;
+  if (biases_ && biases_->getW()) {
+    auto biasPD = MKLDNNMatrix::createPrimitiveDesc({oc_}, format::x, engine_);
+    resetWithMatrix(bias, biases_->getW(), biasPD);
+  } else {
+    bias = nullptr;
   }
-  auto biasPD = MKLDNNMatrix::createPrimitiveDesc({oc_}, format::x, engine_);
-  resetWithMatrix(bias, biases_->getW(), biasPD);
 }
 
 void MKLDNNFcLayer::resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,

@@ -207,11 +204,11 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
   CHECK(wgtVal_);
   resetWithMatrix(wgt, weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
 
-  bias = nullptr;
-  if (biasVal_ == nullptr) {
-    return;
+  if (biasVal_) {
+    resetWithMatrix(bias, biases_->getWGrad(), biasVal_->getPrimitiveDesc());
+  } else {
+    bias = nullptr;
   }
-  resetWithMatrix(bias, biases_->getWGrad(), biasVal_->getPrimitiveDesc());
 }
 
 void MKLDNNFcLayer::resetBwdWgtPD(
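The middle hunk looks like the "bias fix" named in the commit message: unlike the conv and backward variants, the old forward path returned early without ever assigning the by-reference bias argument, so a pointer left over from an earlier reset would survive when the layer has no bias parameter. A hypothetical reduction of that failure mode (Buffer and resetBiasOldShape are illustrative names, not Paddle code):

#include <iostream>
#include <memory>

using Buffer = std::shared_ptr<int>;   // stand-in for MKLDNNMatrixPtr

// The pre-commit shape: when there is no bias parameter, the function returns
// before touching `bias`, so whatever the caller's reference already held
// survives the "reset".
void resetBiasOldShape(Buffer& bias, const Buffer& param) {
  if (param == nullptr) {
    return;              // bias is never cleared on this path
  }
  bias = param;
}

int main() {
  Buffer bias = std::make_shared<int>(7);   // left over from a previous reset
  resetBiasOldShape(bias, nullptr);         // layer configured without a bias
  std::cout << (bias != nullptr) << "\n";   // prints 1: still points at old data
  return 0;
}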
paddle/gserver/layers/MKLDNNLayer.cpp

@@ -60,7 +60,7 @@ void MKLDNNLayer::forward(PassType passType) {
     resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
     // MKLDNNLayer output value should be MKLDNNMatrix
     // so external output value is necessary.
-    // then external input value is not necessary,
+    // Then external input value is not necessary,
     // since input may be mkldnn internal buffer.
     CHECK(extOutVal_) << "external output value is necessary";
     output_.value = std::dynamic_pointer_cast<Matrix>(extOutVal_);
@@ -235,8 +235,8 @@ void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
   in = MKLDNNMatrix::create(intPD, inMat);
   Argument& arg = input->getOutput(this->getName());
   arg.grad = std::dynamic_pointer_cast<Matrix>(in);
-  CHECK(inVal_ != nullptr && inVal_->getPrimitiveDesc() == intPD)
-      << "should have internal input value and primitive desc must equal";
+  CHECK(inVal_);
+  CHECK(inVal_->getPrimitiveDesc() == intPD) << "the primitive desc must equal";
   if (inputIsOnlyMKLDNN()) {
     return;
   }
@@ -246,6 +246,7 @@ void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
     return;
   }
   // need create reorder
+  // TODO(TJ): add macro definition to simplify it
   CHECK(extInVal_ != nullptr && isPaddleFormat(extInVal_->getFormat()))
       << "should have external input value and the format must be nchw(nc)";
   extInGrad_ = MKLDNNMatrix::create(extInVal_->getPrimitiveDesc(), inMat);
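The change at line 235 reshapes the inVal_ check; with a glog-style CHECK, the practical difference between one compound condition and two separate ones is which failure message gets printed. A self-contained sketch with a tiny stand-in macro (FatalCheck is illustrative, not Paddle's actual CHECK):

#include <cstdlib>
#include <iostream>
#include <sstream>

// Tiny stand-in for a glog-style CHECK(cond) << "message"; not Paddle's macro.
struct FatalCheck {
  bool ok_;
  std::ostringstream msg_;
  FatalCheck(bool ok, const char* expr) : ok_(ok) {
    if (!ok_) msg_ << "Check failed: " << expr << " ";
  }
  template <typename T>
  FatalCheck& operator<<(const T& v) {
    if (!ok_) msg_ << v;
    return *this;
  }
  ~FatalCheck() {
    if (!ok_) {
      std::cerr << msg_.str() << std::endl;
      std::abort();   // abort at the end of the failing statement
    }
  }
};
#define CHECK(cond) FatalCheck((cond), #cond)

int main() {
  const int* inVal = nullptr;  // simulate a missing internal input value
  const int intPD = 3;
  // Compound form: CHECK(inVal != nullptr && *inVal == intPD) << "one message";
  //   a failure cannot say which of the two conditions was false.
  // Split form: the first failing condition aborts with its own message and
  //   also guards the dereference in the second check.
  CHECK(inVal != nullptr) << "should have internal input value";
  CHECK(*inVal == intPD) << "the primitive desc must equal";
  return 0;
}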
paddle/gserver/layers/MKLDNNLayer.h

@@ -58,14 +58,15 @@ protected:
   std::vector<mkldnn::primitive> pipelineFwd_;
   std::vector<mkldnn::primitive> pipelineBwd_;
 
-  /// value and grad are seperated as internal and external buffers.
-  /// each MKLDNNLayer must init or reset internal buffer at least,
-  /// and the external buffer format is always nchw of nc(when h==w==1),
-  /// which is the same format as paddle.
-  /// The output_.value and output_.grad always save the external data,
-  /// when mixed with cpu device.
-  /// When all layers are mkldnn layers, they could save internal data.
-  /// below MKLDNNMatrix buffers are all internal buffers
+  /* Value and grad are seperated as internal and external buffers.
+   * Each MKLDNNLayer must init or reset internal buffer at least,
+   * and the external buffer format is always nchw of nc(when h==w==1),
+   * which is the same format as paddle.
+   * The output_.value and output_.grad always save the external data,
+   * when mixed with cpu device.
+   * When all layers are mkldnn layers, they could save internal data.
+   */
+  // below MKLDNNMatrix buffers are all internal buffers
   MKLDNNMatrixPtr inVal_;
   MKLDNNMatrixPtr inGrad_;
   MKLDNNMatrixPtr outVal_;
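The comment block rewritten above documents the design: each MKLDNNLayer keeps internal buffers in whatever format mkldnn prefers plus external buffers fixed to paddle's nchw/nc layout, and output_.value / output_.grad expose the external data when CPU layers are mixed in. A rough stand-in sketch of that split, assuming illustrative types (Buffer and MockLayer are not Paddle classes):

#include <iostream>
#include <string>

// Illustrative stand-ins only; the real classes are MKLDNNMatrix and Layer.
struct Buffer {
  std::string format;  // "nchw"/"nc" for paddle-visible data, e.g. "nChw8c" internally
};

struct MockLayer {
  Buffer outVal{"nChw8c"};    // internal buffer: format chosen by mkldnn
  Buffer extOutVal{"nchw"};   // external buffer: always paddle's nchw (or nc)

  // The externally visible output must be the paddle-format buffer when the
  // next layer runs on CPU; when every layer is an mkldnn layer the internal
  // buffer can be handed over directly.
  const Buffer& publicOutput(bool allLayersAreMKLDNN) const {
    return allLayersAreMKLDNN ? outVal : extOutVal;
  }
};

int main() {
  MockLayer layer;
  std::cout << layer.publicOutput(false).format << "\n";  // nchw for a CPU consumer
  std::cout << layer.publicOutput(true).format << "\n";   // internal format kept
  return 0;
}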
@@ -120,8 +121,8 @@ public:
   ~MKLDNNLayer() {}
 
   virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
 
-  void forward(PassType passType) override;
-  void backward(const UpdateCallback& callback) override;
+  virtual void forward(PassType passType);
+  virtual void backward(const UpdateCallback& callback);
 
   /**
    * reshape the input image sizes
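Both spellings touched by the hunk above declare the same overriding functions; override only adds a compile-time check that a matching virtual exists in the base class. A minimal sketch with simplified stand-ins (Layer and PassType here are not the Paddle types):

#include <iostream>

using PassType = int;

struct Layer {                      // stand-in for the paddle Layer base class
  virtual void forward(PassType passType) { std::cout << "base\n"; }
  virtual ~Layer() = default;
};

struct MKLDNNLayerA : Layer {       // one spelling in the hunk
  void forward(PassType passType) override { std::cout << "A\n"; }
};

struct MKLDNNLayerB : Layer {       // the other spelling in the hunk
  virtual void forward(PassType passType) { std::cout << "B\n"; }
};

int main() {
  MKLDNNLayerA a;
  MKLDNNLayerB b;
  Layer* p = &a;
  p->forward(0);   // prints "A": dispatches to the derived class either way
  p = &b;
  p->forward(0);   // prints "B"
  return 0;
}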
@@ -217,7 +218,7 @@ protected:
    * reset output grad from internal primitive desc.
    * merge grad if necessary.
    * reset both internal and external buffer and create reorder if necessary.
-   * note: about merge grad, when this layer has serval outputs,
+   * note: about merge grad, when this layer has several outputs,
    *       it could not be mixed with cpu device,
    *       since it can not get memory desc from cpu device.
    */
@@ -225,7 +226,7 @@ protected:
   /**
    * reset the merge grad primitive if necessary.
-   * note: do not support the grads are mixed with cpu device,
+   * note: do not support the grads mixed with cpu device,
    *       since it can not get memory desc from cpu device.
    */
   void resetMergeGrad(MKLDNNMatrixPtr& out);
@@ -313,17 +314,17 @@ protected:
    * print the mkldnn memory format of grad
    */
   virtual void printGradFormat() {
-    if (extInGrad_) {
-      VLOG(MKLDNN_FMTS) << extInGrad_->getFormat() << " <<< ";
-    }
-    if (inGrad_) {
-      VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<<";
+    if (extOutGrad_) {
+      VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat();
     }
     if (outGrad_) {
       VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< ";
     }
-    if (extOutGrad_) {
-      VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat();
+    if (inGrad_) {
+      VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<<";
+    }
+    if (extInGrad_) {
+      VLOG(MKLDNN_FMTS) << extInGrad_->getFormat() << " <<< ";
     }
     if (wgtGrad_) {
       VLOG(MKLDNN_FMTS) << "Weight grad format: " << wgtGrad_->getFormat();