Commit 93e22e7b
Authored on Nov 07, 2017 by tensor-tang

enable bias for mkldnn_addto

Parent: 6f43c936
Showing 3 changed files with 99 additions and 15 deletions (+99 −15):
paddle/gserver/layers/MKLDNNAddtoLayer.cpp (+76 −7)
paddle/gserver/layers/MKLDNNAddtoLayer.h (+21 −1)
paddle/gserver/tests/test_MKLDNN.cpp (+2 −7)
paddle/gserver/layers/MKLDNNAddtoLayer.cpp

@@ -62,16 +62,14 @@ void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
                                 MKLDNNMatrixPtr& wgt,
                                 MKLDNNMatrixPtr& bias,
                                 MKLDNNMatrixPtr& out) {
-  if (biases_) {
-    LOG(FATAL) << "not implemented yet";
-  }
-  resetFwdBuffers(inVals_, out);
+  resetFwdBuffers(inVals_, bias, out);
   in = inVals_[0];
 
   std::shared_ptr<sum::primitive_desc> fwdPD;
-  resetFwdPD(fwdPD, inVals_, out);
+  std::shared_ptr<sum::primitive_desc> biasPD;
+  resetFwdPD(fwdPD, biasPD, inVals_, bias, out);
 
-  resetFwdPipeline(pipeline, fwdPD, inVals_, out);
+  resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out);
 }
 
 void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
@@ -79,7 +77,7 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
                                 MKLDNNMatrixPtr& wgt,
                                 MKLDNNMatrixPtr& bias,
                                 MKLDNNMatrixPtr& out) {
-  resetBwdBuffers(inGrads_, out);
+  resetBwdBuffers(inGrads_, bias, out);
   in = inGrads_[0];
 
   // backward only need share output grad to input grad
@@ -89,6 +87,20 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
       inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData());
     }
   }
+
+  // backward bias
+  bwdBias_ = nullptr;
+  if (bias) {
+    std::vector<double> scales(bs_, 1.0);
+    std::vector<memory::primitive_desc> srcPDs(bs_, bias->getPrimitiveDesc());
+    auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs);
+    std::vector<primitive::at> srcs;
+    for (size_t i = 0; i < grads_.size(); ++i) {
+      srcs.push_back(*(grads_[i]));
+    }
+    bwdBias_.reset(new sum(biasPD, srcs, *bias));
+    pipeline.push_back(*bwdBias_);
+  }
 }
 
 void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
@@ -97,7 +109,25 @@ void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
   }
 }
 
+void MKLDNNAddtoLayer::prepareBias(MKLDNNMatrixPtr& bias,
+                                   const MatrixPtr& biasMat,
+                                   const MKLDNNMatrixPtr& out,
+                                   std::vector<MKLDNNMatrixPtr>& outs) {
+  auto pd = MKLDNNMatrix::createPrimitiveDesc(
+      {(int)layerSize_}, memory::format::x, engine_);
+  bias = MKLDNNMatrix::create(pd, biasMat);
+  outs.clear();
+  real* data = out->getData();
+  CHECK_EQ(bs_ * layerSize_, out->getElementCnt());
+  for (int i = 0; i < bs_; ++i) {
+    MatrixPtr tmp =
+        Matrix::create(data + i * layerSize_, 1, layerSize_, false, false);
+    outs.push_back(MKLDNNMatrix::create(bias->getPrimitiveDesc(), tmp));
+  }
+}
+
 void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                        MKLDNNMatrixPtr& out) {
   inputs.resize(inputLayers_.size());
   for (size_t i = 0; i < inputs.size(); i++) {
@@ -110,10 +140,18 @@ void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   }
 
   resetOutValue(out, inputs[0]->getPrimitiveDesc());
+
+  if (biases_ && biases_->getW()) {
+    prepareBias(bias, biases_->getW(), out, vals_);
+  } else {
+    bias = nullptr;
+  }
 }
 
 void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
+                                  std::shared_ptr<sum::primitive_desc>& biasPD,
                                   std::vector<MKLDNNMatrixPtr>& inputs,
+                                  MKLDNNMatrixPtr bias,
                                   MKLDNNMatrixPtr out) {
   std::vector<double> scales(inputs.size(), 1.0);
   std::vector<memory::primitive_desc> srcPDs;
@@ -123,12 +161,23 @@ void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
   CHECK(out);
   pd.reset(new sum::primitive_desc(out->getMemoryDesc(), scales, srcPDs));
   CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc());
+
+  biasPD = nullptr;
+  if (bias) {
+    std::vector<double> scales(2, 1.0);
+    std::vector<memory::primitive_desc> srcPDs(2, bias->getPrimitiveDesc());
+    biasPD.reset(
+        new sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs));
+    CHECK_PRIMITIVE_DESC_EQ(bias, biasPD->dst_primitive_desc());
+  }
 }
 
 void MKLDNNAddtoLayer::resetFwdPipeline(
     std::vector<primitive>& pipeline,
     std::shared_ptr<sum::primitive_desc>& pd,
+    std::shared_ptr<sum::primitive_desc>& biasPD,
     std::vector<MKLDNNMatrixPtr>& inputs,
+    MKLDNNMatrixPtr& bias,
     MKLDNNMatrixPtr& out) {
   std::vector<primitive::at> srcs;
   for (size_t i = 0; i < inputs.size(); i++) {
@@ -136,9 +185,23 @@ void MKLDNNAddtoLayer::resetFwdPipeline(
   }
   fwd_.reset(new sum(*pd, srcs, *out));
   pipeline.push_back(*fwd_);
+
+  fwdBias_.clear();
+  if (biasPD == nullptr || bias == nullptr) {
+    return;
+  }
+  fwdBias_.resize(vals_.size());
+  for (size_t i = 0; i < vals_.size(); ++i) {
+    std::vector<primitive::at> srcs;
+    srcs.push_back(*(vals_[i]));
+    srcs.push_back(*bias);
+    fwdBias_[i].reset(new sum(*biasPD, srcs, *vals_[i]));
+    pipeline.push_back(*fwdBias_[i]);
+  }
 }
 
 void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                        MKLDNNMatrixPtr& out) {
   CHECK(outVal_);
   resetOutGrad(out, outVal_->getPrimitiveDesc());
@@ -149,6 +212,12 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
     resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
     CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
   }
+
+  if (biases_ && biases_->getWGrad()) {
+    prepareBias(bias, biases_->getWGrad(), out, grads_);
+  } else {
+    bias = nullptr;
+  }
 }
 
 }  // namespace paddle
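
To see what the new forward path computes, here is a minimal standalone sketch (plain C++, no MKL-DNN; the function name addto_forward_with_bias and its parameters are hypothetical, not part of the commit). It mirrors what resetFwdPipeline now sets up: one sum over all inputs, followed by adding the same layerSize-length bias vector to every sample of the batch, which is what the per-sample fwdBias_ sum primitives do.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical reference for mkldnn_addto with bias (not the layer code):
// out[b * layerSize + k] = sum_i inputs[i][b * layerSize + k] + bias[k]
std::vector<float> addto_forward_with_bias(
    const std::vector<std::vector<float>>& inputs,  // each of size bs * layerSize
    const std::vector<float>& bias,                 // size layerSize
    size_t bs,
    size_t layerSize) {
  std::vector<float> out(bs * layerSize, 0.f);
  for (const auto& in : inputs) {
    assert(in.size() == bs * layerSize);
    for (size_t j = 0; j < in.size(); ++j) {
      out[j] += in[j];  // the single sum primitive over all inputs
    }
  }
  for (size_t b = 0; b < bs; ++b) {           // one bias add per sample,
    for (size_t k = 0; k < layerSize; ++k) {  // like the fwdBias_ sums
      out[b * layerSize + k] += bias[k];
    }
  }
  return out;
}

prepareBias is what makes this per-sample view possible on the MKL-DNN side: it wraps the bias as a 1-D (layerSize_) memory and slices the output buffer into bs_ row views, so each bias add is just a two-source sum of one row and the bias.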
paddle/gserver/layers/MKLDNNAddtoLayer.h

@@ -32,9 +32,15 @@ protected:
   // layer size == ic * ih * iw == oc * oh *ow, and can not be changed
   size_t layerSize_;
 
-  // TODO(TJ): this part has not been optimized by MKL-DNN
   std::unique_ptr<Weight> biases_;
 
+  // buffers for adding bias
+  std::vector<MKLDNNMatrixPtr> vals_;
+  std::vector<MKLDNNMatrixPtr> grads_;
+  // primitives for adding bias
+  std::vector<std::shared_ptr<mkldnn::primitive>> fwdBias_;
+  std::shared_ptr<mkldnn::primitive> bwdBias_;
+
 public:
   explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {}
 
@@ -91,20 +97,34 @@ protected:
    * reset pipeline.
    */
   void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);
   void resetFwdPD(std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                  std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                   std::vector<MKLDNNMatrixPtr>& inputs,
+                  MKLDNNMatrixPtr bias,
                   MKLDNNMatrixPtr out);
   void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                         std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                        std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                         std::vector<MKLDNNMatrixPtr>& inputs,
+                        MKLDNNMatrixPtr& bias,
                         MKLDNNMatrixPtr& out);
 
   /**
    * Backward functions: reset buffers(inputs, output, bias)
    */
   void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);
+
+  /**
+   * prepare for bias
+   */
+  void prepareBias(MKLDNNMatrixPtr& bias,
+                   const MatrixPtr& biasMat,
+                   const MKLDNNMatrixPtr& out,
+                   std::vector<MKLDNNMatrixPtr>& outs);
 };
 
 }  // namespace paddle
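
The new members split the work between forward and backward: vals_/fwdBias_ hold the per-sample output views and bias-add primitives, while grads_/bwdBias_ handle the gradient. Since the same bias is added to every sample, its gradient is the sum of the output gradients across the batch, which is exactly what the single bwdBias_ sum over the bs_ views in grads_ produces. A minimal standalone sketch of that reduction (plain C++; the name addto_backward_bias is hypothetical, not from the commit):

#include <cstddef>
#include <vector>

// Hypothetical reference for the bias gradient (not the layer code):
// biasGrad[k] = sum over samples b of outGrad[b * layerSize + k]
std::vector<float> addto_backward_bias(const std::vector<float>& outGrad,
                                       size_t bs,
                                       size_t layerSize) {
  std::vector<float> biasGrad(layerSize, 0.f);
  for (size_t b = 0; b < bs; ++b) {
    for (size_t k = 0; k < layerSize; ++k) {
      biasGrad[k] += outGrad[b * layerSize + k];
    }
  }
  return biasGrad;
}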
paddle/gserver/tests/test_MKLDNN.cpp

@@ -300,13 +300,8 @@ void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) {
   TestConfig dnnConfig;
   getAddtoConfig(dnnConfig, pm, nInputs);
   dnnConfig.layerConfig.set_type("mkldnn_addto");
-  // TODO(TJ): test with bias
-  for (auto withBias : {false}) {
-    if (withBias) {
-      dnnConfig.biasSize = pm.ic * pm.ih * pm.iw;
-    } else {
-      dnnConfig.biasSize = 0;
-    }
+  for (auto withBias : {false, true}) {
+    dnnConfig.biasSize = withBias ? pm.ic * pm.ih * pm.iw : 0;
     RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm)
   }
 }