BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 0c951176, authored Aug 07, 2017 by tensor-tang
Parent: ec9009f3

pass mkldnn gtest

Showing 6 changed files with 112 additions and 54 deletions (+112 / -54)
paddle/gserver/layers/MkldnnFcLayer.cpp   +20  -4
paddle/gserver/layers/MkldnnFcLayer.h      +8  -3
paddle/gserver/layers/MkldnnLayer.cpp     +40  -22
paddle/gserver/layers/MkldnnLayer.h       +26  -1
paddle/gserver/tests/MkldnnTester.cpp     +12  -18
paddle/gserver/tests/test_Mkldnn.cpp       +6  -6
paddle/gserver/layers/MkldnnFcLayer.cpp
@@ -42,7 +42,6 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
   // create weight
   weight_ = std::unique_ptr<Weight>(
       new Weight(oc_, iLayerSize_, parameters_[0], 0));
-  initWgt();
 
   // create biases
   if (biasParameter_.get() != NULL) {
@@ -51,20 +50,36 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
   return true;
 }
 
-void MkldnnFcLayer::initWgt() {
+void MkldnnFcLayer::cvtWgtFromPaddle() {
+  if (hasInitedWgt_) {
+    return;
+  }
+
   // The weight_ is transposed from initial paddle weight
   MatrixPtr paddleWgt = Matrix::create(
       weight_->getW()->getData(), iLayerSize_, oc_, false, false);
 
   std::ostringstream ostr;
   paddleWgt->print(ostr);
-  VLOG(DNN_BASE) << ostr.str();
+  VLOG(DNN_ALL) << "Initial Weight from paddle: " << std::endl << ostr.str();
 
-  // Firstly in mkldnn, the matrix is transposed from initial paddle weight
+  // The mkldnn weight is transposed from initial paddle matrix
   MatrixPtr paddleWgtT;
   paddleWgt->transpose(paddleWgtT, true);
   weight_->getW()->copyFrom(*paddleWgtT);
+  hasInitedWgt_ = true;
+}
+
+void MkldnnFcLayer::cvtWgtToPaddle() {
+  MatrixPtr dnnWgt = weight_->getW();
+  MatrixPtr paddleWgt;
+  dnnWgt->transpose(paddleWgt, true);
+
+  // copy paddle weight and override on weight_
+  MatrixPtr dnnWgtT = Matrix::create(
+      dnnWgt->getData(), dnnWgt->getWidth(), dnnWgt->getHeight(), false, false);
+  dnnWgtT->copyFrom(*paddleWgt);
+}
 
 void MkldnnFcLayer::reshape() {
@@ -86,6 +101,7 @@ void MkldnnFcLayer::reshape() {
   ic_ = iLayerSize_ / (ih_ * iw_);
   CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible";
   CHECK_EQ(size_t(oc_), getSize());
+  printSizeInfo();
 
   // reset output
   output_.setFrameHeight(oh_);
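Aside: the two conversion hooks above exist because Paddle stores the FC weight as an (input x output) matrix while MKL-DNN's inner product expects (output x input); cvtWgtFromPaddle() transposes the parameter in place once, and cvtWgtToPaddle() transposes it back so the tester can compare raw buffers against the reference layer. A minimal standalone sketch of that round trip (plain C++ with a hypothetical transpose helper; no Paddle or MKL-DNN dependency):

// Sketch of the weight layout round trip: MKL-DNN wants the FC weight as
// (oc x ic), Paddle stores it as (ic x oc) in the same flat buffer.
#include <cstdio>
#include <vector>

// Transpose an (rows x cols) row-major matrix into dst as (cols x rows).
static void transpose(const std::vector<float>& src, std::vector<float>& dst,
                      int rows, int cols) {
  dst.resize(src.size());
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c)
      dst[c * rows + r] = src[r * cols + c];
}

int main() {
  const int ic = 3, oc = 2;                     // stand-ins for iLayerSize_, oc_
  std::vector<float> wgt = {1, 2, 3, 4, 5, 6};  // paddle layout: ic x oc
  std::vector<float> tmp;

  transpose(wgt, tmp, ic, oc);  // like cvtWgtFromPaddle: now oc x ic
  wgt = tmp;
  transpose(wgt, tmp, oc, ic);  // like cvtWgtToPaddle: back to ic x oc
  wgt = tmp;

  for (float v : wgt) std::printf("%g ", v);  // prints 1 2 3 4 5 6 again
  std::printf("\n");
  return 0;
}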
paddle/gserver/layers/MkldnnFcLayer.h
@@ -29,25 +29,30 @@ protected:
   // input layer size, can not be change after init
   size_t iLayerSize_;  // == ic * ih * iw
 
+  bool hasInitedWgt_;
+
   // fc weight and bias
   std::unique_ptr<Weight> weight_;
   std::unique_ptr<Weight> biases_;
 
 public:
-  explicit MkldnnFcLayer(const LayerConfig& config) : MkldnnLayer(config) {}
+  explicit MkldnnFcLayer(const LayerConfig& config)
+      : MkldnnLayer(config), hasInitedWgt_(false) {}
 
   ~MkldnnFcLayer() {}
 
   bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override;
 
-  void initWgt();
+  void cvtWgtFromPaddle() override;
 
-  void reshape();
+  void cvtWgtToPaddle() override;
 
   void forward(PassType passType) override;
 
   void backward(const UpdateCallback& callback) override;
+
+  void reshape();
 };
 
 }  // namespace paddle
paddle/gserver/layers/MkldnnLayer.cpp
@@ -25,11 +25,18 @@ namespace paddle {
 bool MkldnnLayer::init(const LayerMap& layerMap,
                        const ParameterMap& parameterMap) {
+  if (!Layer::init(layerMap, parameterMap)) {
+    return false;
+  }
+
   CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
                           << "Please set WITH_MKLDNN=ON "
                           << "and set use_mkldnn=True";
   stream_.reset(new MkldnnStream());
   engine_ = CpuEngine::Instance().getEngine();
 
   // TODO(TJ): deivecId
-  return Layer::init(layerMap, parameterMap);
+  return true;
 }
 
 void MkldnnLayer::resetForwardFC(int bs,
@@ -42,7 +49,6 @@ void MkldnnLayer::resetForwardFC(int bs,
                                  real* wgtData,
                                  real* biasData) {
   bool hasSpatial = ih == 1 && iw == 1 ? false : true;
-  engine_ = CpuEngine::Instance().getEngine();
   mem::desc botMD = hasSpatial ? createMD({bs, ic, ih, iw}, format::nchw)
                                : createMD({bs, ic}, format::nc);
@@ -52,21 +58,21 @@ void MkldnnLayer::resetForwardFC(int bs,
                               : createMD({}, format::format_undef);
   mem::desc topMD = createMD({bs, oc}, format::nc);
 
+  inVal_.reset(new mem(mem::primitive_desc(botMD, engine_), botData));
+  wgtVal_.reset(new mem(mem::primitive_desc(wgtMD, engine_), wgtData));
+  outVal_.reset(new mem(mem::primitive_desc(topMD, engine_), topData));
+
   mkldnn::prop_kind pk = mkldnn::prop_kind::forward;
   fc_fwd::desc fwdDesc = biasData != NULL
                              ? fc_fwd::desc(pk, botMD, wgtMD, biasMD, topMD)
                              : fc_fwd::desc(pk, botMD, wgtMD, topMD);
   fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
 
-  mem bot = mem(mem::primitive_desc(botMD, engine_), botData);
-  mem wgt = mem(mem::primitive_desc(wgtMD, engine_), wgtData);
-  mem top = mem(mem::primitive_desc(topMD, engine_), topData);
 
   if (biasData != NULL) {
-    mem bias = mem(mem::primitive_desc(biasMD, engine_), biasData);
-    fwd_.reset(new fc_fwd(fwdPD, bot, wgt, bias, top));
+    biasVal_.reset(new mem(mem::primitive_desc(biasMD, engine_), biasData));
+    fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *biasVal_, *outVal_));
   } else {
-    fwd_.reset(new fc_fwd(fwdPD, bot, wgt, top));
+    fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *outVal_));
   }
   pipelineFwd_.clear();
   pipelineFwd_.push_back(*fwd_);
@@ -84,8 +90,12 @@ void MkldnnLayer::mkldnnForwardFC(int bs,
     // if input size changed, reset it
     resetForwardFC(bs, ic, ih, iw, botData, oc, topData, wgtData, biasData);
 
+  this->cvtWgtFromPaddle();
+
+  // update input, since the data might be changed if this is after data layer
+  inVal_->set_data_handle(botData);
 
-  // just forward
-  // update botdata
   stream_->submit(pipelineFwd_);
 }
@@ -112,6 +122,10 @@ void MkldnnLayer::resetBackwardFC(int bs,
   mem::desc biasMD = biasDiff != NULL ? createMD({oc}, format::x)
                                       : createMD({}, format::format_undef);
 
+  inVal_.reset(new mem(mem::primitive_desc(botMD, engine_), botData));
+  wgtGrad_.reset(new mem(mem::primitive_desc(wgtMD, engine_), wgtDiff));
+  outGrad_.reset(new mem(mem::primitive_desc(topMD, engine_), topDiff));
+
   fc_fwd::desc fwdDesc =
       fc_fwd::desc(mkldnn::prop_kind::forward, botMD, wgtMD, topMD);
   fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
@@ -121,15 +135,12 @@ void MkldnnLayer::resetBackwardFC(int bs,
   fc_bwdWgt::primitive_desc bwdWgtPD =
       fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
 
-  mem botVal = mem(mem::primitive_desc(botMD, engine_), botData);
-  mem wgtGrad = mem(mem::primitive_desc(wgtMD, engine_), wgtDiff);
-  mem topGrad = mem(mem::primitive_desc(topMD, engine_), topDiff);
 
   if (biasDiff != NULL) {
-    mem biasGrad = mem(mem::primitive_desc(biasMD, engine_), biasDiff);
-    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, botVal, topGrad, wgtGrad, biasGrad));
+    biasGrad_.reset(new mem(mem::primitive_desc(biasMD, engine_), biasDiff));
+    bwdWgt_.reset(
+        new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_, *biasGrad_));
   } else {
-    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, botVal, topGrad, wgtGrad));
+    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_));
   }
   pipelineBwd_.clear();
   pipelineBwd_.push_back(*bwdWgt_);
@@ -142,9 +153,9 @@ void MkldnnLayer::resetBackwardFC(int bs,
   fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(botMD, wgtMD, topMD);
   fc_bwdData::primitive_desc bwdDataPD =
       fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
 
-  mem botGrad = mem(mem::primitive_desc(botMD, engine_), botDiff);
-  mem wgtVal = mem(mem::primitive_desc(wgtMD, engine_), wgtData);
-  bwdData_.reset(new fc_bwdData(bwdDataPD, topGrad, wgtVal, botGrad));
+  inGrad_.reset(new mem(mem::primitive_desc(botMD, engine_), botDiff));
+  wgtVal_.reset(new mem(mem::primitive_desc(wgtMD, engine_), wgtData));
+  bwdData_.reset(new fc_bwdData(bwdDataPD, *outGrad_, *wgtVal_, *inGrad_));
   pipelineBwd_.push_back(*bwdData_);
 }
@@ -172,11 +183,18 @@ void MkldnnLayer::mkldnnBackwardFC(int bs,
                     wgtData,
                     biasDiff);
 
-  // just forward
-  // update botdata
+  // update data
+  outGrad_->set_data_handle(topDiff);
 
   stream_->submit(pipelineBwd_);
 }
 
+void MkldnnLayer::printSizeInfo() {
+  VLOG(DNN_SIZES) << "bs: " << bs_ << ", ic: " << ic_ << ", ih: " << ih_
+                  << ", iw: " << iw_ << ", oc: " << oc_ << ", oh: " << oh_
+                  << ", ow: " << ow_;
+}
+
 mem::desc MkldnnLayer::createMD(mem::dims dims,
                                 mem::format fmt,
                                 mem::data_type type) {
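Aside: the common theme of the hunks above is that the mkldnn::memory objects move from stack locals (bot, wgt, top, botVal, ...) into cached shared_ptr members (inVal_, wgtVal_, outVal_, ...), so the per-batch entry points can re-point the cached memories at the new buffers via set_data_handle() instead of rebuilding descriptors and primitives every pass. A toy sketch of that reset-once / re-point-per-batch pattern (plain C++; Memory and Layer here are stand-ins, not the MKL-DNN API):

// Build the expensive state once, then only swap data pointers per batch.
#include <memory>
#include <vector>

struct Memory {                        // stands in for mkldnn::memory
  void* handle = nullptr;
  void set_data_handle(void* p) { handle = p; }
};

struct Layer {
  std::shared_ptr<Memory> inVal_, outVal_;
  std::vector<Memory*> pipelineFwd_;   // stands in for the primitive pipeline
  int bs_ = 0;

  void resetForwardFC(int bs, void* botData, void* topData) {
    bs_ = bs;                          // expensive part: (re)build everything
    inVal_.reset(new Memory{botData});
    outVal_.reset(new Memory{topData});
    pipelineFwd_ = {inVal_.get(), outVal_.get()};
  }

  void forwardFC(int bs, void* botData, void* topData) {
    if (bs != bs_) resetForwardFC(bs, botData, topData);
    inVal_->set_data_handle(botData);  // cheap part: just swap the pointer
    // a real implementation would now submit pipelineFwd_ to the stream
  }
};

int main() {
  Layer l;
  float batchA[8] = {0}, batchB[8] = {0};
  l.forwardFC(8, batchA, batchA);  // first call builds the pipeline
  l.forwardFC(8, batchB, batchB);  // same shape: only the handles move
  return 0;
}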
paddle/gserver/layers/MkldnnLayer.h
@@ -40,13 +40,24 @@ protected:
   // mkldnn engine, stream and primivtives
   mkldnn::engine engine_;
   std::shared_ptr<MkldnnStream> stream_;
   std::shared_ptr<mkldnn::primitive> fwd_;
   std::shared_ptr<mkldnn::primitive> bwdWgt_;
   std::shared_ptr<mkldnn::primitive> bwdData_;
   std::vector<mkldnn::primitive> pipelineFwd_;
   std::vector<mkldnn::primitive> pipelineBwd_;
 
+  // TODO(TJ): change below memory as MkldnnMatrixPtr type
+  // input == bottom, output == top
+  // value == data, grad == diff
+  std::shared_ptr<mkldnn::memory> inVal_;
+  std::shared_ptr<mkldnn::memory> inGrad_;
+  std::shared_ptr<mkldnn::memory> outVal_;
+  std::shared_ptr<mkldnn::memory> outGrad_;
+  std::shared_ptr<mkldnn::memory> wgtVal_;
+  std::shared_ptr<mkldnn::memory> wgtGrad_;
+  std::shared_ptr<mkldnn::memory> biasVal_;
+  std::shared_ptr<mkldnn::memory> biasGrad_;
 
 public:
   explicit MkldnnLayer(const LayerConfig& config)
       : Layer(config),
@@ -67,6 +78,20 @@ public:
   virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
 
+  virtual void printSizeInfo();
+
+  /**
+   * convert weight from paddle format to mkldnn format
+   * weight_ will be override
+   */
+  virtual void cvtWgtFromPaddle() { ; }
+
+  /**
+   * convert mkldnn weight to paddle format
+   * weight_ will be override
+   */
+  virtual void cvtWgtToPaddle() { ; }
+
   void resetForwardFC(int bs,
                       int ic,
                       int ih,
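Aside: declaring the conversion hooks as empty-bodied virtuals means layers that own no weights inherit a no-op, while MkldnnFcLayer overrides both. A minimal sketch of that dispatch (toy class names, not the Paddle hierarchy):

// Empty default hooks in the base; only weighted layers override them.
#include <iostream>

class BaseLayerLike {
 public:
  virtual ~BaseLayerLike() {}
  virtual void cvtWgtFromPaddle() {}  // default: nothing to convert
  virtual void cvtWgtToPaddle() {}
};

class FcLike : public BaseLayerLike {
 public:
  void cvtWgtFromPaddle() override { std::cout << "transpose in\n"; }
  void cvtWgtToPaddle() override { std::cout << "transpose out\n"; }
};

int main() {
  FcLike fc;
  BaseLayerLike& layer = fc;
  layer.cvtWgtFromPaddle();  // dispatches to FcLike::cvtWgtFromPaddle
  return 0;
}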
paddle/gserver/tests/MkldnnTester.cpp
@@ -14,6 +14,7 @@ limitations under the License. */
#include "MkldnnTester.h"
#include "paddle/gserver/layers/MkldnnBase.h"
#include "paddle/gserver/layers/MkldnnLayer.h"
namespace
paddle
{
...
...
@@ -145,7 +146,10 @@ void MkldnnTester::checkBackwardWgts() {
   vector<VectorPtr> dnnWgts;  // used to temply save mkldnn weights
   saveWgt(parameters_[DNN], dnnWgts);
 
-  // TODO(TJ): cvtWgtToPaddle
+  const MkldnnLayerPtr dnnlayer = std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_);
+  CHECK(dnnlayer);
+  dnnlayer->cvtWgtToPaddle();
 
   for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
     const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
     const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
@@ -233,11 +237,10 @@ void MkldnnTester::printMatrix(const MatrixPtr& m) {
   if (!log_) {
     return;
   }
 
-#ifdef _DEBUG
-  std::ostream str;
-  m->print(str);
-  VLOG(lvl_) << str;
-#endif
+  std::ostringstream ostr;
+  m->print(ostr);
+  VLOG(lvl_) << std::endl << ostr.str();
 }
 
 void MkldnnTester::printVector(const VectorPtr& v) {
@@ -245,15 +248,9 @@ void MkldnnTester::printVector(const VectorPtr& v) {
     return;
   }
 
-  CHECK(v);
-  CHECK(v->getData());
-  const real* pd = v->getData();
-  const size_t sz = v->getSize();
-  std::stringstream row;
-  for (size_t i = 0; i < sz; ++i) {
-    row << pd[i] << ", ";
-  }
-  VLOG(lvl_) << row.str();
+  std::ostringstream ostr;
+  v->print(ostr, v->getSize());
+  VLOG(lvl_) << std::endl << ostr.str();
 }
 
 double MkldnnTester::getDelta(const real* d1,
@@ -335,7 +332,6 @@ void MkldnnTester::run(const TestConfig& dnn,
   // Firstly always set flag false to initial from paddle weight
   TestConfig first = dnn;
-  // first.layerConfig.set_init_wgt_from_mkldnn(false);
 
   // reset and run once
   reset(first, ref, batchSize);
@@ -348,8 +344,6 @@ void MkldnnTester::run(const TestConfig& dnn,
   // firstly get the flag
   bool initWgtFromMkldnn = false;
-  // dnn.layerConfig.has_init_wgt_from_mkldnn() &&
-  // dnn.layerConfig.init_wgt_from_mkldnn();
 
   if (initWgtFromMkldnn) {
     // after run once the mkldnn weight has been stored in dnnlayer
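Aside: the tester holds layers as generic LayerPtr handles, so checkBackwardWgts() above recovers the MKL-DNN interface with std::dynamic_pointer_cast before invoking cvtWgtToPaddle(). A self-contained sketch of that downcast-and-call (toy types; in Paddle, LayerPtr and MkldnnLayerPtr are shared_ptr typedefs):

// dynamic_pointer_cast returns an empty pointer if the layer is not
// actually an MKL-DNN layer, which the CHECK guards against.
#include <cassert>
#include <memory>

struct Layer { virtual ~Layer() {} };
struct MkldnnLayer : Layer { void cvtWgtToPaddle() {} };

int main() {
  std::shared_ptr<Layer> dnnLayer = std::make_shared<MkldnnLayer>();
  auto dnn = std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer);
  assert(dnn);            // plays the role of CHECK(dnnlayer) in the tester
  dnn->cvtWgtToPaddle();  // weights back in paddle layout for comparison
  return 0;
}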
paddle/gserver/tests/test_Mkldnn.cpp
@@ -55,12 +55,12 @@ void testFcLayer(const testFCDesc& pm) {
 }
 
 TEST(MkldnnLayer, fcLayer) {
-  testFcLayer({2, 2, 3, 1, 1});
-  /*testFcLayer({16, 32, 64, 1, 1});
-  testFcLayer({15, 3, 6, 16, 16});*/
+  testFcLayer({2, 2, 3, 1, 1});
+  testFcLayer({3, 7, 19, 1, 1});
+  testFcLayer({8, 16, 32, 13, 13});
+  testFcLayer({4, 12, 18, 13, 11});
+  testFcLayer({2, 64, 32, 16, 16});
+  testFcLayer({15, 3, 6, 16, 16});
 }
 
 // TODO(TJ): add branch test
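Aside: the enabled cases cover both flat inputs (ih == iw == 1, the format::nc branch of resetForwardFC) and spatial inputs (for example 13x13, the format::nchw branch). The definition of testFCDesc sits earlier in test_Mkldnn.cpp and is not visible in this diff; one hypothetical reading of the brace initializers, labeled as an assumption:

// Hypothetical field order for the test descriptor; the real testFCDesc
// may differ, since its definition is outside this hunk.
struct testFCDesc {
  int bs;      // batch size
  int ic;      // input channels
  int oc;      // output channels
  int ih, iw;  // input height/width; ih == iw == 1 exercises the nc path
};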