Commit e08651f9
Authored on Aug 17, 2017 by tensor-tang
Parent commit: 0945dc1b
Repository: BaiXuePrincess/Paddle (forked from PaddlePaddle/Paddle)

remove flag use_mkldnn_wgt

Showing 6 changed files with 27 additions and 14 deletions (+27 -14)
paddle/gserver/layers/MKLDNNFcLayer.cpp   +6  -2
paddle/gserver/tests/MKLDNNTester.cpp     +20 -7
paddle/gserver/tests/MKLDNNTester.h       +1  -1
paddle/trainer/TrainerConfigHelper.cpp    +0  -2
paddle/utils/Flags.cpp                    +0  -1
paddle/utils/Flags.h                      +0  -1
paddle/gserver/layers/MKLDNNFcLayer.cpp

@@ -57,11 +57,14 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap,
 }
 
 void MKLDNNFcLayer::convertWeightsFromPaddle() {
-  if (FLAGS_use_mkldnn_wgt) {
+  if (hasInitedWgt_) {
     return;
   }
 
-  if (hasInitedWgt_) {
+  // TODO(TJ): dst format should get from wgtVal_
+  int dstFmt = PARAM_FORMAT_MKLDNN_OI;
+  int srcFmt = weight_->getParameterPtr()->getHeaderFormat();
+  if (srcFmt == dstFmt) {
     return;
   }
 
@@ -78,6 +81,7 @@ void MKLDNNFcLayer::convertWeightsFromPaddle() {
 
   MatrixPtr paddleWgtT;
   paddleWgt->transpose(paddleWgtT, true);
   weight_->getW()->copyFrom(*paddleWgtT);
+  weight_->getParameterPtr()->setHeaderFormat(dstFmt);
   hasInitedWgt_ = true;
 }
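Note: the hunks above replace the global FLAGS_use_mkldnn_wgt check with a per-parameter header-format check. The layer converts its weight only when the stored format is not already PARAM_FORMAT_MKLDNN_OI, and records the new format after converting, so the conversion runs at most once. Below is a minimal, self-contained sketch of that pattern under my reading of the diff; FakeParameter, convertIfNeeded and the numeric enum values are illustrative stand-ins, not Paddle APIs.

// Minimal sketch of the convert-once-per-format pattern (illustrative only).
#include <iostream>

enum ParamHeaderFormat {
  PARAM_FORMAT_ORIGINAL = 0,   // weight as written by the plain CPU trainer
  PARAM_FORMAT_MKLDNN_OI = 1,  // weight already transposed into MKL-DNN "oi" layout
};

struct FakeParameter {                 // stand-in for a parameter object
  int headerFormat = PARAM_FORMAT_ORIGINAL;
  int getHeaderFormat() const { return headerFormat; }
  void setHeaderFormat(int fmt) { headerFormat = fmt; }
};

// Convert only when the stored format differs from the one the layer wants,
// then record the new format so the next call is a no-op.
void convertIfNeeded(FakeParameter& param) {
  const int dstFmt = PARAM_FORMAT_MKLDNN_OI;
  if (param.getHeaderFormat() == dstFmt) {
    std::cout << "already in MKL-DNN layout, skip\n";
    return;
  }
  std::cout << "transpose weight into MKL-DNN layout\n";
  param.setHeaderFormat(dstFmt);
}

int main() {
  FakeParameter w;
  convertIfNeeded(w);  // converts and tags the parameter
  convertIfNeeded(w);  // second call does nothing
  return 0;
}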
paddle/gserver/tests/MKLDNNTester.cpp

@@ -330,9 +330,7 @@ void MKLDNNTester::run(const TestConfig& dnn,
   log_ = log;
   lvl_ = level;
 
-  // Firstly test FLAGS_use_mkldnn_wgt = false
-  FLAGS_use_mkldnn_wgt = false;
-  // reset and run once
+  // Firstly test mkldnn init from PARAM_FORMAT_ORIGINAL weight
   reset(dnn, ref, batchSize);
   randomWgtDatas();
   clearWgtDiffs();
@@ -342,17 +340,32 @@ void MKLDNNTester::run(const TestConfig& dnn,
     runOnce();
   }
 
-  // Then test FLAGS_use_mkldnn_wgt = true
-  FLAGS_use_mkldnn_wgt = true;
-  // after run once the mkldnn weight has been stored in dnnlayer
+  if (parameters_[DNN].empty()) {
+    // has no paramters
+    return;
+  }
+
+  // After run some iters, the mkldnn weight has been stored in dnnLayer
+  // and we can also get the mkldnn weight paramter header format
+  // Weight param should always be index 0 (and bias index 1).
+  // TODO(TJ): should also considerate mean and var format when batchnorm ready
+  int dnnWgtFmt = parameters_[DNN][0]->getHeaderFormat();
+  int refWgtFmt = parameters_[REF][0]->getHeaderFormat();
+  if (dnnWgtFmt == refWgtFmt) {
+    // weight format are equal, so no need check more
+    return;
+  }
+
   // then save the weights and restart again
   vector<VectorPtr> dnnWgts, refWgts;
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   saveWgt(parameters_[DNN], dnnWgts);
   saveWgt(parameters_[REF], refWgts);
 
-  // restart again with flag true
+  // restart again with dnn weight format
   reset(dnn, ref, batchSize);
+  // TODO(TJ): should also considerate mean and var format when batchnorm ready
+  parameters_[DNN][0]->setHeaderFormat(dnnWgtFmt);
 
   // restore wgt
   restoreWgt(dnnWgts, parameters_[DNN]);
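Note: with the flag gone, the tester decides whether to run a second pass by comparing the weight header formats reported by the two sides after the first runs. A rough, self-contained sketch of that save / reset / retag / restore / re-run flow, using hypothetical helpers (FakeParam, rebuildLayers, compareOutputs) rather than the real MKLDNNTester API:

// Rough sketch of the second test pass added in the hunk above.
#include <cstdio>
#include <vector>

struct FakeParam {
  int headerFormat = 0;        // 0 plays the role of PARAM_FORMAT_ORIGINAL here
  std::vector<float> wgt;
};

void rebuildLayers(FakeParam& dnn, FakeParam& ref) {
  dnn = FakeParam{};           // in the real tester, reset() re-creates the layers
  ref = FakeParam{};
}

void compareOutputs(const char* tag) { std::printf("compared outputs: %s\n", tag); }

int main() {
  FakeParam dnn, ref;
  dnn.headerFormat = 1;        // pretend the dnn side converted its weight in pass 1
  dnn.wgt = {1.f, 2.f};
  ref.wgt = {1.f, 2.f};
  compareOutputs("original weight format");   // first pass

  if (dnn.headerFormat != ref.headerFormat) {
    std::vector<float> dnnWgts = dnn.wgt;     // saveWgt(parameters_[DNN], dnnWgts)
    std::vector<float> refWgts = ref.wgt;     // saveWgt(parameters_[REF], refWgts)
    int dnnWgtFmt = dnn.headerFormat;
    rebuildLayers(dnn, ref);                  // reset(dnn, ref, batchSize)
    dnn.headerFormat = dnnWgtFmt;             // parameters_[DNN][0]->setHeaderFormat(dnnWgtFmt)
    dnn.wgt = dnnWgts;                        // restoreWgt(dnnWgts, parameters_[DNN])
    ref.wgt = refWgts;                        // restore the reference side as well
    compareOutputs("dnn weight format");      // second pass, no conversion expected
  }
  return 0;
}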
paddle/gserver/tests/MKLDNNTester.h

@@ -108,7 +108,7 @@ private:
    * if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the
    * max(diff/ref)
    * else return sum(abs(a-b)) / sum(abs(b))
-   * The return value should smaller than eps when passing.
+   * The return value should be smaller than eps when passing.
    */
   double getDelta(const real* d1,
                   const real* d2,
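Note: the doc comment above describes the pass/fail metric: if more than failRate of the points are relatively wrong by more than thres, the worst relative error max(diff/ref) is returned; otherwise the aggregate sum(abs(a-b)) / sum(abs(b)) is returned, and the result must stay below eps for the test to pass. A small sketch of that metric as I read the comment; getDeltaSketch is not the real MKLDNNTester::getDelta, and the default failRate/thres values are assumptions:

// Sketch of the relative-delta metric described in the comment above.
#include <cmath>
#include <cstddef>

double getDeltaSketch(const float* d1, const float* d2, size_t len,
                      double failRate = 1e-3, double thres = 0.1) {
  double sumDiff = 0.0, sumRef = 0.0, maxRatio = 0.0;
  size_t wrongCnt = 0;
  for (size_t i = 0; i < len; ++i) {
    double diff = std::fabs(static_cast<double>(d1[i]) - d2[i]);
    double ref = std::fabs(static_cast<double>(d2[i]));
    double ratio = ref > 0.0 ? diff / ref : diff;
    if (ratio > thres) ++wrongCnt;       // "wrong" point: abs(dnn-ref)/abs(ref) > thres
    if (ratio > maxRatio) maxRatio = ratio;
    sumDiff += diff;
    sumRef += ref;
  }
  double wrongRate = len ? static_cast<double>(wrongCnt) / len : 0.0;
  // many wrong points -> report the worst relative error, else the aggregate one
  return wrongRate > failRate ? maxRatio
                              : (sumRef > 0.0 ? sumDiff / sumRef : sumDiff);
}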
paddle/trainer/TrainerConfigHelper.cpp

@@ -29,7 +29,6 @@ DECLARE_bool(with_gpu);
 DECLARE_bool(parallel_nn);
 DECLARE_string(config_args);
 DECLARE_bool(use_mkldnn);
-DECLARE_bool(use_mkldnn_wgt);
 
 const char *kConfigParserModuleName = "paddle.trainer.config_parser";
 const char *kConfigParserFuncName = "parse_config_and_serialize";
@@ -47,7 +46,6 @@ TrainerConfigHelper::TrainerConfigHelper(const std::string &configFilePath)
               << ",with_cost=" << FLAGS_with_cost << ",use_gpu=" << FLAGS_use_gpu
               << ",parallel_nn=" << FLAGS_parallel_nn
               << ",use_mkldnn=" << FLAGS_use_mkldnn
-              << ",use_mkldnn_wgt=" << FLAGS_use_mkldnn_wgt
               << ",cudnn_version=" << hl_get_cudnn_lib_version();
   if (!FLAGS_config_args.empty()) {
     configArgs << "," << FLAGS_config_args;
paddle/utils/Flags.cpp

@@ -27,7 +27,6 @@ DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training");
 DEFINE_bool(use_mkldnn, false, "Only support CPU training");
 #endif
 
-DEFINE_bool(use_mkldnn_wgt, false, "Init weight from CPU weight");
 DEFINE_bool(parallel_nn,
             false,
             "Whether to use multi-threads to calculate one neural network."
paddle/utils/Flags.h

@@ -41,4 +41,3 @@ DECLARE_string(predict_file);
 DECLARE_bool(prev_batch_state);
 DECLARE_string(init_model_path);
 DECLARE_bool(use_mkldnn);
-DECLARE_bool(use_mkldnn_wgt);
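Note on why one removed flag touches Flags.cpp, Flags.h and TrainerConfigHelper.cpp: Paddle's flags use the gflags-style macros, where a flag has exactly one DEFINE_bool (the definition with default value and help text) and any other file that reads it adds a DECLARE_bool and accesses FLAGS_<name>. Removing use_mkldnn_wgt therefore means deleting its definition, both declarations, and every FLAGS_use_mkldnn_wgt use. A minimal standalone sketch of that pairing, assuming the gflags library and an invented flag name use_example:

// flags.cc -- the single owning DEFINE_* of the flag (storage, default, help text)
#include <gflags/gflags.h>
DEFINE_bool(use_example, false, "Illustrative flag, not a real Paddle flag");

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  // Any other translation unit would put `DECLARE_bool(use_example);` in a
  // header (as Flags.h does for Paddle's flags) and read FLAGS_use_example.
  return FLAGS_use_example ? 0 : 1;
}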