Commit a475a57d
Repository: Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Author: tensor-tang
Date:   Aug 10, 2017
Parent: b2bd6713

    rename files and classes, use uppercase of Mkldnn and Cpu

Showing 9 changed files, with 83 additions and 83 deletions (+83 -83):
  paddle/gserver/CMakeLists.txt            +4   -4
  paddle/gserver/layers/MKLDNNBase.h       +13  -13
  paddle/gserver/layers/MKLDNNFcLayer.cpp  +11  -11
  paddle/gserver/layers/MKLDNNFcLayer.h    +6   -6
  paddle/gserver/layers/MKLDNNLayer.h      +11  -11
  paddle/gserver/tests/CMakeLists.txt      +4   -4
  paddle/gserver/tests/MKLDNNTester.cpp    +27  -27
  paddle/gserver/tests/MKLDNNTester.h      +4   -4
  paddle/gserver/tests/test_MKLDNN.cpp     +3   -3
paddle/gserver/CMakeLists.txt
@@ -25,13 +25,13 @@ filter_test(GSERVER_HEADER)
 filter_test(GSERVER_SOURCES)
 if(NOT WITH_MKLDNN)
-  file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.h")
-  file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.cpp")
+  file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.h")
+  file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.cpp")
   list(REMOVE_ITEM GSERVER_HEADER ${DNN_HEADER})
   list(REMOVE_ITEM GSERVER_SOURCES ${DNN_SOURCES})
-  message(STATUS "Skip compiling with Mkldnnlayers and MkldnnActivations")
+  message(STATUS "Skip compiling with MKLDNNLayers and MKLDNNActivations")
 else()
-  message(STATUS "Compile with Mkldnnlayers and MkldnnActivations")
+  message(STATUS "Compile with MKLDNNLayers and MKLDNNActivations")
 endif()
 if(NOT WITH_GPU)
paddle/gserver/layers/MkldnnBase.h → paddle/gserver/layers/MKLDNNBase.h
@@ -30,26 +30,26 @@ typedef enum {
  * @brief MKLDNN CPU engine.
  *
  */
-class CpuEngine {
+class CPUEngine {
 public:
-  static CpuEngine& Instance() {
+  static CPUEngine& Instance() {
     // Thread-safe in C++11.
-    static CpuEngine myInstance;
+    static CPUEngine myInstance;
     return myInstance;
   }

   // Disallow copy or move
-  CpuEngine(const CpuEngine&) = delete;             // Copy constructor
-  CpuEngine(CpuEngine&&) = delete;                  // Move constructor
-  CpuEngine& operator=(const CpuEngine&) = delete;  // Copy assignment
-  CpuEngine& operator=(CpuEngine&&) = delete;       // Move assignment
+  CPUEngine(const CPUEngine&) = delete;             // Copy constructor
+  CPUEngine(CPUEngine&&) = delete;                  // Move constructor
+  CPUEngine& operator=(const CPUEngine&) = delete;  // Copy assignment
+  CPUEngine& operator=(CPUEngine&&) = delete;       // Move assignment

   mkldnn::engine& getEngine() { return cpuEngine_; }

 protected:
-  CpuEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
-  // CpuEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
-  ~CpuEngine() {}
+  CPUEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
+  // CPUEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
+  ~CPUEngine() {}

 private:
   mkldnn::engine cpuEngine_;

@@ -59,11 +59,11 @@ private:
  * @brief MKLDNN Stream.
  *
  */
-class MkldnnStream {
+class MKLDNNStream {
 public:
-  MkldnnStream() : ready_(false) { resetState(); }
+  MKLDNNStream() : ready_(false) { resetState(); }

-  virtual ~MkldnnStream() {}
+  virtual ~MKLDNNStream() {}

   /**
    * @brief Submit stream
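Note: the CPUEngine class above is a textbook Meyers singleton. Instance() hands out a reference to a function-local static, whose initialization C++11 guarantees to be thread-safe (hence the "// Thread-safe in C++11." comment), and the deleted copy/move members keep the instance unique. A minimal self-contained sketch of the same pattern, with a stand-in Engine type instead of the real mkldnn::engine wrapper:

    #include <iostream>

    // Stand-in for CPUEngine; the mkldnn specifics are omitted.
    class Engine {
    public:
      static Engine& Instance() {
        static Engine myInstance;  // constructed once; thread-safe in C++11
        return myInstance;
      }
      // Disallow copy or move, so the instance stays unique.
      Engine(const Engine&) = delete;
      Engine(Engine&&) = delete;
      Engine& operator=(const Engine&) = delete;
      Engine& operator=(Engine&&) = delete;

    private:
      Engine() { std::cout << "constructed exactly once\n"; }
      ~Engine() {}
    };

    int main() {
      Engine& a = Engine::Instance();
      Engine& b = Engine::Instance();
      std::cout << (&a == &b) << "\n";  // prints 1: both refer to one object
    }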
paddle/gserver/layers/MkldnnFcLayer.cpp → paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "MkldnnFcLayer.h"
+#include "MKLDNNFcLayer.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"

@@ -24,11 +24,11 @@ typedef inner_product_backward_data fc_bwdData;

 namespace paddle {

-REGISTER_LAYER(mkldnn_fc, MkldnnFcLayer);
+REGISTER_LAYER(mkldnn_fc, MKLDNNFcLayer);

-bool MkldnnFcLayer::init(const LayerMap& layerMap,
-                         const ParameterMap& parameterMap) {
-  if (!MkldnnLayer::init(layerMap, parameterMap)) {
+bool MKLDNNFcLayer::init(const LayerMap& layerMap,
+                         const ParameterMap& parameterMap) {
+  if (!MKLDNNLayer::init(layerMap, parameterMap)) {
     return false;
   }

@@ -56,7 +56,7 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
   return true;
 }

-void MkldnnFcLayer::convertWeightsFromPaddle() {
+void MKLDNNFcLayer::convertWeightsFromPaddle() {
   if (FLAGS_use_mkldnn_wgt) {
     return;
   }

@@ -81,7 +81,7 @@ void MkldnnFcLayer::convertWeightsFromPaddle() {
   hasInitedWgt_ = true;
 }

-void MkldnnFcLayer::convertWeightsToPaddle() {
+void MKLDNNFcLayer::convertWeightsToPaddle() {
   MatrixPtr dnnWgt = weight_->getW();
   MatrixPtr paddleWgt;
   dnnWgt->transpose(paddleWgt, true);

@@ -92,7 +92,7 @@ void MkldnnFcLayer::convertWeightsToPaddle() {
   dnnWgtT->copyFrom(*paddleWgt);
 }

-void MkldnnFcLayer::reshape() {
+void MKLDNNFcLayer::reshape() {
   const Argument& input = getInput(0);
   int batchSize = input.getBatchSize();
   if (bs_ == batchSize) {

@@ -129,7 +129,7 @@ void MkldnnFcLayer::reshape() {
   convertWeightsFromPaddle();
 }

-void MkldnnFcLayer::resetFwd() {
+void MKLDNNFcLayer::resetFwd() {
   bool hasBias = biases_ && biases_->getW();
   real* iData = getInputValue(0)->getData();
   real* oData = getOutputValue()->getData();

@@ -166,7 +166,7 @@ void MkldnnFcLayer::resetFwd() {
   pipelineFwd_.push_back(*fwd_);
 }

-void MkldnnFcLayer::resetBwd() {
+void MKLDNNFcLayer::resetBwd() {
   if (!needResetBwd_) {
     return;
   }

@@ -231,7 +231,7 @@ void MkldnnFcLayer::resetBwd() {
   pipelineBwd_.push_back(*bwdData_);
 }

-void MkldnnFcLayer::forward(PassType passType) {
+void MKLDNNFcLayer::forward(PassType passType) {
   Layer::forward(passType);

   reshape();

@@ -253,7 +253,7 @@ void MkldnnFcLayer::forward(PassType passType) {
   }
 }

-void MkldnnFcLayer::backward(const UpdateCallback& callback) {
+void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
   /* Do derivation */ {
     REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
     backwardActivation();
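Note: the convertWeightsFromPaddle() / convertWeightsToPaddle() pair above exists because Paddle and MKL-DNN keep fully-connected weights in mutually transposed layouts, which is why the code routes through dnnWgt->transpose(paddleWgt, true). A hedged sketch of what that boundary conversion amounts to, written against plain row-major buffers rather than Paddle's Matrix API:

    #include <cstddef>
    #include <vector>

    // Rewrite an (rows x cols) row-major weight buffer as its
    // (cols x rows) transpose. Only an illustration of the layout change;
    // the real layer delegates to paddle::Matrix::transpose.
    std::vector<float> transposeWeights(const std::vector<float>& w,
                                        std::size_t rows, std::size_t cols) {
      std::vector<float> t(w.size());
      for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
          t[c * rows + r] = w[r * cols + c];
        }
      }
      return t;
    }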
paddle/gserver/layers/MkldnnFcLayer.h → paddle/gserver/layers/MKLDNNFcLayer.h
@@ -14,17 +14,17 @@ limitations under the License. */

 #pragma once

-#include "MkldnnLayer.h"
+#include "MKLDNNLayer.h"
 #include "mkldnn.hpp"

 namespace paddle {

 /**
- * @brief A subclass of MkldnnLayer fc layer.
+ * @brief A subclass of MKLDNNLayer fc layer.
  *
  * The config file api is mkldnn_fc
  */
-class MkldnnFcLayer : public MkldnnLayer {
+class MKLDNNFcLayer : public MKLDNNLayer {
 protected:
   // input layer size, can not be change after init
   size_t iLayerSize_;  // == ic * ih * iw

@@ -37,10 +37,10 @@ protected:
   std::unique_ptr<Weight> biases_;

 public:
-  explicit MkldnnFcLayer(const LayerConfig& config)
-      : MkldnnLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
+  explicit MKLDNNFcLayer(const LayerConfig& config)
+      : MKLDNNLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}

-  ~MkldnnFcLayer() {}
+  ~MKLDNNFcLayer() {}

   bool init(const LayerMap& layerMap,
             const ParameterMap& parameterMap) override;
paddle/gserver/layers/MkldnnLayer.h → paddle/gserver/layers/MKLDNNLayer.h
@@ -16,7 +16,7 @@ limitations under the License. */

 #include <vector>
 #include "Layer.h"
-#include "MkldnnBase.h"
+#include "MKLDNNBase.h"
 #include "mkldnn.hpp"

 DECLARE_bool(use_mkldnn);

@@ -24,14 +24,14 @@ DECLARE_bool(use_mkldnn_wgt);

 namespace paddle {

-class MkldnnLayer;
-typedef std::shared_ptr<MkldnnLayer> MkldnnLayerPtr;
+class MKLDNNLayer;
+typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;

 /**
- * @brief Base class of Mkldnn layer.
+ * @brief Base class of MKLDNN layer.
  *
  */
-class MkldnnLayer : public Layer {
+class MKLDNNLayer : public Layer {
 protected:
   // batch size
   int bs_;

@@ -45,14 +45,14 @@ protected:
   // mkldnn engine, stream and primivtives
   mkldnn::engine engine_;
-  std::shared_ptr<MkldnnStream> stream_;
+  std::shared_ptr<MKLDNNStream> stream_;
   std::shared_ptr<mkldnn::primitive> fwd_;
   std::shared_ptr<mkldnn::primitive> bwdWgt_;
   std::shared_ptr<mkldnn::primitive> bwdData_;
   std::vector<mkldnn::primitive> pipelineFwd_;
   std::vector<mkldnn::primitive> pipelineBwd_;

-  // TODO(TJ): change below memory as MkldnnMatrixPtr type
+  // TODO(TJ): change below memory as MKLDNNMatrixPtr type
   std::shared_ptr<mkldnn::memory> inVal_;
   std::shared_ptr<mkldnn::memory> inGrad_;
   std::shared_ptr<mkldnn::memory> outVal_;

@@ -63,7 +63,7 @@ protected:
   std::shared_ptr<mkldnn::memory> biasGrad_;

 public:
-  explicit MkldnnLayer(const LayerConfig& config)
+  explicit MKLDNNLayer(const LayerConfig& config)
       : Layer(config),
         bs_(0),
         ic_(0),

@@ -79,7 +79,7 @@ public:
         bwdWgt_(nullptr),
         bwdData_(nullptr) {}

-  ~MkldnnLayer() {}
+  ~MKLDNNLayer() {}

   virtual bool init(const LayerMap& layerMap,
                     const ParameterMap& parameterMap) {

@@ -90,8 +90,8 @@ public:
     CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
                             << "Please set WITH_MKLDNN=ON "
                             << "and set use_mkldnn=True";
-    stream_.reset(new MkldnnStream());
-    engine_ = CpuEngine::Instance().getEngine();
+    stream_.reset(new MKLDNNStream());
+    engine_ = CPUEngine::Instance().getEngine();

     // TODO(TJ): deivecId
     return true;
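Note: the forward declaration plus MKLDNNLayerPtr typedef above is what later lets the tester recover the MKL-DNN-specific interface from a generic layer handle with std::dynamic_pointer_cast (see checkBackwardWgts() in MKLDNNTester.cpp below). A small self-contained sketch of that downcast, with stand-in types:

    #include <cassert>
    #include <memory>

    // Stand-ins for paddle::Layer and paddle::MKLDNNLayer.
    struct Layer { virtual ~Layer() {} };
    struct MKLDNNLayer : Layer {
      void convertWeightsToPaddle() { /* layout conversion lives here */ }
    };
    typedef std::shared_ptr<Layer> LayerPtr;
    typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;

    int main() {
      LayerPtr generic = std::make_shared<MKLDNNLayer>();
      // Non-null only if the pointee really is an MKLDNNLayer.
      MKLDNNLayerPtr dnn = std::dynamic_pointer_cast<MKLDNNLayer>(generic);
      assert(dnn);
      dnn->convertWeightsToPaddle();
      return 0;
    }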
paddle/gserver/tests/CMakeLists.txt
@@ -20,11 +20,11 @@ add_test(NAME test_LayerGrad
 ########## test_Mkldnn layers and activations ##########
 if(WITH_MKLDNN)
-  add_unittest_without_exec(test_Mkldnn
-    test_Mkldnn.cpp
-    MkldnnTester.cpp
+  add_unittest_without_exec(test_MKLDNN
+    test_MKLDNN.cpp
+    MKLDNNTester.cpp
     LayerGradUtil.cpp)
-  add_test(NAME test_Mkldnn COMMAND test_Mkldnn)
+  add_test(NAME test_MKLDNN COMMAND test_MKLDNN)
 endif()

 ################ test_CRFLayerGrad ####################
paddle/gserver/tests/MkldnnTester.cpp → paddle/gserver/tests/MKLDNNTester.cpp
@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "MkldnnTester.h"
-#include "paddle/gserver/layers/MkldnnBase.h"
-#include "paddle/gserver/layers/MkldnnLayer.h"
+#include "MKLDNNTester.h"
+#include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"

 namespace paddle {

 // init data layer and test layer of both dnn and reference
-void MkldnnTester::reset(const TestConfig& dnn,
+void MKLDNNTester::reset(const TestConfig& dnn,
                          const TestConfig& ref,
                          size_t batchSize) {
   const bool trans = false;

@@ -71,7 +71,7 @@ void MkldnnTester::reset(const TestConfig& dnn,
   setInputImgSize();
 }

-void MkldnnTester::setInputImgSize() {
+void MKLDNNTester::setInputImgSize() {
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
     for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
       // TODO(TJ): fix me when concat and elewise ready

@@ -82,7 +82,7 @@ void MkldnnTester::setInputImgSize() {
 }

 // init randome parameters of ref, and copy to mkldnn
-void MkldnnTester::randomWgtDatas() {
+void MKLDNNTester::randomWgtDatas() {
   EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
   for (size_t i = 0; i < parameters_[REF].size(); ++i) {
     const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);

@@ -96,7 +96,7 @@ void MkldnnTester::randomWgtDatas() {
 }

 // random botdata of ref layer and copy same to mkldnn
-void MkldnnTester::randomBotDatas() {
+void MKLDNNTester::randomBotDatas() {
   CHECK_EQ(dataLayers_.size(), NUM);
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     dataLayers_[REF][i]->getOutputValue()->randomizeUniform();

@@ -107,14 +107,14 @@ void MkldnnTester::randomBotDatas() {
   }
 }

-void MkldnnTester::randomTopDiffs() {
+void MKLDNNTester::randomTopDiffs() {
   refLayer_->getOutputGrad()->randomizeUniform();
   dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
   VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
   printMatrix(refLayer_->getOutputGrad());
 }

-void MkldnnTester::checkForward() {
+void MKLDNNTester::checkForward() {
   printTopDatas();
   double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
                                testLayers_[REF]->getOutputValue());

@@ -122,7 +122,7 @@ void MkldnnTester::checkForward() {
   EXPECT_LE(fabs(delta), eps_);
 }

-void MkldnnTester::checkBackwardData() {
+void MKLDNNTester::checkBackwardData() {
   const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();

@@ -141,13 +141,13 @@ void MkldnnTester::checkBackwardData() {
   }
 }

-void MkldnnTester::checkBackwardWgts() {
+void MKLDNNTester::checkBackwardWgts() {
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   vector<VectorPtr> dnnWgts;  // used to temply save mkldnn weights
   saveWgt(parameters_[DNN], dnnWgts);

-  const MkldnnLayerPtr dnnlayer =
-      std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_);
+  const MKLDNNLayerPtr dnnlayer =
+      std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
   CHECK(dnnlayer);
   dnnlayer->convertWeightsToPaddle();
   for (size_t i = 0; i < parameters_[DNN].size(); ++i) {

@@ -166,7 +166,7 @@ void MkldnnTester::checkBackwardWgts() {
   restoreWgt(dnnWgts, parameters_[DNN]);
 }

-void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
+void MKLDNNTester::saveWgt(const vector<ParameterPtr>& from,
                            vector<VectorPtr>& to) {
   const bool useGpu = false;
   to.resize(from.size());

@@ -177,7 +177,7 @@ void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
   }
 }

-void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
+void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
                               vector<ParameterPtr>& to) {
   CHECK_EQ(from.size(), to.size());
   for (size_t i = 0; i < from.size(); ++i) {

@@ -187,7 +187,7 @@ void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
 }

 // clear parameters grad
-void MkldnnTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs() {
   for (size_t n = 0; n < parameters_.size(); ++n) {
     for (size_t i = 0; i < parameters_[n].size(); ++i) {
       const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);

@@ -198,7 +198,7 @@ void MkldnnTester::clearWgtDiffs() {
   }
 }

-void MkldnnTester::clearBotDiffs() {
+void MKLDNNTester::clearBotDiffs() {
   // dnn and ref
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
     // all inputs layers

@@ -208,7 +208,7 @@ void MkldnnTester::clearBotDiffs() {
   }
 }

-void MkldnnTester::clearBotDiffs(int n) {
+void MKLDNNTester::clearBotDiffs(int n) {
   CHECK_LT(n, NUM);
   // all inputs layers
   for (size_t i = 0; i < dataLayers_[n].size(); ++i) {

@@ -216,13 +216,13 @@ void MkldnnTester::clearBotDiffs(int n) {
   }
 }

-void MkldnnTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas() {
   for (size_t i = 0; i < testLayers_.size(); ++i) {
     testLayers_[i]->getOutputValue()->zeroMem();
   }
 }

-void MkldnnTester::printTopDatas() {
+void MKLDNNTester::printTopDatas() {
   if (!log_) {
     return;
   }

@@ -233,7 +233,7 @@ void MkldnnTester::printTopDatas() {
   }
 }

-void MkldnnTester::printMatrix(const MatrixPtr& m) {
+void MKLDNNTester::printMatrix(const MatrixPtr& m) {
   if (!log_) {
     return;
   }

@@ -243,7 +243,7 @@ void MkldnnTester::printMatrix(const MatrixPtr& m) {
   VLOG(lvl_) << std::endl << ostr.str();
 }

-void MkldnnTester::printVector(const VectorPtr& v) {
+void MKLDNNTester::printVector(const VectorPtr& v) {
   if (!log_) {
     return;
   }

@@ -253,7 +253,7 @@ void MkldnnTester::printVector(const VectorPtr& v) {
   VLOG(lvl_) << std::endl << ostr.str();
 }

-double MkldnnTester::getDelta(const real* d1,
+double MKLDNNTester::getDelta(const real* d1,
                               const real* d2,
                               size_t len,
                               const float failRate,

@@ -280,17 +280,17 @@ double MkldnnTester::getDelta(const real* d1,
   return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
 }

-double MkldnnTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
+double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
   CHECK_EQ(m1->getElementCnt(), m2->getElementCnt());
   return getDelta(m1->getData(), m2->getData(), m1->getElementCnt());
 }

-double MkldnnTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
+double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
   CHECK_EQ(v1->getSize(), v2->getSize());
   return getDelta(v1->getData(), v2->getData(), v1->getSize());
 }

-void MkldnnTester::runOnce() {
+void MKLDNNTester::runOnce() {
   // test forward
   randomBotDatas();
   dnnLayer_->forward(PASS_TRAIN);

@@ -310,7 +310,7 @@ void MkldnnTester::runOnce() {
   clearBotDiffs(REF);
 }

-void MkldnnTester::run(const TestConfig& dnn,
+void MKLDNNTester::run(const TestConfig& dnn,
                        const TestConfig& ref,
                        size_t batchSize,
                        size_t inputImgH,
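Note: all the comparisons above funnel into getDelta, whose visible return statement reports the summed relative error delta / sum unless more than failRate of the elements deviate, in which case it reports the worst single deviation maxOut. The accumulation loop itself is collapsed in the diff view, so the sketch below fills it in by assumption; only the signature and the final return line are taken from the diff:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    typedef float real;

    // Assumed body for the collapsed middle of MKLDNNTester::getDelta.
    double getDeltaSketch(const real* d1, const real* d2, std::size_t len,
                          const float failRate = 1e-3) {
      double delta = 0, sum = 0, maxOut = 0;
      std::size_t failCnt = 0;
      for (std::size_t i = 0; i < len; ++i) {
        double diff = std::fabs(d1[i] - d2[i]);
        delta += diff;                    // accumulated absolute error
        sum += std::fabs(d1[i]);          // reference magnitude
        maxOut = std::max(maxOut, diff);  // worst single deviation
        if (diff > 1e-5 + 1e-3 * std::fabs(d1[i])) {  // assumed tolerance
          ++failCnt;
        }
      }
      // Verbatim from the diff: too many failing elements -> report maxOut,
      // otherwise the summed relative error.
      return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
    }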
paddle/gserver/tests/MkldnnTester.h → paddle/gserver/tests/MKLDNNTester.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "LayerGradUtil.h"
-#include "paddle/gserver/layers/MkldnnBase.h"
+#include "paddle/gserver/layers/MKLDNNBase.h"

 namespace paddle {

@@ -25,7 +25,7 @@ namespace paddle {
  * @brief test the functionality of Mkldnnlayers
  * refer to paddle original function
  */
-class MkldnnTester {
+class MKLDNNTester {
   enum {
     DNN = 0,
     REF = 1,

@@ -54,14 +54,14 @@ protected:
   size_t ih_, iw_;

 public:
-  explicit MkldnnTester(size_t iter = 3, float epsilon = 1e-4) {
+  explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
     iter_ = iter;
     eps_ = epsilon;
     log_ = false;
     lvl_ = MKLDNN_ALL;
   }

-  ~MkldnnTester() {}
+  ~MKLDNNTester() {}

 public:
   void run(const TestConfig& dnn,
paddle/gserver/tests/test_Mkldnn.cpp → paddle/gserver/tests/test_MKLDNN.cpp
@@ -15,7 +15,7 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include <string>
 #include <vector>
-#include "MkldnnTester.h"
+#include "MKLDNNTester.h"
 #include "ModelConfig.pb.h"

 using namespace paddle;  // NOLINT

@@ -43,7 +43,7 @@ void testFcLayer(const testFCDesc& pm) {
       /* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
   cfg.layerConfig.add_inputs();

-  MkldnnTester tester;
+  MKLDNNTester tester;
   for (auto biasSize : {pm.oc, 0}) {
     cfg.biasSize = biasSize;
     TestConfig ref = cfg;

@@ -54,7 +54,7 @@ void testFcLayer(const testFCDesc& pm) {
   }
 }

-TEST(MkldnnLayer, fcLayer) {
+TEST(MKLDNNLayer, FcLayer) {
   testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
   testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
   testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
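Note: testFcLayer takes a testFCDesc whose fields are only visible here through the inline /*bs*/ ... /*iw*/ comments and the pm.oc * pm.ic * pm.ih * pm.iw weight-size expression. Its presumed shape, for reference (the actual definition is not shown in this diff):

    // Presumed layout of testFCDesc, inferred from the member accesses
    // and inline comments above.
    struct testFCDesc {
      int bs;      // batch size
      int ic;      // input channels
      int oc;      // output channels
      int ih, iw;  // input height and width
    };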