Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
7483087c
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
7483087c
编写于
9月 22, 2017
作者:
T
tensor-tang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
enable mkldnn_softmax
上级
330e9929
变更
4
隐藏空白更改
内联
并排
Showing
4 changed files
with
123 additions
and
44 deletions
+123
-44
paddle/gserver/activations/MKLDNNActivation.cpp
paddle/gserver/activations/MKLDNNActivation.cpp
+24
-0
paddle/gserver/activations/MKLDNNActivation.h
paddle/gserver/activations/MKLDNNActivation.h
+96
-41
paddle/gserver/tests/test_MKLDNN.cpp
paddle/gserver/tests/test_MKLDNN.cpp
+2
-2
python/paddle/trainer/config_parser.py
python/paddle/trainer/config_parser.py
+1
-1
未找到文件。
paddle/gserver/activations/MKLDNNActivation.cpp
浏览文件 @
7483087c
...
...
@@ -26,6 +26,25 @@ static ClassRegistrar<ActivationFunction> gMKLDNNActivationRegistrar;
*/
#define MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE) mkldnn_##ACT_TYPE##Activation
/**
 * @def DEFINE_MKLDNN_ACTIVATION
 * @brief Define an MKLDNN activation class named mkldnn_<ACT_TYPE> deriving
 *        from BASE_CLASS, give it a getName() returning "mkldnn_" #ACT_TYPE,
 *        and register it in gMKLDNNActivationRegistrar at static-init time
 *        under that same key, so MKLDNNActivation::create can build it by name.
 */
#define DEFINE_MKLDNN_ACTIVATION(ACT_TYPE, BASE_CLASS) \
class MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE) : public BASE_CLASS { \
private: \
static const std::string name; \
\
public: \
const std::string& getName() const { return name; } \
}; \
const std::string MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::name = \
"mkldnn_" #ACT_TYPE; \
static InitFunction __reg_activation__mkldnn_##ACT_TYPE([] { \
gMKLDNNActivationRegistrar \
.registerClass<MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)>( \
"mkldnn_" #ACT_TYPE); \
});
/**
* @def DEFINE_MKLDNN_ELTWISE_ACTIVATION
*/
...
...
@@ -73,6 +92,11 @@ DEFINE_MKLDNN_ELTWISE_ACTIVATION(tanh, 0.f, 0.f)
*/
// Register mkldnn_elu: an eltwise activation with alpha = 0.f, beta = 0.f.
DEFINE_MKLDNN_ELTWISE_ACTIVATION(elu, 0.f, 0.f)
/**
 * @brief MKLDNN Softmax Activation.
 * Registered as "mkldnn_softmax", implemented by MKLDNNSoftmaxActivation.
 */
DEFINE_MKLDNN_ACTIVATION(softmax, MKLDNNSoftmaxActivation)
/**
 * Factory: build a registered MKLDNN activation by its type string
 * (e.g. "mkldnn_relu"). Delegates the lookup to the class registrar.
 */
ActivationFunction* MKLDNNActivation::create(const std::string& type) {
  return gMKLDNNActivationRegistrar.createByType(type);
}
...
...
paddle/gserver/activations/MKLDNNActivation.h
浏览文件 @
7483087c
...
...
@@ -36,6 +36,7 @@ protected:
// mkldnn matrix, primitive, stream and pipeline
MKLDNNMatrixPtr val_;   // value wrapped as an MKLDNN matrix
MKLDNNMatrixPtr grad_;  // gradient wrapped as an MKLDNN matrix
std::shared_ptr<mkldnn::engine> engine_;     // cpu engine used by primitives
std::shared_ptr<MKLDNNStream> stream_;       // stream that submits pipelines
std::shared_ptr<mkldnn::primitive> fwd_;     // forward primitive
std::shared_ptr<mkldnn::primitive> bwd_;     // backward primitive
...
...
@@ -48,8 +49,44 @@ public:
static
ActivationFunction
*
create
(
const
std
::
string
&
type
);
static
std
::
vector
<
std
::
string
>
getAllRegisteredTypes
();
virtual
const
std
::
string
&
getName
()
const
=
0
;
virtual
Error
__must_check
forward
(
Argument
&
act
)
=
0
;
virtual
Error
__must_check
backward
(
Argument
&
act
)
=
0
;
/**
 * Reset the forward primitives: refresh the element count, clear the
 * forward pipeline, recreate the stream and cpu engine, and wrap act.value
 * as an MKLDNNMatrix (nchw) when it is not one already.
 */
virtual void resetFwd(Argument& act) {
  VLOG(MKLDNN_BASE) << getName() << " reset mkldnn forward";
  cnt_ = act.value->getElementCnt();
  pipelineFwd_.clear();
  stream_.reset(new MKLDNNStream());
  engine_.reset(new mkldnn::engine(mkldnn::engine::cpu, 0));
  val_ = std::dynamic_pointer_cast<MKLDNNMatrix>(act.value);
  if (val_ == nullptr) {
    // Infer an nchw shape from the Argument; missing height/width mean 1.
    int bs = act.getBatchSize();
    int ih = act.getFrameHeight() > 0 ? act.getFrameHeight() : 1;
    int iw = act.getFrameWidth() > 0 ? act.getFrameWidth() : 1;
    int ic = cnt_ / bs / ih / iw;
    CHECK_EQ(cnt_, (size_t)bs * ic * ih * iw);
    val_ = MKLDNNMatrix::create(
        act.value, {bs, ic, ih, iw}, mkldnn::memory::format::nchw, *engine_);
    CHECK(val_);
    // Collapse trailing 1x1 spatial dims to the simplest memory format.
    val_->downSpatial();
  }
}
/**
 * Reset the backward primitives. Kept separate from resetFwd on purpose:
 * the grad data may still change before backward runs, so backward state
 * cannot be prepared during the forward reset. Default: nothing to do.
 */
virtual void resetBwd(Argument& act) {}
virtual
Error
__must_check
forward
(
Argument
&
act
)
{
resetFwd
(
act
);
stream_
->
submit
(
pipelineFwd_
);
return
Error
();
}
virtual
Error
__must_check
backward
(
Argument
&
act
)
{
resetBwd
(
act
);
stream_
->
submit
(
pipelineBwd_
);
return
Error
();
}
};
/**
...
...
@@ -70,9 +107,7 @@ protected:
public:
MKLDNNEltwiseActivation
()
{}
~
MKLDNNEltwiseActivation
()
{}
virtual
const
std
::
string
&
getName
()
const
=
0
;
// in common, the alpha of forward and backward should be equal.
...
...
@@ -93,42 +128,21 @@ public:
return
(
mkldnn
::
algorithm
)
0
;
}
/**
* reshape and reset the forward primitives
*/
void
resetFwd
(
Argument
&
act
)
{
void
resetFwd
(
Argument
&
act
)
override
{
if
(
cnt_
==
act
.
value
->
getElementCnt
())
{
return
;
}
VLOG
(
MKLDNN_BASE
)
<<
getName
()
<<
" reset mkldnn forward"
;
cnt_
=
act
.
value
->
getElementCnt
();
stream_
.
reset
(
new
MKLDNNStream
());
auto
eng
=
CPUEngine
::
Instance
().
getEngine
();
// get algo setting
mkldnn
::
algorithm
algo
=
getAlgo
(
this
->
getName
());
MKLDNNActivation
::
resetFwd
(
act
);
// note: alpha represents the NegativeSlope when used in relu.
float
alpha
=
getAlpha
();
float
beta
=
getBeta
();
pipelineFwd_
.
clear
();
val_
=
std
::
dynamic_pointer_cast
<
MKLDNNMatrix
>
(
act
.
value
);
if
(
val_
==
nullptr
)
{
int
bs
=
act
.
getBatchSize
();
int
ih
=
act
.
getFrameHeight
()
>
0
?
act
.
getFrameHeight
()
:
1
;
int
iw
=
act
.
getFrameWidth
()
>
0
?
act
.
getFrameWidth
()
:
1
;
int
ic
=
cnt_
/
bs
/
ih
/
iw
;
CHECK_EQ
(
cnt_
,
(
size_t
)
bs
*
ic
*
ih
*
iw
);
val_
=
MKLDNNMatrix
::
create
(
act
.
value
,
{
bs
,
ic
,
ih
,
iw
},
mkldnn
::
memory
::
format
::
nchw
,
eng
);
CHECK
(
val_
);
}
mkldnn
::
algorithm
algo
=
getAlgo
(
this
->
getName
());
auto
fwdDesc
=
eltwise_fwd
::
desc
(
mkldnn
::
prop_kind
::
forward_training
,
algo
,
val_
->
getMemoryDesc
(),
alpha
,
beta
);
fwdPD_
.
reset
(
new
eltwise_fwd
::
primitive_desc
(
fwdDesc
,
eng
));
fwdPD_
.
reset
(
new
eltwise_fwd
::
primitive_desc
(
fwdDesc
,
*
engine_
));
// use inplace for forward but save input value before submit
inVal_
=
val_
;
copyInVal_
=
nullptr
;
...
...
@@ -144,11 +158,7 @@ public:
needResetBwd_
=
true
;
}
/**
* reset the backward primitives, can not merge into resetFwd as the grad data
* would be changing before backward.
*/
void
resetBwd
(
Argument
&
act
)
{
void
resetBwd
(
Argument
&
act
)
override
{
if
(
!
needResetBwd_
)
{
return
;
}
...
...
@@ -167,16 +177,61 @@ public:
pipelineBwd_
.
clear
();
pipelineBwd_
.
push_back
(
*
bwd_
);
}
};
Error
__must_check
forward
(
Argument
&
act
)
{
resetFwd
(
act
);
stream_
->
submit
(
pipelineFwd_
);
return
Error
();
/**
* @brief Base class of MKLDNN softmax Activation,
* only have mkldnn forward, use cpu implement for backward.
*/
class
MKLDNNSoftmaxActivation
:
public
MKLDNNActivation
{
typedef
mkldnn
::
softmax_forward
softmax_fwd
;
private:
// for backward
MatrixPtr
sftMaxSum_
;
MatrixPtr
sftMaxDot_
;
public:
MKLDNNSoftmaxActivation
()
{}
~
MKLDNNSoftmaxActivation
()
{}
virtual
const
std
::
string
&
getName
()
const
=
0
;
void
resetFwd
(
Argument
&
act
)
override
{
if
(
cnt_
==
act
.
value
->
getElementCnt
())
{
return
;
}
MKLDNNActivation
::
resetFwd
(
act
);
int
axis
=
1
;
auto
fwdDesc
=
softmax_fwd
::
desc
(
mkldnn
::
prop_kind
::
forward_scoring
,
val_
->
getMemoryDesc
(),
axis
);
auto
fwdPD
=
softmax_fwd
::
primitive_desc
(
fwdDesc
,
*
engine_
);
fwd_
.
reset
(
new
softmax_fwd
(
fwdPD
,
*
val_
,
*
val_
));
pipelineFwd_
.
push_back
(
*
fwd_
);
}
Error
__must_check
backward
(
Argument
&
act
)
{
resetBwd
(
act
);
stream_
->
submit
(
pipelineBwd_
);
Error
__must_check
backward
(
Argument
&
act
)
override
{
MatrixPtr
outputV
=
act
.
value
;
MatrixPtr
outputG
=
act
.
grad
;
if
(
outputG
->
useGpu
())
{
outputG
->
softmaxBackward
(
*
outputV
);
}
else
{
SetDevice
device
(
act
.
deviceId
);
Matrix
::
resizeOrCreate
(
sftMaxDot_
,
outputG
->
getHeight
(),
outputG
->
getWidth
(),
/* trans */
false
,
useGpu
(
act
.
deviceId
));
Matrix
::
resizeOrCreate
(
sftMaxSum_
,
outputG
->
getHeight
(),
1
,
/* trans */
false
,
useGpu
(
act
.
deviceId
));
sftMaxDot_
->
dotMul
(
*
outputG
,
*
outputV
);
sftMaxSum_
->
colMerge
(
*
sftMaxDot_
);
act
.
grad
->
softmaxDerivative
(
*
act
.
value
,
*
sftMaxSum_
);
}
return
Error
();
}
};
...
...
paddle/gserver/tests/test_MKLDNN.cpp
浏览文件 @
7483087c
...
...
@@ -222,8 +222,8 @@ static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
}
void
testActivation
(
std
::
string
&
actType
,
const
testActDesc
&
pm
)
{
// TODO(TJ):
mkldnn_softmax not implemented, paddle do not have
elu activation
if
(
actType
==
"mkldnn_
softmax"
||
actType
==
"mkldnn_
elu"
)
{
// TODO(TJ):
remove me when paddle support
elu activation
if
(
actType
==
"mkldnn_elu"
)
{
return
;
}
const
std
::
string
compareTypes
[]
=
{
actType
,
actType
.
erase
(
0
,
7
)};
...
...
python/paddle/trainer/config_parser.py
浏览文件 @
7483087c
...
...
@@ -1566,7 +1566,7 @@ class LayerBase(object):
self
.
config
=
g_config
.
model_config
.
layers
.
add
()
assert
isinstance
(
self
.
config
,
LayerConfig
)
use_mkldnn
=
bool
(
int
(
g_command_config_args
.
get
(
"use_mkldnn"
,
0
)))
mkldnn_acts
=
[
'relu'
,
'tanh'
]
mkldnn_acts
=
[
'relu'
,
'tanh'
,
'softmax'
]
if
use_mkldnn
and
active_type
in
mkldnn_acts
:
active_type
=
"mkldnn_"
+
active_type
self
.
config
.
name
=
name
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录