机器未来 / Paddle
Forked from PaddlePaddle / Paddle
Commit e7d8e16a
Authored Aug 01, 2018 by fengjiayi
update softmax_mkldnn_op
Parent: dc111d34
Showing 1 changed file with 46 additions and 25 deletions.

paddle/fluid/operators/softmax_mkldnn_op.cc  (+46 / −25)
```diff
@@ -26,9 +26,9 @@ using paddle::platform::MKLDNNMemDesc;
 
 using mkldnn::memory;  // Note: paddle has also "memory" namespace
 using mkldnn::primitive;
-using mkldnn::softmax_forward;
-using mkldnn::softmax_backward;
 using mkldnn::prop_kind;
+using mkldnn::softmax_backward;
+using mkldnn::softmax_forward;
 using mkldnn::stream;
 using platform::to_void_cast;
 
```
```diff
@@ -113,17 +113,27 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
     auto mkldnn_engine = dev_ctx.GetEngine();
     const Tensor* input = ctx.Input<Tensor>("X");
     Tensor* output = ctx.Output<Tensor>("Out");
-    PADDLE_ENFORCE(input->dims().size() == 2UL,
-                   "The input of softmax op must be a 2D matrix.");
-    const T* input_data = input->data<T>();
-    // allocate memory for output
-    T* output_data = output->mutable_data<T>(ctx.GetPlace());
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
-    // MKL-DNN does support softmax over selected axis. Having 2D Tensor,
-    // we will make normalization after final eg. axis: 1
-    PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])),
-                   "Softmax input and output dimensions should match");
+    PADDLE_ENFORCE_EQ(
+        input->dims(), output->dims(),
+        "The shape of softmax's input and output must be identical.");
+
+    // make sure 'output' holds memory, which will be shared by
+    // 'flattened_output' later.
+    output->mutable_data<T>(ctx.GetPlace());
+
+    // flatten input and output to 2-D matrixs
+    auto dims = input->dims();  // input and output share the same shape
+    auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+    framework::Tensor flattened_input;
+    framework::Tensor flattened_output;
+    flattened_input.ShareDataWith(*input).Resize(flattened_dims);
+    flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+
+    const T* input_data = flattened_input.data<T>();
+    T* output_data = flattened_output.mutable_data<T>(ctx.GetPlace());
+
+    std::vector<int> src_tz = paddle::framework::vectorize2int(flattened_dims);
+    std::vector<int> dst_tz = src_tz;
     // Same memory descriptor to be used for input and output
     memory::dims softmax_tz = {src_tz[0], src_tz[1]};
     // Generate keys for storing/retriving primitives for this operator
```
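The core of the forward-kernel change: instead of requiring a 2-D input, the kernel now views any N-D input as a 2-D matrix whose last axis is the softmax axis. `framework::flatten_to_2d(dims, dims.size() - 1)` collapses all leading axes into the row dimension, and `ShareDataWith(...).Resize(...)` reinterprets the existing buffer without copying. The sketch below is a standalone illustration of that shape arithmetic plus a plain row-wise softmax for reference; `flatten_to_2d_dims` and `softmax_rows` are hypothetical helpers, not Paddle or MKL-DNN APIs.

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Collapse an N-D shape into {prod(dims[0..axis-1]), prod(dims[axis..N-1])},
// mirroring the idea of flatten_to_2d(dims, dims.size() - 1): the last axis
// stays as the column (softmax) axis. Illustrative helper, not Paddle API.
std::vector<int> flatten_to_2d_dims(const std::vector<int>& dims, std::size_t axis) {
  int rows = std::accumulate(dims.begin(), dims.begin() + axis, 1,
                             std::multiplies<int>());
  int cols = std::accumulate(dims.begin() + axis, dims.end(), 1,
                             std::multiplies<int>());
  return {rows, cols};
}

// Reference row-wise softmax over a row-major rows x cols buffer; the per-row
// max is subtracted for numerical stability.
void softmax_rows(const float* in, float* out, int rows, int cols) {
  for (int r = 0; r < rows; ++r) {
    const float* x = in + r * cols;
    float* y = out + r * cols;
    float max_val = x[0];
    for (int c = 1; c < cols; ++c) max_val = std::max(max_val, x[c]);
    float sum = 0.f;
    for (int c = 0; c < cols; ++c) {
      y[c] = std::exp(x[c] - max_val);
      sum += y[c];
    }
    for (int c = 0; c < cols; ++c) y[c] /= sum;
  }
}

int main() {
  // A {2, 3, 4} tensor is treated as a {6, 4} matrix: softmax runs over the
  // last axis of length 4, once per each of the 6 flattened "rows".
  std::vector<int> dims = {2, 3, 4};
  std::vector<int> flat = flatten_to_2d_dims(dims, dims.size() - 1);
  std::vector<float> in(flat[0] * flat[1]);
  std::iota(in.begin(), in.end(), 0.f);
  std::vector<float> out(in.size());
  softmax_rows(in.data(), out.data(), flat[0], flat[1]);
  std::cout << "flattened to " << flat[0] << " x " << flat[1] << "\n";
  return 0;
}
```

With this view, the MKL-DNN primitive still only ever sees the 2-D NC layout it supports, while callers may pass inputs of any rank.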
```diff
@@ -174,23 +184,34 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
     auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
     auto mkldnn_engine = dev_ctx.GetEngine();
     const Tensor* output = ctx.Input<Tensor>("Out");
-    const T* dst_data = output->data<T>();
-
     auto* dout = ctx.template Input<Tensor>(framework::GradVarName("Out"));
-    const auto* diff_dst_ptr = dout->template data<T>();
-
     auto* dx =
         ctx.template Output<framework::Tensor>(framework::GradVarName("X"));
-    T* diff_src_ptr = dx->template mutable_data<T>(ctx.GetPlace());
 
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    PADDLE_ENFORCE_EQ(
+        dout->dims(), dx->dims(),
+        "The shape of softmax_grad's input and output must be identical.");
+
+    // make sure 'dx' holds memory, which will be shared by 'flattened_dx'
+    // later.
+    dx->template mutable_data<T>(ctx.GetPlace());
+
+    auto dims = dout->dims();  // input and output share the same shape
+    auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+    framework::Tensor flattened_output;
+    framework::Tensor flattened_dout;
+    framework::Tensor flattened_dx;
+    flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+    flattened_dout.ShareDataWith(*dout).Resize(flattened_dims);
+    flattened_dx.ShareDataWith(*dx).Resize(flattened_dims);
+
+    const T* dst_data = flattened_output.data<T>();
+    const T* diff_dst_ptr = flattened_dout.template data<T>();
+    T* diff_src_ptr = flattened_dx.template mutable_data<T>(ctx.GetPlace());
+
+    std::vector<int> dst_tz = paddle::framework::vectorize2int(flattened_dims);
     std::vector<int> src_tz(dst_tz);
-    PADDLE_ENFORCE(output->dims().size() == 2UL,
-                   "The input of softmax op must be a 2D matrix.");
-    // MKL-DNN does support softmax over selected axis. Having 2D Tensor,
-    // we will make normalization after final eg. axis: 1
-    PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])),
-                   "Softmax input and output dimensions should match");
+
     // Same memory descriptor to be used for input and output
     memory::dims softmax_tz = {src_tz[0], src_tz[1]};
     // Currently only supports NC data format
```
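On the backward side, the same flattening is applied to `Out`, `dOut`, and `dX` before they are handed to the MKL-DNN `softmax_backward` primitive. For reference, the per-row softmax gradient that computation is expected to produce is dx = y * (dy - sum(dy * y)); the sketch below is an illustrative stand-in for that math, assuming row-major rows x cols buffers, and is not MKL-DNN or Paddle code.

```cpp
#include <vector>

// Reference row-wise softmax gradient for a rows x cols matrix:
// given y = softmax(x) and dy = dL/dy, compute for each row
//   dx[i] = y[i] * (dy[i] - sum_j dy[j] * y[j]).
// Illustrative stand-in for what the flattened grad kernel delegates to the
// MKL-DNN softmax_backward primitive.
void softmax_grad_rows(const float* y, const float* dy, float* dx,
                       int rows, int cols) {
  for (int r = 0; r < rows; ++r) {
    const float* yr = y + r * cols;
    const float* dyr = dy + r * cols;
    float* dxr = dx + r * cols;
    float dot = 0.f;  // sum_j dy[j] * y[j] for this row
    for (int c = 0; c < cols; ++c) dot += dyr[c] * yr[c];
    for (int c = 0; c < cols; ++c) dxr[c] = yr[c] * (dyr[c] - dot);
  }
}
```

Because `dX` is resized through `ShareDataWith(*dx).Resize(flattened_dims)`, the gradient is written straight into the original N-D tensor's buffer, so no copy back is needed.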