Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 479689f6 (unverified)
Authored by Jacek Czaja on May 14, 2021; committed via GitHub on May 14, 2021

[oneDNN] Refactoring of softmax grad onednn kernel to match common API (#32851)

Parent commit: 42aad304
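In rough terms (a reading of the diff below, not an authoritative summary), the refactor moves shape, format and axis handling out of SoftmaxMKLDNNGradKernel and into the SoftmaxMKLDNNHandler constructor, so the grad kernel follows the same pattern as the other oneDNN kernels: build the handler from the ExecutionContext and the tensors, then acquire the memories and the backward primitive. A minimal sketch of the resulting call pattern, with names taken from the diff and the surrounding kernel boilerplate elided:

    // Sketch of the grad kernel after this change (see the full diff below).
    // The handler now derives dims, axis and memory formats from the tensors.
    SoftmaxMKLDNNHandler<T> handler(ctx, dev_ctx, ctx.GetPlace(), output,
                                    out_grad, in_x_grad, ctx.InputName("Out"));

    auto dst_memory_p = handler.AcquireDstMemory(output);             // forward output y
    auto diff_dst_memory_p = handler.AcquireDiffDstMemory(out_grad);  // dL/dy
    auto diff_src_memory_p = handler.AcquireDiffSrcMemory(in_x_grad); // dL/dx (written)

    auto softmax_bwd_p = handler.AcquireBackwardPrimitive();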
Showing 2 changed files with 35 additions and 42 deletions (+35, -42):

  paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc                     +33  -42
  python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py    +2   -0
paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc

@@ -15,15 +15,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/softmax_op.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"
 
-namespace paddle {
-namespace framework {
-class Tensor;
-}  // namespace framework
-namespace platform {
-class MKLDNNDeviceContext;
-}  // namespace platform
-}  // namespace paddle
-
 namespace paddle {
 namespace operators {
@@ -74,22 +65,34 @@ class SoftmaxMKLDNNHandler
     }
   }
 
-  SoftmaxMKLDNNHandler(const std::vector<int64_t>& dims,
-                       const MKLDNNMemoryFormat fmt,
-                       const MKLDNNMemoryFormat diff_fmt, const int& axis,
-                       const platform::MKLDNNDeviceContext& dev_ctx,
-                       platform::Place cpu_place, const std::string& uniq_name)
+  SoftmaxMKLDNNHandler(const framework::ExecutionContext& ctx,
+                       const MKLDNNDeviceContext& dev_ctx,
+                       platform::Place cpu_place, const Tensor* out,
+                       const Tensor* out_grad, Tensor* in_x_grad,
+                       const std::string& unique_name)
       : platform::MKLDNNHandlerT<T, mkldnn::softmax_forward,
                                  mkldnn::softmax_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(dev_ctx, dims, uniq_name)) {
-    auto data_softmax_md =
-        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
-    auto diff_softmax_md =
-        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
-
-    this->AcquireBackwardPrimitiveDescriptor(diff_softmax_md, data_softmax_md,
-                                             axis);
+            platform::CreateKey(dev_ctx, framework::vectorize(out->dims()),
+                                unique_name)) {
+    if (!this->isBwdCached()) {
+      PADDLE_ENFORCE_EQ(
+          out_grad->dims(), in_x_grad->dims(),
+          platform::errors::InvalidArgument(
+              "The shape of softmax_grad's input "
+              "and output must be identical."));
+
+      auto dims = out_grad->dims();  // input and output share the same shape
+      const int axis = CanonicalAxis(ctx.Attr<int>("axis"), dims.size());
+      auto softmax_tz = framework::vectorize<int64_t>(dims);
+
+      auto data_softmax_md = MKLDNNMemDesc(
+          softmax_tz, platform::MKLDNNGetDataType<T>(), out->format());
+      auto diff_softmax_md = MKLDNNMemDesc(
+          softmax_tz, platform::MKLDNNGetDataType<T>(), out_grad->format());
+
+      this->AcquireBackwardPrimitiveDescriptor(diff_softmax_md, data_softmax_md,
+                                               axis);
+    }
   }
 };
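A note on the isBwdCached() guard above (based on a general understanding of the MKLDNNHandlerT base class; the mechanism itself is not part of this diff): handlers store their oneDNN descriptors and primitives under the key produced by platform::CreateKey, so when the same key shows up again the constructor can skip rebuilding the memory descriptors entirely. A simplified, self-contained sketch of that keyed-caching idea; the map, struct and function names here are hypothetical and are not Paddle's actual internals:

    #include <memory>
    #include <string>
    #include <unordered_map>

    // Hypothetical illustration only: real Paddle code keeps these blobs in the
    // MKLDNNDeviceContext rather than in a global map.
    struct BwdPrimitiveDesc { /* descriptor state built from memory descs, axis, ... */ };

    static std::unordered_map<std::string, std::shared_ptr<BwdPrimitiveDesc>> cache;

    std::shared_ptr<BwdPrimitiveDesc> AcquireBwdPD(const std::string& key) {
      auto it = cache.find(key);
      if (it != cache.end()) {
        return it->second;  // the "isBwdCached()" case: reuse and skip all setup
      }
      auto pd = std::make_shared<BwdPrimitiveDesc>();  // build descriptors only once
      cache.emplace(key, pd);
      return pd;
    }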
@@ -145,27 +148,15 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
                           "Operator DNNL SoftmaxGrad must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
     const Tensor* output = ctx.Input<Tensor>("Out");
-    auto* dout = ctx.template Input<Tensor>(framework::GradVarName("Out"));
-    auto* dx =
-        ctx.template Output<framework::Tensor>(framework::GradVarName("X"));
-
-    PADDLE_ENFORCE_EQ(
-        dout->dims(), dx->dims(),
-        platform::errors::InvalidArgument(
-            "The shape of softmax_grad's input and output must be identical."));
-
-    auto dims = dout->dims();  // input and output share the same shape
-    const int axis = CanonicalAxis(ctx.Attr<int>("axis"), dims.size());
-    auto softmax_tz = paddle::framework::vectorize<int64_t>(dims);
-
-    SoftmaxMKLDNNHandler<T> handler(softmax_tz, output->format(),
-                                    dout->format(), axis, dev_ctx,
-                                    ctx.GetPlace(), ctx.InputName("Out"));
+    auto* out_grad = ctx.template Input<Tensor>(framework::GradVarName("Out"));
+    auto* in_x_grad = ctx.template Output<Tensor>(framework::GradVarName("X"));
+
+    SoftmaxMKLDNNHandler<T> handler(ctx, dev_ctx, ctx.GetPlace(), output,
+                                    out_grad, in_x_grad, ctx.InputName("Out"));
 
     auto dst_memory_p = handler.AcquireDstMemory(output);
-    auto diff_dst_memory_p = handler.AcquireDiffDstMemory(dout);
-    auto diff_src_memory_p = handler.AcquireDiffSrcMemory(dx);
+    auto diff_dst_memory_p = handler.AcquireDiffDstMemory(out_grad);
+    auto diff_src_memory_p = handler.AcquireDiffSrcMemory(in_x_grad);
 
     auto softmax_bwd_p = handler.AcquireBackwardPrimitive();

@@ -176,8 +167,8 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
                       {MKLDNN_ARG_DIFF_SRC, *diff_src_memory_p}});
     astream.wait();
 
-    dx->set_layout(framework::DataLayout::kMKLDNN);
-    dx->set_format(dout->format());
+    in_x_grad->set_layout(framework::DataLayout::kMKLDNN);
+    in_x_grad->set_format(platform::GetMKLDNNFormat(*diff_src_memory_p));
   }
 };
 }  // namespace operators
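For reference, the quantity the softmax backward primitive writes into DIFF_SRC above is the usual softmax Jacobian-vector product: with y = softmax(x) along the chosen axis, dx_i = y_i * (dy_i - sum_j dy_j * y_j). A small standalone C++ check of that formula along a single axis, independent of oneDNN and added here only to make the DST / DIFF_DST / DIFF_SRC semantics concrete:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Reference softmax gradient along one axis:
    // dx[i] = y[i] * (dy[i] - sum_j dy[j] * y[j]), where y = softmax(x).
    std::vector<float> SoftmaxGradRef(const std::vector<float>& y,
                                      const std::vector<float>& dy) {
      float dot = 0.f;
      for (std::size_t j = 0; j < y.size(); ++j) dot += dy[j] * y[j];
      std::vector<float> dx(y.size());
      for (std::size_t i = 0; i < y.size(); ++i) dx[i] = y[i] * (dy[i] - dot);
      return dx;
    }

    int main() {
      std::vector<float> y = {0.1f, 0.2f, 0.7f};   // already softmax-normalized
      std::vector<float> dy = {1.0f, 0.0f, 0.0f};  // upstream gradient
      for (float v : SoftmaxGradRef(y, dy)) std::printf("%f\n", v);
      return 0;
    }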
python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py

@@ -129,4 +129,6 @@ class TestSoftmaxMKLDNNPrimitivesAlreadyExist(unittest.TestCase):
 
 if __name__ == '__main__':
+    from paddle import enable_static
+    enable_static()
     unittest.main()