Unverified commit 5d604a6b, authored on Jan 27, 2021 by Wojciech Uss and committed via GitHub on Jan 27, 2021.
- Disabling oneDNN inplace pass (#30588) (#30710)
Co-authored-by: Jacek Czaja <jacek.czaja@intel.com>
Parent commit: 02af1a62
Showing 5 changed files with 35 additions and 28 deletions (+35 −28).
paddle/fluid/inference/api/paddle_pass_builder.cc        +3  −4
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc    +3  −3
paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc          +1  −1
paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc       +13 −8
paddle/fluid/platform/mkldnn_reuse.h                     +15 −12
paddle/fluid/inference/api/paddle_pass_builder.cc

@@ -224,12 +224,11 @@ void CpuPassStrategy::EnableMKLDNN() {
             //   "fc_mkldnn_pass",
             //   "fc_act_mkldnn_fuse_pass",
             "batch_norm_act_fuse_pass",
-#ifndef _WIN32
             // TODO(intel): Please fix the bug on windows.
             // https://github.com/PaddlePaddle/Paddle/issues/29710
-            "mkldnn_inplace_pass",  // This pass should be activated after
-                                    // fuses
-#endif
+            //  "mkldnn_inplace_pass",  // This pass should be activated after
+            // fuses. Disabled by default due to
+            // little gain and lots of problems
         })) {
      passes_.push_back(pass);
    }
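With this change the in-place pass is no longer part of the default CPU MKLDNN pass list, but it can still be opted into at configuration time. A minimal sketch, assuming the Paddle Inference C++ API (AnalysisConfig and its generic PaddlePassBuilder::AppendPass hook; verify the names against the release in use):

// Sketch only: explicitly re-enabling the pass that this commit disables.
#include <string>
#include "paddle_inference_api.h"

paddle::AnalysisConfig MakeConfig(const std::string& model_dir) {
  paddle::AnalysisConfig config;
  config.SetModel(model_dir);
  config.EnableMKLDNN();
  // Append after EnableMKLDNN() so the pass lands at the end of the
  // CPU MKLDNN strategy; per the commit, expect little gain and possible issues.
  config.pass_builder()->AppendPass("mkldnn_inplace_pass");
  return config;
}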
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc

@@ -99,17 +99,17 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
                         "5, or 6, but now the dimension size is",
                         x->dims().size()));

+  bool is_inplaced = x->IsSharedBufferWith(*y);
   auto src_tz = framework::vectorize<int64_t>(x->dims());

   auto src_format = src_tz.size() == 2 ? MKLDNNMemoryFormat::nc : x->format();

   platform::ActivationMKLDNNHandler<T> handler(src_tz, algorithm, alpha, beta,
                                                src_format, dev_ctx, ctx.GetPlace(),
-                                               ctx.InputName("X"));
+                                               ctx.InputName("X"), is_inplaced);

   auto src_memory_p = handler.AcquireSrcMemory(x);
-  auto dst_memory_p = x->IsSharedBufferWith(*y) ? src_memory_p
-                                                : handler.AcquireDstMemory(y);
+  auto dst_memory_p = is_inplaced ? src_memory_p : handler.AcquireDstMemory(y);
   auto activation_p = handler.AcquireForwardPrimitive();

   mkldnn::stream astream(dev_ctx.GetEngine());
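The pattern above, repeated for softmax further down, is: detect in-place execution once by checking whether input and output share the same allocation, pass that flag into the handler so the cache key can account for it, and reuse the source memory object as the destination. A simplified standalone sketch of that selection step, with a plain Buffer struct standing in for Paddle's Tensor and its IsSharedBufferWith check:

// Illustration only; Buffer is a stand-in for a tensor allocation.
#include <cassert>
#include <memory>
#include <vector>

struct Buffer {
  std::shared_ptr<std::vector<float>> data;
  bool SharesBufferWith(const Buffer& other) const {
    return data == other.data;  // same underlying allocation
  }
};

// Mirror of the kernel logic: for in-place execution, the "destination"
// memory is simply the source memory object.
const Buffer& PickDst(const Buffer& src, const Buffer& dst) {
  const bool is_inplaced = src.SharesBufferWith(dst);
  return is_inplaced ? src : dst;
}

int main() {
  auto storage = std::make_shared<std::vector<float>>(16, 1.0f);
  Buffer x{storage}, y{storage};   // y aliases x, so the op runs in place
  assert(&PickDst(x, y) == &x);
  return 0;
}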
paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc

@@ -127,7 +127,7 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
                              strides, ksize);

     platform::PoolingMKLDNNHandler<T>::ComputeAdaptivePoolParameters(
-        ctx, paddle::framework::vectorize(in_x->dims()), ksize, strides);
+        ctx, paddle::framework::vectorize(in_x->dims()), &ksize, &strides);

     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc

@@ -48,13 +48,17 @@ class SoftmaxMKLDNNHandler
                        const mkldnn::engine mkldnn_engine,
                        platform::Place cpu_place, const Tensor* input,
                        Tensor* output, const int axis,
-                       const std::string uniq_name)
+                       const std::string uniq_name, bool is_inplaced)
       : platform::MKLDNNHandlerT<T, mkldnn::softmax_forward,
                                  mkldnn::softmax_backward>(
             dev_ctx, mkldnn_engine, cpu_place,
-            platform::CreateKey(dev_ctx, framework::vectorize(input->dims()),
-                                axis, uniq_name)) {
+            // Softmax may be inplace then uniq_name is no longer unique
+            is_inplaced ? platform::CreateKey(
+                              dev_ctx, framework::vectorize(input->dims()),
+                              axis, uniq_name)
+                        : platform::CreateKey(
+                              dev_ctx, framework::vectorize(input->dims()),
+                              uniq_name)) {
     if (!this->isCached()) {
       PADDLE_ENFORCE_EQ(input->dims(), output->dims(),

@@ -78,7 +82,7 @@ class SoftmaxMKLDNNHandler
       : platform::MKLDNNHandlerT<T, mkldnn::softmax_forward,
                                  mkldnn::softmax_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(dev_ctx, dims, axis, uniq_name)) {
+            platform::CreateKey(dev_ctx, dims, uniq_name)) {
     auto data_softmax_md =
         mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
     auto diff_softmax_md =

@@ -98,17 +102,18 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
     const Tensor* input = ctx.Input<Tensor>("X");
     Tensor* output = ctx.Output<Tensor>("Out");
+    bool is_inplaced = input->IsSharedBufferWith(*output);
     const int axis = CanonicalAxis(ctx.Attr<int>("axis"), input->dims().size());

     SoftmaxMKLDNNHandler<T> handler(dev_ctx, mkldnn_engine, ctx.GetPlace(),
-                                    input, output, axis, ctx.OutputName("Out"));
+                                    input, output, axis, ctx.OutputName("Out"),
+                                    is_inplaced);

     auto softmax_src_memory_p = handler.AcquireSrcMemory(input);
     // For Inplace src and and dst are the same memory object
-    auto softmax_dst_memory_p = input->IsSharedBufferWith(*output)
-                                    ? softmax_src_memory_p
-                                    : handler.AcquireDstMemory(output);
+    auto softmax_dst_memory_p =
+        is_inplaced ? softmax_src_memory_p : handler.AcquireDstMemory(output);
     auto softmax_p = handler.AcquireForwardPrimitive();
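The reason for threading is_inplaced into the key, per the in-diff comment, is that an in-place softmax makes uniq_name alone insufficient to keep cached primitives apart. A simplified standalone illustration of that keying idea follows; the string concatenation is only a stand-in for platform::CreateKey, whose real implementation differs:

// Illustration only: build distinct cache keys for in-place vs. out-of-place
// executions of the same op, so their cached primitives never collide.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::string MakeKey(const std::vector<int64_t>& dims, int axis,
                    const std::string& uniq_name, bool is_inplaced) {
  std::ostringstream key;
  for (auto d : dims) key << d << "x";
  // Only the in-place variant folds the axis into the key, mirroring the
  // ternary CreateKey selection in the diff above.
  if (is_inplaced) key << "axis" << axis;
  key << uniq_name;
  return key.str();
}

int main() {
  std::vector<int64_t> dims{8, 16};
  std::cout << MakeKey(dims, 1, "softmax_out", true) << "\n"
            << MakeKey(dims, 1, "softmax_out", false) << "\n";
  return 0;
}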
paddle/fluid/platform/mkldnn_reuse.h

@@ -601,12 +601,15 @@ class ActivationMKLDNNHandler
                          const MKLDNNMemoryFormat fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
-                         const std::string& unique_name)
+                         const std::string& unique_name, bool is_inplaced)
       : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                  mkldnn::eltwise_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(dev_ctx, dims, "a", algorithm, unique_name)) {
+            is_inplaced
+                ? platform::CreateKey(dev_ctx, dims, "a", algorithm, unique_name)
+                : platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
     auto md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

     this->AcquireForwardPrimitiveDescriptor(mkldnn::prop_kind::forward_training,

@@ -624,7 +627,7 @@ class ActivationMKLDNNHandler
       : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                  mkldnn::eltwise_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(dev_ctx, dims, "a", algorithm, unique_name)) {
+            platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
     auto diff_dst_md = platform::MKLDNNMemDesc(
         dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
     auto src_md =

@@ -813,7 +816,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                        mkldnn_paddings[1]);
       }

-      ComputeAdaptivePoolParameters(ctx, src_tz, ksize, strides);
+      ComputeAdaptivePoolParameters(ctx, src_tz, &ksize, &strides);

       this->AcquireForwardPrimitiveDescriptor(
           is_test ? mkldnn::prop_kind::forward_inference

@@ -883,22 +886,22 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
   static void ComputeAdaptivePoolParameters(
       const paddle::framework::ExecutionContext& ctx,
-      const std::vector<int64_t>& src_tz, std::vector<int64_t>& ksize,
-      std::vector<int64_t>& strides) {
+      const std::vector<int64_t>& src_tz, std::vector<int64_t>* ksize,
+      std::vector<int64_t>* strides) {
     if (ctx.Attr<bool>("adaptive")) {
       // (jczaja): oneDNN is supporting only unchangable in size pool window
       PADDLE_ENFORCE_EQ(
-          src_tz[src_tz.size() - 1] % ksize[1], 0,
+          src_tz[src_tz.size() - 1] % ksize->at(1), 0,
           platform::errors::Unimplemented(
               "Input dim must be divisible by corressponding ksize dim."));
       PADDLE_ENFORCE_EQ(
-          src_tz[src_tz.size() - 2] % ksize[0], 0,
+          src_tz[src_tz.size() - 2] % ksize->at(0), 0,
           platform::errors::Unimplemented(
               "Input dim must be divisible by corressponding ksize dim."));
-      ksize[0] = src_tz[src_tz.size() - 2] / ksize[0];
-      ksize[1] = src_tz[src_tz.size() - 1] / ksize[1];
-      strides[0] = ksize[0];
-      strides[1] = ksize[1];
+      ksize->at(0) = src_tz[src_tz.size() - 2] / ksize->at(0);
+      ksize->at(1) = src_tz[src_tz.size() - 1] / ksize->at(1);
+      strides->at(0) = ksize->at(0);
+      strides->at(1) = ksize->at(1);
     }
   }
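ComputeAdaptivePoolParameters turns an adaptive pooling request (where ksize initially holds the requested output size) into a fixed window and stride, since oneDNN only supports a constant pooling window. A minimal standalone sketch of the same arithmetic, outside Paddle's types, with a worked example: a 32x32 spatial input with a requested 4x4 adaptive output yields an 8x8 window and stride 8.

// Simplified re-statement of the arithmetic above (not Paddle code):
// window = input_dim / output_dim and stride = window, valid only when
// input_dim is divisible by output_dim (hence the PADDLE_ENFORCE_EQ checks).
#include <cassert>
#include <cstdint>
#include <vector>

void ComputeAdaptivePool(const std::vector<int64_t>& src_tz,
                         std::vector<int64_t>* ksize,
                         std::vector<int64_t>* strides) {
  const int64_t in_h = src_tz[src_tz.size() - 2];
  const int64_t in_w = src_tz[src_tz.size() - 1];
  assert(in_h % ksize->at(0) == 0 && in_w % ksize->at(1) == 0);
  ksize->at(0) = in_h / ksize->at(0);   // window height
  ksize->at(1) = in_w / ksize->at(1);   // window width
  strides->at(0) = ksize->at(0);        // non-overlapping windows
  strides->at(1) = ksize->at(1);
}

int main() {
  std::vector<int64_t> src_tz{1, 3, 32, 32};  // NCHW input
  std::vector<int64_t> ksize{4, 4};           // requested adaptive output size
  std::vector<int64_t> strides{1, 1};
  ComputeAdaptivePool(src_tz, &ksize, &strides);
  assert(ksize[0] == 8 && strides[0] == 8);   // 32 / 4 = 8
  return 0;
}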