BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 56a714a1 (unverified)
Authored May 26, 2020 by Adam; committed by GitHub on May 26, 2020
Add isCached() mechanism to oneDNN pooling primitive (#24724)
Parent: a0846b62

Changes: 2 changed files with 95 additions and 77 deletions (+95, -77)
paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc   +6   -48
paddle/fluid/platform/mkldnn_reuse.h              +89  -29
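
The core of the change is a guard in PoolingMKLDNNHandler: attribute parsing, input validation and forward-primitive-descriptor creation now run only when nothing has been cached yet under the handler's key. Below is a minimal, framework-agnostic sketch of that pattern; the class, the static map and FwdPrimitiveDesc are hypothetical stand-ins, not Paddle's MKLDNNHandlerT or MKLDNNDeviceContext.

#include <memory>
#include <string>
#include <unordered_map>

// Stand-in for an expensive-to-build object such as a oneDNN primitive descriptor.
struct FwdPrimitiveDesc { /* ... */ };

class CachedHandlerSketch {
 public:
  explicit CachedHandlerSketch(const std::string& key) : key_(key) {
    if (!isCached()) {
      // Expensive one-time work: read op attributes, validate shapes,
      // build the forward primitive descriptor, then store it under key_.
      cache_[key_] = std::make_shared<FwdPrimitiveDesc>();
    }
    pd_ = cache_[key_];  // Cheap path on every later call with the same key.
  }

 private:
  // Cached if a previous call already stored a descriptor under this key.
  bool isCached() const { return cache_.count(key_) != 0; }

  std::string key_;
  std::shared_ptr<FwdPrimitiveDesc> pd_;
  // In Paddle the cache lives in the MKLDNN device context; a static map stands in here.
  static std::unordered_map<std::string, std::shared_ptr<FwdPrimitiveDesc>> cache_;
};

std::unordered_map<std::string, std::shared_ptr<FwdPrimitiveDesc>>
    CachedHandlerSketch::cache_;

On the first call for a given key the expensive branch runs once; later calls with an identical key reuse the stored object, which is what the if (!this->isCached()) block in the diff below achieves for the oneDNN pooling primitive.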
paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc

@@ -38,57 +38,14 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                           "Operator DNNL Pool must use CPUPlace"));
     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
     const auto& mkldnn_engine = dev_ctx.GetEngine();
 
     const Tensor* input = ctx.Input<Tensor>("X");
     Tensor* output = ctx.Output<Tensor>("Out");
 
-    PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Input tensor");
-    PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Input tensor");
-
-    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
-
-    std::vector<int> ksize_temp = ctx.Attr<std::vector<int>>("ksize");
-    std::vector<int64_t> ksize(begin(ksize_temp), end(ksize_temp));
-
-    std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
-    std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));
-
-    std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
-    std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));
-
-    bool global_pooling = ctx.Attr<bool>("global_pooling");
-    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
-
-    // Only 2D pooling is supported now
-    PADDLE_ENFORCE_EQ(ksize.size(), 2, "ksize must be 2D, i.e. 2D pooling");
-    PADDLE_ENFORCE_EQ(pooling_type == "max" || pooling_type == "avg", true,
-                      "pooling_type must be 'max' or 'avg'");
-    PADDLE_ENFORCE_EQ(input->dims().size(), 4,
-                      "Input dim must be with 4, i.e. NCHW");
-
-    auto input_dims = input->dims();
-    framework::DDim data_dims =
-        framework::slice_ddim(input_dims, 2, input_dims.size());
-
-    if (global_pooling) {
-      UpdateKsize(&ksize, data_dims);
-    }
-
-    UpdatePadding(&paddings, global_pooling, 0, padding_algorithm, data_dims,
-                  strides, ksize);
-
-    auto src_tz = paddle::framework::vectorize<int64_t>(input->dims());
-    auto dst_tz = paddle::framework::vectorize<int64_t>(output->dims());
-
-    auto is_test = ctx.Attr<bool>("is_test");
-
-    platform::PoolingMKLDNNHandler<T> handler(
-        src_tz, dst_tz, ksize, strides, paddings, pooling_type,
-        ctx.Attr<bool>("ceil_mode"), input->format(),
-        paddle::framework::ToMKLDNNDataType(input->type()), is_test, dev_ctx,
-        ctx.GetPlace(), ctx.OutputName("Out"), ctx.Attr<bool>("exclusive"));
+    platform::PoolingMKLDNNHandler<T> handler(ctx, dev_ctx, mkldnn_engine,
+                                              ctx.GetPlace(), input, output,
+                                              ctx.OutputName("Out"));
 
     auto src_memory = handler.AcquireSrcMemory(input);
     auto dst_memory = handler.AcquireDstMemory(output);

@@ -96,7 +53,8 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto pool_p = handler.AcquireForwardPrimitive();
 
     mkldnn::stream astream(dev_ctx.GetEngine());
-    if ((is_test == false) && (pooling_type == "max")) {
+    if ((ctx.Attr<bool>("is_test") == false) &&
+        (ctx.Attr<std::string>("pooling_type") == "max")) {
       // Training
       auto workspace_memory = handler.AcquireWorkspaceMemory();
       pool_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
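
For context on the training branch above (pooling_type == "max" with is_test == false): max pooling run in forward_training mode produces a workspace that records where each maximum came from, so the backward pass can route gradients; that is why the kernel acquires a workspace memory before executing. Below is a small standalone sketch, assuming the oneDNN 1.x C++ API (dnnl.hpp and the DNNL_ARG_* macros) rather than Paddle's wrappers; the function name and tensor shapes are made up for illustration.

#include <algorithm>
#include <vector>

#include "dnnl.hpp"

// run_max_pool_fwd_training is an illustrative name, not a Paddle or oneDNN symbol.
void run_max_pool_fwd_training() {
  using namespace dnnl;

  engine eng(engine::kind::cpu, 0);
  stream s(eng);

  // NCHW f32 tensors: 1x1x4x4 input, 2x2 kernel, stride 2 -> 1x1x2x2 output.
  memory::desc src_md({1, 1, 4, 4}, memory::data_type::f32,
                      memory::format_tag::nchw);
  memory::desc dst_md({1, 1, 2, 2}, memory::data_type::f32,
                      memory::format_tag::nchw);

  // forward_training + pooling_max is the combination that needs a workspace.
  pooling_forward::desc pool_d(prop_kind::forward_training,
                               algorithm::pooling_max, src_md, dst_md,
                               /*strides=*/{2, 2}, /*kernel=*/{2, 2},
                               /*padding_l=*/{0, 0}, /*padding_r=*/{0, 0});
  pooling_forward::primitive_desc pd(pool_d, eng);

  memory src_mem(src_md, eng);
  memory dst_mem(dst_md, eng);
  // The primitive descriptor decides the workspace layout.
  memory ws_mem(pd.workspace_desc(), eng);

  std::vector<float> src_data(16);
  for (size_t i = 0; i < src_data.size(); ++i) src_data[i] = float(i);
  std::copy(src_data.begin(), src_data.end(),
            static_cast<float*>(src_mem.get_data_handle()));

  pooling_forward(pd).execute(s, {{DNNL_ARG_SRC, src_mem},
                                  {DNNL_ARG_DST, dst_mem},
                                  {DNNL_ARG_WORKSPACE, ws_mem}});
  s.wait();
}

In inference mode (forward_inference) no workspace is required, which matches the else path of the kernel.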
paddle/fluid/platform/mkldnn_reuse.h

@@ -21,6 +21,7 @@ limitations under the License. */
 #include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/operators/pool_op.h"
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #include "paddle/fluid/platform/place.h"
@@ -592,41 +593,100 @@ template <typename T>
 class PoolingMKLDNNHandler
     : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
                             mkldnn::pooling_backward> {
  public:
-  PoolingMKLDNNHandler(const std::vector<int64_t>& src_dims,
-                       const std::vector<int64_t>& dst_dims,
-                       const std::vector<int64_t>& ksize,
-                       const std::vector<int64_t>& strides,
-                       const std::vector<int64_t>& paddings,
-                       const std::string& pooling_type, bool ceil_mode,
-                       const MKLDNNMemoryFormat fmt,
-                       mkldnn::memory::data_type dt, bool is_test,
-                       const platform::MKLDNNDeviceContext& dev_ctx,
-                       platform::Place cpu_place,
-                       const std::string& unique_name, bool exclude_padding)
+  PoolingMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
+                       const MKLDNNDeviceContext& dev_ctx,
+                       const mkldnn::engine mkldnn_engine,
+                       platform::Place cpu_place, const Tensor* input,
+                       Tensor* output, const std::string& unique_name)
       : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                  mkldnn::pooling_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
-            platform::CreateKey(src_dims, dt, unique_name)) {
-    auto src_md = mkldnn::memory::desc(src_dims, dt, fmt);
-    /* create memory descriptor for pooling without specified format
-     * ('any') which lets a primitive (pooling in this case) choose
-     * the memory format preferred for best performance
-     */
-    auto dst_md =
-        platform::MKLDNNMemDesc(dst_dims, dt, MKLDNNMemoryFormat::any);
+            platform::CreateKey(framework::vectorize(input->dims()),
+                                framework::ToMKLDNNDataType(input->type()),
+                                unique_name)) {
+    if (!this->isCached()) {
+      PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
+                        platform::errors::InvalidArgument(
+                            "Wrong layout set for Input tensor"));
+      PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
+                        platform::errors::InvalidArgument(
+                            "Wrong format set for Input tensor"));
+
+      const std::string pooling_type = ctx.Attr<std::string>("pooling_type");
+
+      std::vector<int> ksize_temp = ctx.Attr<std::vector<int>>("ksize");
+      std::vector<int64_t> ksize(begin(ksize_temp), end(ksize_temp));
+
+      std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
+      std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));
+
+      std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
+      std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));
+
+      const bool global_pooling = ctx.Attr<bool>("global_pooling");
+      const std::string padding_algorithm =
+          ctx.Attr<std::string>("padding_algorithm");
+
+      // Only 2D pooling is supported now
+      PADDLE_ENFORCE_EQ(ksize.size(), 2,
+                        platform::errors::InvalidArgument(
+                            "ksize must be 2D, i.e. 2D pooling"));
+      PADDLE_ENFORCE_EQ(pooling_type == "max" || pooling_type == "avg", true,
+                        platform::errors::InvalidArgument(
+                            "pooling_type must be 'max' or 'avg'"));
+      PADDLE_ENFORCE_EQ(input->dims().size(), 4,
+                        platform::errors::InvalidArgument(
+                            "Input dim must be with 4, i.e. NCHW"));
+
+      const auto input_dims = input->dims();
+      framework::DDim data_dims =
+          framework::slice_ddim(input_dims, 2, input_dims.size());
+
+      if (global_pooling) {
+        operators::UpdateKsize(&ksize, data_dims);
+      }
 
-    auto mkldnn_paddings = ToMkldnnPadding(paddings);
+      operators::UpdatePadding(&paddings, global_pooling, 0, padding_algorithm,
+                               data_dims, strides, ksize);
+
+      const auto src_tz = paddle::framework::vectorize(input->dims());
+      const auto dst_tz = paddle::framework::vectorize(output->dims());
+
+      const auto is_test = ctx.Attr<bool>("is_test");
+      const auto dt = framework::ToMKLDNNDataType(input->type());
+      const auto fmt = input->format();
+      const auto exclude_padding = ctx.Attr<bool>("exclusive");
+
+      const auto src_md = mkldnn::memory::desc(src_tz, dt, fmt);
+      /* create memory descriptor for pooling without specified format
+       * ('any') which lets a primitive (pooling in this case) choose
+       * the memory format preferred for best performance
+       */
+      const auto dst_md =
+          platform::MKLDNNMemDesc(dst_tz, dt, MKLDNNMemoryFormat::any);
 
-    if (ceil_mode) {
-      CorrectOutputSize(src_dims, dst_dims, ksize, paddings, strides,
-                        mkldnn_paddings[1]);
-    }
+      auto mkldnn_paddings = ToMkldnnPadding(paddings);
+
+      const bool ceil_mode = ctx.Attr<bool>("ceil_mode");
+
+      if (ceil_mode) {
+        CorrectOutputSize(src_tz, dst_tz, ksize, paddings, strides,
+                          mkldnn_paddings[1]);
+      }
 
-    this->AcquireForwardPrimitiveDescriptor(
-        is_test ? mkldnn::prop_kind::forward_inference
-                : mkldnn::prop_kind::forward_training,
-        pooling_type == "max"
-            ? mkldnn::algorithm::pooling_max
-            : (exclude_padding
-                   ? mkldnn::algorithm::pooling_avg_exclude_padding
-                   : mkldnn::algorithm::pooling_avg_include_padding),
-        src_md, dst_md, strides, ksize, mkldnn_paddings[0],
-        mkldnn_paddings[1]);
+      this->AcquireForwardPrimitiveDescriptor(
+          is_test ? mkldnn::prop_kind::forward_inference
+                  : mkldnn::prop_kind::forward_training,
+          pooling_type == "max"
+              ? mkldnn::algorithm::pooling_max
+              : (exclude_padding
+                     ? mkldnn::algorithm::pooling_avg_exclude_padding
+                     : mkldnn::algorithm::pooling_avg_include_padding),
+          src_md, dst_md, strides, ksize, mkldnn_paddings[0],
+          mkldnn_paddings[1]);
+    }
   }
 
   PoolingMKLDNNHandler(
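
Note how the new constructor keys the cache: platform::CreateKey(framework::vectorize(input->dims()), framework::ToMKLDNNDataType(input->type()), unique_name) combines the input shape, the oneDNN data type and the output variable name, so a different shape or dtype creates a fresh entry instead of reusing a stale primitive descriptor. Below is a rough, hypothetical illustration of such a composite key; MakePoolCacheKey is not Paddle's CreateKey and the exact string format is invented.

#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

// Illustrative stand-in for platform::CreateKey: flatten the input dims, a
// data-type id and the output name into one string so that a different shape
// or type yields a different cache entry.
std::string MakePoolCacheKey(const std::vector<int64_t>& input_dims,
                             int mkldnn_data_type,
                             const std::string& output_name) {
  std::ostringstream key;
  for (int64_t d : input_dims) key << d << "x";   // e.g. "2x3x224x224x"
  key << "dt" << mkldnn_data_type << "@" << output_name;
  return key.str();
}

Anything not folded into the key (here, the remaining pooling attributes) is assumed to stay constant for a given output name across iterations, which is the usual situation when the same program is run repeatedly for inference.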