Commit 30881647 (unverified)
[static op generation] pool2d, pool3d (#54070)
Authored by gouzil on Jun 05, 2023 · committed by GitHub on Jun 05, 2023 · parent 0a10cf40

Showing 12 changed files with 179 additions and 721 deletions (+179, −721)
paddle/fluid/operators/generator/get_expected_kernel_func.cc   +39  −0
paddle/fluid/operators/generator/get_expected_kernel_func.h    +8   −0
paddle/fluid/operators/pool_op.cc                              +0   −539
paddle/fluid/operators/pool_op.h                               +0   −61
paddle/fluid/operators/unity_build_rule.cmake                  +0   −1
paddle/phi/api/yaml/op_compat.yaml                             +26  −5
paddle/phi/api/yaml/static_backward.yaml                       +34  −0
paddle/phi/api/yaml/static_ops.yaml                            +22  −0
paddle/phi/kernels/onednn/pool_grad_kernel.cc                  +23  −1
paddle/phi/kernels/onednn/pool_kernel.cc                       +26  −1
paddle/phi/ops/compat/pool_sig.cc                              +0   −112
test/cpp/fluid/mkldnn/CMakeLists.txt                           +1   −1
paddle/fluid/operators/generator/get_expected_kernel_func.cc

@@ -61,6 +61,20 @@ static bool ReduceOpHasOptimizedOneDNNKernel(
   return true;
 }
 
+// only pool op
+bool CanMKLDNNSupportPool(const framework::ExecutionContext& ctx) {
+  if (ctx.Attr<bool>("adaptive") == false) return true;
+  // oneDNN supports only pool windows of unchanging size
+  auto src_tz = phi::vectorize(ctx.Input<phi::DenseTensor>("X")->dims());
+  if (!ctx.HasAttr("ksize")) {
+    return false;
+  }
+  std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
+  // Fast but not exhaustive check
+  return ((src_tz[src_tz.size() - 1] % ksize[1] == 0) &&
+          (src_tz[src_tz.size() - 2] % ksize[0] == 0));
+}
+
 phi::KernelKey GetCheckFiniteAndUnscaleExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr) {

@@ -136,6 +150,31 @@ phi::KernelKey GetAssignExpectedKernelType(
                         ctx.device_context().GetPlace());
 }
 
+phi::KernelKey GetPoolExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  auto data_type = op_ptr->OperatorWithKernel::IndicateVarDataType(ctx, "X");
+  // NOTE(jiahongyu): Below codes originally enclosed by PADDLE_WITH_MKLDNN
+  op_ptr->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
+  // NOTE(jiahongyu) END: Above codes originally enclosed by PADDLE_WITH_MKLDNN
+  return phi::KernelKey(data_type, ctx.GetPlace());
+}
+
+phi::KernelKey GetPoolDoubleGradExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  auto data_type =
+      op_ptr->OperatorWithKernel::IndicateVarDataType(ctx, "grad_x@GRAD");
+  // NOTE(jiahongyu): Below codes originally enclosed by PADDLE_WITH_MKLDNN
+  op_ptr->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
+  // NOTE(jiahongyu) END: Above codes originally enclosed by PADDLE_WITH_MKLDNN
+  return phi::KernelKey(data_type, ctx.GetPlace());
+}
+
 phi::KernelKey GetSgdExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr) {
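The divisibility test in CanMKLDNNSupportPool is the entire oneDNN gate for adaptive pooling: with adaptive pooling, "ksize" is the requested output size, and the pool maps onto a fixed oneDNN window only if each spatial dimension divides evenly by it. A minimal standalone sketch of the same check (plain C++ with hypothetical shape values, no Paddle dependencies):

#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the core test in CanMKLDNNSupportPool: the adaptive pool is
// expressible as a fixed window only when the last two dims divide evenly.
bool WindowDividesEvenly(const std::vector<int64_t>& src_tz,
                         const std::vector<int>& ksize) {
  return (src_tz[src_tz.size() - 1] % ksize[1] == 0) &&
         (src_tz[src_tz.size() - 2] % ksize[0] == 0);
}

int main() {
  // NCHW input 1x3x224x224, adaptive output 7x7: 224 % 7 == 0, so this is
  // equivalent to a fixed 32x32 window and oneDNN can handle it.
  std::cout << WindowDividesEvenly({1, 3, 224, 224}, {7, 7}) << "\n";  // 1
  // Adaptive output 5x5: 224 % 5 != 0, so SetDnnFallback(true) kicks in.
  std::cout << WindowDividesEvenly({1, 3, 224, 224}, {5, 5}) << "\n";  // 0
}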
paddle/fluid/operators/generator/get_expected_kernel_func.h
@@ -40,6 +40,14 @@ phi::KernelKey GetAssignExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr);
 
+phi::KernelKey GetPoolExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
+phi::KernelKey GetPoolDoubleGradExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
 phi::KernelKey GetSgdExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr);
paddle/fluid/operators/pool_op.cc
(file deleted, mode 100644 → 0)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
#include <unordered_map>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
namespace paddle {
namespace operators {

bool CanMKLDNNSupportPool(const framework::ExecutionContext& ctx) {
  if (ctx.Attr<bool>("adaptive") == false) return true;
  // oneDNN supports only pool windows of unchanging size
  auto src_tz = phi::vectorize(ctx.Input<phi::DenseTensor>("X")->dims());
  if (!ctx.HasAttr("ksize")) {
    return false;
  }
  std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
  // Fast but not exhaustive check
  return ((src_tz[src_tz.size() - 1] % ksize[1] == 0) &&
          (src_tz[src_tz.size() - 2] % ksize[0] == 0));
}

phi::KernelKey PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  // NOTE(jiahongyu): Below codes originally enclosed by PADDLE_WITH_MKLDNN
  this->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
  // NOTE(jiahongyu) END: Above codes originally enclosed by PADDLE_WITH_MKLDNN
  return phi::KernelKey(data_type, ctx.GetPlace());
}
phi::KernelKey PoolOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const phi::KernelKey& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
      (tensor.layout() != phi::DataLayout::ONEDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = phi::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != phi::DataLayout::kAnyLayout) {
      return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype());
    }
  }
#endif
  return phi::KernelKey(
      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}

phi::KernelKey PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  // NOTE(jiahongyu): Below codes originally enclosed by PADDLE_WITH_MKLDNN
  this->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
  // NOTE(jiahongyu): Above codes originally enclosed by PADDLE_WITH_MKLDNN
  return phi::KernelKey(input_data_type, ctx.GetPlace());
}

phi::KernelKey PoolOpGrad::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const phi::KernelKey& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
      (tensor.layout() != phi::DataLayout::ONEDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    return phi::KernelKey(tensor.place(),
                          phi::StringToDataLayout(data_format),
                          expected_kernel_type.dtype());
  }
#endif
  return phi::KernelKey(
      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}
void Pool2dOpMaker::Make() {
  AddInput("X",
           "(phi::DenseTensor) The input tensor of pooling operator. "
           "The format of input tensor is NCHW, where N is batch size, C is "
           "the number of channels, H is the height of the feature, "
           "and W is the width of the feature.");
  AddOutput("Out",
            "(phi::DenseTensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");
  AddAttr<std::string>(
      "pooling_type",
      "(string), pooling type, can be \"max\" for max-pooling "
      "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window "
      "size(height, width) of the pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.")
      .SupportTensor();
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1, 1}), strides(height, "
      "width) of pooling operator.")
      .SetDefault({1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker don't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings(height_top, height_bottom, "
      "width_left, width_right) of pooling operator. "
      "If global_pooling = true, paddings and kernel size will be ignored.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, will exclude the zero-padding in the "
      "averaging calculation; otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. "
      "Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, will perform adaptive pooling instead; "
      "output shape in H and W dimensions will be the same as ksize, input "
      "data will be divided evenly into grids specified by ksize, and "
      "pooling is performed in each grid area to get the output value. "
      "Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If it is set to False, "
      "the floor function will be used. Default False.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      " \"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for the padding algorithm.")
      .SetDefault("EXPLICIT");
  // TODO(dzhwinter): need to registered layout transform function
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel, need install cudnn. Default False.")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the pooling output based on
the input, pooling_type and pool_size, pool_stride, pool_padding parameters.
Input(X) and Output(Out) are in NCHW or NHWC format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters(pool_size, pool_stride, pool_padding) hold two integer elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
    X shape: $(N, C, H_{in}, W_{in})$
  Output:
    Out shape: $(N, C, H_{out}, W_{out})$
  For pool_padding = "SAME":
    $$H_{out} = \\frac{(H_{in} + strides[0] - 1)}{strides[0]}$$
    $$W_{out} = \\frac{(W_{in} + strides[1] - 1)}{strides[1]}$$
  For pool_padding = "VALID":
    $$H_{out} = \\frac{(H_{in} - ksize[0] + strides[0])}{strides[0]}$$
    $$W_{out} = \\frac{(W_{in} - ksize[1] + strides[1])}{strides[1]}$$
  For ceil_mode = false:
    $$H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom)}{strides[0]} + 1$$
    $$W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right)}{strides[1]} + 1$$
  For ceil_mode = true:
    $$H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom + strides[0] - 1)}{strides[0]} + 1$$
    $$W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right + strides[1] - 1)}{strides[1]} + 1$$
  For exclusive = false:
    $$hstart = i * strides[0] - pad_height_top$$
    $$hend = hstart + ksize[0]$$
    $$wstart = j * strides[1] - pad_width_left$$
    $$wend = wstart + ksize[1]$$
    $$Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}$$
  For exclusive = true:
    $$hstart = max(0, i * strides[0] - pad_height_top)$$
    $$hend = min(H, hstart + ksize[0])$$
    $$wstart = max(0, j * strides[1] - pad_width_left)$$
    $$wend = min(W, wstart + ksize[1])$$
    $$Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}$$
)DOC");
}
template <typename T>
class Pool2dOpGradGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("pool2d_double_grad");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};

class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};
void Pool3dOpMaker::Make() {
  AddInput("X",
           "(phi::DenseTensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW or NDHWC, where N is batch "
           "size, C is the number of channels, and D, H and W are the depth, "
           "height and width of the feature, respectively.");
  AddOutput("Out",
            "(phi::DenseTensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCDHW or NDHWC, "
            "where N is batch size, C is the number of channels, and D, H "
            "and W are the depth, height and width of the feature, "
            "respectively.");
  AddAttr<std::string>(
      "pooling_type",
      "(string) Pooling type, can be \"max\" for max-pooling "
      "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides(depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker don't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(pad_depth_front, "
      "pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, "
      "pad_width_right) of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker don't support vector type.)
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, will exclude the zero-padding in the "
      "averaging calculation; otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. "
      "Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, will perform adaptive pooling instead; "
      "output shape in H and W dimensions will be the same as ksize, input "
      "data will be divided evenly into grids specified by ksize, and "
      "pooling is performed in each grid area to get the output value. "
      "Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If it is set to False, "
      "the floor function will be used. Default False.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCDHW) An optional string from: \"NDHWC\", "
      "\"NCDHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("NCDHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      " \"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for the padding algorithm.")
      .SetDefault("EXPLICIT");
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel, need install cudnn. Default False.")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and output(Out) are in NCDHW or NDHWC format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters(pool_size, pool_stride, pool_padding)
hold three integer elements. These three elements represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
    X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
    Out shape: $(N, C, D_{out}, H_{out}, W_{out})$
  For pool_padding = "SAME":
    $$D_{out} = \\frac{(D_{in} + strides[0] - 1)}{strides[0]}$$
    $$H_{out} = \\frac{(H_{in} + strides[1] - 1)}{strides[1]}$$
    $$W_{out} = \\frac{(W_{in} + strides[2] - 1)}{strides[2]}$$
  For pool_padding = "VALID":
    $$D_{out} = \\frac{(D_{in} - ksize[0] + strides[0])}{strides[0]}$$
    $$H_{out} = \\frac{(H_{in} - ksize[1] + strides[1])}{strides[1]}$$
    $$W_{out} = \\frac{(W_{in} - ksize[2] + strides[2])}{strides[2]}$$
  For ceil_mode = false:
    $$D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back)}{strides[0]} + 1$$
    $$H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom)}{strides[1]} + 1$$
    $$W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right)}{strides[2]} + 1$$
  For ceil_mode = true:
    $$D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back + strides[0] - 1)}{strides[0]} + 1$$
    $$H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom + strides[1] - 1)}{strides[1]} + 1$$
    $$W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right + strides[2] - 1)}{strides[2]} + 1$$
  For exclusive = false:
    $$dstart = i * strides[0] - pad_depth_front$$
    $$dend = dstart + ksize[0]$$
    $$hstart = j * strides[1] - pad_height_top$$
    $$hend = hstart + ksize[1]$$
    $$wstart = k * strides[2] - pad_width_left$$
    $$wend = wstart + ksize[2]$$
    $$Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}$$
  For exclusive = true:
    $$dstart = max(0, i * strides[0] - pad_depth_front)$$
    $$dend = min(D, dstart + ksize[0])$$
    $$hstart = max(0, j * strides[1] - pad_height_top)$$
    $$hend = min(H, hstart + ksize[1])$$
    $$wstart = max(0, k * strides[2] - pad_width_left)$$
    $$wend = min(W, wstart + ksize[2])$$
    $$Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}$$
)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

DECLARE_INFER_SHAPE_FUNCTOR(pool2d,
                            Pool2dInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_grad,
                            Pool2dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_double_grad,
                            Pool2dDoubleGradInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));

REGISTER_OPERATOR(
    pool2d,
    ops::PoolOp,
    ops::Pool2dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool2dInferShapeFunctor);
REGISTER_OPERATOR(pool2d_grad,
                  ops::PoolOpGrad,
                  ops::Pool2dOpGradGradMaker<paddle::framework::OpDesc>,
                  ops::Pool2dOpGradGradMaker<paddle::imperative::OpBase>,
                  Pool2dGradInferShapeFunctor);
REGISTER_OPERATOR(pool2d_double_grad,
                  ops::PoolOp,
                  Pool2dDoubleGradInferShapeFunctor);

DECLARE_INFER_SHAPE_FUNCTOR(pool3d,
                            Pool3dInferShapeFunctor,
                            PD_INFER_META(phi::PoolInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool3d_grad,
                            Pool3dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

REGISTER_OPERATOR(
    pool3d,
    ops::PoolOp,
    ops::Pool3dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool3dInferShapeFunctor);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad, Pool3dGradInferShapeFunctor);
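The output-size formulas in the Pool2dOpMaker DOC string above reduce, per spatial dimension, to a single integer expression. A short sketch evaluating the floor (ceil_mode = false) and ceil (ceil_mode = true) cases with hypothetical sizes, standard C++ only:

#include <iostream>

// Output extent along one spatial dim, per the DOC formulas:
//   floor: (in - k + pad_a + pad_b) / stride + 1
//   ceil:  (in - k + pad_a + pad_b + stride - 1) / stride + 1
int PoolOutSize(int in, int k, int pad_a, int pad_b, int stride,
                bool ceil_mode) {
  int numer = in - k + pad_a + pad_b;
  if (ceil_mode) numer += stride - 1;  // turns the division into a ceiling
  return numer / stride + 1;
}

int main() {
  // H_in = 7, ksize = 2, no padding, stride 2:
  std::cout << PoolOutSize(7, 2, 0, 0, 2, false) << "\n";  // 3
  std::cout << PoolOutSize(7, 2, 0, 0, 2, true) << "\n";   // 4
}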
paddle/fluid/operators/pool_op.h
(file deleted, mode 100644 → 0)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {

class PoolOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  phi::KernelKey GetKernelTypeForVar(
      const std::string& var_name,
      const phi::DenseTensor& tensor,
      const phi::KernelKey& expected_kernel_type) const override;
};

class PoolOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  phi::KernelKey GetKernelTypeForVar(
      const std::string& var_name,
      const phi::DenseTensor& tensor,
      const phi::KernelKey& expected_kernel_type) const override;
};

class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override;
};

class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override;
};

}  // namespace operators
}  // namespace paddle
paddle/fluid/operators/unity_build_rule.cmake
@@ -200,7 +200,6 @@ register_unity_group(
   cc
   partial_sum_op.cc
   pixel_shuffle_op.cc
-  pool_op.cc
   pool_with_index_op.cc
   positive_negative_pair_op.cc
   prelu_op.cc
paddle/phi/api/yaml/op_compat.yaml
@@ -1887,17 +1887,38 @@
     out : Out
 
 - op : pool2d
-  backward : pool2d_grad
-  attrs :
-    kernel_size : ksize
+  backward : pool2d_grad, pool2d_double_grad
+  inputs :
+    {x : X}
+  outputs :
+    {out : Out}
+  attrs :
+    {kernel_size : ksize}
+  int_array :
+    kernel_size :
+      data_type : int
+      support_tensor : true
+  get_expected_kernel_type :
+    pool2d : GetPoolExpectedKernelType
+    pool2d_grad : GetPoolExpectedKernelType
+    pool2d_double_grad : GetPoolDoubleGradExpectedKernelType
   extra :
     attrs : [bool use_mkldnn = false, bool use_quantizer = false,
-            str mkldnn_data_type = "float32", bool is_test = false]
+            str mkldnn_data_type = "float32", bool is_test = false, bool use_cudnn = false]
 
 - op : pool3d
   backward : pool3d_grad
+  inputs :
+    {x : X}
+  outputs :
+    {out : Out}
+  attrs :
+    {kernel_size : ksize}
+  get_expected_kernel_type :
+    pool3d : GetPoolExpectedKernelType
+    pool3d_grad : GetPoolExpectedKernelType
   extra :
-    attrs : [bool use_mkldnn = false]
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
 - op : pow
   backward : pow_grad, pow_double_grad, pow_triple_grad
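The op_compat entries above are what let the generated static ops expose the public attribute name kernel_size while legacy programs keep ksize; the generator consults this mapping when emitting the op definition. A trivial stand-in sketch of that idea (a local table, not Paddle's actual data structure):

#include <iostream>
#include <map>
#include <string>

int main() {
  // What "attrs : {kernel_size : ksize}" expresses: public name -> legacy name.
  const std::map<std::string, std::string> attr_compat = {
      {"kernel_size", "ksize"}};
  std::cout << attr_compat.at("kernel_size") << "\n";  // prints "ksize"
}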
paddle/phi/api/yaml/static_backward.yaml
@@ -123,6 +123,40 @@
     func : max_grad
   composite : max_grad(x, out, out_grad, axis, keepdim, reduce_all, x_grad)
 
+- backward_op : pool2d_double_grad
+  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : Pool2DInferMeta
+    param : [grad_x_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  kernel :
+    func : pool2d_double_grad
+    param : [grad_x_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+
+- backward_op : pool2d_grad
+  forward : pool2d(Tensor x, IntArray kernel_size, int[] strides = {1,1}, int[] paddings = {0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT") -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : pool2d_grad
+    param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  backward : pool2d_double_grad
+
+- backward_op : pool3d_grad
+  forward : pool3d(Tensor x, int[] kernel_size, int[] strides = {1,1,1}, int[] paddings = {0,0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCDHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT") -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int[] ksize, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : pool3d_grad
+    param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+
 - backward_op : relu6_grad
   forward : relu6 (Tensor x, float threshold = 6.0f) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)
paddle/phi/api/yaml/static_ops.yaml
@@ -351,6 +351,28 @@
     func : p_recv_array
     param : [peer, dtype, out_shape]
 
+- op : pool2d
+  args : (Tensor x, IntArray kernel_size, int[] strides = {1,1}, int[] paddings = {0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT")
+  output : Tensor(out)
+  infer_meta :
+    func : Pool2DInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  kernel :
+    func : pool2d
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  backward : pool2d_grad
+
+- op : pool3d
+  args : (Tensor x, int[] kernel_size, int[] strides = {1,1,1}, int[] paddings = {0,0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCDHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT")
+  output : Tensor(out)
+  infer_meta :
+    func : PoolInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  kernel :
+    func : pool3d
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  backward : pool3d_grad
+
 - op : randint
   args : (int low, int high, IntArray shape = {}, DataType dtype = DataType::INT64, int seed = 0)
   output : Tensor(out)
paddle/phi/kernels/onednn/pool_grad_kernel.cc
@@ -71,6 +71,26 @@ void Pool2dGradKernel(const Context& dev_ctx,
   dx->set_mem_desc(diff_src_memory->get_desc());
 }
 
+phi::KernelKey PoolOpGradGetKernelTypeForVar(
+    const GetKernelTypeForVarContext* ctx) {
+  const DenseTensor& tensor = ctx->GetTensor();
+  const KernelKey& expected_kernel_type = ctx->GetKernelKey();
+#ifdef PADDLE_WITH_MKLDNN
+  if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
+      (tensor.layout() != phi::DataLayout::ONEDNN)) {
+    const AttributeMap& attrs = ctx->GetAttrs();
+    auto it = attrs.find("data_format");
+    const std::string data_format = PADDLE_GET_CONST(std::string, it->second);
+    return phi::KernelKey(tensor.place(),
+                          phi::StringToDataLayout(data_format),
+                          expected_kernel_type.dtype());
+  }
+#endif
+  return phi::KernelKey(
+      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
+}
+
 }  // namespace phi
 
 PD_REGISTER_KERNEL(pool2d_grad,
@@ -78,4 +98,6 @@ PD_REGISTER_KERNEL(pool2d_grad,
                    ONEDNN,
                    phi::Pool2dGradKernel,
                    float,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::bfloat16) {
+  kernel->get_kerneltype_forvar_fn_ = phi::PoolOpGradGetKernelTypeForVar;
+}
paddle/phi/kernels/onednn/pool_kernel.cc
@@ -70,6 +70,29 @@ void Pool2dKernel(const Context& dev_ctx,
   out->set_mem_desc(dst_memory->get_desc());
 }
 
+phi::KernelKey PoolOpGetKernelTypeForVar(
+    const GetKernelTypeForVarContext* ctx) {
+  const phi::DenseTensor& tensor = ctx->GetTensor();
+  const phi::KernelKey& expected_kernel_type = ctx->GetKernelKey();
+#ifdef PADDLE_WITH_MKLDNN
+  if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
+      (tensor.layout() != phi::DataLayout::ONEDNN)) {
+    const AttributeMap& attrs = ctx->GetAttrs();
+    auto it = attrs.find("data_format");
+    const std::string data_format = PADDLE_GET_CONST(std::string, it->second);
+    auto dl = phi::StringToDataLayout(data_format);
+    // Some models may have intentionally set "AnyLayout" for pool
+    // op. Treat this as NCHW (default data_format value)
+    if (dl != phi::DataLayout::kAnyLayout) {
+      return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype());
+    }
+  }
+#endif
+  return phi::KernelKey(
+      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
+}
+
 }  // namespace phi
 
 PD_REGISTER_KERNEL(pool2d,
@@ -79,4 +102,6 @@ PD_REGISTER_KERNEL(pool2d,
                    float,
                    int8_t,
                    uint8_t,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::bfloat16) {
+  kernel->get_kerneltype_forvar_fn_ = phi::PoolOpGetKernelTypeForVar;
+}
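Both GetKernelTypeForVar hooks above implement the same layout decision: if the kernel expects ONEDNN layout but the incoming tensor is not yet in it, key the variable by the op's data_format (with AnyLayout treated as the NCHW default in the forward kernel); otherwise keep the tensor's own layout. A standalone sketch of that branch structure with local stand-in types (not Paddle's):

#include <iostream>
#include <string>

enum class Layout { kNCHW, kNHWC, kAnyLayout, kONEDNN };

Layout StringToLayout(const std::string& s) {
  if (s == "NCHW") return Layout::kNCHW;
  if (s == "NHWC") return Layout::kNHWC;
  return Layout::kAnyLayout;  // e.g. "AnyLayout"
}

// Same decision as PoolOpGetKernelTypeForVar, stripped of Paddle types.
Layout ChooseVarLayout(Layout expected, Layout tensor,
                       const std::string& data_format) {
  if (expected == Layout::kONEDNN && tensor != Layout::kONEDNN) {
    Layout dl = StringToLayout(data_format);
    if (dl != Layout::kAnyLayout) return dl;  // trust the op's data_format
  }
  return tensor;  // otherwise keep the tensor's own layout
}

int main() {
  // data_format wins when entering the oneDNN region:
  std::cout << (ChooseVarLayout(Layout::kONEDNN, Layout::kNCHW, "NHWC") ==
                Layout::kNHWC)
            << "\n";  // 1
  // AnyLayout falls back to the tensor's layout (NCHW default):
  std::cout << (ChooseVarLayout(Layout::kONEDNN, Layout::kNCHW, "AnyLayout") ==
                Layout::kNCHW)
            << "\n";  // 1
}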
paddle/phi/ops/compat/pool_sig.cc
(file deleted, mode 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {

KernelSignature Pool2dOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("pool2d",
                         {"X"},
                         {"ksize", "strides", "paddings", "ceil_mode",
                          "exclusive", "data_format", "pooling_type",
                          "global_pooling", "adaptive", "padding_algorithm"},
                         {"Out"});
}

KernelSignature Pool2dGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("pool2d_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"ksize", "strides", "paddings", "ceil_mode",
                          "exclusive", "data_format", "pooling_type",
                          "global_pooling", "adaptive", "padding_algorithm"},
                         {"X@GRAD"});
}

KernelSignature Pool2dDoubleGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("pool2d_double_grad",
                         {"X"},
                         {"ksize", "strides", "paddings", "ceil_mode",
                          "exclusive", "data_format", "pooling_type",
                          "global_pooling", "adaptive", "padding_algorithm"},
                         {"Out"});
}

KernelSignature Pool3dOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("pool3d",
                         {"X"},
                         {"ksize", "strides", "paddings", "ceil_mode",
                          "exclusive", "data_format", "pooling_type",
                          "global_pooling", "adaptive", "padding_algorithm"},
                         {"Out"});
}

KernelSignature Pool3dGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("pool3d_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"ksize", "strides", "paddings", "ceil_mode",
                          "exclusive", "data_format", "pooling_type",
                          "global_pooling", "adaptive", "padding_algorithm"},
                         {"X@GRAD"});
}

}  // namespace phi

PD_REGISTER_ARG_MAPPING_FN(pool2d, phi::Pool2dOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool2d_grad, phi::Pool2dGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool2d_double_grad,
                           phi::Pool2dDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool3d, phi::Pool3dOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool3d_grad, phi::Pool3dGradOpArgumentMapping);
test/cpp/fluid/mkldnn/CMakeLists.txt
@@ -41,7 +41,7 @@ cc_test_old(
   recurrent_op_helper
   recurrent_op
   op_registry
-  pool_op
+  generated_static_op
   crop_op
   activation_op
   generated_op