Unverified · Commit d050c188 authored by W wangzhen38, committed by GitHub

[remove fluid] Pool2D (#48707)

Parent dd304f31
@@ -26,7 +26,6 @@ from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, ReLU6
from paddle.nn import Linear, Conv2D, Softmax, BatchNorm
-from paddle.fluid.dygraph.nn import Pool2D
from paddle.fluid.log_helper import get_logger
from imperative_test_utils import (
......
@@ -50,7 +50,6 @@ from paddle import _C_ops, _legacy_C_ops
__all__ = [
    'Conv3D',
-    'Pool2D',
    'Linear',
    'BatchNorm',
    'Embedding',
@@ -506,238 +505,6 @@ class Conv3DTranspose(layers.Layer):
        return self._helper.append_activation(pre_act, act=self._act)
class Pool2D(layers.Layer):
r"""
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
The pooling2d operation calculates the output based on the input and the pool_type, pool_size, pool_stride, and
pool_padding parameters. Input and output are in NCHW format, where N is batch size, C is the number of channels,
H is the height of the feature map, and W is the width of the feature map.
The parameters ksize, strides, and paddings each contain two elements, which represent height and width, respectively.
The input (X) size and output (Out) size may be different.
Example:
- Input:
Input shape: :math:`(N, C, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C, H_{out}, W_{out})`
If ``ceil_mode`` = False:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
If ``ceil_mode`` = True:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
If ``exclusive`` = False:
.. math::
hstart &= i * strides[0] - paddings[0] \\\\
hend &= hstart + ksize[0] \\\\
wstart &= j * strides[1] - paddings[1] \\\\
wend &= wstart + ksize[1] \\\\
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
If ``exclusive`` = True:
.. math::
hstart &= max(0, i * strides[0] - paddings[0])\\\\
hend &= min(H, hstart + ksize[0]) \\\\
wstart &= max(0, j * strides[1] - paddings[1]) \\\\
wend & = min(W, wstart + ksize[1]) \\\\
Output(i ,j) & = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Parameters:
pool_size (int or list or tuple, optional): The pool kernel size. If the pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel is a square with side length ``pool_size``. Default: -1.
pool_type(str, optional) : The pooling type, can be "max" for max-pooling and "avg" for average-pooling.
Default: max.
pool_stride (int or list or tuple, optional): The pool stride size. If the pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,
the same stride ``pool_stride`` is used for both height and width. Default: 1.
pool_padding (int or list or tuple, optional): The padding size for the pooling operation.
If ``pool_padding`` is a tuple or list,
it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).
Otherwise, the same padding ``pool_padding`` is used for both height and width. Default: 0.
global_pooling (bool, optional): Whether to use global pooling. If global_pooling is True,
the kernel size and paddings will be ignored. Default: False.
use_cudnn (bool, optional): Whether to use the cuDNN kernel; cuDNN must be installed. Default: True.
ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.
If it is set to False, the floor function will be used. Default: False.
exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
``[batch_size, input_channels, input_height, input_width]``. When it is `"NHWC"`, the data is
stored in the order of: ``[batch_size, input_height, input_width, input_channels]``
Returns:
None
Raises:
ValueError: If ``pool_type`` is neither "max" nor "avg".
ValueError: If ``global_pooling`` is False and ``pool_size`` is -1.
ValueError: If ``use_cudnn`` is not a bool value.
ValueError: If ``data_format`` is neither "NCHW" nor "NHWC".
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
with fluid.dygraph.guard():
data = np.random.random((3, 32, 32, 5)).astype('float32')
pool2d = fluid.dygraph.Pool2D(pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
pool2d_res = pool2d(to_variable(data))
"""
def __init__(
self,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
exclusive=True,
data_format="NCHW",
):
data_format = data_format.upper()  # support NHWC, nhwc, etc.
pool_type = pool_type.lower()  # support max, Max, etc.
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'."
% str(pool_type)
)
if global_pooling is False and pool_size == -1:
raise ValueError(
"When the global_pooling is False, pool_size must be passed "
"and be a valid value. Received pool_size: " + str(pool_size)
)
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format)
)
super().__init__()
self._pool_type = pool_type
self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
self._pool_padding = utils.convert_to_list(
pool_padding, 2, 'pool_padding'
)
self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
self._global_pooling = global_pooling
self._use_cudnn = use_cudnn
self._ceil_mode = ceil_mode
self._exclusive = exclusive
self._data_format = data_format
self._l_type = 'pool2d'
def forward(self, input):
if _non_static_mode():
if not self._use_mkldnn and in_dygraph_mode():
input = input._use_gpudnn(self._use_cudnn)
return _C_ops.pool2d(
input,
self._pool_size,
self._pool_stride,
self._pool_padding,
self._ceil_mode,
self._exclusive,
self._data_format,
self._pool_type,
self._global_pooling,
False,
"EXPLICIT",
)
attrs = (
'pooling_type',
self._pool_type,
'ksize',
self._pool_size,
'global_pooling',
self._global_pooling,
'strides',
self._pool_stride,
'paddings',
self._pool_padding,
'use_cudnn',
self._use_cudnn,
'ceil_mode',
self._ceil_mode,
'use_mkldnn',
self._use_mkldnn,
'exclusive',
self._exclusive,
'data_format',
self._data_format,
)
return _legacy_C_ops.pool2d(input, *attrs)
check_variable_and_dtype(
input,
'input',
['int8', 'uint8', 'float16', 'float32', 'float64'],
'Pool2D',
)
attrs = {
"pooling_type": self._pool_type,
"ksize": self._pool_size,
"global_pooling": self._global_pooling,
"strides": self._pool_stride,
"paddings": self._pool_padding,
"use_cudnn": self._use_cudnn,
"ceil_mode": self._ceil_mode,
"use_mkldnn": self._use_mkldnn,
"exclusive": self._exclusive,
"data_format": self._data_format,
}
inputs = {"X": [input]}
pool_out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type=self._l_type,
inputs=inputs,
outputs={"Out": pool_out},
attrs=attrs,
)
return pool_out
class Linear(layers.Layer):
    """
......
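The deleted docstring above spells out how pool_size, pool_stride, pool_padding, ceil_mode, and global_pooling determine the output shape. Below is a minimal sketch of the same behaviour with the paddle.nn layers that replace Pool2D throughout this diff; the tensor shapes and parameter values are illustrative assumptions, not part of the commit.

import paddle

x = paddle.rand([3, 5, 32, 32])  # NCHW: batch=3, channels=5, 32x32 feature map

# Pool2D(pool_size=2, pool_type='max', pool_stride=1) roughly corresponds to:
max_pool = paddle.nn.MaxPool2D(kernel_size=2, stride=1, padding=0)
# H_out = (32 - 2 + 2*0) / 1 + 1 = 31 with ceil_mode=False (floor division)
print(max_pool(x).shape)  # [3, 5, 31, 31]

# Pool2D(pool_type='avg', global_pooling=True) roughly corresponds to
# adaptive average pooling down to a 1x1 map:
global_avg = paddle.nn.AdaptiveAvgPool2D(1)
print(global_avg(x).shape)  # [3, 5, 1, 1]

For non-global average pooling, paddle.nn.AvgPool2D exposes the same ceil_mode and exclusive switches described in the removed docstring.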
@@ -114,9 +114,7 @@ class SqueezeExcitation(fluid.dygraph.Layer):
        super().__init__()
        self._num_channels = num_channels
-        self._pool = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=0, pool_type='avg', global_pooling=True
-        )
+        self._pool = paddle.nn.AdaptiveAvgPool2D(1)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self._squeeze = Linear(
            num_channels,
@@ -295,9 +293,7 @@ class SeResNeXt(fluid.dygraph.Layer):
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1
......
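Every global average Pool2D in these models becomes paddle.nn.AdaptiveAvgPool2D(1), which averages over the full spatial extent regardless of the input size. A quick check of that equivalence, with an assumed tensor shape:

import paddle

x = paddle.rand([2, 64, 7, 7])
y1 = paddle.nn.AdaptiveAvgPool2D(1)(x)   # [2, 64, 1, 1]
y2 = x.mean(axis=[2, 3], keepdim=True)   # global average computed by hand
print(paddle.allclose(y1, y2))           # bool Tensor: True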
@@ -69,13 +69,10 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
            bias_attr=None,
        )
-        self._pool2d = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=pool_size,
-            pool_type=pool_type,
-            pool_stride=pool_stride,
-            pool_padding=pool_padding,
-            global_pooling=global_pooling,
-            use_cudnn=use_cudnn,
-        )
+        self._pool2d = paddle.nn.MaxPool2D(
+            kernel_size=pool_size,
+            stride=pool_stride,
+            padding=pool_padding,
+        )
    def forward(self, inputs):
......
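The max-pooling layers in these test networks are migrated with a fixed argument mapping: pool_size becomes kernel_size, pool_stride becomes stride, and pool_padding becomes padding; pool_type='max' is expressed by choosing MaxPool2D itself, while global_pooling and use_cudnn have no constructor counterpart (kernel selection is automatic). A small sketch with assumed values:

import paddle

pool_size, pool_stride, pool_padding = 2, 2, 0   # illustrative values

pool = paddle.nn.MaxPool2D(
    kernel_size=pool_size, stride=pool_stride, padding=pool_padding
)
print(pool(paddle.rand([1, 3, 28, 28])).shape)   # [1, 3, 14, 14]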
@@ -256,9 +256,7 @@ class MobileNetV1(fluid.dygraph.Layer):
        )
        self.dwsl.append(dws6)
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        self.out = Linear(
            int(1024 * scale),
@@ -424,9 +422,7 @@ class MobileNetV2(fluid.dygraph.Layer):
        )
        # 4. pool
-        self._pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_type='avg', global_pooling=True
-        )
+        self._pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        # 5. fc
        tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
......
@@ -184,9 +184,7 @@ class ResNet(fluid.dygraph.Layer):
        )
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
......
@@ -184,9 +184,7 @@ class ResNet(paddle.nn.Layer):
        )
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
......
@@ -127,9 +127,7 @@ class SqueezeExcitation(fluid.dygraph.Layer):
        super().__init__()
        self._num_channels = num_channels
-        self._pool = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=0, pool_type='avg', global_pooling=True
-        )
+        self._pool = paddle.nn.AdaptiveAvgPool2D(1)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self._fc = Linear(
            num_channels,
@@ -309,9 +307,7 @@ class SeResNeXt(fluid.dygraph.Layer):
        num_channels = bottleneck_block._num_channels_out
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1
......
@@ -185,9 +185,7 @@ class TSM_ResNet(fluid.dygraph.Layer):
        num_channels = int(bottleneck_block._num_channels_out)
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)
......
@@ -54,13 +54,10 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
            bias_attr=None,
        )
-        self._pool2d = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=pool_size,
-            pool_type=pool_type,
-            pool_stride=pool_stride,
-            pool_padding=pool_padding,
-            global_pooling=global_pooling,
-            use_cudnn=use_cudnn,
-        )
+        self._pool2d = paddle.nn.MaxPool2D(
+            kernel_size=pool_size,
+            stride=pool_stride,
+            padding=pool_padding,
+        )
    def forward(self, inputs):
......
@@ -57,13 +57,10 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
            bias_attr=bias_attr,
        )
-        self._pool2d = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=pool_size,
-            pool_type=pool_type,
-            pool_stride=pool_stride,
-            pool_padding=pool_padding,
-            global_pooling=global_pooling,
-            use_cudnn=use_cudnn,
-        )
+        self._pool2d = paddle.nn.MaxPool2D(
+            kernel_size=pool_size,
+            stride=pool_stride,
+            padding=pool_padding,
+        )
    def forward(self, inputs):
......
@@ -61,13 +61,10 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
            bias_attr=None,
        )
-        self._pool2d = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=pool_size,
-            pool_type=pool_type,
-            pool_stride=pool_stride,
-            pool_padding=pool_padding,
-            global_pooling=global_pooling,
-            use_cudnn=use_cudnn,
-        )
+        self._pool2d = paddle.nn.MaxPool2D(
+            kernel_size=pool_size,
+            stride=pool_stride,
+            padding=pool_padding,
+        )
    def forward(self, inputs):
......
@@ -59,13 +59,10 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
            weight_attr=None,
            bias_attr=None,
        )
-        self._pool2d = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=pool_size,
-            pool_type=pool_type,
-            pool_stride=pool_stride,
-            pool_padding=pool_padding,
-            global_pooling=global_pooling,
-            use_cudnn=use_cudnn,
-        )
+        self._pool2d = paddle.nn.MaxPool2D(
+            kernel_size=pool_size,
+            stride=pool_stride,
+            padding=pool_padding,
+        )
    def forward(self, inputs):
......
@@ -215,9 +215,7 @@ class ResNet(fluid.Layer):
        )
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        self.pool2d_avg_output = num_filters[-1] * 4 * 1 * 1
......
@@ -104,9 +104,7 @@ class SqueezeExcitation(fluid.dygraph.Layer):
        super().__init__()
        self._num_channels = num_channels
-        self._pool = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=0, pool_type='avg', global_pooling=True
-        )
+        self._pool = paddle.nn.AdaptiveAvgPool2D(1)
        self._squeeze = paddle.nn.Linear(
            num_channels,
            num_channels // reduction_ratio,
@@ -286,9 +284,7 @@ class SeResNeXt(fluid.dygraph.Layer):
        num_channels = bottleneck_block._num_channels_out
        self.bottleneck_block_list.append(bottleneck_block)
        shortcut = True
-        self.pool2d_avg = paddle.fluid.dygraph.nn.Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True
-        )
+        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)
        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)
......
@@ -41,10 +41,10 @@ class LeNetDygraph(paddle.nn.Layer):
        self.features = Sequential(
            Conv2D(1, 6, 3, stride=1, padding=1),
            ReLU(),
-            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
+            paddle.nn.MaxPool2D(2, 2),
            Conv2D(6, 16, 5, stride=1, padding=0),
            ReLU(),
-            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
+            paddle.nn.MaxPool2D(2, 2),
        )
        if num_classes > 0:
@@ -93,10 +93,10 @@ class LeNetListInput(paddle.nn.Layer):
        self.features = Sequential(
            self.cov,
            ReLU(),
-            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
+            paddle.nn.MaxPool2D(2, 2),
            Conv2D(6, 16, 5, stride=1, padding=0),
            ReLU(),
-            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
+            paddle.nn.MaxPool2D(2, 2),
        )
        if num_classes > 0:
......
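In the LeNet tests the positional Pool2D(2, 'max', 2) (pool_size, pool_type, pool_stride) becomes the positional MaxPool2D(2, 2) (kernel_size, stride). A sketch of the migrated feature stack, with an assumed MNIST-sized input:

import paddle
from paddle.nn import Conv2D, MaxPool2D, ReLU, Sequential

features = Sequential(
    Conv2D(1, 6, 3, stride=1, padding=1),
    ReLU(),
    MaxPool2D(2, 2),   # kernel_size=2, stride=2, as in the diff above
    Conv2D(6, 16, 5, stride=1, padding=0),
    ReLU(),
    MaxPool2D(2, 2),
)
x = paddle.rand([4, 1, 28, 28])    # assumed input shape for illustration
print(features(x).shape)           # [4, 16, 5, 5]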