Unverified commit 095e8b49, authored by Weilong Wu, committed by GitHub

[Eager, Performance optimization] fix pool2d interface under eager mode (#45685)

* [Eager] fix pool2d interface under eager mode

* add use_gpudnn for pool2d

* fix pool2d yaml conflicts

* fix pool2d yaml
Parent 2f19a364
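
This commit turns `use_gpudnn` into a regular argument of the `pool2d` op (instead of a fixed kernel attribute) and routes the eager-mode (dygraph) Python wrappers through the new `_C_ops.pool2d` call, which also removes the separate `pool2d_gpudnn_unused` op. A minimal sketch of exercising the updated eager path through the public API; tensor shape and pooling parameters are illustrative, and this assumes a Paddle build that already contains this change:

import paddle
import paddle.nn.functional as F

# Eager (dygraph) mode is the default in Paddle 2.x; after this change these
# calls reach _C_ops.pool2d, which now takes a trailing use_gpudnn argument.
x = paddle.randn([1, 3, 32, 32])                    # NCHW input, illustrative shape

y_max = F.max_pool2d(x, kernel_size=2, stride=2)    # cuDNN-eligible path
y_avg = F.avg_pool2d(x, kernel_size=2, stride=2)    # cuDNN-eligible path
print(y_max.shape, y_avg.shape)                     # [1, 3, 16, 16] for both
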
@@ -1978,26 +1978,17 @@
   backward : pixel_shuffle_grad
 
 - api : pool2d
-  args : (Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  args : (Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
   output : Tensor(out)
   infer_meta :
     func : Pool2DInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
   kernel :
     func : pool2d
-    use_gpudnn : true
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+    use_gpudnn : use_gpudnn
   backward : pool2d_grad
 
-# Used in adaptive_avg_pool2d API
-- api : pool2d_gpudnn_unused
-  args : (Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
-  output : Tensor(out)
-  infer_meta :
-    func : Pool2DInferMeta
-  kernel :
-    func : pool2d
-    use_gpudnn : false
-  backward : pool2d_grad_gpudnn_unused
-
 - api : pool3d
   args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
   output : Tensor(out)
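With `use_gpudnn : use_gpudnn`, the cuDNN choice is now driven by the op argument rather than fixed at kernel registration time, which is what allows the dedicated `pool2d_gpudnn_unused` op to be removed. A hedged sketch of the resulting eager call from Python, mirroring the calls in the diffs below; `_C_ops` is Paddle's internal eager binding rather than a public API, and the argument values here are illustrative:

import paddle
from paddle import _C_ops  # internal eager-op bindings; argument order per the diffs below

x = paddle.randn([1, 3, 8, 8])

# The final positional argument is the new use_gpudnn flag.
out_cudnn = _C_ops.pool2d(x, [2, 2], [2, 2], [0, 0], False, True,
                          "NCHW", "avg", False, False, "EXPLICIT", True)
out_plain = _C_ops.pool2d(x, [2, 2], [2, 2], [0, 0], False, True,
                          "NCHW", "avg", False, False, "EXPLICIT", False)
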
@@ -1748,38 +1748,30 @@
     func : pixel_shuffle_grad
 
 - backward_api : pool2d_double_grad
-  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
   output : Tensor(grad_out_grad)
   infer_meta :
     func : Pool2DInferMeta
+    param : [grad_x_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
   kernel :
     func : pool2d_double_grad
-    use_gpudnn : true
+    param : [grad_x_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+    use_gpudnn : use_gpudnn
 
 - backward_api : pool2d_grad
-  forward : pool2d(Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  forward : pool2d(Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
     param: [x]
   kernel :
     func : pool2d_grad
-    use_gpudnn : true
+    param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+    use_gpudnn : use_gpudnn
   backward : pool2d_double_grad
 
-- backward_api : pool2d_grad_gpudnn_unused
-  forward : pool2d_gpudnn_unused(Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  kernel :
-    func : pool2d_grad
-    use_gpudnn : false
-
 - backward_api : pool3d_grad
   forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
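The backward definitions take the same `use_gpudnn` argument, so the gradient kernels follow the forward pass's choice. A quick illustrative check that gradients still flow through the updated eager path; shapes are arbitrary, and this again assumes a build containing the change:

import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 4, 16, 16])
x.stop_gradient = False

y = F.avg_pool2d(x, kernel_size=2, stride=2)  # forward: pool2d
y.sum().backward()                            # backward: pool2d_grad
print(x.grad.shape)                           # [2, 4, 16, 16]
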
@@ -886,6 +886,13 @@ class Pool2D(layers.Layer):
     def forward(self, input):
         if _non_static_mode():
+            if not self._use_mkldnn and in_dygraph_mode():
+                return _C_ops.pool2d(input, self._pool_size, self._pool_stride,
+                                     self._pool_padding, self._ceil_mode,
+                                     self._exclusive, self._data_format,
+                                     self._pool_type, self._global_pooling,
+                                     False, "EXPLICIT", self._use_cudnn)
+
             attrs = ('pooling_type', self._pool_type, 'ksize', self._pool_size,
                      'global_pooling', self._global_pooling, 'strides',
                      self._pool_stride, 'paddings', self._pool_padding,
@@ -2268,7 +2268,8 @@ def pool2d(input,
     if in_dygraph_mode():
         return _C_ops.pool2d(input, pool_size, pool_stride, pool_padding,
                              ceil_mode, exclusive, data_format, pool_type,
-                             global_pooling, False, padding_algorithm)
+                             global_pooling, False, padding_algorithm,
+                             use_cudnn)
     op_type = 'pool2d'
     helper = LayerHelper(op_type, **locals())
     dtype = helper.input_dtype()
@@ -230,7 +230,13 @@ def avg_pool1d(x,
     # use 2d to implenment 1d should expand padding in advance.
     padding = _expand_low_nd_padding(padding)
 
-    if in_dynamic_mode():
+    if in_dygraph_mode():
+        output = _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode,
+                               exclusive, data_format, 'avg', False, False,
+                               padding_algorithm, True)
+        return squeeze(output, [2])
+
+    if _in_legacy_dygraph():
         output = _legacy_C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize',
                                       kernel_size, 'global_pooling', False,
                                       'strides', stride, 'paddings', padding,
@@ -340,11 +346,11 @@ def avg_pool2d(x,
                                                     channel_last,
                                                     ceil_mode=ceil_mode)
 
-    if in_dygraph_mode() or _in_legacy_dygraph():
+    if _non_static_mode():
         if in_dygraph_mode():
             output = _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode,
                                    exclusive, data_format, 'avg', False, False,
-                                   padding_algorithm)
+                                   padding_algorithm, True)
         else:
             output = _legacy_C_ops.pool2d(
                 x, 'pooling_type', 'avg', 'ksize', kernel_size,
@@ -588,7 +594,7 @@ def max_pool1d(x,
         else:
             pool_out = _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode,
                                      True, data_format, 'max', False, False,
-                                     padding_algorithm)
+                                     padding_algorithm, True)
         return squeeze(pool_out, [2])
 
     if _in_legacy_dygraph():
@@ -1127,7 +1133,7 @@ def max_pool2d(x,
         else:
             return _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode,
                                  True, data_format, 'max', False, False,
-                                 padding_algorithm)
+                                 padding_algorithm, True)
 
     if _in_legacy_dygraph():
         if return_mask:
@@ -1362,7 +1368,12 @@ def adaptive_avg_pool1d(x, output_size, name=None):
     pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
 
     x = unsqueeze(x, [2])
-    if in_dynamic_mode():
+    if in_dygraph_mode():
+        pool_out = _C_ops.pool2d(x, pool_size, [1, 1], [0, 0], False, True,
+                                 "NCHW", pool_type, False, True, "EXPLICIT",
+                                 False)
+        return squeeze(pool_out, [2])
+    if _in_legacy_dygraph():
         pool_out = _legacy_C_ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
                                         pool_size, 'adaptive', True)
         return squeeze(pool_out, [2])
@@ -1477,9 +1488,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
         output_size = utils._convert_to_tensor_list(output_size)
 
     if in_dygraph_mode():
-        return _C_ops.pool2d_gpudnn_unused(x, output_size, [1, 1], [0, 0],
-                                           False, True, data_format, 'avg',
-                                           False, True, "EXPLICIT")
+        return _C_ops.pool2d(x, output_size, [1, 1], [0, 0], False, True,
+                             data_format, 'avg', False, True, "EXPLICIT", False)
 
     if _in_legacy_dygraph():
         return _legacy_C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize',
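With `pool2d_gpudnn_unused` removed, the adaptive average pooling wrappers now call `_C_ops.pool2d` directly and simply pass `False` as the trailing `use_gpudnn` argument. The public adaptive pooling APIs are expected to behave as before; a small illustrative check, with arbitrary shapes and output sizes, assuming a build containing this change:

import paddle
import paddle.nn.functional as F

x1d = paddle.randn([1, 3, 32])       # NCL input
x2d = paddle.randn([1, 3, 32, 32])   # NCHW input

out1d = F.adaptive_avg_pool1d(x1d, output_size=16)  # eager: pool2d with use_gpudnn=False
out2d = F.adaptive_avg_pool2d(x2d, output_size=7)
print(out1d.shape, out2d.shape)                     # [1, 3, 16] and [1, 3, 7, 7]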