Unverified commit 7dca4f5c, authored by Weilong Wu, committed by GitHub

[Eager, Performance optimization] fix pool3d under eager mode (#45710)

* [Eager, Performance optimization] fix pool3d under eager mode

* polish code
Parent 638965c5
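For context, a minimal usage sketch (not part of this commit) of the three public wrappers whose eager-mode paths are updated below; it assumes a Paddle build that includes this commit and runs in the default dygraph (eager) mode:

import paddle
import paddle.nn.functional as F

# These calls dispatch through the _C_ops.pool3d eager path touched by this commit.
x = paddle.rand([2, 3, 8, 8, 8])  # NCDHW layout

avg = F.avg_pool3d(x, kernel_size=2, stride=2, padding=0)
mx = F.max_pool3d(x, kernel_size=2, stride=2, padding=0)
ada = F.adaptive_avg_pool3d(x, output_size=[4, 4, 4])

print(avg.shape, mx.shape, ada.shape)  # [2, 3, 4, 4, 4] for each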
@@ -1999,13 +1999,15 @@
   backward : pool2d_grad_gpudnn_unused
 
 - api : pool3d
-  args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
   output : Tensor(out)
   infer_meta :
     func : PoolInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
   kernel :
     func : pool3d
-    use_gpudnn : true
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+    use_gpudnn : use_gpudnn
   backward : pool3d_grad
 
 - api : pow
......
@@ -1781,15 +1781,16 @@
     use_gpudnn : false
 
 - backward_api : pool3d_grad
-  forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
     param: [x]
   kernel :
     func : pool3d_grad
-    use_gpudnn : true
+    param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+    use_gpudnn : use_gpudnn
 
 - backward_api : pow_grad
   forward : pow(Tensor x, Scalar s) -> Tensor(out)
......
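The two yaml hunks above replace the fixed `use_gpudnn : true` kernel attribute with a `use_gpudnn` argument declared on the op, while the added `param` lists keep the kernels' original parameter sets. As an illustration only (mirroring the avg_pool3d eager branch in the Python diff below, and assuming a Paddle build that includes this commit), the positional arguments of the generated eager op line up with the new `args` entry like this:

import paddle
from paddle import _C_ops  # internal eager ops module

x = paddle.rand([2, 3, 8, 8, 8])
# Positional order follows the yaml `args` entry; the trailing bool is the
# new use_gpudnn flag that used to be hard-coded in the kernel section.
out = _C_ops.pool3d(x,            # Tensor x
                    [2, 2, 2],    # int[] kernel_size
                    [2, 2, 2],    # int[] strides
                    [0, 0, 0],    # int[] paddings
                    False,        # bool ceil_mode
                    True,         # bool exclusive
                    "NCDHW",      # str data_format
                    'avg',        # str pooling_type
                    False,        # bool global_pooling
                    False,        # bool adaptive
                    "EXPLICIT",   # str padding_algorithm
                    True)         # bool use_gpudnn (new argument)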
@@ -462,48 +462,41 @@ def avg_pool3d(x,
     _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
     _check_value_limitation(stride, "stride", min_limit=1e-3)
 
-    if in_dygraph_mode() or _in_legacy_dygraph():
-        if in_dygraph_mode():
-            output = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
-                                   exclusive, data_format, 'avg', False, False,
-                                   padding_algorithm)
-        if _in_legacy_dygraph():
-            output = _legacy_C_ops.pool3d(
-                x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides',
-                stride, 'paddings', padding, 'global_pooling', False,
-                'padding_algorithm', padding_algorithm, 'use_cudnn', True,
-                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
-                exclusive, 'data_format', data_format)
-        if divisor_override is None:
-            return output
-        else:
-            _check_instance(divisor_override, "divisor_override")
-            return output * (kernel_size[0] * kernel_size[1] *
-                             kernel_size[2]) / divisor_override
-
-    op_type = "pool3d"
-    helper = LayerHelper(op_type, **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
-    dtype = helper.input_dtype(input_param_name='x')
-    pool_out = helper.create_variable_for_type_inference(dtype)
-    outputs = {"Out": pool_out}
-
-    helper.append_op(type=op_type,
-                     inputs={"X": x},
-                     outputs=outputs,
-                     attrs={
-                         "pooling_type": 'avg',
-                         "ksize": kernel_size,
-                         "global_pooling": False,
-                         "strides": stride,
-                         "paddings": padding,
-                         "padding_algorithm": padding_algorithm,
-                         "use_cudnn": True,
-                         "ceil_mode": ceil_mode,
-                         "use_mkldnn": False,
-                         "exclusive": exclusive,
-                         "data_format": data_format,
-                     })
+    if in_dygraph_mode():
+        pool_out = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
+                                 exclusive, data_format, 'avg', False, False,
+                                 padding_algorithm, True)
+    elif _in_legacy_dygraph():
+        pool_out = _legacy_C_ops.pool3d(
+            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride,
+            'paddings', padding, 'global_pooling', False, 'padding_algorithm',
+            padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
+            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
+            data_format)
+    else:
+        op_type = "pool3d"
+        helper = LayerHelper(op_type, **locals())
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
+        dtype = helper.input_dtype(input_param_name='x')
+        pool_out = helper.create_variable_for_type_inference(dtype)
+        outputs = {"Out": pool_out}
+
+        helper.append_op(type=op_type,
+                         inputs={"X": x},
+                         outputs=outputs,
+                         attrs={
+                             "pooling_type": 'avg',
+                             "ksize": kernel_size,
+                             "global_pooling": False,
+                             "strides": stride,
+                             "paddings": padding,
+                             "padding_algorithm": padding_algorithm,
+                             "use_cudnn": True,
+                             "ceil_mode": ceil_mode,
+                             "use_mkldnn": False,
+                             "exclusive": exclusive,
+                             "data_format": data_format,
+                         })
 
     if divisor_override is None:
         return pool_out
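With the dygraph branches now assigning to pool_out instead of returning early, the divisor_override handling after this hunk is shared by all three code paths: the averaged value is multiplied by the pooling window volume and divided by the override. A small usage example (assuming a build with this commit):

import paddle
import paddle.nn.functional as F

x = paddle.ones([1, 1, 2, 2, 2])
# The 2x2x2 window sums to 8; with divisor_override=4 the result is 8 / 4.
y = F.avg_pool3d(x, kernel_size=2, stride=2, divisor_override=4)
print(float(y))  # 2.0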
@@ -1275,7 +1268,7 @@ def max_pool3d(x,
         else:
             return _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
                                  True, data_format, 'max', False, False,
-                                 padding_algorithm)
+                                 padding_algorithm, True)
 
     if _in_legacy_dygraph():
         if return_mask:
@@ -1601,7 +1594,10 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
         if output_size[2] is None:
             output_size[2] = in_w
 
-    if in_dynamic_mode():
+    if in_dygraph_mode():
+        return _C_ops.pool3d(x, output_size, [1, 1, 1], [0, 0, 0], False, True,
+                             data_format, 'avg', False, True, "EXPLICIT", False)
+    elif _in_legacy_dygraph():
         return _legacy_C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize',
                                     output_size, 'global_pooling', False,
                                     'adaptive', True, 'data_format',
......