Unverified commit 7dca4f5c authored by Weilong Wu, committed by GitHub

[Eager, Performance optimization] fix pool3d under eager mode (#45710)

* [Eager, Performance optimization] fix pool3d under eager mode

* polish code
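
In short, the change adds a trailing `use_gpudnn` bool to the generated `pool3d` eager API (and its backward entry) and updates the Python call sites to pass it explicitly, instead of the YAML hard-coding `use_gpudnn : true`. A minimal sketch of the call-site change, with the argument list taken from the diff below:

    # Before: the eager call omitted the flag; the YAML hard-coded use_gpudnn : true
    out = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
                        exclusive, data_format, 'avg', False, False,
                        padding_algorithm)

    # After: the caller passes the flag as the last positional argument
    # (True here, preserving the previous GPU behaviour)
    out = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
                        exclusive, data_format, 'avg', False, False,
                        padding_algorithm, True)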
Parent 638965c5
@@ -1999,13 +1999,15 @@
backward : pool2d_grad_gpudnn_unused
- api : pool3d
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
output : Tensor(out)
infer_meta :
func : PoolInferMeta
param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
kernel :
func : pool3d
use_gpudnn : true
param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
use_gpudnn : use_gpudnn
backward : pool3d_grad
- api : pow
......
@@ -1781,15 +1781,16 @@
use_gpudnn : false
- backward_api : pool3d_grad
forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm, bool use_gpudnn)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pool3d_grad
use_gpudnn : true
param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
use_gpudnn : use_gpudnn
- backward_api : pow_grad
forward : pow(Tensor x, Scalar s) -> Tensor(out)
......
@@ -462,48 +462,41 @@ def avg_pool3d(x,
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
if in_dygraph_mode() or _in_legacy_dygraph():
if in_dygraph_mode():
output = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
exclusive, data_format, 'avg', False, False,
padding_algorithm)
if _in_legacy_dygraph():
output = _legacy_C_ops.pool3d(
x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides',
stride, 'paddings', padding, 'global_pooling', False,
'padding_algorithm', padding_algorithm, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
exclusive, 'data_format', data_format)
if divisor_override is None:
return output
else:
_check_instance(divisor_override, "divisor_override")
return output * (kernel_size[0] * kernel_size[1] *
kernel_size[2]) / divisor_override
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
helper.append_op(type=op_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'avg',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
if in_dygraph_mode():
pool_out = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
exclusive, data_format, 'avg', False, False,
padding_algorithm, True)
elif _in_legacy_dygraph():
pool_out = _legacy_C_ops.pool3d(
x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride,
'paddings', padding, 'global_pooling', False, 'padding_algorithm',
padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
data_format)
else:
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
dtype = helper.input_dtype(input_param_name='x')
pool_out = helper.create_variable_for_type_inference(dtype)
outputs = {"Out": pool_out}
helper.append_op(type=op_type,
inputs={"X": x},
outputs=outputs,
attrs={
"pooling_type": 'avg',
"ksize": kernel_size,
"global_pooling": False,
"strides": stride,
"paddings": padding,
"padding_algorithm": padding_algorithm,
"use_cudnn": True,
"ceil_mode": ceil_mode,
"use_mkldnn": False,
"exclusive": exclusive,
"data_format": data_format,
})
if divisor_override is None:
return pool_out
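
For context, the divisor_override branch removed from the old dygraph block above rescales the averaged output by the pooling-window volume divided by the override. A minimal sketch of that arithmetic (names as in the diff; the _check_instance validation is omitted here):

    if divisor_override is None:
        result = pool_out
    else:
        # avg pooling divides by kernel_size[0]*kernel_size[1]*kernel_size[2];
        # overriding the divisor rescales by window_volume / divisor_override
        result = pool_out * (kernel_size[0] * kernel_size[1] *
                             kernel_size[2]) / divisor_override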
@@ -1275,7 +1268,7 @@ def max_pool3d(x,
else:
return _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode,
True, data_format, 'max', False, False,
padding_algorithm)
padding_algorithm, True)
if _in_legacy_dygraph():
if return_mask:
@@ -1601,7 +1594,10 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
if output_size[2] is None:
output_size[2] = in_w
if in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.pool3d(x, output_size, [1, 1, 1], [0, 0, 0], False, True,
data_format, 'avg', False, True, "EXPLICIT", False)
elif _in_legacy_dygraph():
return _legacy_C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize',
output_size, 'global_pooling', False,
'adaptive', True, 'data_format',
......
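
The public pooling APIs keep their signatures; only the internal eager-mode calls gain the trailing use_gpudnn argument. A minimal usage sketch (the input shape and sizes are illustrative assumptions, not part of the commit):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 8, 8, 8])                          # assumed NCDHW input
    y_avg = F.avg_pool3d(x, kernel_size=2, stride=2)          # -> [2, 3, 4, 4, 4]
    y_max = F.max_pool3d(x, kernel_size=2, stride=2)          # -> [2, 3, 4, 4, 4]
    y_ada = F.adaptive_avg_pool3d(x, output_size=[4, 4, 4])   # -> [2, 3, 4, 4, 4]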