未验证 提交 8986a821 编写于 作者: B Bai Yifan 提交者: GitHub

fix adaptive gpu grad bug, add doc refine (#26660)

上级 98e057bb
......@@ -111,12 +111,11 @@ __global__ void KernelPool2DGrad(
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
phstart = AdaptStartIndex(h_offset, output_height, input_height);
phend = AdaptEndIndex(h_offset, output_height, input_height);
pwstart = AdaptStartIndex(w_offset, output_width, input_width);
pwend = AdaptEndIndex(w_offset, output_width, input_width);
} else {
phstart = (h_offset < ksize_height)
? 0
......@@ -159,6 +158,7 @@ __global__ void KernelPool2DGrad(
pool_size = exclusive ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
}
int output_sub_idx = channel_last
? (ph * output_width + pw) * channels + offsetC
: ph * output_width + pw;
......@@ -689,15 +689,14 @@ __global__ void KernelPool3DGrad(
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = d_offset * output_depth / input_depth;
pdend =
min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
pdstart = AdaptStartIndex(d_offset, output_depth, input_depth);
pdend = AdaptEndIndex(d_offset, output_depth, input_depth);
phstart = AdaptStartIndex(h_offset, output_height, input_height);
phend = AdaptEndIndex(h_offset, output_height, input_height);
pwstart = AdaptStartIndex(w_offset, output_width, input_width);
pwend = AdaptEndIndex(w_offset, output_width, input_width);
} else {
pdstart = (d_offset < ksize_depth)
? 0
......
......@@ -517,6 +517,19 @@ class TestAvgPoolAdaptive(TestCase1):
self.adaptive = True
class TestAvgPoolAdaptiveAsyOutSize(TestCase1):
    """Average pool2d test: adaptive mode with an asymmetric kernel,
    so the two output spatial sizes differ."""

    def init_test_case(self):
        # Asymmetric 2x3 kernel exercises unequal adaptive output sizes.
        self.ksize = [2, 3]
        self.strides = [1, 1]
        self.paddings = [0, 0, 0, 0]

    def init_shape(self):
        # NCHW input: batch 8, 3 channels, 6x6 spatial.
        self.shape = [8, 3, 6, 6]

    def init_adaptive(self):
        # Switch the op into adaptive-pooling mode.
        self.adaptive = True
#-------test pool2d with asymmetric padding-----
......
......@@ -453,6 +453,18 @@ class TestAvgPoolAdaptive(TestCase1):
self.adaptive = True
class TestAvgPoolAdaptiveAsyOutSize(TestCase1):
    """Average pool3d test: adaptive mode with an asymmetric kernel,
    so the output depth/height/width differ."""

    def init_test_case(self):
        # Asymmetric 2x2x3 kernel exercises unequal adaptive output sizes.
        self.ksize = [2, 2, 3]
        self.strides = [1, 1, 1]

    def init_shape(self):
        # NCDHW input: batch 8, 3 channels, 2x4x4 volume.
        self.shape = [8, 3, 2, 4, 4]

    def init_adaptive(self):
        # Switch the op into adaptive-pooling mode.
        self.adaptive = True
#-------test pool3d with asymmetric padding------
class TestPool3d_Op_AsyPadding(TestPool3d_Op):
def init_test_case(self):
......
......@@ -1238,7 +1238,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
Args:
x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor.
The data type can be float16, float32, float64, int32 or int64.
The data type can be float32 or float64.
output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two elements, (H, W). H and W can be either an int, or None which means
the size will be the same as that of the input.
......@@ -1285,9 +1285,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
# pool_out.shape is [2, 3, 3, 3]
"""
if not in_dygraph_mode():
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_avg_pool2d')
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'adaptive_avg_pool2d')
check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
if data_format not in ["NCHW", "NHWC"]:
......@@ -1363,7 +1362,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
Args:
x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor.
The data type can be float16, float32, float64, int32 or int64.
The data type can be float32 or float64.
output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain three elements, (D, H, W). D, H and W can be either an int, or None which means
the size will be the same as that of the input.
......@@ -1413,9 +1412,8 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
# pool_out.shape is [2, 3, 3, 3, 3]
"""
if not in_dygraph_mode():
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
'adaptive_avg_pool3d')
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'adaptive_avg_pool3d')
check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')
if data_format not in ["NCDHW", "NDHWC"]:
......
......@@ -67,7 +67,7 @@ class AdaptiveAvgPool2d(layers.Layer):
None by default.
Shape:
x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64.
x (Tensor): The input tensor of adaptive avg pool2d operator, which is a 4-D tensor. The data type can be float32 or float64.
output (Tensor): The output tensor of adaptive avg pool2d operator, which is a 4-D tensor. The data type is same as input x.
Returns:
......@@ -152,7 +152,7 @@ class AdaptiveAvgPool3d(layers.Layer):
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Shape:
x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor. The data type can be float16, float32, float64, int32 or int64.
x (Tensor): The input tensor of adaptive avg pool3d operator, which is a 5-D tensor. The data type can be float32 or float64.
output (Tensor): The output tensor of adaptive avg pool3d operator, which is a 5-D tensor. The data type is same as input x.
Returns:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册