未验证 提交 96331f74 编写于 作者: L LielinJiang 提交者: GitHub

fix conv1d padding (#26921)

上级 95e1434b
......@@ -44,7 +44,7 @@ class Conv1dTestCase(unittest.TestCase):
self.spartial_shape = spartial_shape
self.filter_size = filter_size
self.data_format = data_format
self.channel_last = (self.data_format == "NHWC")
self.channel_last = (self.data_format == "NLC")
self.padding = padding
self.padding_mode = padding_mode
......@@ -147,6 +147,14 @@ class Conv1dErrorTestCase(Conv1dTestCase):
self.paddle_nn_layer()
class Conv1dTypeErrorTestCase(Conv1dTestCase):
    """Error-case variant of Conv1dTestCase: the exercised layer call is
    expected to raise ``TypeError`` (e.g. invalid padding/padding_mode
    combinations) when run in dygraph mode on the CPU."""

    def runTest(self):
        # Enter dygraph guard and the assertRaises context together;
        # `with a, b:` is equivalent to the nested form.
        cpu = fluid.CPUPlace()
        with dg.guard(cpu), self.assertRaises(TypeError):
            self.paddle_nn_layer()
def add_cases(suite):
suite.addTest(Conv1dTestCase(methodName='runTest'))
suite.addTest(Conv1dTestCase(methodName='runTest', stride=[1], dilation=2))
......@@ -161,6 +169,7 @@ def add_cases(suite):
Conv1dTestCase(
methodName='runTest', padding=2, data_format='NLC'))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=[1]))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=[1, 2]))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=2))
suite.addTest(Conv1dTestCase(methodName='runTest'))
suite.addTest(
......@@ -178,7 +187,7 @@ def add_cases(suite):
def add_error_cases(suite):
suite.addTest(
Conv1dErrorTestCase(
Conv1dTypeErrorTestCase(
methodName='runTest', padding_mode="reflect", padding="valid"))
suite.addTest(
Conv1dErrorTestCase(
......
......@@ -201,6 +201,7 @@ def add_cases(suite):
ConvTranspose1dTestCase(
methodName='runTest', data_format="NLC", stride=3,
output_padding=2))
suite.addTest(ConvTranspose1dTestCase(methodName='runTest', padding=[1, 2]))
def add_error_cases(suite):
......
......@@ -232,7 +232,7 @@ def conv1d(x,
raise ValueError("Attr(data_format) should be 'NCL' or 'NLC'. "
"Received Attr(data_format): {}.".format(data_format))
channel_last = (data_format == "NHWC")
channel_last = (data_format == "NLC")
channel_dim = -1 if channel_last else 1
conv2d_data_format = "NHWC" if channel_last else "NCHW"
num_channels = x.shape[channel_dim]
......@@ -399,7 +399,7 @@ def conv2d(x,
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
`data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
[pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
when `data_format` is `"NHWC"`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel
......@@ -733,20 +733,31 @@ def conv_transpose1d(x,
stride = utils.convert_to_list(stride, 1, 'stride') + [1]
dilation = utils.convert_to_list(dilation, 1, 'dilation') + [1]
output_padding = utils.convert_to_list(output_padding, 1,
'output_padding') + [0]
if output_padding[0] > stride[0]:
raise ValueError(
"The size of output_padding should not be greater than stride."
"But got output_padding={} and stride={}".format(output_padding[0],
stride[0]))
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 1, 'output_size') + [1]
else:
raise ValueError("output_size should be int, or list, tuple of ints")
if output_padding != 0:
raise ValueError('output_padding option is mutually exclusive with '
'output_size')
if isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 1,
'output_size') + [1]
else:
raise ValueError(
"output_size should be int, or list, tuple of ints")
if output_padding == 0:
output_padding = []
else:
output_padding = utils.convert_to_list(output_padding, 1,
'output_padding') + [0]
if len(output_padding) > 0 and output_padding[0] > stride[0]:
raise ValueError(
"The size of output_padding should not be greater than stride."
"But got output_padding={} and stride={}".format(output_padding[0],
stride[0]))
op_type = 'conv2d_transpose'
num_filters = weight.shape[1]
......@@ -761,16 +772,17 @@ def conv_transpose1d(x,
weight = nn.unsqueeze(input=weight, axes=[-1])
if in_dygraph_mode():
attrs = ('output_size', output_size, 'strides', stride, 'paddings',
padding, 'padding_algorithm', padding_algorithm, 'dilations',
dilation, 'groups', groups, 'use_cudnn', use_cudnn,
'data_format', conv2d_data_format)
attrs = ('output_padding', output_padding, 'output_size', output_size,
'strides', stride, 'paddings', padding, 'padding_algorithm',
padding_algorithm, 'dilations', dilation, 'groups', groups,
'use_cudnn', use_cudnn, 'data_format', conv2d_data_format)
out = getattr(core.ops, op_type)(x, weight, *attrs)
if bias is not None:
out = nn.elementwise_add(out, bias, axis=channel_dim)
else:
inputs = {'Input': [x], 'Filter': [weight]}
attrs = {
'output_padding': output_padding,
'output_size': output_size,
'strides': stride,
'paddings': padding,
......@@ -791,12 +803,6 @@ def conv_transpose1d(x,
if bias is not None:
out = nn.elementwise_add(out, bias, axis=channel_dim)
if output_size is None:
out = pad2d(
out,
padding=[0, output_padding, 0, 0],
data_format=conv2d_data_format,
name=name)
out = nn.squeeze(input=out, axes=[squeeze_axis])
return out
......@@ -888,9 +894,9 @@ def conv_transpose2d(x,
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCHW"`, `pool_padding` can be in the form
and when `data_format` is `"NCHW"`, `padding` can be in the form
`[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `pool_padding` can be in the form
when `data_format` is `"NHWC"`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
output_padding(int|list|tuple, optional): Additional size added to one side
......@@ -1116,9 +1122,9 @@ def conv3d(x,
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
and when `data_format` is `"NCDHW"`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
when `data_format` is `"NDHWC"`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
......@@ -1340,9 +1346,9 @@ def conv_transpose3d(x,
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
and when `data_format` is `"NCDHW"`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
when `data_format` is `"NDHWC"`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
output_padding(int|list|tuple, optional): Additional size added to one side
......
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册