Unverified commit d4a4eb9d authored by xiaoting, committed by GitHub

Fix fold python examples (#38636)

* fix fold python examples, test=develop

* fix size type, test=develop

* fix python example, test=develop

* fix fold shape check

* fix fold dygraph mode, test=develop
Parent 48b4366c
@@ -95,6 +95,17 @@ class FoldOp : public framework::OperatorWithKernel {
                           "but recieved strides_height: %d strides_width: %d.",
                           strides[0], strides[1]));
     // check dilations
+    PADDLE_ENFORCE_GT(output_height, 1,
+                      platform::errors::InvalidArgument(
+                          "The `output_height` should be greater than one, "
+                          "but recieved output_height: %d .",
+                          output_height));
+    PADDLE_ENFORCE_GT(output_width, 1,
+                      platform::errors::InvalidArgument(
+                          "The `output_width` should be greater than one, "
+                          "but recieved output_width: %d .",
+                          output_width));
+    // check output size
     PADDLE_ENFORCE_GT(
         dilation_height, 0,
         platform::errors::InvalidArgument(
@@ -146,7 +157,7 @@ class FoldOp : public framework::OperatorWithKernel {
             output_width));
     PADDLE_ENFORCE_EQ(
-        blocks_height * blocks_width, in_dims[1],
+        blocks_height * blocks_width, in_dims[2],
         platform::errors::InvalidArgument(
             "Given input output_size (%d, %d), "
             "kernel_sizes (%d, %d), strides (%d, %d), dilations (%d, %d), "
@@ -156,6 +167,15 @@ class FoldOp : public framework::OperatorWithKernel {
             strides[0], strides[1], dilations[0], dilations[1], blocks_height,
             blocks_width, blocks_height * blocks_width, in_dims[2]));
+    PADDLE_ENFORCE_EQ(
+        in_dims[1] % (kernel_sizes[0] * kernel_sizes[1]), 0,
+        platform::errors::InvalidArgument(
+            "Expected size of input's dimension 1 to be divisible by the"
+            "product of kernel_size, but got input.size(1)=%d and "
+            "kernel_size=( %d"
+            ", %d).",
+            in_dims[1], kernel_sizes[0], kernel_sizes[1]));
+
     out_dims.push_back(output_height);
     out_dims.push_back(output_width);
     ctx->SetOutputDim("Y", phi::make_ddim(out_dims));
...
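The new checks pin down how the flattened input relates to the fold output: both output sizes must be greater than one, C must be divisible by the kernel area, and L must equal the number of sliding blocks. Below is a hedged Python sketch of the same constraints; the block-count formula is the standard im2col relation and the [padding_top, padding_left, padding_bottom, padding_right] layout is taken from the fold docstring further down, not verbatim from the operator code.

```python
def check_fold_shapes(in_shape, output_sizes, kernel_sizes,
                      strides=(1, 1), paddings=(0, 0, 0, 0), dilations=(1, 1)):
    """Illustrative mirror of the new FoldOp shape checks (not the operator code)."""
    n, c, l = in_shape                       # input is [N, C, L]
    out_h, out_w = output_sizes
    k_h, k_w = kernel_sizes
    assert out_h > 1 and out_w > 1, "output_height/output_width must be > 1"
    assert c % (k_h * k_w) == 0, "C must be divisible by k_h * k_w"
    # Assumed standard im2col block count with [top, left, bottom, right] paddings.
    blocks_h = (out_h + paddings[0] + paddings[2]
                - (dilations[0] * (k_h - 1) + 1)) // strides[0] + 1
    blocks_w = (out_w + paddings[1] + paddings[3]
                - (dilations[1] * (k_w - 1) + 1)) // strides[1] + 1
    assert blocks_h * blocks_w == l, "L must equal blocks_h * blocks_w"
    return blocks_h, blocks_w


# The corrected docstring example: x is [2, 3*2*2, 12] with output_sizes [4, 5]
# and kernel 2, so blocks_h = (4 - 2) // 1 + 1 = 3, blocks_w = (5 - 2) // 1 + 1 = 4,
# 3 * 4 == 12 and 12 % (2 * 2) == 0, so every check passes.
print(check_fold_shapes((2, 12, 12), (4, 5), (2, 2)))  # (3, 4)
```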
@@ -174,6 +174,15 @@ class TestFoldOpError(unittest.TestCase):
                     x, output_sizes=[6, 6], kernel_sizes=[2, 2],
                     strides=[1, 1])
 
+            def test_output_size_2():
+                # out_size must GT 1
+                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
+                out = fold(
+                    x,
+                    output_sizes=[0.1, 0.2],
+                    kernel_sizes=[2, 2],
+                    strides=[1, 1])
+
             def test_block_h_w():
                 # test_block_h_w GT 0
                 x = paddle.randn(shape=[2, 1, 1], dtype="float32")
@@ -196,6 +205,7 @@ class TestFoldOpError(unittest.TestCase):
             self.assertRaises(AssertionError, test_dilations_shape)
             self.assertRaises(AssertionError, test_strides_shape)
             self.assertRaises(ValueError, test_output_size)
+            self.assertRaises(ValueError, test_output_size_2)
             self.assertRaises(ValueError, test_block_h_w)
             self.assertRaises(ValueError, test_GT_0)
...
@@ -351,7 +351,6 @@ def interpolate(x,
     out_shape = size
     scale = scale_factor
     if out_shape is not None and scale is not None:
         raise ValueError("Only one of size or scale_factor should be defined.")
     if out_shape is not None:
@@ -362,6 +361,8 @@ def interpolate(x,
     if in_dynamic_mode():
         if isinstance(out_shape, Variable):
             out_shape = list(out_shape.numpy())
+        else:
+            out_shape = list(out_shape)
         for i, dim in enumerate(out_shape):
             if isinstance(dim, Variable):
                 out_shape[i] = dim.numpy()[0]
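The added `else` branch turns a plain tuple or list `size` into a mutable list on the dygraph path, matching what was already done for `Variable` sizes, so the per-element normalization loop below it works either way. A minimal usage sketch (the tensor shape and mode are illustrative, not taken from the patch):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 6, 6])
# In dynamic mode a tuple size now passes through `out_shape = list(out_shape)`
# before the element-wise loop, so it behaves the same as a list size.
y = F.interpolate(x, size=(12, 12), mode="nearest")
print(y.shape)  # [1, 3, 12, 12]
```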
@@ -1818,7 +1819,6 @@ def fold(x,
     can be calculated as following.
     .. math::
         H_out &= output_size[0]
         W_out &= output_size[1]
         C_out &= C_in / kernel\_sizes[0] / kernel\_sizes[1]
@@ -1826,21 +1826,21 @@ def fold(x,
     Parameters:
         x(Tensor): 3-D Tensor, input tensor of format [N, C, L],
             data type can be float32 or float64
-        output_sizes(list): The size of output size, should be [output_size_h, output_size_w]
+        output_sizes(int|list|tuple): The size of output size, should be [output_size_h, output_size_w]
             or an interger o treated as [o, o].
-        kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
+        kernel_sizes(int|list|tuple): The size of convolution kernel, should be [k_h, k_w]
             or an integer k treated as [k, k].
-        strides(int|list): The strides, should be [stride_h, stride_w]
+        strides(int|list|tuple): The strides, should be [stride_h, stride_w]
             or an integer stride treated as [sride, stride].
             For default, strides will be [1, 1].
-        paddings(int|list): The paddings of each dimension, should be
+        paddings(int|list|tuple): The paddings of each dimension, should be
             [padding_top, padding_left, padding_bottom, padding_right]
             or [padding_h, padding_w] or an integer padding.
             If [padding_h, padding_w] was given, it will expanded to
             [padding_h, padding_w, padding_h, padding_w]. If an integer
             padding was given, [padding, padding, padding, padding] will
             be used. For default, paddings will be [0, 0, 0, 0]
-        dilations(int|list): the dilations of convolution kernel, should be
+        dilations(int|list|tuple): the dilations of convolution kernel, should be
             [dilation_h, dilation_w], or an integer dilation treated as
             [dilation, dilation]. For default, it will be [1, 1].
         name(str, optional): The default value is None.
@@ -1859,9 +1859,9 @@ def fold(x,
             import paddle
             import paddle.nn.functional as F
 
-            x = paddle.randn([2,12,9])
-            y = F.fold(x, output_sizes=(4, 4), kernel_sizes=2)
-            # y.shape = [2,3,4,4]
+            x = paddle.randn([2,3*2*2,12])
+            y = F.fold(x, output_sizes=[4, 5], kernel_sizes=2)
+            # y.shape = [2,3,4,5]
     """
@@ -1872,29 +1872,32 @@ def fold(x,
     assert len(x.shape) == 3, \
             "input should be the format of [N, C, L]"
 
+    def _is_list_or_turple_(data):
+        return (isinstance(data, list) or isinstance(data, tuple))
+
     if isinstance(output_sizes, int):
         output_sizes = [output_sizes, output_sizes]
     else:
-        assert isinstance(output_sizes, list) and (len(output_sizes) == 2), \
-            "output_sizes should either be an integer or a list of two integers"
+        assert _is_list_or_turple_(output_sizes) and (len(output_sizes) == 2), \
+            "output_sizes should either be an integer or a list/tuple of two integers"
 
     if isinstance(kernel_sizes, int):
         kernel_sizes = [kernel_sizes, kernel_sizes]
     else:
-        assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
-            "kernel_sizes should either be an integer or a list of two integers"
+        assert _is_list_or_turple_(kernel_sizes) and (len(kernel_sizes) == 2), \
+            "kernel_sizes should either be an integer or a list/tuple of two integers"
 
     if isinstance(strides, int):
         strides = [strides, strides]
     else:
-        assert isinstance(strides, list) and (len(strides) == 2), \
-            "strides should either be an integer or a list of two integers"
+        assert _is_list_or_turple_(strides) and (len(strides) == 2), \
+            "strides should either be an integer or a list/tuple of two integers"
 
     if isinstance(dilations, int):
         dilations = [dilations, dilations]
     else:
-        assert isinstance(dilations, list) and (len(dilations) == 2), \
-            "dilations should either be an integer or a list of two integers"
+        assert _is_list_or_turple_(dilations) and (len(dilations) == 2), \
+            "dilations should either be an integer or a list/tuple of two integers"
 
     if isinstance(paddings, int):
         paddings = [paddings] * 4
@@ -1912,16 +1915,21 @@ def fold(x,
         "Unexpected type of paddings, it should be either an integer or a list"
         "of 2 or 4 integers")
 
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type="fold",
-        inputs={"X": x},
-        outputs={"Y": out},
-        attrs={
-            "output_sizes": output_sizes,
-            "kernel_sizes": kernel_sizes,
-            "strides": strides,
-            "paddings": paddings,
-            "dilations": dilations
-        })
+    if in_dynamic_mode():
+        out = _C_ops.fold(x, "output_sizes", output_sizes, "kernel_sizes",
+                          kernel_sizes, "strides", strides, "paddings",
+                          paddings, "dilations", dilations)
+    else:
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="fold",
+            inputs={"X": x},
+            outputs={"Y": out},
+            attrs={
+                "output_sizes": output_sizes,
+                "kernel_sizes": kernel_sizes,
+                "strides": strides,
+                "paddings": paddings,
+                "dilations": dilations
+            })
     return out
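With the dygraph branch above, the corrected docstring example runs eagerly through `_C_ops.fold` instead of needing a static-graph program. A minimal sketch that simply reuses the updated example from the docstring:

```python
import paddle
import paddle.nn.functional as F

# Input is [N, C, L] with C = 3 * 2 * 2 and L = 12 = 3 * 4 sliding blocks.
x = paddle.randn([2, 3 * 2 * 2, 12])
y = F.fold(x, output_sizes=[4, 5], kernel_sizes=2)
print(y.shape)  # expected [2, 3, 4, 5]
```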
@@ -1565,7 +1565,6 @@ class Fold(Layer):
     can be calculated as following.
     .. math::
         H_out &= output_size[0]
         W_out &= output_size[1]
         C_out &= C_in / kernel\_sizes[0] / kernel\_sizes[1]
@@ -1573,19 +1572,19 @@ class Fold(Layer):
     Parameters:
         output_sizes(list): The size of output size, should be [output_size_h, output_size_w]
             or an interger o treated as [o, o].
-        kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
+        kernel_sizes(int|list|tuple): The size of convolution kernel, should be [k_h, k_w]
             or an integer k treated as [k, k].
-        strides(int|list): The strides, should be [stride_h, stride_w]
+        strides(int|list|tuple): The strides, should be [stride_h, stride_w]
             or an integer stride treated as [sride, stride].
             For default, strides will be [1, 1].
-        paddings(int|list): The paddings of each dimension, should be
+        paddings(int|list|tuple): The paddings of each dimension, should be
             [padding_top, padding_left, padding_bottom, padding_right]
             or [padding_h, padding_w] or an integer padding.
             If [padding_h, padding_w] was given, it will expanded to
             [padding_h, padding_w, padding_h, padding_w]. If an integer
             padding was given, [padding, padding, padding, padding] will
             be used. For default, paddings will be [0, 0, 0, 0]
-        dilations(int|list): the dilations of convolution kernel, should be
+        dilations(int|list|tuple): the dilations of convolution kernel, should be
             [dilation_h, dilation_w], or an integer dilation treated as
             [dilation, dilation]. For default, it will be [1, 1].
         name(str, optional): The default value is None.
@@ -1604,10 +1603,10 @@ class Fold(Layer):
             import paddle
             import paddle.nn as nn
 
-            x = paddle.randn([2,12,9])
-            fold = nn.Fold(output_sizes=(4, 4), kernel_sizes=2)
+            x = paddle.randn([2,3*2*2,12])
+            fold = nn.Fold(output_sizes=[4, 5], kernel_sizes=2)
             y = fold(x)
-            # y.shape = [2,3,4,4]
+            # y.shape = [2,3,4,5]
     """
 
     def __init__(self,
...