Unverified commit 9a227ee7 authored by yunyaoXYY, committed by GitHub

[Clean fluid] Clean fluid resize_linear and image_resize_short. (#48357)

* Clean fluid resize_linear API

* Clean fluid image_resize_short API

* add image_resize back
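
For code that still calls the removed `fluid.layers.resize_linear`, a minimal migration sketch (assuming the `paddle.nn.functional.interpolate` API that the updated tests below exercise; the tensor shapes are illustrative only):

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 3, 64], dtype='float32')

# Previously: fluid.layers.resize_linear(x, out_shape=[128],
#                                        align_mode=1, align_corners=False)
out = F.interpolate(
    x,
    size=[128],
    mode='linear',
    align_mode=1,
    align_corners=False,
    data_format='NCW',
)
print(out.shape)  # [1, 3, 128]
```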
Parent 3d1981ad
@@ -102,8 +102,6 @@ __all__ = [
'roi_pool',
'roi_align',
'image_resize',
'image_resize_short',
'resize_linear',
'resize_bilinear',
'resize_trilinear',
'resize_nearest',
@@ -5843,144 +5841,6 @@ def image_resize(
return out
@templatedoc(op_type="linear_interp")
def resize_linear(
input,
out_shape=None,
scale=None,
name=None,
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCW',
):
"""
This op resizes the input by performing linear interpolation based on the given
output shape, which is specified by actual_shape, out_shape and scale
in priority order.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in
the future; use :attr:`out_shape` instead.
align_corners and align_mode are optional parameters; they select the
calculation method used for the interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Parameters:
input(Variable): 3-D Tensor(NCW), its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize linear
layer, the shape is (out_w,). Default: None. If a list, each
element can be an integer or a Tensor Variable with shape: [1]. If a
Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input width. At
least one of :attr:`out_shape` or :attr:`scale` must be set,
and :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
actual_shape(Variable): An optional input to specify the output shape
dynamically. If provided, the image is resized
according to this given shape rather than the shape
specified by :attr:`out_shape` and :attr:`scale`;
that is, actual_shape has the highest priority.
It is recommended to use :attr:`out_shape` if you
want to specify the output shape dynamically,
because :attr:`actual_shape` will be deprecated.
When using actual_shape to specify the output shape,
one of :attr:`out_shape` and :attr:`scale` should
also be set, otherwise errors would occur during
the graph construction stage.
Default: None
align_corners(bool): ${align_corners_comment}
align_mode(bool): ${align_mode_comment}
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`.
The default is `"NCW"`. When it is `"NCW"`, the data is stored in the order of:
`[batch_size, input_channels, input_width]`.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: 3-D tensor(NCW or NWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,100])
output = fluid.layers.resize_linear(input=input,out_shape=[50,])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(1,3,100).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
# (1, 3, 50)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_linear(input=input, out_shape=[50,])
print(output.shape)
# [1L, 3L, 50L]
"""
return image_resize(
input,
out_shape,
scale,
name,
'LINEAR',
actual_shape,
align_corners,
align_mode,
data_format,
)
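# Illustrative sketch only (not the removed operator's kernel): the scale
# factor and the half-pixel vs. corner-aligned coordinate mapping behind the
# formulas in the docstring above. numpy is assumed to be available and the
# helper name is hypothetical.
import numpy as np

def _linear_src_coords(in_w, out_w, align_corners=True, align_mode=1):
    # Scale factor, matching the "For scale" branch in the docstring.
    if align_corners and out_w > 1:
        scale = (in_w - 1.0) / (out_w - 1.0)
    else:
        scale = float(in_w) / out_w
    dst = np.arange(out_w, dtype=np.float64)
    if (not align_corners) and align_mode == 0:
        # Half-pixel mapping: src = (dst + 0.5) * scale - 0.5
        src = (dst + 0.5) * scale - 0.5
    else:
        # Corner-aligned / legacy mapping: src = dst * scale
        src = dst * scale
    # Clamp to valid input coordinates before gathering neighbours.
    return np.clip(src, 0.0, in_w - 1.0)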
@templatedoc(op_type="bilinear_interp")
def resize_bilinear(
input,
@@ -6505,46 +6365,6 @@ def resize_nearest(
)
def image_resize_short(input, out_short_len, resample='BILINEAR'):
"""
This op resizes a batch of images. The short edge of the input images is
resized to the given 'out_short_len', and the long edge is resized
proportionally so that the aspect ratio of the images is preserved.
Parameters:
input (Variable): 4-D tensor(NCHW), The input tensor of image resize layer.
out_short_len(int): The length of output images' short edge.
resample (str): resample method, default: BILINEAR.
Returns:
Variable: 4-D tensor(NCHW).
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
out = fluid.layers.image_resize_short(input, out_short_len=3)
"""
in_shape = input.shape
if len(in_shape) != 4:
raise ValueError(
"The rank of input must be 4 (num_batches, channels, in_h, in_w)."
)
hw = in_shape[2:4]
short_idx = hw.index(min(hw))
long_idx = 1 - short_idx
out_shape = list(hw)
out_shape[short_idx] = out_short_len
out_shape[long_idx] = int(
float(out_shape[long_idx])
* (float(out_short_len) / float(hw[short_idx]))
+ 0.5
)
return image_resize(input=input, out_shape=out_shape, resample=resample)
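# Illustrative sketch only: the short-edge computation above, reproduced with
# the 2.x API (paddle.nn.functional.interpolate is assumed; the helper name is
# hypothetical). For the docstring example (input [N, 3, 6, 9], out_short_len=3)
# the long edge becomes int(9 * 3 / 6 + 0.5) = 5, i.e. out_shape = [3, 5].
import paddle.nn.functional as F

def _resize_short(x, out_short_len, mode='bilinear'):
    h, w = x.shape[2], x.shape[3]
    short, long_ = (h, w) if h <= w else (w, h)
    out_long = int(float(long_) * float(out_short_len) / float(short) + 0.5)
    out_hw = [out_short_len, out_long] if h <= w else [out_long, out_short_len]
    return F.interpolate(x, size=out_hw, mode=mode)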
@deprecated(since="2.0.0", update_to="paddle.gather_nd")
def gather_nd(input, index, name=None):
"""
@@ -20,7 +20,6 @@ import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.nn.functional import interpolate
def linear_interp_np(
@@ -242,111 +241,6 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
self.outputs = {'Out': output_np}
class TestResizeLinearAPI(unittest.TestCase):
def test_case(self):
x = fluid.data(name="x", shape=[1, 3, 64], dtype="float32")
dim = fluid.data(name="dim", shape=[1], dtype="int32")
shape_tensor = fluid.data(name="shape_tensor", shape=[1], dtype="int32")
actual_size = fluid.data(name="actual_size", shape=[1], dtype="int32")
scale_tensor = fluid.data(
name="scale_tensor", shape=[1], dtype="float32"
)
out1 = fluid.layers.resize_linear(
x,
out_shape=[
128,
],
align_mode=1,
align_corners=False,
)
out2 = fluid.layers.resize_linear(
x, out_shape=[128], align_mode=1, align_corners=False
)
out3 = fluid.layers.resize_linear(
x, out_shape=shape_tensor, align_mode=1, align_corners=False
)
out4 = fluid.layers.resize_linear(
x,
out_shape=[
128,
],
actual_shape=actual_size,
align_mode=1,
align_corners=False,
)
out5 = fluid.layers.resize_linear(
x, scale=scale_tensor, align_mode=1, align_corners=False
)
out6 = interpolate(
x,
scale_factor=scale_tensor,
mode='linear',
align_mode=1,
align_corners=False,
data_format='NCW',
)
out7 = interpolate(
x,
size=[
128,
],
mode='linear',
align_mode=1,
align_corners=False,
data_format='NCW',
)
out8 = interpolate(
x,
size=shape_tensor,
mode='linear',
align_mode=1,
align_corners=False,
data_format='NCW',
)
x_data = np.random.random((1, 3, 64)).astype("float32")
dim_data = np.array([128]).astype("int32")
shape_data = np.array(
[
128,
]
).astype("int32")
actual_size_data = np.array(
[
128,
]
).astype("int32")
scale_data = np.array([2.0]).astype("float32")
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(
fluid.default_main_program(),
feed={
"x": x_data,
"dim": dim_data,
"shape_tensor": shape_data,
"actual_size": actual_size_data,
"scale_tensor": scale_data,
},
fetch_list=[out1, out2, out3, out4, out5, out6, out7, out8],
return_numpy=True,
)
expect_res = linear_interp_np(
x_data, out_w=128, align_mode=1, align_corners=False
)
for res in results:
np.testing.assert_allclose(res, expect_res, rtol=1e-05)
class TestLinearInterpOpAPI2_0(unittest.TestCase):
def test_case(self):
@@ -426,44 +320,6 @@ class TestResizeLinearOpUint8(OpTest):
self.align_mode = 1
class TestLinearInterpOpException(unittest.TestCase):
def test_exception(self):
def input_shape_error():
x1 = fluid.data(name="x1", shape=[1], dtype="float32")
out = fluid.layers.resize_linear(
x1,
out_shape=[
256,
],
data_format='NCW',
)
def data_format_error():
x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
out = fluid.layers.resize_linear(
x2,
out_shape=[
256,
],
data_format='NHWCD',
)
def out_shape_error():
x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
out = fluid.layers.resize_linear(
x3,
out_shape=[
256,
256,
],
data_format='NHWC',
)
self.assertRaises(ValueError, input_shape_error)
self.assertRaises(ValueError, data_format_error)
self.assertRaises(ValueError, out_shape_error)
class TestLinearInterpOpError(unittest.TestCase):
def test_error(self):
with program_guard(Program(), Program()):
@@ -309,111 +309,6 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
self.outputs = {'Out': output_np}
class TestResizeLinearAPI(unittest.TestCase):
def test_case(self):
x = fluid.data(name="x", shape=[1, 3, 64], dtype="float32")
dim = fluid.data(name="dim", shape=[1], dtype="int32")
shape_tensor = fluid.data(name="shape_tensor", shape=[1], dtype="int32")
actual_size = fluid.data(name="actual_size", shape=[1], dtype="int32")
scale_tensor = fluid.data(
name="scale_tensor", shape=[1], dtype="float32"
)
out1 = fluid.layers.resize_linear(
x,
out_shape=[
128,
],
align_mode=1,
align_corners=False,
)
out2 = fluid.layers.resize_linear(
x, out_shape=[128], align_mode=1, align_corners=False
)
out3 = fluid.layers.resize_linear(
x, out_shape=shape_tensor, align_mode=1, align_corners=False
)
out4 = fluid.layers.resize_linear(
x,
out_shape=[
128,
],
actual_shape=actual_size,
align_mode=1,
align_corners=False,
)
out5 = fluid.layers.resize_linear(
x, scale=scale_tensor, align_mode=1, align_corners=False
)
out6 = interpolate(
x,
scale_factor=scale_tensor,
mode='linear',
align_mode=1,
align_corners=False,
data_format='NCW',
)
out7 = interpolate(
x,
size=[
128,
],
mode='linear',
align_mode=1,
align_corners=False,
data_format='NCW',
)
out8 = interpolate(
x,
size=shape_tensor,
mode='linear',
align_mode=1,
align_corners=False,
data_format='NCW',
)
x_data = np.random.random((1, 3, 64)).astype("float32")
dim_data = np.array([128]).astype("int32")
shape_data = np.array(
[
128,
]
).astype("int32")
actual_size_data = np.array(
[
128,
]
).astype("int32")
scale_data = np.array([2.0]).astype("float32")
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(
fluid.default_main_program(),
feed={
"x": x_data,
"dim": dim_data,
"shape_tensor": shape_data,
"actual_size": actual_size_data,
"scale_tensor": scale_data,
},
fetch_list=[out1, out2, out3, out4, out5, out6, out7, out8],
return_numpy=True,
)
expect_res = linear_interp_np(
x_data, out_w=128, align_mode=1, align_corners=False
)
for res in results:
np.testing.assert_allclose(res, expect_res, rtol=1e-05)
class TestLinearInterpOpAPI2_0(unittest.TestCase):
def test_case(self):
@@ -501,44 +396,6 @@ class TestResizeLinearOpUint8(OpTest):
self.align_mode = 1
class TestLinearInterpOpException(unittest.TestCase):
def test_exception(self):
def input_shape_error():
x1 = fluid.data(name="x1", shape=[1], dtype="float32")
out = fluid.layers.resize_linear(
x1,
out_shape=[
256,
],
data_format='NCW',
)
def data_format_error():
x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
out = fluid.layers.resize_linear(
x2,
out_shape=[
256,
],
data_format='NHWCD',
)
def out_shape_error():
x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
out = fluid.layers.resize_linear(
x3,
out_shape=[
256,
256,
],
data_format='NHWC',
)
self.assertRaises(ValueError, input_shape_error)
self.assertRaises(ValueError, data_format_error)
self.assertRaises(ValueError, out_shape_error)
class TestLinearInterpOpError(unittest.TestCase):
def test_error(self):
with program_guard(Program(), Program()):