未验证 提交 4edf37d7 编写于 作者: V Vvsmile 提交者: GitHub

[Clean Fluid API]Remove API: crop_tensor (#47983)

* remove crop_tensor which is not used in paddle 2.0

* replace the crop_tensor with paddle.crop in organizing network
上级 ea830d43
...@@ -107,7 +107,6 @@ __all__ = [ ...@@ -107,7 +107,6 @@ __all__ = [
'gather_nd', 'gather_nd',
'relu', 'relu',
'log', 'log',
'crop_tensor',
'prelu', 'prelu',
'unique', 'unique',
'unique_with_counts', 'unique_with_counts',
...@@ -6541,199 +6540,6 @@ def relu(x, name=None): ...@@ -6541,199 +6540,6 @@ def relu(x, name=None):
return out return out
def crop_tensor(x, shape=None, offsets=None, name=None):
    """
    Crop input into output, as specified by offsets and shape.

    .. code-block:: text

        * Case 1 (input is a 2-D Tensor):
            Input:
                X.shape = [3, 5]
                X.data = [[0, 1, 2, 0, 0],
                          [0, 3, 4, 0, 0],
                          [0, 0, 0, 0, 0]]
            Parameters:
                shape = [2, 2]
                offsets = [0, 1]
            Output:
                Out.shape = [2, 2]
                Out.data = [[1, 2],
                            [3, 4]]
        * Case 2 (input is a 3-D Tensor):
            Input:
                X.shape = [2, 3, 4]
                X.data = [[[0, 1, 2, 3],
                           [0, 5, 6, 7],
                           [0, 0, 0, 0]],
                          [[0, 3, 4, 5],
                           [0, 6, 7, 8],
                           [0, 0, 0, 0]]]
            Parameters:
                shape = [2, 2, -1]
                offsets = [0, 0, 1]
            Output:
                Out.shape = [2, 2, 3]
                Out.data = [[[1, 2, 3],
                             [5, 6, 7]],
                            [[3, 4, 5],
                             [6, 7, 8]]]

    Parameters:
        x (Tensor): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
        shape (list|tuple|Tensor): The output shape is specified
            by `shape`. Its data type is int32. If a list/tuple, its length must be
            the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor.
            When it is a list, each element can be an integer or a Tensor of shape: [1].
            If a Variable is contained, it is suitable for the case that the shape may
            be changed each iteration.
        offsets (list|tuple|Variable, optional): Specifies the cropping
            offsets at each dimension. Its data type is int32. If a list/tuple, its length
            must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D
            Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1].
            If a Variable is contained, it is suitable for the case that the offsets may be
            changed each iteration. Default: None, the offsets are 0 at each dimension.
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: The cropped Tensor has same data type with `x`.

    Examples:

        .. code-block:: python
            :name: code-example1

            import paddle
            x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
            # x.shape = [3, 3]
            # x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

            # shape can be a 1-D Tensor or list or tuple.
            shape = paddle.to_tensor([2, 2], dtype='int32')
            # shape = [2, 2]
            # shape = (2, 2)
            out = paddle.crop(x, shape)
            # out.shape = [2, 2]
            # out = [[1,2], [4,5]]

            # offsets can be a 1-D Tensor or list or tuple.
            offsets = paddle.to_tensor([0, 1], dtype='int32')
            # offsets = [0, 1]
            # offsets = (0, 1)
            out = paddle.crop(x, shape, offsets)
            # out.shape = [2, 2]
            # if offsets = [0, 0], out = [[1,2], [4,5]]
            # if offsets = [0, 1], out = [[2,3], [5,6]]
            # if offsets = [1, 0], out = [[4,5], [7,8]]
            # if offsets = [1, 1], out = [[5,6], [8,9]]
    """
    helper = LayerHelper('crop_tensor', **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor'
    )
    check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
    check_type(
        offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor'
    )

    # Default: crop starts at the origin of every dimension.
    if offsets is None:
        offsets = [0] * len(x.shape)

    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x}
    attrs = {}

    def _attr_shape_check(shape_val):
        # Validate one static (int) element of `shape`: must be a non-zero
        # int; the only negative value allowed is -1 (meaning "infer this
        # dimension from the remaining extent of the input").
        if not isinstance(shape_val, int):
            raise TypeError(
                "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(shape_val)
            )
        if shape_val == 0:
            raise ValueError(
                "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
                % str(shape_val)
            )
        if shape_val < -1:
            raise ValueError(
                "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
                % str(shape_val)
            )

    def _attr_offsets_check(offset_val):
        # Validate one static (int) element of `offsets`: must be a
        # non-negative int.
        if not isinstance(offset_val, int):
            raise TypeError(
                "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(offset_val)
            )
        if offset_val < 0:
            raise ValueError(
                "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
                % str(offset_val)
            )

    # Wire `offsets` into the op in one of three mutually exclusive forms:
    # a single 1-D tensor input, a per-dimension list of tensors (when ints
    # and Variables are mixed), or a plain static attribute.
    if isinstance(offsets, Variable):
        offsets.stop_gradient = True
        ipts['Offsets'] = offsets
        # -1 is a per-dimension placeholder telling the op to read the
        # runtime 'Offsets' tensor instead of the attribute.
        attrs['offsets'] = [-1] * len(x.shape)
    elif utils._contain_var(offsets):
        new_offsets_tensor = []
        offsets_attr = []
        for dim in offsets:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_offsets_tensor.append(dim)
                offsets_attr.append(-1)  # placeholder: value comes from tensor
            else:
                _attr_offsets_check(dim)
                # Materialize the static int as a 1-element int32 tensor so
                # every entry of 'OffsetsTensor' is a tensor.
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_offsets_tensor.append(temp_out)
                offsets_attr.append(dim)
        ipts['OffsetsTensor'] = new_offsets_tensor
        attrs['offsets'] = offsets_attr
    else:
        # All-static offsets: validate each and pass purely as an attribute.
        for offset in offsets:
            _attr_offsets_check(offset)
        attrs['offsets'] = offsets

    # Wire `shape` the same three ways as `offsets` (note the tensor-backed
    # placeholder here is 0, not -1, since -1 is a legal static shape value).
    if isinstance(shape, Variable):
        shape.stop_gradient = True
        ipts['Shape'] = shape
    elif utils._contain_var(shape):
        new_shape_tensor = []
        shape_attr = []
        for dim_size in shape:
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                new_shape_tensor.append(dim_size)
                shape_attr.append(0)  # placeholder: value comes from tensor
            else:
                _attr_shape_check(dim_size)
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out
                )
                new_shape_tensor.append(temp_out)
                shape_attr.append(dim_size)
        ipts['ShapeTensor'] = new_shape_tensor
        attrs['shape'] = shape_attr
    else:
        # All-static shape: validate each and pass purely as an attribute.
        for dim_size in shape:
            _attr_shape_check(dim_size)
        attrs['shape'] = shape

    helper.append_op(
        type='crop_tensor',
        inputs=ipts,
        outputs={'Out': out},
        attrs=None if len(attrs) == 0 else attrs,
    )
    return out
@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu") @deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
def prelu(x, mode, param_attr=None, data_format="NCHW", name=None): def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
r""" r"""
......
...@@ -53,7 +53,7 @@ class TestDeviceGuard(unittest.TestCase): ...@@ -53,7 +53,7 @@ class TestDeviceGuard(unittest.TestCase):
with paddle.static.device_guard("cpu"): with paddle.static.device_guard("cpu"):
shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
with paddle.static.device_guard("gpu"): with paddle.static.device_guard("gpu"):
out = fluid.layers.crop_tensor(data1, shape=shape) out = paddle.crop(data1, shape=shape)
# check if the device attr is set correctly # check if the device attr is set correctly
all_ops = main_program.global_block().ops all_ops = main_program.global_block().ops
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
...@@ -79,7 +79,7 @@ class TestDeviceGuard(unittest.TestCase): ...@@ -79,7 +79,7 @@ class TestDeviceGuard(unittest.TestCase):
with paddle.static.device_guard("cpu"): with paddle.static.device_guard("cpu"):
shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
with paddle.static.device_guard("gpu:1"): with paddle.static.device_guard("gpu:1"):
out = fluid.layers.crop_tensor(data1, shape=shape) out = paddle.crop(data1, shape=shape)
# check if the device attr is set correctly # check if the device attr is set correctly
all_ops = main_program.global_block().ops all_ops = main_program.global_block().ops
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
......
...@@ -2863,15 +2863,9 @@ class TestLayer(LayerTest): ...@@ -2863,15 +2863,9 @@ class TestLayer(LayerTest):
) )
crop_offsets3 = [0, dim1, dim2, 0] crop_offsets3 = [0, dim1, dim2, 0]
out1 = fluid.layers.crop_tensor( out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
x, shape=crop_shape1, offsets=crop_offsets1 out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
) out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)
out2 = fluid.layers.crop_tensor(
x, shape=crop_shape2, offsets=crop_offsets2
)
out3 = fluid.layers.crop_tensor(
x, shape=crop_shape3, offsets=crop_offsets3
)
self.assertIsNotNone(out1) self.assertIsNotNone(out1)
self.assertIsNotNone(out2) self.assertIsNotNone(out2)
......
...@@ -58,7 +58,7 @@ class TestDeviceGuard(unittest.TestCase): ...@@ -58,7 +58,7 @@ class TestDeviceGuard(unittest.TestCase):
with paddle.static.device_guard("cpu"): with paddle.static.device_guard("cpu"):
shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
with paddle.static.device_guard("xpu"): with paddle.static.device_guard("xpu"):
out = fluid.layers.crop_tensor(data1, shape=shape) out = paddle.crop(data1, shape=shape)
# check if the device attr is set correctly # check if the device attr is set correctly
all_ops = main_program.global_block().ops all_ops = main_program.global_block().ops
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
...@@ -84,7 +84,7 @@ class TestDeviceGuard(unittest.TestCase): ...@@ -84,7 +84,7 @@ class TestDeviceGuard(unittest.TestCase):
with paddle.static.device_guard("cpu"): with paddle.static.device_guard("cpu"):
shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
with paddle.static.device_guard("xpu:1"): with paddle.static.device_guard("xpu:1"):
out = fluid.layers.crop_tensor(data1, shape=shape) out = paddle.crop(data1, shape=shape)
# check if the device attr is set correctly # check if the device attr is set correctly
all_ops = main_program.global_block().ops all_ops = main_program.global_block().ops
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册