From 4edf37d72f85e7ba57b96679450e9eb090e9a01e Mon Sep 17 00:00:00 2001
From: Vvsmile <450864116@qq.com>
Date: Mon, 28 Nov 2022 16:53:25 +0800
Subject: [PATCH] [Clean Fluid API]Remove API: crop_tensor (#47983)

* remove crop_tensor which is not used in paddle 2.0

* replace the crop_tensor with paddle.crop in organizing network
---
 python/paddle/fluid/layers/nn.py              | 194 ------------------
 .../tests/unittests/test_device_guard.py      |   4 +-
 .../fluid/tests/unittests/test_layers.py      |  12 +-
 .../unittests/xpu/test_device_guard_xpu.py    |   4 +-
 4 files changed, 7 insertions(+), 207 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index ac080de1ff..83c89fe6b4 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -107,7 +107,6 @@ __all__ = [
     'gather_nd',
     'relu',
     'log',
-    'crop_tensor',
     'prelu',
     'unique',
     'unique_with_counts',
@@ -6541,199 +6540,6 @@ def relu(x, name=None):
     return out


-def crop_tensor(x, shape=None, offsets=None, name=None):
-    """
-    Crop input into output, as specified by offsets and shape.
-
-    .. code-block:: text
-
-        * Case 1 (input is a 2-D Tensor):
-            Input:
-                X.shape = [3, 5]
-                X.data = [[0, 1, 2, 0, 0],
-                          [0, 3, 4, 0, 0],
-                          [0, 0, 0, 0, 0]]
-            Parameters:
-                shape = [2, 2]
-                offsets = [0, 1]
-            Output:
-                Out.shape = [2, 2]
-                Out.data = [[1, 2],
-                            [3, 4]]
-        * Case 2 (input is a 3-D Tensor):
-            Input:
-                X.shape = [2, 3, 4]
-                X.data = [[[0, 1, 2, 3],
-                           [0, 5, 6, 7],
-                           [0, 0, 0, 0]],
-                          [[0, 3, 4, 5],
-                           [0, 6, 7, 8],
-                           [0, 0, 0, 0]]]
-            Parameters:
-                shape = [2, 2, -1]
-                offsets = [0, 0, 1]
-            Output:
-                Out.shape = [2, 2, 3]
-                Out.data = [[[1, 2, 3],
-                             [5, 6, 7]],
-                            [[3, 4, 5],
-                             [6, 7, 8]]]
-
-    Parameters:
-        x (Tensor): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
-        shape (list|tuple|Tensor): The output shape is specified
-            by `shape`. Its data type is int32. If a list/tuple, it's length must be
-            the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor.
-            When it is a list, each element can be an integer or a Tensor of shape: [1].
-            If Variable contained, it is suitable for the case that the shape may
-            be changed each iteration.
-        offsets (list|tuple|Variable, optional): Specifies the cropping
-            offsets at each dimension. Its data type is int32. If a list/tuple, it's length
-            must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D
-            Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1].
-            If Variable contained, it is suitable for the case that the offsets may be changed
-            each iteration. Default: None, the offsets are 0 at each dimension.
-        name(str, optional): The default value is None. Normally there is no need for user to set
-            this property. For more information, please refer to :ref:`api_guide_Name` .
-
-    Returns:
-        Tensor: The cropped Tensor has same data type with `x`.
-
-    Examples:
-
-        .. code-block:: python
-          :name: code-example1
-
-            import paddle
-            x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-            # x.shape = [3, 3]
-            # x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
-
-            # shape can be a 1-D Tensor or list or tuple.
-            shape = paddle.to_tensor([2, 2], dtype='int32')
-            # shape = [2, 2]
-            # shape = (2, 2)
-            out = paddle.crop(x, shape)
-            # out.shape = [2, 2]
-            # out = [[1,2], [4,5]]
-
-            # offsets can be a 1-D Tensor or list or tuple.
-            offsets = paddle.to_tensor([0, 1], dtype='int32')
-            # offsets = [1, 0]
-            # offsets = (1, 1)
-            out = paddle.crop(x, shape, offsets)
-            # out.shape = [2, 2]
-            # if offsets = [0, 0], out = [[1,2], [4,5]]
-            # if offsets = [0, 1], out = [[2,3], [5,6]]
-            # if offsets = [1, 0], out = [[4,5], [7,8]]
-            # if offsets = [1, 1], out = [[5,6], [8,9]]
-
-    """
-    helper = LayerHelper('crop_tensor', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor'
-    )
-    check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
-    check_type(
-        offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor'
-    )
-
-    if offsets is None:
-        offsets = [0] * len(x.shape)
-
-    out = helper.create_variable_for_type_inference(x.dtype)
-    ipts = {'X': x}
-    attrs = {}
-
-    def _attr_shape_check(shape_val):
-        if not isinstance(shape_val, int):
-            raise TypeError(
-                "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
-                % type(shape_val)
-            )
-        if shape_val == 0:
-            raise ValueError(
-                "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
-                % str(shape_val)
-            )
-        if shape_val < -1:
-            raise ValueError(
-                "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
-                % str(shape_val)
-            )
-
-    def _attr_offsets_check(offset_val):
-        if not isinstance(offset_val, int):
-            raise TypeError(
-                "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
-                % type(offset_val)
-            )
-        if offset_val < 0:
-            raise ValueError(
-                "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
-                % str(offset_val)
-            )
-
-    if isinstance(offsets, Variable):
-        offsets.stop_gradient = True
-        ipts['Offsets'] = offsets
-        attrs['offsets'] = [-1] * len(x.shape)
-    elif utils._contain_var(offsets):
-        new_offsets_tensor = []
-        offsets_attr = []
-        for dim in offsets:
-            if isinstance(dim, Variable):
-                dim.stop_gradient = True
-                new_offsets_tensor.append(dim)
-                offsets_attr.append(-1)
-            else:
-                _attr_offsets_check(dim)
-                temp_out = helper.create_variable_for_type_inference('int32')
-                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
-                new_offsets_tensor.append(temp_out)
-                offsets_attr.append(dim)
-        ipts['OffsetsTensor'] = new_offsets_tensor
-        attrs['offsets'] = offsets_attr
-    else:
-        for offset in offsets:
-            _attr_offsets_check(offset)
-        attrs['offsets'] = offsets
-
-    if isinstance(shape, Variable):
-        shape.stop_gradient = True
-        ipts['Shape'] = shape
-    elif utils._contain_var(shape):
-        new_shape_tensor = []
-        shape_attr = []
-        for dim_size in shape:
-            if isinstance(dim_size, Variable):
-                dim_size.stop_gradient = True
-                new_shape_tensor.append(dim_size)
-                shape_attr.append(0)
-            else:
-                _attr_shape_check(dim_size)
-                temp_out = helper.create_variable_for_type_inference('int32')
-                fill_constant(
-                    [1], 'int32', dim_size, force_cpu=True, out=temp_out
-                )
-                new_shape_tensor.append(temp_out)
-                shape_attr.append(dim_size)
-        ipts['ShapeTensor'] = new_shape_tensor
-        attrs['shape'] = shape_attr
-    else:
-        for dim_size in shape:
-            _attr_shape_check(dim_size)
-        attrs['shape'] = shape
-
-    helper.append_op(
-        type='crop_tensor',
-        inputs=ipts,
-        outputs={'Out': out},
-        attrs=None if len(attrs) == 0 else attrs,
-    )
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     r"""
diff --git a/python/paddle/fluid/tests/unittests/test_device_guard.py b/python/paddle/fluid/tests/unittests/test_device_guard.py
index b43fbb6e87..d1e0c383ac 100644
--- a/python/paddle/fluid/tests/unittests/test_device_guard.py
+++ b/python/paddle/fluid/tests/unittests/test_device_guard.py
@@ -53,7 +53,7 @@ class TestDeviceGuard(unittest.TestCase):
             with paddle.static.device_guard("cpu"):
                 shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
             with paddle.static.device_guard("gpu"):
-                out = fluid.layers.crop_tensor(data1, shape=shape)
+                out = paddle.crop(data1, shape=shape)
         # check if the device attr is set correctly
         all_ops = main_program.global_block().ops
         device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
@@ -79,7 +79,7 @@ class TestDeviceGuard(unittest.TestCase):
             with paddle.static.device_guard("cpu"):
                 shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
             with paddle.static.device_guard("gpu:1"):
-                out = fluid.layers.crop_tensor(data1, shape=shape)
+                out = paddle.crop(data1, shape=shape)
         # check if the device attr is set correctly
         all_ops = main_program.global_block().ops
         device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index fb0bb9827f..0bea87884e 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -2863,15 +2863,9 @@ class TestLayer(LayerTest):
             )
             crop_offsets3 = [0, dim1, dim2, 0]

-            out1 = fluid.layers.crop_tensor(
-                x, shape=crop_shape1, offsets=crop_offsets1
-            )
-            out2 = fluid.layers.crop_tensor(
-                x, shape=crop_shape2, offsets=crop_offsets2
-            )
-            out3 = fluid.layers.crop_tensor(
-                x, shape=crop_shape3, offsets=crop_offsets3
-            )
+            out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
+            out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
+            out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)

             self.assertIsNotNone(out1)
             self.assertIsNotNone(out2)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py
index 8c41cb8675..0989fb347c 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py
@@ -58,7 +58,7 @@ class TestDeviceGuard(unittest.TestCase):
             with paddle.static.device_guard("cpu"):
                 shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
             with paddle.static.device_guard("xpu"):
-                out = fluid.layers.crop_tensor(data1, shape=shape)
+                out = paddle.crop(data1, shape=shape)
         # check if the device attr is set correctly
         all_ops = main_program.global_block().ops
         device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
@@ -84,7 +84,7 @@ class TestDeviceGuard(unittest.TestCase):
             with paddle.static.device_guard("cpu"):
                 shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
             with paddle.static.device_guard("xpu:1"):
-                out = fluid.layers.crop_tensor(data1, shape=shape)
+                out = paddle.crop(data1, shape=shape)
         # check if the device attr is set correctly
         all_ops = main_program.global_block().ops
         device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
--
GitLab
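
Migration sketch for downstream callers (illustrative only, not part of the patch; it assumes Paddle >= 2.0, where paddle.crop takes the same shape/offsets arguments that the removed fluid.layers.crop_tensor did, as the test updates above already demonstrate):

    import paddle

    x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

    # Before this change, graph-building code called:
    #     out = fluid.layers.crop_tensor(x, shape=[2, 2], offsets=[0, 1])
    # The drop-in replacement keeps the same arguments:
    out = paddle.crop(x, shape=[2, 2], offsets=[0, 1])
    # out is [[2, 3],
    #         [5, 6]]

As in the device_guard tests, shape may also be passed as a 1-D int32 Tensor when the output size is only known at runtime.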