From 87388d59677cc94a8d4c528394b9212eb9e6448a Mon Sep 17 00:00:00 2001
From: Vvsmile <450864116@qq.com>
Date: Mon, 21 Nov 2022 16:45:20 +0800
Subject: [PATCH] remove lrn which is not used in paddle 2.0 (#47945)

---
 python/paddle/fluid/layers/nn.py              | 98 -------------------
 .../fluid/tests/unittests/test_layers.py      |  7 --
 .../fluid/tests/unittests/test_lrn_op.py      | 50 ----------
 3 files changed, 155 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d9253f50a1..c10dbcd7d1 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -104,7 +104,6 @@ __all__ = [
     'unsqueeze',
     'lod_reset',
     'lod_append',
-    'lrn',
     'pad',
     'label_smooth',
     'roi_pool',
@@ -6831,103 +6830,6 @@ def lod_append(x, level):
     return out
 
 
-def lrn(
-    input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, data_format='NCHW'
-):
-    r"""
-    :alias_main: paddle.nn.functional.lrn
-    :alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
-    :old_api: paddle.fluid.layers.lrn
-
-    This operator implements the Local Response Normalization Layer.
-    This layer performs a type of "lateral inhibition" by normalizing over local input regions.
-    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks `_
-
-    The formula is as follows:
-
-    .. math::
-
-        Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta}
-
-    In the above equation:
-
-    - :math:`n` : The number of channels to sum over.
-    - :math:`k` : The offset (avoid being divided by 0).
-    - :math:`\\alpha` : The scaling parameter.
-    - :math:`\\beta` : The exponent parameter.
-
-
-    Args:
-        input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W] or [N, H, W, C],
-            where N is the batch size, C is the input channel, H is Height, W is weight. The data
-            type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError.
-        n (int, optional): The number of channels to sum over. Default: 5
-        k (float, optional): An offset, positive. Default: 1.0
-        alpha (float, optional): The scaling parameter, positive. Default:1e-4
-        beta (float, optional): The exponent, positive. Default:0.75
-        name (str, optional): The default value is None. Normally there is no need for user to set
-            this property. For more information, please refer to :ref:`api_guide_Name`
-        data_format (str, optional): Specify the data format of the input, and the data format of the output
-            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
-            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
-            `[batch_size, input_channels, input_height, input_width]`.
-
-    Returns:
-        Variable: A tensor variable storing the transformation result with the same shape and data type as input.
-
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            data = fluid.data(
-                name="data", shape=[None, 3, 112, 112], dtype="float32")
-            lrn = fluid.layers.lrn(input=data)
-            print(lrn.shape) # [-1, 3, 112, 112]
-            print(lrn.dtype) # float32
-    """
-    helper = LayerHelper('lrn', **locals())
-    check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
-    dtype = helper.input_dtype()
-    input_shape = input.shape
-    dims = len(input_shape)
-
-    if dims != 4:
-        raise ValueError(
-            "Input's dimension size of Op(lrn) must be 4, but received %d."
-            % (dims)
-        )
-    if data_format not in ['NCHW', 'NHWC']:
-        raise ValueError(
-            "Attr(data_format) of Op(lrn) got wrong value: received "
-            + data_format
-            + " but only NCHW or NHWC supported."
-        )
-
-    mid_out = helper.create_variable_for_type_inference(
-        dtype=dtype, stop_gradient=True
-    )
-    lrn_out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="lrn",
-        inputs={"X": input},
-        outputs={
-            "Out": lrn_out,
-            "MidOut": mid_out,
-        },
-        attrs={
-            "n": n,
-            "k": k,
-            "alpha": alpha,
-            "beta": beta,
-            "data_format": data_format,
-        },
-    )
-
-    return lrn_out
-
-
 def pad(x, paddings, pad_value=0.0, name=None):
     r"""
     :alias_main: paddle.nn.functional.pad
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 621e8090d2..1f2f07a067 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -3339,13 +3339,6 @@ class TestBook(LayerTest):
             )
             return layers.space_to_depth(data, 3)
 
-    def make_lrn(self):
-        with program_guard(
-            fluid.default_main_program(), fluid.default_startup_program()
-        ):
-            data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
-            return layers.lrn(data)
-
     def make_get_places(self):
         with program_guard(
             fluid.default_main_program(), fluid.default_startup_program()
diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py
index 6044628460..521889f53e 100644
--- a/python/paddle/fluid/tests/unittests/test_lrn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py
@@ -106,56 +106,6 @@ class TestLRNOpAttrDataFormat(TestLRNOp):
         self.data_format = 'NHWC'
 
 
-class TestLRNAPI(unittest.TestCase):
-    def test_case(self):
-        data1 = fluid.data(name='data1', shape=[2, 4, 5, 5], dtype='float32')
-        data2 = fluid.data(name='data2', shape=[2, 5, 5, 4], dtype='float32')
-        out1 = fluid.layers.lrn(data1, data_format='NCHW')
-        out2 = fluid.layers.lrn(data2, data_format='NHWC')
-        data1_np = np.random.random((2, 4, 5, 5)).astype("float32")
-        data2_np = np.transpose(data1_np, [0, 2, 3, 1])
-
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-        else:
-            place = core.CPUPlace()
-        exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
-        results = exe.run(
-            fluid.default_main_program(),
-            feed={"data1": data1_np, "data2": data2_np},
-            fetch_list=[out1, out2],
-            return_numpy=True,
-        )
-
-        np.testing.assert_allclose(
-            results[0], np.transpose(results[1], (0, 3, 1, 2)), rtol=1e-05
-        )
-
-    def test_exception(self):
-        input1 = fluid.data(name="input1", shape=[2, 4, 5, 5], dtype="float32")
-        input2 = fluid.data(
-            name="input2", shape=[2, 4, 5, 5, 5], dtype="float32"
-        )
-
-        def _attr_data_fromat():
-            out = fluid.layers.lrn(input1, data_format='NDHW')
-
-        def _input_dim_size():
-            out = fluid.layers.lrn(input2)
-
-        self.assertRaises(ValueError, _attr_data_fromat)
-        self.assertRaises(ValueError, _input_dim_size)
-
-
-class TestLRNOpError(unittest.TestCase):
-    def test_errors(self):
-        with program_guard(Program(), Program()):
-            # the input must be float32
-            in_w = fluid.data(name="in_w", shape=[None, 3, 3, 3], dtype="int64")
-            self.assertRaises(TypeError, fluid.layers.lrn, in_w)
-
-
 class TestLocalResponseNormFAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
-- 
GitLab
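
For call sites that still relied on the removed API, the sketch below shows one way to migrate to the Paddle 2.0 function exercised by the retained TestLocalResponseNormFAPI tests. It assumes paddle.nn.functional.local_response_norm with its 2.x signature (x, size, alpha, beta, k, data_format); exact numerical parity with the old lrn operator should be verified separately.

    import paddle
    import paddle.nn.functional as F

    # 4-D float32 input in NCHW layout, matching the contract of the removed op.
    x = paddle.rand([2, 4, 5, 5], dtype="float32")

    # Old (removed): out = fluid.layers.lrn(data, n=5, k=1.0, alpha=1e-4, beta=0.75)
    # Assumed 2.x mapping: the channel window `n` becomes `size`; k, alpha and beta
    # keep their names and defaults.
    out = F.local_response_norm(
        x, size=5, alpha=1e-4, beta=0.75, k=1.0, data_format="NCHW"
    )

    print(out.shape)  # [2, 4, 5, 5]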