Unverified · Commit 87388d59, authored by Vvsmile, committed by GitHub

remove lrn which is not used in paddle 2.0 (#47945)

Parent d79eda71
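In Paddle 2.x the removed fluid.layers.lrn is superseded by paddle.nn.functional.local_response_norm and the paddle.nn.LocalResponseNorm layer. A minimal migration sketch, assuming Paddle 2.x, where `size` plays the role of the old `n` argument:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 112, 112], dtype="float32")
    # functional form; `size` replaces the old `n` argument
    y = F.local_response_norm(x, size=5, alpha=1e-4, beta=0.75, k=1.0)
    # equivalent layer form
    lrn = paddle.nn.LocalResponseNorm(size=5)
    y2 = lrn(x)
    print(y.shape)  # [2, 3, 112, 112]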
...
@@ -104,7 +104,6 @@ __all__ = [
    'unsqueeze',
    'lod_reset',
    'lod_append',
    'lrn',
    'pad',
    'label_smooth',
    'roi_pool',
...
@@ -6831,103 +6830,6 @@ def lod_append(x, level):
    return out

def lrn(
    input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, data_format='NCHW'
):
    r"""
    :alias_main: paddle.nn.functional.lrn
    :alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
    :old_api: paddle.fluid.layers.lrn

    This operator implements the Local Response Normalization Layer.
    This layer performs a type of "lateral inhibition" by normalizing over local input regions.
    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

    The formula is as follows:

    .. math::

        Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + n/2)}_{j = \max(0, i - n/2)}(Input(j, x, y))^2\right)^{\beta}

    In the above equation:

    - :math:`n` : The number of channels to sum over.
    - :math:`k` : The offset (to avoid division by zero).
    - :math:`\alpha` : The scaling parameter.
    - :math:`\beta` : The exponent parameter.

    Args:
        input (Variable): Input feature, a 4-D Tensor with the shape of [N, C, H, W] or
            [N, H, W, C], where N is the batch size, C is the number of input channels,
            H is the height, and W is the width. The data type is float32. The rank of
            this tensor must be 4, otherwise a ValueError is raised.
        n (int, optional): The number of channels to sum over. Default: 5
        k (float, optional): An offset, positive. Default: 1.0
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4
        beta (float, optional): The exponent, positive. Default: 0.75
        name (str, optional): The default value is None. Normally there is no need for the
            user to set this property. For more information, please refer to :ref:`api_guide_Name`
        data_format (str, optional): Specify the data format of the input, and the data format
            of the output will be consistent with that of the input. An optional string from:
            `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        Variable: A tensor variable storing the transformation result with the same shape and data type as input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(
                name="data", shape=[None, 3, 112, 112], dtype="float32")
            lrn = fluid.layers.lrn(input=data)
            print(lrn.shape)  # [-1, 3, 112, 112]
            print(lrn.dtype)  # float32
    """
    helper = LayerHelper('lrn', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
    dtype = helper.input_dtype()
    input_shape = input.shape
    dims = len(input_shape)

    if dims != 4:
        raise ValueError(
            "Input's dimension size of Op(lrn) must be 4, but received %d."
            % (dims)
        )
    if data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Attr(data_format) of Op(lrn) got wrong value: received "
            + data_format
            + " but only NCHW or NHWC supported."
        )

    mid_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True
    )
    lrn_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="lrn",
        inputs={"X": input},
        outputs={
            "Out": lrn_out,
            "MidOut": mid_out,
        },
        attrs={
            "n": n,
            "k": k,
            "alpha": alpha,
            "beta": beta,
            "data_format": data_format,
        },
    )

    return lrn_out
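
For reference, the computation this operator performs can be written directly in NumPy. A sketch of the docstring formula for NCHW input, using floor(n/2) channels on each side of channel i; this mirrors the math above, not the actual C++ kernel:

    import numpy as np

    def lrn_reference(x, n=5, k=1.0, alpha=1e-4, beta=0.75):
        # x: float32 array of shape [N, C, H, W]
        _, C, _, _ = x.shape
        out = np.empty_like(x)
        for i in range(C):
            lo = max(0, i - n // 2)
            hi = min(C - 1, i + n // 2)
            # sum of squares over the local channel window [lo, hi]
            sq = np.sum(x[:, lo:hi + 1] ** 2, axis=1)
            out[:, i] = x[:, i] / (k + alpha * sq) ** beta
        return out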

def pad(x, paddings, pad_value=0.0, name=None):
    r"""
    :alias_main: paddle.nn.functional.pad
...
...
@@ -3339,13 +3339,6 @@ class TestBook(LayerTest):
        )
        return layers.space_to_depth(data, 3)
    def make_lrn(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
            return layers.lrn(data)
    def make_get_places(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
...
...
@@ -106,56 +106,6 @@ class TestLRNOpAttrDataFormat(TestLRNOp):
        self.data_format = 'NHWC'

class TestLRNAPI(unittest.TestCase):
    def test_case(self):
        data1 = fluid.data(name='data1', shape=[2, 4, 5, 5], dtype='float32')
        data2 = fluid.data(name='data2', shape=[2, 5, 5, 4], dtype='float32')
        out1 = fluid.layers.lrn(data1, data_format='NCHW')
        out2 = fluid.layers.lrn(data2, data_format='NHWC')
        data1_np = np.random.random((2, 4, 5, 5)).astype("float32")
        data2_np = np.transpose(data1_np, [0, 2, 3, 1])

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(
            fluid.default_main_program(),
            feed={"data1": data1_np, "data2": data2_np},
            fetch_list=[out1, out2],
            return_numpy=True,
        )

        np.testing.assert_allclose(
            results[0], np.transpose(results[1], (0, 3, 1, 2)), rtol=1e-05
        )

    def test_exception(self):
        input1 = fluid.data(name="input1", shape=[2, 4, 5, 5], dtype="float32")
        input2 = fluid.data(
            name="input2", shape=[2, 4, 5, 5, 5], dtype="float32"
        )

        def _attr_data_format():
            out = fluid.layers.lrn(input1, data_format='NDHW')

        def _input_dim_size():
            out = fluid.layers.lrn(input2)

        self.assertRaises(ValueError, _attr_data_format)
        self.assertRaises(ValueError, _input_dim_size)


class TestLRNOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input dtype must be float32
            in_w = fluid.data(name="in_w", shape=[None, 3, 3, 3], dtype="int64")
            self.assertRaises(TypeError, fluid.layers.lrn, in_w)
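
The NCHW/NHWC consistency check in the deleted TestLRNAPI.test_case can be reproduced against the 2.x API in dynamic-graph mode. A sketch, assuming paddle.nn.functional.local_response_norm honors the same data_format semantics:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    x_nchw = paddle.to_tensor(np.random.random((2, 4, 5, 5)).astype("float32"))
    x_nhwc = paddle.transpose(x_nchw, [0, 2, 3, 1])
    out1 = F.local_response_norm(x_nchw, size=5, data_format="NCHW")
    out2 = F.local_response_norm(x_nhwc, size=5, data_format="NHWC")
    # the two layouts should agree up to a transpose
    np.testing.assert_allclose(
        out1.numpy(), np.transpose(out2.numpy(), (0, 3, 1, 2)), rtol=1e-05
    )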

class TestLocalResponseNormFAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
...