diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 4cbe12698c58ab411cc3da3c1872e026595a20c6..8ad463f2d3ad3dc235a2ab25451eede899123009 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -56,7 +56,6 @@ __all__ = [
     'Dropout',
     'Embedding',
     'GRUUnit',
-    'InstanceNorm',
     'LayerNorm',
     'NCE',
     'PRelu',
@@ -880,157 +879,6 @@ class Linear(layers.Layer):
         return self._helper.append_activation(pre_activation, act=self._act)


-class InstanceNorm(layers.Layer):
-    r"""
-    This interface is used to construct a callable object of the ``InstanceNorm`` class.
-    For more details, refer to code examples.
-
-    Can be used as a normalizer function for convolution or fully_connected operations.
-    The required data format for this layer is one of the following:
-
-    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
-
-    Refer to `Instance Normalization: The Missing Ingredient for Fast Stylization `_
-    for more details.
-
-    :math:`input` is the input features over a mini-batch.
-
-    .. math::
-
-        \\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
-        \\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
-        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
-        \\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
-        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
-        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
-        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
-
-    Note:
-        `H` means height of feature map, `W` means width of feature map.
-
-    Parameters:
-        num_channels(int): Indicate the number of channels of the input ``Tensor``.
-        epsilon(float, optional): A value added to the denominator for
-            numerical stability. Default is 1e-5.
-        param_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
-            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
-            will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
-            If the Initializer of the param_attr is not set, the parameter is initialized
-            one. If it is set to False, will not create param_attr. Default: None.
-        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
-            If it is set to None or one attribute of ParamAttr, instance_norm
-            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
-            If the Initializer of the bias_attr is not set, the bias is initialized zero.
-            If it is set to False, will not create bias_attr. Default: None.
-        dtype(str, optional): Indicate the data type of the input ``Tensor``,
-            which can be float32 or float64. Default: float32.
-
-    Returns:
-        None.
-
-    Examples:
-
-        .. code-block:: python
-
-          import paddle.fluid as fluid
-          from paddle.fluid.dygraph.base import to_variable
-          import numpy as np
-          import paddle
-
-          # x's shape is [1, 3, 1, 2]
-          x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
-          with fluid.dygraph.guard():
-              x = to_variable(x)
-              instanceNorm = paddle.nn.InstanceNorm(3)
-              ret = instanceNorm(x)
-              # ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995]
-              print(ret)
-
-    """
-
-    def __init__(
-        self,
-        num_channels,
-        epsilon=1e-5,
-        param_attr=None,
-        bias_attr=None,
-        dtype='float32',
-    ):
-        super().__init__()
-
-        if param_attr == False or bias_attr == False:
-            assert (
-                bias_attr == param_attr
-            ), "param_attr and bias_attr must be set to False at the same time in InstanceNorm"
-        self._epsilon = epsilon
-        self._param_attr = param_attr
-        self._bias_attr = bias_attr
-        self._dtype = dtype
-
-        if param_attr != False and bias_attr != False:
-            self.scale = self.create_parameter(
-                attr=self._param_attr,
-                shape=[num_channels],
-                dtype=self._dtype,
-                default_initializer=Constant(1.0),
-                is_bias=False,
-            )
-            self.bias = self.create_parameter(
-                attr=self._bias_attr,
-                shape=[num_channels],
-                dtype=self._dtype,
-                default_initializer=Constant(0.0),
-                is_bias=True,
-            )
-        else:
-            self.scale = None
-            self.bias = None
-
-    def forward(self, input):
-        if in_dygraph_mode():
-            out = _C_ops.instance_norm(
-                input, self.scale, self.bias, self._epsilon
-            )
-            return out
-        if _in_legacy_dygraph():
-            out, _, _ = _legacy_C_ops.instance_norm(
-                input, self.scale, self.bias, 'epsilon', self._epsilon
-            )
-            return out
-
-        check_variable_and_dtype(
-            input, 'input', ['float32', 'float64'], "InstanceNorm"
-        )
-
-        attrs = {"epsilon": self._epsilon}
-
-        if self.scale and self.bias:
-            inputs = {"X": [input], "Scale": [self.scale], "Bias": [self.bias]}
-        else:
-            inputs = {"X": [input]}
-
-        saved_mean = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype, stop_gradient=True
-        )
-        saved_variance = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype, stop_gradient=True
-        )
-        instance_norm_out = self._helper.create_variable_for_type_inference(
-            self._dtype
-        )
-
-        outputs = {
-            "Y": [instance_norm_out],
-            "SavedMean": [saved_mean],
-            "SavedVariance": [saved_variance],
-        }
-
-        self._helper.append_op(
-            type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs
-        )
-        return instance_norm_out
-
-
 class BatchNorm(layers.Layer):
     r"""
diff --git a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py
index 1abc4af24cd4f82ef8db6af4892f5a4589b5f83f..285c9f3048d8dedebcfcbf43d07c749d972324e9 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py
@@ -38,7 +38,7 @@ class TestInstanceNorm(unittest.TestCase):

         def compute_v1(x):
             with fluid.dygraph.guard(p):
-                bn = fluid.dygraph.InstanceNorm(shape[1])
+                bn = paddle.nn.InstanceNorm(shape[1])
                 y = bn(fluid.dygraph.to_variable(x))
             return y.numpy()

@@ -61,7 +61,7 @@ class TestInstanceNorm(unittest.TestCase):

         def compute_v1(x_np):
             with program_guard(Program(), Program()):
-                ins = fluid.dygraph.InstanceNorm(shape[1])
+                ins = paddle.nn.InstanceNorm(shape[1])
                 x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py
index 50278505f98d66d13003ffb036eb3623a3ca94a9..ed9e01259e6c54d3550635c0715934140bfd45e4 100644
--- a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py
@@ -281,8 +281,8 @@ class TestElasticNormOp(unittest.TestCase):

         for place in self.places:
             with fluid.dygraph.guard(place):
-                instance_norm = fluid.dygraph.InstanceNorm(
-                    5, param_attr=False, bias_attr=False
+                instance_norm = paddle.nn.InstanceNorm2D(
+                    5, weight_attr=False, bias_attr=False
                 )
                 outputs = instance_norm(to_variable(inputs))
                 np.testing.assert_allclose(
@@ -319,8 +319,8 @@ class TestElasticNormOpCase2(unittest.TestCase):

         for place in self.places:
             with fluid.dygraph.guard(place):
-                instance_norm = fluid.dygraph.InstanceNorm(
-                    3, param_attr=True, bias_attr=True
+                instance_norm = paddle.nn.InstanceNorm2D(
+                    3, weight_attr=True, bias_attr=True
                 )
                 outputs = instance_norm(to_variable(inputs))
                 np.testing.assert_allclose(
diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
index 66bbc3a338714e879cf3da956ff6765d1c780dc6..31719eecfa9dcaacc3a7b9769afb3bb4f7a9837f 100644
--- a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
@@ -69,7 +69,7 @@ class TestInstanceNorm(unittest.TestCase):

         def compute_v1(x):
             with fluid.dygraph.guard(p):
-                bn = fluid.dygraph.InstanceNorm(shape[1])
+                bn = paddle.nn.InstanceNorm2D(shape[1])
                 y = bn(fluid.dygraph.to_variable(x))
             return y.numpy()

@@ -96,7 +96,7 @@ class TestInstanceNorm(unittest.TestCase):

         def compute_v1(x_np):
             with program_guard(Program(), Program()):
-                ins = fluid.dygraph.InstanceNorm(shape[1])
+                ins = paddle.nn.InstanceNorm2D(shape[1])
                 x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 04a937f33706be558fb4f15af540d66f6c7a189a..f5294fd5862726ec58652b1f1ef9df621730b47e 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -1962,7 +1962,7 @@ class TestLayer(LayerTest):
             X = fluid.layers.data(
                 name='X', shape=shape, dtype='float32', append_batch_size=False
             )
-            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
             ret = instanceNorm(X)
             static_ret2 = self.get_static_graph_result(
                 feed={'X': input}, fetch_list=[ret]
@@ -1970,21 +1970,21 @@ class TestLayer(LayerTest):

         with self.dynamic_graph():
             with _test_eager_guard():
-                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                 dy_eager_ret = instanceNorm(base.to_variable(input))
                 dy_eager_rlt_value = dy_eager_ret.numpy()

-            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
             dy_ret = instanceNorm(base.to_variable(input))
             dy_rlt_value = dy_ret.numpy()

         with self.dynamic_graph():
             with _test_eager_guard():
-                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                 dy_eager_ret = instanceNorm(base.to_variable(input))
                 dy_eager_rlt_value2 = dy_eager_ret.numpy()

-            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
             dy_ret = instanceNorm(base.to_variable(input))
             dy_rlt_value2 = dy_ret.numpy()

@@ -1997,7 +1997,7 @@ class TestLayer(LayerTest):
         with self.static_graph():
             # the input of InstanceNorm must be Variable.
             def test_Variable():
-                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                 ret1 = instanceNorm(input)

             self.assertRaises(TypeError, test_Variable)
@@ -2005,7 +2005,7 @@
             # the input dtype of InstanceNorm must be float32 or float64
             def test_type():
                 input = np.random.random(shape).astype('int32')
-                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                 ret2 = instanceNorm(input)

             self.assertRaises(TypeError, test_type)
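
Migration note (not part of the diff above): the updated tests suggest how code that constructed the removed `fluid.dygraph.InstanceNorm(num_channels, param_attr=..., bias_attr=...)` can switch to `paddle.nn.InstanceNorm2D`, which takes `num_features` and `weight_attr` instead. A minimal dygraph sketch under that assumption, reusing the toy input from the removed docstring example:

    import numpy as np
    import paddle

    # Same toy input as the removed docstring example: shape [1, 3, 1, 2] (NCHW).
    x = paddle.to_tensor(
        np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
    )

    # Old (removed): fluid.dygraph.InstanceNorm(3, param_attr=..., bias_attr=...)
    # New: the channel count is passed as num_features; param_attr becomes weight_attr.
    instance_norm = paddle.nn.InstanceNorm2D(num_features=3)
    y = instance_norm(x)
    print(y.shape)  # [1, 3, 1, 2]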