From d39e789882877dc11a81c1eef6b025e5a50aef05 Mon Sep 17 00:00:00 2001
From: LutaoChu <30695251+LutaoChu@users.noreply.github.com>
Date: Fri, 28 Aug 2020 11:56:48 +0800
Subject: [PATCH] For normalize op, remove special treatment of one-dimensional input, add check for one-dimensional input (#26747)

For normalize op, remove special treatment of one-dimensional input, add check for one-dimensional input
---
 .../fluid/tests/unittests/test_normalize.py | 15 ++++++++-------
 python/paddle/nn/functional/norm.py         | 11 ++++++-----
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py
index 6595a29b24a..614e0e89761 100644
--- a/python/paddle/fluid/tests/unittests/test_normalize.py
+++ b/python/paddle/fluid/tests/unittests/test_normalize.py
@@ -23,8 +23,6 @@ import numpy as np
 
 
 def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True):
-    if len(x.shape) == 1:
-        axis = 0
     xp = np.power(np.abs(x), p)
     s = np.sum(xp, axis=axis, keepdims=keepdims)
     r = np.maximum(np.power(s, 1.0 / p), epsilon)
@@ -38,10 +36,10 @@ class TestNNFunctionalNormalize(unittest.TestCase):
         self.expected0 = p_normalize(self.input_np)
         self.expected1 = p_normalize(self.input_np, p=1.5)
         self.expected2 = p_normalize(self.input_np, axis=0)
-        self.expected3 = p_normalize(self.input_np2)
+        self.expected3 = p_normalize(self.input_np2, axis=0)
 
     def run_imperative(self):
-        x = paddle.to_variable(self.input_np)
+        x = paddle.to_tensor(self.input_np)
         y = F.normalize(x)
         self.assertTrue(np.allclose(y.numpy(), self.expected0))
 
@@ -51,10 +49,12 @@ class TestNNFunctionalNormalize(unittest.TestCase):
         y = F.normalize(x, axis=0)
         self.assertTrue(np.allclose(y.numpy(), self.expected2))
 
-        x = paddle.to_variable(self.input_np2)
-        y = F.normalize(x)
+        x = paddle.to_tensor(self.input_np2)
+        y = F.normalize(x, axis=0)
         self.assertTrue(np.allclose(y.numpy(), self.expected3))
 
+        self.assertRaises(BaseException, F.normalize, x)
+
     def run_static(self, use_gpu=False):
         x = paddle.data(name='input', shape=[10, 10], dtype='float32')
         x2 = paddle.data(name='input2', shape=[2], dtype='float32')
@@ -62,7 +62,7 @@ class TestNNFunctionalNormalize(unittest.TestCase):
         result1 = F.normalize(x, p=1.5)
         result2 = F.normalize(x, axis=0)
         result3 = F.normalize(x, name='aaa')
-        result4 = F.normalize(x2)
+        result4 = F.normalize(x2, axis=0)
 
         place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
         exe = fluid.Executor(place)
@@ -77,6 +77,7 @@ class TestNNFunctionalNormalize(unittest.TestCase):
         self.assertTrue(np.allclose(static_result[2], self.expected2))
         self.assertTrue('aaa' in result3.name)
         self.assertTrue(np.allclose(static_result[3], self.expected3))
+        self.assertRaises(ValueError, F.normalize, x2)
 
     def test_cpu(self):
         paddle.disable_static(place=paddle.fluid.CPUPlace())
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index 13e86e5712a..e9c1a21ecff 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -54,8 +54,7 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
     Args:
         x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
         p (float|int, optional): The exponent value in the norm formulation. Default: 2
-        axis (int, optional): The axis on which to apply normalization. If ``x`` is 1-D tensor, ``axis`` is fixed to 0. If `axis < 0`, \
-            the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.
+        axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.
         epsilon (float, optional): Small float added to denominator to avoid dividing by zero. Default is 1e-12.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
@@ -72,7 +71,7 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
 
             paddle.disable_static()
             x = np.arange(6, dtype=np.float32).reshape(2,3)
-            x = paddle.to_variable(x)
+            x = paddle.to_tensor(x)
             y = F.normalize(x)
             print(y.numpy())
             # [[0.         0.4472136  0.8944272 ]
@@ -88,8 +87,6 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
             # [[0.         0.24253564 0.37139067]
             #  [1.         0.97014254 0.9284767 ]]
     """
-    if len(x.shape) == 1:
-        axis = 0
     if in_dygraph_mode():
         eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
         out = core.ops.p_norm(x, 'axis', axis, 'porder',
@@ -99,6 +96,10 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
     check_type(p, 'p', (float, int), 'normalize')
     check_type(axis, 'axis', (int), 'normalize')
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'normalize')
+    if len(x.shape) == 1 and axis != 0 and axis != -1:
+        raise ValueError(
+            "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".
+            format(axis))
 
     attrs = {
         'axis': axis,
-- 
GitLab
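
A minimal usage sketch of the behavior change in this patch, based on the updated tests and docstring above (the tensor values and variable names are illustrative, not part of the patch): F.normalize no longer silently switches a 1-D input to axis=0, so the caller must pass axis=0 or axis=-1, and the default axis=1 now fails for 1-D tensors.

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()

    x_1d = paddle.to_tensor(np.array([0.3, 0.4], dtype=np.float32))

    # Explicit in-range axis: the 1-D tensor is L2-normalized as before.
    print(F.normalize(x_1d, axis=0).numpy())  # [0.6 0.8]

    # Default axis=1 is out of range for a 1-D tensor. The new check in
    # norm.py raises ValueError in static-graph mode; in dygraph mode the
    # underlying p_norm op rejects the axis instead, which is why
    # run_imperative only asserts BaseException.
    try:
        F.normalize(x_1d)
    except Exception as e:
        print(type(e).__name__)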