diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 13a1c8e079d42a71ed5b069ce3ce218698df4dce..06a14295a81d3872d3a07c78494810d8a3a9c9bc 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -5061,7 +5061,7 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
 
             X = paddle.randn(shape=[3, 5], dtype='float64')
             out = paddle.fluid.layers.l2_normalize(X, axis=-1)
-            print(out.numpy())
+            print(out)
 
             # [[ 0.21558504  0.56360189  0.47466096  0.46269539 -0.44326736]
             #  [-0.70602414 -0.52745777  0.37771788 -0.2804768  -0.04449922]
diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py
index af81d15717a7036b5ea9c42e3c75b865754a4cd9..6b424e03cc24389be53039a7530bd4f96c58c30d 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -41,10 +41,10 @@ class TestNormOp(OpTest):
         self.outputs = {'Out': y, 'Norm': norm}
 
     def test_check_output(self):
-        self.check_output(atol=1e-5)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out')
 
     def init_test_case(self):
         self.shape = [2, 3, 4, 5]
@@ -97,6 +97,9 @@ class TestNormOp6(TestNormOp):
     def init_dtype(self):
         self.dtype = "float32"
 
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
+
 
 @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")