#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, skip_check_grad_ci

import paddle
import paddle.fluid as fluid


def l2_norm(x, axis, epsilon):
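    """NumPy reference: L2-normalize x along `axis`.

    Returns the normalized array and the norm sqrt(sum(x**2) + epsilon),
    computed with keepdims so it broadcasts back over x.
    """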
    x2 = x**2
    s = np.sum(x2, axis=axis, keepdims=True)
    r = np.sqrt(s + epsilon)
    y = x / np.broadcast_to(r, x.shape)
    return y, r


def norm_wrapper(x, axis=1, epsilon=1e-12, is_test=False):
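    """Dygraph wrapper used as the op test's python_api.

    `is_test` is accepted to mirror the static op's attributes but is
    ignored by paddle.nn.functional.normalize.
    """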
    return paddle.nn.functional.normalize(x, axis=axis, epsilon=epsilon)


class TestNormOp(OpTest):
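    """Checks the 'norm' op against the l2_norm NumPy reference above."""
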
    def setUp(self):
        self.op_type = "norm"
        self.python_api = norm_wrapper
        self.init_test_case()
        self.init_dtype()
        x = np.random.random(self.shape).astype(self.dtype)
        y, norm = l2_norm(x, self.axis, self.epsilon)
        self.inputs = {'X': x}
        self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
        self.outputs = {'Out': y, 'Norm': norm}
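        # Only 'Out' is compared by the dygraph check; 'Norm' is an
        # auxiliary output of the static op.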
        self.python_out_sig = ['Out']

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def init_test_case(self):
        self.shape = [2, 3, 4, 5]
        self.axis = 1
        self.epsilon = 1e-8

    def init_dtype(self):
        self.dtype = "float64"


class TestNormOp2(TestNormOp):
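    # Same as the base test, but normalizes along the batch axis (axis=0).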
    def init_test_case(self):
        self.shape = [5, 3, 9, 7]
        self.axis = 0
        self.epsilon = 1e-8


class TestNormOp3(TestNormOp):
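    # Exercises negative axis indexing (axis=-1, the last dimension).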
    def init_test_case(self):
        self.shape = [5, 3, 2, 7]
        self.axis = -1
        self.epsilon = 1e-8


@skip_check_grad_ci(
    reason="'check_grad' on large inputs is too slow, "
    + "however it is desirable to cover the forward pass"
)
class TestNormOp4(TestNormOp):
    def init_test_case(self):
        self.shape = [128, 1024, 14, 14]
        self.axis = 2
        self.epsilon = 1e-8

    def test_check_grad(self):
        pass


@skip_check_grad_ci(
    reason="'check_grad' on large inputs is too slow, "
    + "however it is desirable to cover the forward pass"
)
class TestNormOp5(TestNormOp):
    def init_test_case(self):
        self.shape = [2048, 2048]
        self.axis = 1
        self.epsilon = 1e-8

    def test_check_grad(self):
        pass


class TestNormOp6(TestNormOp):
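    # float32 run; the gradient check needs a looser relative tolerance.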
    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


@unittest.skipIf(
    not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestNormOp7(TestNormOp):
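    # float16 kernels are CUDA-only, so both checks run on a GPU place
    # with loosened tolerances.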
    def init_dtype(self):
        self.dtype = "float16"

    def test_check_output(self):
        self.check_output_with_place(fluid.core.CUDAPlace(0), atol=5e-2)

    def test_check_grad(self):
        self.check_grad_with_place(
            fluid.core.CUDAPlace(0), ['X'], 'Out', max_relative_error=0.05
        )


@skip_check_grad_ci(reason="skip check grad for test mode.")
class TestNormTestOp(OpTest):
    def setUp(self):
        self.op_type = "norm"
        self.python_api = norm_wrapper
        self.init_test_case()
        x = np.random.random(self.shape).astype("float64")
        y, norm = l2_norm(x, self.axis, self.epsilon)
        self.inputs = {'X': x}
        self.attrs = {
            'epsilon': self.epsilon,
            'axis': int(self.axis),
            'is_test': True,
        }
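        # In test mode only 'Out' is checked; 'Norm' is not an expected output.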
        self.outputs = {'Out': y}
        self.python_out_sig = ['Out']

    def test_check_output(self):
        # the dynamic graph check only supports float tensors
        self.check_output(check_dygraph=True)

    def test_check_grad(self):
        pass

    def init_test_case(self):
        self.shape = [2, 3, 4, 5]
        self.axis = 1
        self.epsilon = 1e-8


class API_NormTest(unittest.TestCase):
    def test_errors(self):
        with fluid.program_guard(fluid.Program()):

            def test_norm_x_type():
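                # normalize only accepts floating-point tensors, so an
                # int64 input is expected to raise TypeError.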
                data = fluid.data(name="x", shape=[3, 3], dtype="int64")
                out = paddle.nn.functional.normalize(data)

            self.assertRaises(TypeError, test_norm_x_type)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()