#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest

import paddle


def compare_result(actual, expected):
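    """Check computed eigenvalues against a reference, for single (1-D) or batched (2-D) results."""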
    assert actual.ndim == 1 or actual.ndim == 2

    if actual.ndim == 1:
        valid_eigenvalues(actual, expected)
        return

    for batch_actual, batch_expected in zip(actual, expected):
        valid_eigenvalues(batch_actual, batch_expected)


def valid_eigenvalues(actual, expected):
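    """Assert that the relative error between actual and expected eigenvalues stays below a dtype-dependent tolerance."""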

    FP32_MAX_RELATIVE_ERR = 5e-5
    FP64_MAX_RELATIVE_ERR = 1e-14

    rtol = (
        FP32_MAX_RELATIVE_ERR
        if actual.dtype == np.single
        else FP64_MAX_RELATIVE_ERR
    )

    # Use the largest absolute deviation, normalized by the largest reference
    # magnitude, as a scale-free relative error.
    diff = np.abs(expected - actual)
    max_diff = np.max(diff)
    max_ref = np.max(np.abs(expected))
    relative_error = max_diff / max_ref
    np.testing.assert_array_less(relative_error, rtol)


class TestEigvalshOp(OpTest):
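    """Op-level test for eigvalsh; numpy.linalg.eigh provides the reference outputs."""
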
    def setUp(self):
        paddle.enable_static()
        self.op_type = "eigvalsh"
        self.python_api = paddle.linalg.eigvalsh
        self.python_out_sig = ['Eigenvalues']
        np.random.seed(123)  # seed before generating the random input for reproducibility
        self.init_input()
        self.init_config()
        out_w, out_v = np.linalg.eigh(self.x_np, self.UPLO)
        self.inputs = {"X": self.x_np}
        self.attrs = {"UPLO": self.UPLO, "is_test": False}
        self.outputs = {'Eigenvalues': out_w, 'Eigenvectors': out_v}

    def init_config(self):
        self.UPLO = 'L'

    def init_input(self):
        self.x_shape = (10, 10)
        self.x_type = np.float64
        self.x_np = np.random.random(self.x_shape).astype(self.x_type)

    def test_check_output(self):
        # Eigenvectors are equivalent up to sign, so skip checking them here
        self.check_output(no_check_set=['Eigenvectors'])

    def test_grad(self):
        self.check_grad(["X"], ["Eigenvalues"])


class TestEigvalshUPLOCase(TestEigvalshOp):
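    """Same as TestEigvalshOp, but reads the upper triangle (UPLO='U')."""
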
    def init_config(self):
        self.UPLO = 'U'


class TestEigvalshGPUCase(unittest.TestCase):
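    """Runs eigvalsh on GPU in dynamic mode and checks against numpy.linalg.eigvalsh."""
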
    def setUp(self):
        self.x_shape = [32, 32]
        self.dtype = "float32"
        np.random.seed(123)
        self.x_np = np.random.random(self.x_shape).astype(self.dtype)

    def test_check_output_gpu(self):
        if paddle.is_compiled_with_cuda():
            paddle.disable_static(place=paddle.CUDAPlace(0))
            input_real_data = paddle.to_tensor(self.x_np)
            expected_w = np.linalg.eigvalsh(self.x_np)
            actual_w = paddle.linalg.eigvalsh(input_real_data)
            compare_result(actual_w.numpy(), expected_w)


class TestEigvalshAPI(unittest.TestCase):
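    """Tests the paddle.linalg.eigvalsh API in static and dynamic modes, with real and complex inputs."""
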
    def setUp(self):
        self.dtype = "float32"
        self.UPLO = 'L'
        self.rtol = 1e-5  # tolerance for test_eigvalsh_grad
        self.atol = 1e-5  # tolerance for test_eigvalsh_grad
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        np.random.seed(123)
        self.init_input_shape()
        self.init_input_data()

    def init_input_shape(self):
        self.x_shape = [5, 5]

    def init_input_data(self):
        self.real_data = np.random.random(self.x_shape).astype(self.dtype)
        complex_data = np.random.random(self.x_shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.x_shape).astype(self.dtype)
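        # Axis order that swaps the last two dimensions (a batched transpose).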
        self.trans_dims = list(range(len(self.x_shape) - 2)) + [
            len(self.x_shape) - 1,
            len(self.x_shape) - 2,
        ]
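        # Symmetrize: (A + A^H) / 2 is Hermitian, as eigvalsh requires.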
        self.complex_symm = np.divide(
            complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2
        )

    def check_static_float_result(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            input_x = paddle.static.data(
                'input_x', shape=self.x_shape, dtype=self.dtype
            )
            output_w = paddle.linalg.eigvalsh(input_x)
            exe = paddle.static.Executor(self.place)
            actual_w = exe.run(
                main_prog,
                feed={"input_x": self.real_data},
                fetch_list=[output_w],
            )

            expected_w = np.linalg.eigvalsh(self.real_data)
            compare_result(actual_w[0], expected_w)

    def check_static_complex_result(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            x_dtype = np.complex64 if self.dtype == "float32" else np.complex128
            input_x = paddle.static.data(
                'input_x', shape=self.x_shape, dtype=x_dtype
            )
            output_w = paddle.linalg.eigvalsh(input_x)
            exe = paddle.static.Executor(self.place)
            actual_w = exe.run(
                main_prog,
                feed={"input_x": self.complex_symm},
                fetch_list=[output_w],
            )
            expected_w = np.linalg.eigvalsh(self.complex_symm)
            compare_result(actual_w[0], expected_w)

    def test_in_static_mode(self):
        paddle.enable_static()
        self.check_static_float_result()
        self.check_static_complex_result()

    def test_in_dynamic_mode(self):
        paddle.disable_static(self.place)
        input_real_data = paddle.to_tensor(self.real_data)
        expected_w = np.linalg.eigvalsh(self.real_data)
        actual_w = paddle.linalg.eigvalsh(input_real_data)
        compare_result(actual_w.numpy(), expected_w)

        input_complex_symm = paddle.to_tensor(self.complex_symm)
        expected_w = np.linalg.eigvalsh(self.complex_symm)
        actual_w = paddle.linalg.eigvalsh(input_complex_symm)
        compare_result(actual_w.numpy(), expected_w)

    def test_eigvalsh_grad(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.complex_symm, stop_gradient=False)
        w = paddle.linalg.eigvalsh(x)
        (w.sum()).backward()
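        # The gradient of a Hermitian input should itself be Hermitian, so its
        # magnitude must equal that of its conjugate transpose.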
        np.testing.assert_allclose(
            abs(x.grad.numpy()),
            abs(x.grad.numpy().conj().transpose(self.trans_dims)),
            rtol=self.rtol,
            atol=self.atol,
        )


class TestEigvalshBatchAPI(TestEigvalshAPI):
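    """Repeats TestEigvalshAPI with a batched [2, 5, 5] input."""
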
    def init_input_shape(self):
        self.x_shape = [2, 5, 5]


class TestEigvalshAPIError(unittest.TestCase):
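    """Checks that invalid inputs to eigvalsh raise the expected errors."""
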
    def test_error(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            # input matrix must have at least 2 dimensions
            input_x = paddle.static.data(
                name='x_1', shape=[12], dtype='float32'
            )
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x)

            # input must be a square matrix
            input_x = paddle.static.data(
                name='x_2', shape=[12, 32], dtype='float32'
            )
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x)

            # UPLO must be either 'L' or 'U'
            input_x = paddle.static.data(
                name='x_3', shape=[4, 4], dtype="float32"
            )
            uplo = 'R'
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x, uplo)

            # input dtype cannot be an integer type
            input_x = paddle.static.data(
                name='x_4', shape=[4, 4], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.linalg.eigvalsh, input_x)


if __name__ == "__main__":
    unittest.main()