#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
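
"""Unit tests for paddle.linalg.eigvalsh.

eigvalsh computes the eigenvalues of a real-symmetric or complex-Hermitian
matrix, reading only the triangle selected by UPLO ('L' by default). A
minimal usage sketch:

    import paddle

    x = paddle.to_tensor([[1.0, -2.0], [-2.0, 3.0]])
    w = paddle.linalg.eigvalsh(x)  # 1-D tensor of eigenvalues, ascending
"""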

import unittest
import numpy as np
import paddle
from op_test import OpTest


def compare_result(actual, expected):
    """Compare eigenvalues against a reference, for single (1-D) or batched (2-D) results."""
    assert actual.ndim == 1 or actual.ndim == 2

    if actual.ndim == 1:
        validate_eigenvalues(actual, expected)
        return

    for batch_actual, batch_expected in zip(actual, expected):
        validate_eigenvalues(batch_actual, batch_expected)

def validate_eigenvalues(actual, expected):
    """Assert that the maximum relative error stays within the dtype's tolerance."""
    FP32_MAX_RELATIVE_ERR = 5e-5
    FP64_MAX_RELATIVE_ERR = 1e-14

    rtol = (
        FP32_MAX_RELATIVE_ERR
        if actual.dtype == np.single
        else FP64_MAX_RELATIVE_ERR
    )

    diff = np.abs(expected - actual)
    max_diff = np.max(diff)
    max_ref = np.max(np.abs(expected))
    relative_error = max_diff / max_ref
    np.testing.assert_array_less(relative_error, rtol)


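# Note: paddle.linalg.eigvalsh and the np.linalg.eigh reference both read only
# the triangle selected by UPLO, so a random square matrix is valid test input
# even though it is not symmetric as stored.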
class TestEigvalshOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "eigvalsh"
        self.python_api = paddle.linalg.eigvalsh
        self.python_out_sig = ['Eigenvalues']
        np.random.seed(123)  # seed before generating input so the test data is reproducible
        self.init_input()
        self.init_config()
        out_w, out_v = np.linalg.eigh(self.x_np, self.UPLO)
        self.inputs = {"X": self.x_np}
        self.attrs = {"UPLO": self.UPLO, "is_test": False}
        self.outputs = {'Eigenvalues': out_w, 'Eigenvectors': out_v}

    def init_config(self):
        self.UPLO = 'L'

    def init_input(self):
        self.x_shape = (10, 10)
        self.x_type = np.float64
        self.x_np = np.random.random(self.x_shape).astype(self.x_type)

    def test_check_output(self):
        # An eigenvector and its negation are equivalent, so skip checking them.
        self.check_output(no_check_set=['Eigenvectors'], check_eager=True)

    def test_grad(self):
        self.check_grad(["X"], ["Eigenvalues"], check_eager=True)


class TestEigvalshUPLOCase(TestEigvalshOp):
    def init_config(self):
        self.UPLO = 'U'


class TestEigvalshGPUCase(unittest.TestCase):
    def setUp(self):
        self.x_shape = [32, 32]
        self.dtype = "float32"
        np.random.seed(123)
        self.x_np = np.random.random(self.x_shape).astype(self.dtype)

    def test_check_output_gpu(self):
        if paddle.is_compiled_with_cuda():
            paddle.disable_static(place=paddle.CUDAPlace(0))
            input_real_data = paddle.to_tensor(self.x_np)
            expected_w = np.linalg.eigvalsh(self.x_np)
            actual_w = paddle.linalg.eigvalsh(input_real_data)
            compare_result(actual_w.numpy(), expected_w)


class TestEigvalshAPI(unittest.TestCase):
    def setUp(self):
        self.dtype = "float32"
        self.UPLO = 'L'
        self.rtol = 1e-5  # tolerance used in test_eigvalsh_grad
        self.atol = 1e-5  # tolerance used in test_eigvalsh_grad
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        np.random.seed(123)
        self.init_input_shape()
        self.init_input_data()

    def init_input_shape(self):
        self.x_shape = [5, 5]

    def init_input_data(self):
        self.real_data = np.random.random(self.x_shape).astype(self.dtype)
        complex_data = np.random.random(self.x_shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.x_shape).astype(self.dtype)
        self.trans_dims = list(range(len(self.x_shape) - 2)) + [
            len(self.x_shape) - 1,
            len(self.x_shape) - 2,
        ]
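        # Symmetrize: (A + A^H) / 2 is Hermitian, so its eigenvalues are real.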
        self.complex_symm = np.divide(
            complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2
        )

    def check_static_float_result(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            input_x = paddle.static.data(
                'input_x', shape=self.x_shape, dtype=self.dtype
            )
            output_w = paddle.linalg.eigvalsh(input_x)
            exe = paddle.static.Executor(self.place)
            actual_w = exe.run(
                main_prog,
                feed={"input_x": self.real_data},
                fetch_list=[output_w],
            )

            expected_w = np.linalg.eigvalsh(self.real_data)
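            # exe.run returns a list of fetched numpy arrays; entry 0 holds
            # the eigenvalues.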
            compare_result(actual_w[0], expected_w)

    def check_static_complex_result(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            x_dtype = np.complex64 if self.dtype == "float32" else np.complex128
            input_x = paddle.static.data(
                'input_x', shape=self.x_shape, dtype=x_dtype
            )
            output_w = paddle.linalg.eigvalsh(input_x)
            exe = paddle.static.Executor(self.place)
            actual_w = exe.run(
                main_prog,
                feed={"input_x": self.complex_symm},
                fetch_list=[output_w],
            )
            expected_w = np.linalg.eigvalsh(self.complex_symm)
            compare_result(actual_w[0], expected_w)

    def test_in_static_mode(self):
        paddle.enable_static()
        self.check_static_float_result()
        self.check_static_complex_result()

    def test_in_dynamic_mode(self):
        paddle.disable_static(self.place)
        input_real_data = paddle.to_tensor(self.real_data)
        expected_w = np.linalg.eigvalsh(self.real_data)
        actual_w = paddle.linalg.eigvalsh(input_real_data)
        compare_result(actual_w.numpy(), expected_w)

        input_complex_symm = paddle.to_tensor(self.complex_symm)
        expected_w = np.linalg.eigvalsh(self.complex_symm)
        actual_w = paddle.linalg.eigvalsh(input_complex_symm)
        compare_result(actual_w.numpy(), expected_w)

    def test_eigvalsh_grad(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.complex_symm, stop_gradient=False)
        w = paddle.linalg.eigvalsh(x)
        (w.sum()).backward()
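        # The gradient with respect to a Hermitian input should equal its own
        # conjugate transpose in element-wise magnitude.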
        np.testing.assert_allclose(
            abs(x.grad.numpy()),
            abs(x.grad.numpy().conj().transpose(self.trans_dims)),
            rtol=self.rtol,
            atol=self.atol,
        )


class TestEigvalshBatchAPI(TestEigvalshAPI):
    def init_input_shape(self):
        self.x_shape = [2, 5, 5]


class TestEigvalshAPIError(unittest.TestCase):
    def test_error(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            # the input must have at least 2 dimensions
            input_x = paddle.static.data(
                name='x_1', shape=[12], dtype='float32'
            )
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x)

            # the input must be a square matrix
            input_x = paddle.static.data(
                name='x_2', shape=[12, 32], dtype='float32'
            )
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x)

            # UPLO must be either 'L' or 'U'
            input_x = paddle.static.data(
                name='x_3', shape=[4, 4], dtype="float32"
            )
            uplo = 'R'
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x, uplo)

            # the input dtype cannot be an integer type
            input_x = paddle.static.data(
                name='x_4', shape=[4, 4], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.linalg.eigvalsh, input_x)


if __name__ == "__main__":
    unittest.main()