#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
from op_test import OpTest


def compare_result(actual, expected):
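    """Compare computed eigenvalues against a reference, accepting either a
    single result (1-D) or a batch of results (2-D, one row per matrix)."""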
    assert actual.ndim == 1 or actual.ndim == 2

    if actual.ndim == 1:
        valid_eigenvalues(actual, expected)
        return

    for batch_actual, batch_expected in zip(actual, expected):
        valid_eigenvalues(batch_actual, batch_expected)


def valid_eigenvalues(actual, expected):
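    """Assert that the max relative error between `actual` and `expected`
    stays within a dtype-dependent tolerance (fp32 vs. fp64)."""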
    FP32_MAX_RELATIVE_ERR = 5e-5
    FP64_MAX_RELATIVE_ERR = 1e-14

    rtol = FP32_MAX_RELATIVE_ERR if actual.dtype == np.single else FP64_MAX_RELATIVE_ERR

    diff = np.abs(expected - actual)
    max_diff = np.max(diff)
    max_ref = np.max(np.abs(expected))
    relative_error = max_diff / max_ref
    np.testing.assert_array_less(relative_error, rtol)


class TestEigvalshOp(OpTest):
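    """Operator-level test: checks the forward eigenvalues and the gradient
    of the eigvalsh op against numpy.linalg.eigh."""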

    def setUp(self):
        paddle.enable_static()
        self.op_type = "eigvalsh"
        # seed before generating inputs so the test matrix is deterministic
        np.random.seed(123)
        self.init_input()
        self.init_config()
        out_w, out_v = np.linalg.eigh(self.x_np, self.UPLO)
        self.inputs = {"X": self.x_np}
        self.attrs = {"UPLO": self.UPLO, "is_test": False}
        self.outputs = {'Eigenvalues': out_w, 'Eigenvectors': out_v}

    def init_config(self):
        self.UPLO = 'L'

    def init_input(self):
        self.x_shape = (10, 10)
        self.x_type = np.float64
        self.x_np = np.random.random(self.x_shape).astype(self.x_type)

    def test_check_output(self):
        # eigenvectors are equivalent up to a sign flip, so skip checking them
        self.check_output(no_check_set=['Eigenvectors'])

    def test_grad(self):
        self.check_grad(["X"], ["Eigenvalues"])


class TestEigvalshUPLOCase(TestEigvalshOp):

    def init_config(self):
        self.UPLO = 'U'


class TestEigvalshGPUCase(unittest.TestCase):

    def setUp(self):
        self.x_shape = [32, 32]
        self.dtype = "float32"
        np.random.seed(123)
        self.x_np = np.random.random(self.x_shape).astype(self.dtype)

    def test_check_output_gpu(self):
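        # only exercised when Paddle is compiled with CUDA; otherwise a no-op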
        if paddle.is_compiled_with_cuda():
            paddle.disable_static(place=paddle.CUDAPlace(0))
            input_real_data = paddle.to_tensor(self.x_np)
            expected_w = np.linalg.eigvalsh(self.x_np)
            actual_w = paddle.linalg.eigvalsh(input_real_data)
            compare_result(actual_w.numpy(), expected_w)


class TestEigvalshAPI(unittest.TestCase):
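    """API-level tests for paddle.linalg.eigvalsh: static graph, dynamic
    graph, and gradient checks with real and complex Hermitian inputs."""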

    def setUp(self):
        self.dtype = "float32"
        self.UPLO = 'L'
        self.rtol = 1e-5  # tolerance for test_eigvalsh_grad
        self.atol = 1e-5  # tolerance for test_eigvalsh_grad
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        np.random.seed(123)
        self.init_input_shape()
        self.init_input_data()

    def init_input_shape(self):
        self.x_shape = [5, 5]

    def init_input_data(self):
        self.real_data = np.random.random(self.x_shape).astype(self.dtype)
        complex_data = np.random.random(self.x_shape).astype(
            self.dtype) + 1J * np.random.random(self.x_shape).astype(self.dtype)
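        # axis permutation that swaps the last two dims (a batched matrix transpose)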
        self.trans_dims = list(range(len(self.x_shape) - 2)) + [
            len(self.x_shape) - 1, len(self.x_shape) - 2
        ]
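        # symmetrize as (A + A^H) / 2 so the input is Hermitian with real eigenvalues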
        self.complex_symm = np.divide(
            complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2)

    def check_static_float_result(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            input_x = paddle.static.data('input_x',
                                         shape=self.x_shape,
                                         dtype=self.dtype)
            output_w = paddle.linalg.eigvalsh(input_x)
            exe = paddle.static.Executor(self.place)
            actual_w = exe.run(main_prog,
                               feed={"input_x": self.real_data},
                               fetch_list=[output_w])
            expected_w = np.linalg.eigvalsh(self.real_data)
            compare_result(actual_w[0], expected_w)

    def check_static_complex_result(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
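            # pick the complex dtype that matches the configured real precision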
            x_dtype = np.complex64 if self.dtype == "float32" else np.complex128
            input_x = paddle.static.data('input_x',
                                         shape=self.x_shape,
                                         dtype=x_dtype)
            output_w = paddle.linalg.eigvalsh(input_x)
            exe = paddle.static.Executor(self.place)
            actual_w = exe.run(main_prog,
                               feed={"input_x": self.complex_symm},
                               fetch_list=[output_w])
            expected_w = np.linalg.eigvalsh(self.complex_symm)
            compare_result(actual_w[0], expected_w)

    def test_in_static_mode(self):
        paddle.enable_static()
        self.check_static_float_result()
        self.check_static_complex_result()

    def test_in_dynamic_mode(self):
        paddle.disable_static(self.place)
        input_real_data = paddle.to_tensor(self.real_data)
        expected_w = np.linalg.eigvalsh(self.real_data)
        actual_w = paddle.linalg.eigvalsh(input_real_data)
        compare_result(actual_w.numpy(), expected_w)

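        # repeat the check with the complex Hermitian input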
        input_complex_symm = paddle.to_tensor(self.complex_symm)
        expected_w = np.linalg.eigvalsh(self.complex_symm)
        actual_w = paddle.linalg.eigvalsh(input_complex_symm)
        compare_result(actual_w.numpy(), expected_w)

    def test_eigvalsh_grad(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.complex_symm, stop_gradient=False)
        w = paddle.linalg.eigvalsh(x)
        w.sum().backward()
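        # the gradient w.r.t. a Hermitian input should itself be Hermitian,
        # so its magnitude must be symmetric under conjugate transpose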
        np.testing.assert_allclose(abs(x.grad.numpy()),
                                   abs(x.grad.numpy().conj().transpose(
                                       self.trans_dims)),
                                   rtol=self.rtol,
                                   atol=self.atol)


class TestEigvalshBatchAPI(TestEigvalshAPI):
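    """Runs the TestEigvalshAPI checks on a batch of 5x5 matrices."""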

    def init_input_shape(self):
        self.x_shape = [2, 5, 5]


class TestEigvalshAPIError(unittest.TestCase):

    def test_error(self):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            # input matrix must have at least 2 dimensions
            input_x = paddle.static.data(name='x_1',
                                         shape=[12],
                                         dtype='float32')
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x)

            # input matrix must be a square matrix
            input_x = paddle.static.data(name='x_2',
                                         shape=[12, 32],
                                         dtype='float32')
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x)

            # uplo must be either 'L' or 'U'
            input_x = paddle.static.data(name='x_3',
                                         shape=[4, 4],
                                         dtype="float32")
            uplo = 'R'
            self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x, uplo)

            # input dtype cannot be an integer type
            input_x = paddle.static.data(name='x_4',
                                         shape=[4, 4],
                                         dtype="int32")
            self.assertRaises(TypeError, paddle.linalg.eigvalsh, input_x)


if __name__ == "__main__":
    unittest.main()