#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci


class TestSvdOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.python_api = paddle.linalg.svd
        self.generate_input()
        self.generate_output()
        self.op_type = "svd"
        assert hasattr(self, "_output_data")
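        # Reference outputs come from numpy (see generate_output), so
        # check_output compares the op against np.linalg.svd.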
        self.inputs = {"X": self._input_data}
        self.attrs = {'full_matrices': self.get_full_matrices_option()}
        self.outputs = {
            "U": self._output_data[0],
            "S": self._output_data[1],
            "VH": self._output_data[2],
        }

    def generate_input(self):
        """Generate self._input_data and self._input_shape."""
        self._input_shape = (100, 1)
        self._input_data = np.random.random(self._input_shape).astype("float64")

    def get_full_matrices_option(self):
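        # Overridden by TestSvdFullMatriceGrad to exercise full_matrices=True.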
        return False

    def generate_output(self):
        assert hasattr(self, "_input_data")
        self._output_data = np.linalg.svd(self._input_data)

    def test_check_output(self):
        self.check_output(no_check_set=['U', 'VH'], check_eager=True)

    def test_svd_forward(self):
        """u @ diag(s) @ vh must reconstruct X."""
        single_input = self._input_data.reshape(
            [-1, self._input_shape[-2], self._input_shape[-1]]
        )[0]
        paddle.disable_static()
        dy_x = paddle.to_tensor(single_input)
        dy_u, dy_s, dy_vt = paddle.linalg.svd(dy_x)
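        # Reconstruct X as U @ diag(S) @ VH; an exact decomposition should
        # match the input up to floating-point error.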
        dy_out_x = dy_u.matmul(paddle.diag(dy_s)).matmul(dy_vt)
        if not (paddle.abs(dy_out_x - dy_x) < 1e-7).all():
            print("EXPECTED:\n", dy_x)
            print("GOT     :\n", dy_out_x)
            raise RuntimeError("Check SVD Failed")
        paddle.enable_static()

    def check_S_grad(self):
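        # numeric_grad_delta is the finite-difference step OpTest uses for
        # the numeric gradient.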
        self.check_grad(
            ['X'], ['S'], numeric_grad_delta=0.001, check_eager=True
        )

    def check_U_grad(self):
        self.check_grad(
            ['X'], ['U'], numeric_grad_delta=0.001, check_eager=True
        )

    def check_V_grad(self):
        self.check_grad(
            ['X'], ['VH'], numeric_grad_delta=0.001, check_eager=True
        )

    def test_check_grad(self):
        """
88 89 90 91 92 93 94 95 96 97 98 99 100 101
        remember the input matrix must be the full rank matrix, otherwise the gradient will stochatic because the u / v 's  (n-k) freedom  vectors
        """
        self.check_S_grad()
        self.check_U_grad()
        self.check_V_grad()


class TestSvdCheckGrad2(TestSvdOp):
    # NOTE(xiongkun03): because we want to construct full-rank matrices,
    #                   we can't use matrices whose numel() > 100 here.

    no_need_check_grad = True

    def generate_input(self):
        """return a deterministic  matrix, the range matrix;
        vander matrix must be a full rank matrix.
104 105
        """
        self._input_shape = (5, 5)
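        # A Vandermonde matrix over distinct points is nonsingular, so every
        # singular value is strictly positive and the gradient is well defined.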
        self._input_data = (
            np.vander([2, 3, 4, 5, 6])
            .astype("float64")
            .reshape(self._input_shape)
        )


class TestSvdNormalMatrixSmall(TestSvdCheckGrad2):
    def generate_input(self):
        """small matrix SVD."""
        self._input_shape = (1, 1)
        self._input_data = np.random.random(self._input_shape).astype("float64")


class TestSvdNormalMatrix6x3(TestSvdCheckGrad2):
    def generate_input(self):
        """return a deterministic  matrix, the range matrix;
        vander matrix must be a full rank matrix.
124 125
        """
        self._input_shape = (6, 3)
        self._input_data = np.array(
            [
                [1.0, 2.0, 3.0],
                [0.0, 1.0, 5.0],
                [0.0, 0.0, 6.0],
                [2.0, 4.0, 9.0],
                [3.0, 6.0, 8.0],
                [3.0, 1.0, 0.0],
            ]
        ).astype("float64")


class TestSvdNormalMatrix3x6(TestSvdCheckGrad2):
    def generate_input(self):
        """return a deterministic  matrix, the range matrix;
        vander matrix must be a full rank matrix.
142 143
        """
        self._input_shape = (3, 6)
        self._input_data = np.array(
            [
                [1.0, 2.0, 3.0],
                [0.0, 1.0, 5.0],
                [0.0, 0.0, 6.0],
                [2.0, 4.0, 9.0],
                [3.0, 6.0, 8.0],
                [3.0, 1.0, 0.0],
            ]
        ).astype("float64")
        self._input_data = self._input_data.transpose((-1, -2))


class TestSvdNormalMatrix6x3Batched(TestSvdOp):
    def generate_input(self):
        self._input_shape = (10, 6, 3)
        self._input_data = np.array(
            [
                [1.0, 2.0, 3.0],
                [0.0, 1.0, 5.0],
                [0.0, 0.0, 6.0],
                [2.0, 4.0, 9.0],
                [3.0, 6.0, 8.0],
                [3.0, 1.0, 0.0],
            ]
        ).astype("float64")
        self._input_data = np.stack([self._input_data] * 10, axis=0)

    def test_svd_forward(self):
        """test_svd_forward does not support batched input, so it is disabled."""
        pass


class TestSvdNormalMatrix3x6Batched(TestSvdOp):
    def generate_input(self):
        """return a deterministic  matrix, the range matrix;
        vander matrix must be a full rank matrix.
181 182
        """
        self._input_shape = (10, 3, 6)
        self._input_data = np.array(
            [
                [1.0, 2.0, 3.0],
                [0.0, 1.0, 5.0],
                [0.0, 0.0, 6.0],
                [2.0, 4.0, 9.0],
                [3.0, 6.0, 8.0],
                [3.0, 1.0, 0.0],
            ]
        ).astype("float64")
        self._input_data = self._input_data.transpose((-1, -2))
        self._input_data = np.stack([self._input_data] * 10, axis=0)

    def test_svd_forward(self):
        """test_svd_forward does not support batched input, so it is disabled."""
        pass


class TestSvdNormalMatrix3x3x3x6Batched(TestSvdOp):
    def generate_input(self):
        """return a deterministic  matrix, the range matrix;
        vander matrix must be a full rank matrix.
205 206
        """
        self._input_shape = (3, 3, 3, 6)
        self._input_data = np.array(
            [
                [1.0, 2.0, 3.0],
                [0.0, 1.0, 5.0],
                [0.0, 0.0, 6.0],
                [2.0, 4.0, 9.0],
                [3.0, 6.0, 8.0],
                [3.0, 1.0, 0.0],
            ]
        ).astype("float64")
        self._input_data = self._input_data.transpose((-1, -2))
        self._input_data = np.stack(
            [self._input_data, self._input_data, self._input_data], axis=0
        )
        self._input_data = np.stack(
            [self._input_data, self._input_data, self._input_data], axis=0
        )

    def test_svd_forward(self):
        """test_svd_forward does not support batched input, so it is disabled."""
        pass


@skip_check_grad_ci(
    reason="'check_grad' on large inputs is too slow, "
    + "however it is desirable to cover the forward pass"
)
class TestSvdNormalMatrixBig(TestSvdOp):
    def generate_input(self):
        """big matrix SVD."""
        self._input_shape = (2, 200, 300)
        self._input_data = np.random.random(self._input_shape).astype("float64")

    def test_svd_forward(self):
        """test_svd_forward does not support batched input, so it is disabled."""
        pass

    def test_check_grad(self):
        pass


class TestSvdNormalMatrixBig2(TestSvdOp):
    def generate_input(self):
        """big matrix SVD."""
        self._input_shape = (1, 100)
        self._input_data = np.random.random(self._input_shape).astype("float64")


class TestSvdNormalMatrixFullMatrices(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()

    def tearDown(self):
        paddle.enable_static()

    def test_full_matrices(self):
        mat_shape = (2, 3)
        mat = np.random.random(mat_shape).astype("float64")
        x = paddle.to_tensor(mat)
        u, s, vh = paddle.linalg.svd(x, full_matrices=True)
        assert u.shape == [2, 2]
        assert vh.shape == [3, 3]
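        # With full_matrices=True, u is (2, 2) and vh is (3, 3); only the
        # first 2 rows of vh carry signal, so slice vh before reconstructing.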
        x_recover = u.matmul(paddle.diag(s)).matmul(vh[0:2])
        if (paddle.abs(x_recover - x) > 1e-4).any():
            raise RuntimeError("mat can't be recovered\n")


class TestSvdFullMatriceGrad(TestSvdNormalMatrix6x3):
    def get_full_matrices_option(self):
        return True

    def test_svd_forward(self):
        """test_svd_forward does not support full_matrices=True, so it is disabled."""
        pass

    def test_check_grad(self):
        """
284 285 286
        remember the input matrix must be the full rank matrix, otherwise the gradient will stochatic because the u / v 's  (n-k) freedom  vectors
        """
        self.check_S_grad()
        # self.check_U_grad()  # don't check the U gradient: U has free (non-unique) vectors
        self.check_V_grad()


class TestSvdAPI(unittest.TestCase):
    def test_dygraph(self):
        paddle.disable_static()
        a = np.random.rand(5, 5)
        x = paddle.to_tensor(a)
        u, s, vh = paddle.linalg.svd(x)
        gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False)
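        # U and VH are unique only up to column signs, so only the singular
        # values are compared against numpy.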
        np.testing.assert_allclose(s, gt_s, rtol=1e-05)

    def test_static(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                a = np.random.rand(5, 5)
                x = paddle.fluid.data(
                    name="input", shape=[5, 5], dtype='float64'
                )
                u, s, vh = paddle.linalg.svd(x)
                exe = fluid.Executor(place)
                gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False)
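                # Only the singular values are fetched; U / VH are
                # sign-ambiguous across implementations.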
                fetches = exe.run(
                    fluid.default_main_program(),
                    feed={"input": a},
                    fetch_list=[s],
                )
                np.testing.assert_allclose(fetches[0], gt_s, rtol=1e-05)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()