#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
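
# A minimal background sketch (an illustration, not used by the tests below;
# assumes only NumPy): numpy.linalg.svd factors X into (U, S, VH) such that
# X == U @ diag(S) @ VH, which is the identity these tests rely on.
def _numpy_svd_reconstruct(x):
    # Reduced SVD: u is (m, k), s is (k,), vh is (k, n), with k = min(m, n).
    u, s, vh = np.linalg.svd(x, full_matrices=False)
    # u * s scales column j of u by s[j], i.e. it computes u @ diag(s).
    return np.matmul(u * s, vh)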


class TestSvdOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.python_api = paddle.linalg.svd
        self.generate_input()
        self.generate_output()
        self.op_type = "svd"
        assert hasattr(self, "_output_data")
        self.inputs = {"X": self._input_data}
        self.attrs = {'full_matrices': self.get_full_matrices_option()}
        self.outputs = {
            "U": self._output_data[0],
            "S": self._output_data[1],
            "VH": self._output_data[2]
        }

    def generate_input(self):
        """ return a input_data and input_shape
        """
        self._input_shape = (100, 1)
        self._input_data = np.random.random(self._input_shape).astype("float64")

    def get_full_matrices_option(self):
        return False

    def generate_output(self):
        assert hasattr(self, "_input_data")
        self._output_data = np.linalg.svd(self._input_data)

    def test_check_output(self):
        self.check_output(no_check_set=['U', 'VH'], check_eager=True)
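        # U and VH are excluded from the elementwise check because singular
        # vectors are only unique up to sign (and up to rotation when
        # singular values repeat); S is compared directly, and U / VH are
        # instead validated through reconstruction in test_svd_forward.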

    def test_svd_forward(self):
        """ u matmul diag(s) matmul vt must become X
        """
        single_input = self._input_data.reshape(
            [-1, self._input_shape[-2], self._input_shape[-1]])[0]
        paddle.disable_static()
        dy_x = paddle.to_tensor(single_input)
        dy_u, dy_s, dy_vt = paddle.linalg.svd(dy_x)
        dy_out_x = dy_u.matmul(paddle.diag(dy_s)).matmul(dy_vt)
        if not (paddle.abs(dy_out_x - dy_x) < 1e-7).all():
            print("EXPECTED:\n", dy_x)
            print("GOT     :\n", dy_out_x)
            raise RuntimeError("Check SVD Failed")
        paddle.enable_static()

    def check_S_grad(self):
        self.check_grad(['X'], ['S'],
                        numeric_grad_delta=0.001,
                        check_eager=True)

    def check_U_grad(self):
        self.check_grad(['X'], ['U'],
                        numeric_grad_delta=0.001,
                        check_eager=True)

    def check_V_grad(self):
        self.check_grad(['X'], ['VH'],
                        numeric_grad_delta=0.001,
                        check_eager=True)

    def test_check_grad(self):
        """
        Note: the input matrix must be full rank; otherwise the gradient is
        stochastic, because the (n-k) free vectors of u / v are arbitrary.
        """
        self.check_S_grad()
        self.check_U_grad()
        self.check_V_grad()
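    # For intuition (a small NumPy illustration): if X is rank deficient, the
    # trailing singular vectors only span its null space, and any orthonormal
    # basis of that space is equally valid, e.g.
    #
    #     x = np.zeros((3, 2))          # rank 0: every orthogonal U is valid
    #     u, s, vh = np.linalg.svd(x)   # u here is one arbitrary choice
    #
    # so numeric and analytic gradients may legitimately disagree there.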


class TestSvdCheckGrad2(TestSvdOp):
    # NOTE(xiongkun03): because we want to construct full-rank matrices,
    #                   we can't specify matrices whose numel() > 100

    no_need_check_grad = True

    def generate_input(self):
        """ return a deterministic matrix, the vander matrix;
            a vander matrix with distinct nodes is always full rank.
        """
        self._input_shape = (5, 5)
        self._input_data = np.vander([2, 3, 4, 5, 6]).astype("float64").reshape(
            self._input_shape)
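        # The Vandermonde determinant is a product of differences of the
        # nodes; since the nodes [2, 3, 4, 5, 6] are distinct, the matrix is
        # nonsingular, e.g. np.linalg.matrix_rank(self._input_data) == 5.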


class TestSvdNormalMatrixSmall(TestSvdCheckGrad2):
    def generate_input(self):
        """ small matrix SVD.
        """
        self._input_shape = (1, 1)
        self._input_data = np.random.random(self._input_shape).astype("float64")


class TestSvdNormalMatrix6x3(TestSvdCheckGrad2):
    def generate_input(self):
        """ return a deterministic full-rank 6x3 matrix.
        """
        self._input_shape = (6, 3)
        self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0],
                                     [0.0, 0.0, 6.0], [2.0, 4.0, 9.0],
                                     [3.0, 6.0, 8.0], [3.0, 1.0,
                                                       0.0]]).astype("float64")


class TestSvdNormalMatrix3x6(TestSvdCheckGrad2):
    def generate_input(self):
        """ return a deterministic full-rank 3x6 matrix
            (the transpose of the 6x3 case).
        """
        self._input_shape = (3, 6)
        self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0],
                                     [0.0, 0.0, 6.0], [2.0, 4.0, 9.0],
                                     [3.0, 6.0, 8.0], [3.0, 1.0,
                                                       0.0]]).astype("float64")
145 146 147 148
        self._input_data = self._input_data.transpose((-1, -2))


class TestSvdNormalMatrix6x3Batched(TestSvdOp):
    def generate_input(self):
        self._input_shape = (10, 6, 3)
        self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0],
                                     [0.0, 0.0, 6.0], [2.0, 4.0, 9.0],
                                     [3.0, 6.0, 8.0], [3.0, 1.0,
                                                       0.0]]).astype("float64")
        self._input_data = np.stack([self._input_data] * 10, axis=0)

    def test_svd_forward(self):
        """ test_svd_forward not support batched input, so disable this test.
        """
        pass


class TestSvdNormalMatrix3x6Batched(TestSvdOp):
    def generate_input(self):
        """ return a batch of deterministic full-rank 3x6 matrices.
        """
        self._input_shape = (10, 3, 6)
        self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0],
                                     [0.0, 0.0, 6.0], [2.0, 4.0, 9.0],
                                     [3.0, 6.0, 8.0], [3.0, 1.0,
                                                       0.0]]).astype("float64")
        self._input_data = self._input_data.transpose((-1, -2))
        self._input_data = np.stack([self._input_data] * 10, axis=0)

    def test_svd_forward(self):
        """ test_svd_forward not support batched input, so disable this test.
        """
        pass


class TestSvdNormalMatrix3x3x3x6Batched(TestSvdOp):
    def generate_input(self):
        """ return a nested batch of deterministic full-rank 3x6 matrices.
        """
        self._input_shape = (3, 3, 3, 6)
        self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0],
                                     [0.0, 0.0, 6.0], [2.0, 4.0, 9.0],
                                     [3.0, 6.0, 8.0], [3.0, 1.0,
                                                       0.0]]).astype("float64")
        self._input_data = self._input_data.transpose((-1, -2))
        self._input_data = np.stack(
            [self._input_data, self._input_data, self._input_data], axis=0)
        self._input_data = np.stack(
            [self._input_data, self._input_data, self._input_data], axis=0)

    def test_svd_forward(self):
        """ test_svd_forward not support batched input, so disable this test.
        """
        pass


@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " +
                    "however it is desirable to cover the forward pass")
class TestSvdNormalMatrixBig(TestSvdOp):
    def generate_input(self):
        """ big matrix SVD.

214 215 216 217 218 219 220 221 222 223 224 225 226 227
        """
        self._input_shape = (2, 200, 300)
        self._input_data = np.random.random(self._input_shape).astype("float64")

    def test_svd_forward(self):
        """ test_svd_forward not support batched input, so disable this test.
        """
        pass

    def test_check_grad(self):
        pass


class TestSvdNormalMatrixBig2(TestSvdOp):
    def generate_input(self):
        """ big matrix SVD.
        """
        self._input_shape = (1, 100)
        self._input_data = np.random.random(self._input_shape).astype("float64")


class TestSvdNormalMatrixFullMatrices(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()

    def tearDown(self):
        paddle.enable_static()

    def test_full_matrices(self):
        mat_shape = (2, 3)
        mat = np.random.random(mat_shape).astype("float64")
        x = paddle.to_tensor(mat)
        u, s, vh = paddle.linalg.svd(x, full_matrices=True)
        assert u.shape == [2, 2]
        assert vh.shape == [3, 3]
        x_recover = u.matmul(paddle.diag(s)).matmul(vh[0:2])
        if (paddle.abs(x_recover - x) > 1e-4).any():
            raise RuntimeError("mat can't be recovered\n")


class TestSvdFullMatriceGrad(TestSvdNormalMatrix6x3):
    def get_full_matrices_option(self):
        return True

    def test_svd_forward(self):
        """ test_svd_forward not support full matrices, so disable this test.
        """
        pass

    def test_check_grad(self):
        """
        Note: the input matrix must be full rank; otherwise the gradient is
        stochastic, because the (n-k) free vectors of u / v are arbitrary.
        """
        self.check_S_grad()
        # self.check_U_grad()  # don't check U grad, because U has free (non-unique) vectors
        self.check_V_grad()


class TestSvdAPI(unittest.TestCase):
    def test_dygraph(self):
        paddle.disable_static()
        a = np.random.rand(5, 5)
        x = paddle.to_tensor(a)
        u, s, vh = paddle.linalg.svd(x)
        gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False)
        np.testing.assert_allclose(s, gt_s, rtol=1e-05)

    def test_static(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                a = np.random.rand(5, 5)
                x = paddle.fluid.data(name="input",
                                      shape=[5, 5],
                                      dtype='float64')
                u, s, vh = paddle.linalg.svd(x)
                exe = fluid.Executor(place)
                gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False)
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": a},
                                  fetch_list=[s])
                np.testing.assert_allclose(fetches[0], gt_s, rtol=1e-05)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()