# test_sparse_transpose_op.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import numpy as np
import unittest
from paddle.fluid.framework import _test_eager_guard


class TestTranspose(unittest.TestCase):
    # x: sparse, out: sparse

    def check_result(self, x_shape, dims, format):
        """Compare sparse transpose (forward and backward) against the dense op.

        Builds a randomly masked dense tensor, transposes it both densely and
        sparsely (COO or CSR per ``format``), and asserts that outputs and
        input gradients agree.
        """
        with _test_eager_guard():
            zero_mask = paddle.randint(0, 2, x_shape).astype("float32")
            # Shift the random values by 1 before masking so that every zero
            # in "origin_x" is produced by the mask alone; otherwise the
            # backward comparison below may fail.
            origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * zero_mask

            # Dense reference path.
            dense_x = origin_x.detach()
            dense_x.stop_gradient = False
            dense_out = paddle.transpose(dense_x, dims)

            # Sparse path under test: pick the storage format from `format`.
            sp_x = (origin_x.detach().to_sparse_coo(len(x_shape))
                    if format == "coo" else origin_x.detach().to_sparse_csr())
            sp_x.stop_gradient = False
            sp_out = paddle.incubate.sparse.transpose(sp_x, dims)

            np.testing.assert_allclose(sp_out.to_dense().numpy(),
                                       dense_out.numpy(),
                                       rtol=1e-05)

            dense_out.backward()
            sp_out.backward()
            # The sparse gradient only covers non-zero positions, so the dense
            # reference gradient is masked before comparing.
            np.testing.assert_allclose(sp_x.grad.to_dense().numpy(),
                                       (dense_x.grad * zero_mask).numpy(),
                                       rtol=1e-05)

    def test_transpose_2d(self):
        # Both 2-D permutations, in COO and CSR form.
        for perm in ([0, 1], [1, 0]):
            for fmt in ('coo', 'csr'):
                self.check_result([2, 5], perm, fmt)

    def test_transpose_3d(self):
        # Every 3-D permutation, in COO and CSR form.
        for perm in ([0, 1, 2], [0, 2, 1], [1, 0, 2],
                     [2, 0, 1], [2, 1, 0], [1, 2, 0]):
            for fmt in ('coo', 'csr'):
                self.check_result([6, 2, 3], perm, fmt)

    def test_transpose_nd(self):
        self.check_result([8, 3, 4, 4, 5, 3], [5, 3, 4, 1, 0, 2], 'coo')
        # Randint now only supports access to dimension 0 to 9.
        self.check_result([2, 3, 4, 2, 3, 4, 2, 3, 4],
                          [2, 3, 4, 5, 6, 7, 8, 0, 1], 'coo')


# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()