#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard


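# Operator-level test for diag_v2: builds a 10x10 random input and uses
# np.diag as the reference for the forward output; subclasses override
# init_config() to vary offset, dtype, and padding_value.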
class TestDiagV2Op(OpTest):
    def setUp(self):
        self.op_type = "diag_v2"
        self.python_api = paddle.diag
        self.x = np.random.rand(10, 10)
        self.offset = 0
        self.padding_value = 0.0
        self.out = np.diag(self.x, self.offset)

        self.init_config()
        self.inputs = {'X': self.x}
        self.attrs = {
            'offset': self.offset,
            'padding_value': self.padding_value,
        }
        self.outputs = {'Out': self.out}

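    # Static-graph checks only; eager mode is exercised separately via
    # _test_eager_guard in the API tests below.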
    def test_check_output(self):
        paddle.enable_static()
        self.check_output(check_eager=False)

    def test_check_grad(self):
        paddle.enable_static()
        self.check_grad(['X'], 'Out', check_eager=False)

    def init_config(self):
        pass


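# Cases 1 and 2 extract the diagonal above/below the main one; Case 3 uses an
# integer-valued float64 input; Case 4 embeds a 1-D input into a 2-D matrix
# with a nonzero padding_value.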
class TestDiagV2OpCase1(TestDiagV2Op):
    def init_config(self):
        self.offset = 1
        self.out = np.diag(self.x, self.offset)


class TestDiagV2OpCase2(TestDiagV2Op):
    def init_config(self):
        self.offset = -1
        self.out = np.diag(self.x, self.offset)


class TestDiagV2OpCase3(TestDiagV2Op):
    def init_config(self):
        self.x = np.random.randint(-10, 10, size=(10, 10)).astype("float64")
        self.out = np.diag(self.x, self.offset)


class TestDiagV2OpCase4(TestDiagV2Op):
    def init_config(self):
        self.x = np.random.rand(100)
        self.padding_value = 2
        n = self.x.size
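        # Reference output: an n x n matrix filled with padding_value, with x
        # on the main diagonal (the final term cancels the padding that the
        # fill placed on that diagonal).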
        self.out = (
            self.padding_value * np.ones((n, n))
            + np.diag(self.x, self.offset)
            - np.diag(self.padding_value * np.ones(n))
        )


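# Error paths: a plain list input, a non-integer offset, a non-scalar
# padding_value, and an input with more than 2 dimensions should all raise.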
class TestDiagV2Error(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):

            def test_diag_v2_type():
                x = [1, 2, 3]
                output = paddle.diag(x)

            self.assertRaises(TypeError, test_diag_v2_type)

            x = paddle.static.data('data', [3, 3])
            self.assertRaises(TypeError, paddle.diag, x, offset=2.5)

            self.assertRaises(TypeError, paddle.diag, x, padding_value=[9])

            x = paddle.static.data('data2', [3, 3, 3])
            self.assertRaises(ValueError, paddle.diag, x)


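# API-level tests: run paddle.diag in both dygraph and static-graph modes
# against np.diag references, covering 1-D and 2-D inputs, positive and
# negative offsets, non-square shapes, and padding values.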
class TestDiagV2API(unittest.TestCase):
    def setUp(self):
        self.input_np = np.random.random(size=(10, 10)).astype(np.float32)
        self.expected0 = np.diag(self.input_np)
        self.expected1 = np.diag(self.input_np, k=1)
        self.expected2 = np.diag(self.input_np, k=-1)

        self.input_np2 = np.random.rand(100)
        self.offset = 0
        self.padding_value = 8
        n = self.input_np2.size
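        # Same padding construction as TestDiagV2OpCase4 above.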
        self.expected3 = (
            self.padding_value * np.ones((n, n))
            + np.diag(self.input_np2, self.offset)
            - np.diag(self.padding_value * np.ones(n))
        )

        self.input_np3 = np.random.randint(-10, 10, size=(100)).astype(np.int64)
        self.padding_value = 8.0
        n = self.input_np3.size
        self.expected4 = (
            self.padding_value * np.ones((n, n))
            + np.diag(self.input_np3, self.offset)
            - np.diag(self.padding_value * np.ones(n))
        )

        self.padding_value = -8
        self.expected5 = (
            self.padding_value * np.ones((n, n))
            + np.diag(self.input_np3, self.offset)
            - np.diag(self.padding_value * np.ones(n))
        )

        self.input_np4 = np.random.random(size=(2000, 2000)).astype(np.float32)
        self.expected6 = np.diag(self.input_np4)
        self.expected7 = np.diag(self.input_np4, k=1)
        self.expected8 = np.diag(self.input_np4, k=-1)

        self.input_np5 = np.random.random(size=(2000)).astype(np.float32)
        self.expected9 = np.diag(self.input_np5)
        self.expected10 = np.diag(self.input_np5, k=1)
        self.expected11 = np.diag(self.input_np5, k=-1)

        self.input_np6 = np.random.random(size=(2000, 1500)).astype(np.float32)
        self.expected12 = np.diag(self.input_np6, k=-1)

    def run_imperative(self):
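        # Dygraph path: each call is checked against the np.diag reference
        # computed in setUp.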
        x = paddle.to_tensor(self.input_np)
        y = paddle.diag(x)
        np.testing.assert_allclose(y.numpy(), self.expected0, rtol=1e-05)

        y = paddle.diag(x, offset=1)
        np.testing.assert_allclose(y.numpy(), self.expected1, rtol=1e-05)

        y = paddle.diag(x, offset=-1)
        np.testing.assert_allclose(y.numpy(), self.expected2, rtol=1e-05)

        x = paddle.to_tensor(self.input_np2)
        y = paddle.diag(x, padding_value=8)
        np.testing.assert_allclose(y.numpy(), self.expected3, rtol=1e-05)

        x = paddle.to_tensor(self.input_np3)
        y = paddle.diag(x, padding_value=8.0)
        np.testing.assert_allclose(y.numpy(), self.expected4, rtol=1e-05)

        y = paddle.diag(x, padding_value=-8)
        np.testing.assert_allclose(y.numpy(), self.expected5, rtol=1e-05)

        x = paddle.to_tensor(self.input_np4)
        y = paddle.diag(x)
        np.testing.assert_allclose(y.numpy(), self.expected6, rtol=1e-05)

        y = paddle.diag(x, offset=1)
        np.testing.assert_allclose(y.numpy(), self.expected7, rtol=1e-05)

        y = paddle.diag(x, offset=-1)
        np.testing.assert_allclose(y.numpy(), self.expected8, rtol=1e-05)

        x = paddle.to_tensor(self.input_np5)
        y = paddle.diag(x)
        np.testing.assert_allclose(y.numpy(), self.expected9, rtol=1e-05)

        y = paddle.diag(x, offset=1)
        np.testing.assert_allclose(y.numpy(), self.expected10, rtol=1e-05)

        y = paddle.diag(x, offset=-1)
        np.testing.assert_allclose(y.numpy(), self.expected11, rtol=1e-05)

        x = paddle.to_tensor(self.input_np6)
        y = paddle.diag(x, offset=-1)
        np.testing.assert_allclose(y.numpy(), self.expected12, rtol=1e-05)

    def run_static(self, use_gpu=False):
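        # Static-graph path: declare placeholders for every input shape/dtype,
        # build all diag variants, then fetch them in a single executor run.
        # result3 is deliberately not fetched; it only checks that a
        # user-supplied name is propagated to the output variable.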
        x = paddle.static.data(name='input', shape=[10, 10], dtype='float32')
        x2 = paddle.static.data(name='input2', shape=[100], dtype='float64')
        x3 = paddle.static.data(name='input3', shape=[100], dtype='int64')
        x4 = paddle.static.data(
            name='input4', shape=[2000, 2000], dtype='float32'
        )
        x5 = paddle.static.data(name='input5', shape=[2000], dtype='float32')
        x6 = paddle.static.data(
            name='input6', shape=[2000, 1500], dtype='float32'
        )
        result0 = paddle.diag(x)
        result1 = paddle.diag(x, offset=1)
        result2 = paddle.diag(x, offset=-1)
        result3 = paddle.diag(x, name='aaa')
        result4 = paddle.diag(x2, padding_value=8)
        result5 = paddle.diag(x3, padding_value=8.0)
        result6 = paddle.diag(x3, padding_value=-8)
        result7 = paddle.diag(x4)
        result8 = paddle.diag(x4, offset=1)
        result9 = paddle.diag(x4, offset=-1)
        result10 = paddle.diag(x5)
        result11 = paddle.diag(x5, offset=1)
        result12 = paddle.diag(x5, offset=-1)
        result13 = paddle.diag(x6, offset=-1)

        place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        (
            res0,
            res1,
            res2,
            res4,
            res5,
            res6,
            res7,
            res8,
            res9,
            res10,
            res11,
            res12,
            res13,
        ) = exe.run(
            feed={
                "input": self.input_np,
                "input2": self.input_np2,
                'input3': self.input_np3,
                'input4': self.input_np4,
                'input5': self.input_np5,
                'input6': self.input_np6,
            },
            fetch_list=[
                result0,
                result1,
                result2,
                result4,
                result5,
                result6,
                result7,
                result8,
                result9,
                result10,
                result11,
                result12,
                result13,
            ],
        )

        np.testing.assert_allclose(res0, self.expected0, rtol=1e-05)
        np.testing.assert_allclose(res1, self.expected1, rtol=1e-05)
        np.testing.assert_allclose(res2, self.expected2, rtol=1e-05)
        self.assertIn('aaa', result3.name)
        np.testing.assert_allclose(res4, self.expected3, rtol=1e-05)
        np.testing.assert_allclose(res5, self.expected4, rtol=1e-05)
        np.testing.assert_allclose(res6, self.expected5, rtol=1e-05)
        np.testing.assert_allclose(res7, self.expected6, rtol=1e-05)
        np.testing.assert_allclose(res8, self.expected7, rtol=1e-05)
        np.testing.assert_allclose(res9, self.expected8, rtol=1e-05)
        np.testing.assert_allclose(res10, self.expected9, rtol=1e-05)
        np.testing.assert_allclose(res11, self.expected10, rtol=1e-05)
        np.testing.assert_allclose(res12, self.expected11, rtol=1e-05)
        np.testing.assert_allclose(res13, self.expected12, rtol=1e-05)

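    # Run the dygraph checks (plus the eager-mode variant) and the
    # static-graph checks on CPU; test_gpu repeats this on CUDA when
    # available.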
    def test_cpu(self):
        paddle.disable_static(place=paddle.fluid.CPUPlace())
        self.run_imperative()
        with _test_eager_guard():
            self.run_imperative()

        paddle.enable_static()

        with fluid.program_guard(fluid.Program()):
            self.run_static()

    def test_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return

        paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
        self.run_imperative()
        with _test_eager_guard():
            self.run_imperative()
        paddle.enable_static()

        with fluid.program_guard(fluid.Program()):
            self.run_static(use_gpu=True)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()