#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy as np
from op_test import OpTest

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.inference as paddle_infer


class TestCumsumOp(unittest.TestCase):
    def run_cases(self):
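        # Dygraph checks: compare paddle.cumsum with np.cumsum for the
        # flattened default, explicit/negative axes, and dtype overrides.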
        data_np = np.arange(12).reshape(3, 4)
        data = paddle.to_tensor(data_np)

        y = paddle.cumsum(data)
        z = np.cumsum(data_np)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, axis=0)
        z = np.cumsum(data_np, axis=0)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, axis=-1)
        z = np.cumsum(data_np, axis=-1)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, dtype='float64')
        self.assertTrue(y.dtype == core.VarDesc.VarType.FP64)

        y = paddle.cumsum(data, dtype=np.int32)
        self.assertTrue(y.dtype == core.VarDesc.VarType.INT32)

        y = paddle.cumsum(data, axis=-2)
        z = np.cumsum(data_np, axis=-2)
        np.testing.assert_array_equal(z, y.numpy())

    def run_static(self, use_gpu=False):
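        # Static-graph checks: build all cumsum variants in one program and
        # verify the fetched results against np.cumsum.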
        with fluid.program_guard(fluid.Program()):
            data_np = np.random.random((100, 100)).astype(np.float32)
            x = paddle.static.data('X', [100, 100])
            y = paddle.cumsum(x)
            y2 = paddle.cumsum(x, axis=0)
            y3 = paddle.cumsum(x, axis=-1)
            y4 = paddle.cumsum(x, dtype='float64')
            y5 = paddle.cumsum(x, dtype=np.int32)
            y6 = paddle.cumsum(x, axis=-2)

            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            out = exe.run(
                feed={'X': data_np},
                fetch_list=[
                    y.name,
                    y2.name,
                    y3.name,
                    y4.name,
                    y5.name,
                    y6.name,
                ],
            )

            z = np.cumsum(data_np)
            np.testing.assert_allclose(z, out[0], rtol=1e-05)
            z = np.cumsum(data_np, axis=0)
            np.testing.assert_allclose(z, out[1], rtol=1e-05)
            z = np.cumsum(data_np, axis=-1)
            np.testing.assert_allclose(z, out[2], rtol=1e-05)
            self.assertTrue(out[3].dtype == np.float64)
            self.assertTrue(out[4].dtype == np.int32)
            z = np.cumsum(data_np, axis=-2)
            np.testing.assert_allclose(z, out[5], rtol=1e-05)

    def test_cpu(self):
        paddle.disable_static(paddle.fluid.CPUPlace())
        self.run_cases()
        paddle.enable_static()

        self.run_static()

    def test_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        paddle.disable_static(paddle.fluid.CUDAPlace(0))
        self.run_cases()
        paddle.enable_static()

        self.run_static(use_gpu=True)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = paddle.static.data('x', [3, 4])
            y = paddle.cumsum(x, name='out')
            self.assertTrue('out' in y.name)


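# OpTest-based checks: each TestSumOp* case builds a numpy reference for one
# axis/reverse/exclusive configuration and verifies both the forward output
# and the gradient of the "cumsum" operator.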
class TestSumOp1(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp2(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': -1, 'reverse': True}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
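        # Reference for reverse cumsum: flip along the axis, cumsum, flip back.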
        self.outputs = {
            'Out': np.flip(
                np.flip(self.inputs['X'], axis=2).cumsum(axis=2), axis=2
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp3(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 1}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp4(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 0}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp5(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.inputs = {'X': np.random.random((5, 20)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp7(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.inputs = {'X': np.random.random((100,)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


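# Compare the float16 forward and gradient results of cumsum against a
# float32 run on the same input, within a relaxed tolerance.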
class TestCumsumFP16(unittest.TestCase):
    def check_main(self, x_np, dtype):
        paddle.disable_static()
        x = paddle.to_tensor(x_np.astype(dtype))
        x.stop_gradient = False
        y = paddle.cumsum(x, dtype=dtype)
        x_g = paddle.grad(y, [x])
        y_np = y.numpy().astype('float32')
        x_g_np = x_g[0].numpy().astype('float32')
        paddle.enable_static()
        return y_np, x_g_np

    def test_main(self):
        if not paddle.is_compiled_with_cuda():
            return

        np.random.seed(20)
        x_np = np.random.random([10, 12])
        y_np_1, x_g_np_1 = self.check_main(x_np, 'float16')
        y_np_2, x_g_np_2 = self.check_main(x_np, 'float32')

        np.testing.assert_allclose(y_np_1, y_np_2, rtol=1e-03)
        np.testing.assert_allclose(x_g_np_1, x_g_np_2, rtol=1e-03)


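# Exclusive cumsum: the value at position i excludes element i, so the numpy
# reference prepends a zero slice and accumulates only the first n-1 slices.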
class TestSumOpExclusive1(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 20)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOpExclusive2(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 100)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((1, 1, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOpExclusive3(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 20)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOpExclusive4(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 100)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((1, 1, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOpExclusive5(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 40)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOpExclusiveFP16(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True, "dtype": "float16"}
        a = np.random.random((4, 5, 20)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


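# Reverse + exclusive: flip the input, build the exclusive reference, and
# flip back, which leaves the zero slice at the end of the axis.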
class TestSumOpReverseExclusive(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, 'reverse': True, "exclusive": True}
        a = np.random.random((4, 5, 6)).astype("float64")
        self.inputs = {'X': a}
        a = np.flip(a, axis=2)
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.flip(a[:, :, :-1].cumsum(axis=2), axis=2),
                    np.zeros((4, 5, 1), dtype=np.float64),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


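# Passing a plain Python list instead of a Tensor/Variable must raise
# TypeError.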
class BadInputTest(unittest.TestCase):
    def test_error(self):
        with fluid.program_guard(fluid.Program()):

            def test_bad_x():
                data = [1, 2, 4]
                result = paddle.cumsum(data, axis=0)

            self.assertRaises(TypeError, test_bad_x)


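# Cover cumsum with a Tensor-valued axis in dygraph, static graph, and a
# saved inference model loaded through paddle.inference.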
class TestTensorAxis(unittest.TestCase):
    def setUp(self):
        paddle.seed(2022)
        self.temp_dir = tempfile.TemporaryDirectory()
        self.save_path = os.path.join(self.temp_dir.name, 'tensor_axis_cumsum')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
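
    def tearDown(self):
        # Remove the temporary directory that holds the saved inference model.
        self.temp_dir.cleanup()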

    def test_dygraph(self):
        paddle.disable_static()
        x = np.random.randn(5, 6)
        axis = 1
        np_out = np.cumsum(x, axis)
        pd_out = paddle.cumsum(
            paddle.to_tensor(x), axis=paddle.to_tensor([axis], dtype='int32')
        )
        np.testing.assert_allclose(np_out, pd_out.numpy())

    def test_static_and_infer(self):
        paddle.enable_static()
        np_x = np.random.randn(9, 10, 11).astype('float32')
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            # run static
            x = paddle.static.data(shape=np_x.shape, name='x', dtype=np_x.dtype)
            linear = paddle.nn.Linear(np_x.shape[-1], np_x.shape[-1])
            linear_out = linear(x)
            relu_out = paddle.nn.functional.relu(linear_out)
            axis = paddle.full([1], 2, dtype='int64')
            out = paddle.cumsum(relu_out, axis=axis)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(learning_rate=0.0)
            sgd.minimize(loss)

            exe = paddle.static.Executor(self.place)
            exe.run(startup_prog)
            static_out = exe.run(feed={'x': np_x}, fetch_list=[out])

            # run infer
            paddle.static.save_inference_model(self.save_path, [x], [out], exe)
            config = paddle_infer.Config(
                self.save_path + '.pdmodel', self.save_path + '.pdiparams'
            )
            if paddle.is_compiled_with_cuda():
                config.enable_use_gpu(100, 0)
            else:
                config.disable_gpu()

            predictor = paddle_infer.create_predictor(config)
            input_names = predictor.get_input_names()
            input_handle = predictor.get_input_handle(input_names[0])
            fake_input = np_x
            input_handle.reshape(np_x.shape)
            input_handle.copy_from_cpu(fake_input)
            predictor.run()
            output_names = predictor.get_output_names()
            output_handle = predictor.get_output_handle(output_names[0])
            infer_out = output_handle.copy_to_cpu()
            np.testing.assert_allclose(static_out[0], infer_out)


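# Static-graph float16 smoke test; only executed when CUDA is available.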
class TestCumSumOpFp16(unittest.TestCase):
    def test_fp16(self):
        x_np = np.random.random((100, 100)).astype('float16')
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(shape=[100, 100], name='x', dtype='float16')
            y1 = paddle.cumsum(x)
            y2 = paddle.cumsum(x, axis=0)
            y3 = paddle.cumsum(x, axis=-1)
            y4 = paddle.cumsum(x, axis=-2)
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = paddle.static.Executor(place)
                exe.run(paddle.static.default_startup_program())
                out = exe.run(feed={'x': x_np}, fetch_list=[y1, y2, y3, y4])


if __name__ == '__main__':
    unittest.main()