#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy as np
from op_test import OpTest

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.inference as paddle_infer


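# Functional tests for the paddle.cumsum Python API in both dynamic and
# static graph modes.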
class TestCumsumOp(unittest.TestCase):
    def run_cases(self):
        data_np = np.arange(12).reshape(3, 4)
        data = paddle.to_tensor(data_np)

        y = paddle.cumsum(data)
        z = np.cumsum(data_np)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, axis=0)
        z = np.cumsum(data_np, axis=0)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, axis=-1)
        z = np.cumsum(data_np, axis=-1)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, dtype='float64')
        self.assertTrue(y.dtype == core.VarDesc.VarType.FP64)

        y = paddle.cumsum(data, dtype=np.int32)
        self.assertTrue(y.dtype == core.VarDesc.VarType.INT32)

        y = paddle.cumsum(data, axis=-2)
        z = np.cumsum(data_np, axis=-2)
        np.testing.assert_array_equal(z, y.numpy())

    def run_static(self, use_gpu=False):
        with fluid.program_guard(fluid.Program()):
            data_np = np.random.random((100, 100)).astype(np.float32)
            x = paddle.static.data('X', [100, 100])
            y = paddle.cumsum(x)
            y2 = paddle.cumsum(x, axis=0)
            y3 = paddle.cumsum(x, axis=-1)
            y4 = paddle.cumsum(x, dtype='float64')
            y5 = paddle.cumsum(x, dtype=np.int32)
            y6 = paddle.cumsum(x, axis=-2)

            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            out = exe.run(
                feed={'X': data_np},
                fetch_list=[
                    y.name,
                    y2.name,
                    y3.name,
                    y4.name,
                    y5.name,
                    y6.name,
                ],
            )

            z = np.cumsum(data_np)
            np.testing.assert_allclose(z, out[0], rtol=1e-05)
            z = np.cumsum(data_np, axis=0)
            np.testing.assert_allclose(z, out[1], rtol=1e-05)
            z = np.cumsum(data_np, axis=-1)
            np.testing.assert_allclose(z, out[2], rtol=1e-05)
            self.assertTrue(out[3].dtype == np.float64)
            self.assertTrue(out[4].dtype == np.int32)
            z = np.cumsum(data_np, axis=-2)
            np.testing.assert_allclose(z, out[5], rtol=1e-05)

    def test_cpu(self):
        paddle.disable_static(paddle.fluid.CPUPlace())
        self.run_cases()
        paddle.enable_static()

        self.run_static()

    def test_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        paddle.disable_static(paddle.fluid.CUDAPlace(0))
        self.run_cases()
        paddle.enable_static()

        self.run_static(use_gpu=True)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = paddle.static.data('x', [3, 4])
            y = paddle.cumsum(x, name='out')
            self.assertTrue('out' in y.name)


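# OpTest-based kernel checks: each TestSumOp* case compares the "cumsum" op
# against numpy's cumsum along a different axis; check_prim=True also checks
# the gradient under the prim (composite operator) path.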
class TestSumOp1(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


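# reverse=True accumulates from the end of the axis; the numpy reference is
# built by flipping the input, taking an ordinary cumsum, and flipping back.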
class TestSumOp2(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': -1, 'reverse': True}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {
            'Out': np.flip(
                np.flip(self.inputs['X'], axis=2).cumsum(axis=2), axis=2
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSumOp3(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 1}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSumOp4(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 0}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp5(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.inputs = {'X': np.random.random((5, 20)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp7(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.inputs = {'X': np.random.random((100,)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


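# float16 results should stay close to float32; rtol is relaxed to 1e-3 to
# allow for half-precision accumulation error.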
class TestCumsumFP16(unittest.TestCase):
    def check_main(self, x_np, dtype):
        paddle.disable_static()
        x = paddle.to_tensor(x_np.astype(dtype))
        x.stop_gradient = False
        y = paddle.cumsum(x, dtype=dtype)
        x_g = paddle.grad(y, [x])
        y_np = y.numpy().astype('float32')
        x_g_np = x_g[0].numpy().astype('float32')
        paddle.enable_static()
        return y_np, x_g_np

    def test_main(self):
        if not paddle.is_compiled_with_cuda():
            return

        np.random.seed(20)
        x_np = np.random.random([10, 12])
        y_np_1, x_g_np_1 = self.check_main(x_np, 'float16')
        y_np_2, x_g_np_2 = self.check_main(x_np, 'float32')

        np.testing.assert_allclose(y_np_1, y_np_2, rtol=1e-03)
        np.testing.assert_allclose(x_g_np_1, x_g_np_2, rtol=1e-03)


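# exclusive=True makes out[i] the sum of x[:i], so the first entry along the
# axis is 0 (e.g. [1, 2, 3] -> [0, 1, 3]); the reference prepends a zero
# slice to a cumsum over all but the last element.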
class TestSumOpExclusive1(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 20)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSumOpExclusive2(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 100)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((1, 1, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSumOpExclusive3(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 20)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSumOpExclusive4(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 100)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((1, 1, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSumOpExclusive5(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 40)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


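# Same exclusive pattern, with the op's "dtype" attribute set to float16.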
class TestSumOpExclusiveFP16(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, "exclusive": True, "dtype": "float16"}
        a = np.random.random((4, 5, 20)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.zeros((4, 5, 1), dtype=np.float64),
                    a[:, :, :-1].cumsum(axis=2),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


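# reverse=True together with exclusive=True sums the elements after each
# position (e.g. [1, 2, 3] -> [5, 3, 0]).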
class TestSumOpReverseExclusive(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.prim_op_type = "prim"
        self.python_api = paddle.cumsum
        self.enable_cinn = False
        self.attrs = {'axis': 2, 'reverse': True, "exclusive": True}
        a = np.random.random((4, 5, 6)).astype("float64")
        self.inputs = {'X': a}
        a = np.flip(a, axis=2)
        self.outputs = {
            'Out': np.concatenate(
                (
                    np.flip(a[:, :, :-1].cumsum(axis=2), axis=2),
                    np.zeros((4, 5, 1), dtype=np.float64),
                ),
                axis=2,
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


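# paddle.cumsum expects a Tensor/Variable; passing a plain Python list should
# raise TypeError.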
class BadInputTest(unittest.TestCase):
    def test_error(self):
        with fluid.program_guard(fluid.Program()):

            def test_bad_x():
                data = [1, 2, 4]
                result = paddle.cumsum(data, axis=0)

            self.assertRaises(TypeError, test_bad_x)


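# The axis argument may itself be a Tensor; covered in dygraph, static graph,
# and a save/load round trip through the inference API.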
class TestTensorAxis(unittest.TestCase):
    def setUp(self):
        paddle.seed(2022)
        self.temp_dir = tempfile.TemporaryDirectory()
        self.save_path = os.path.join(self.temp_dir.name, 'tensor_axis_cumsum')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph(self):
        paddle.disable_static()
        x = np.random.randn(5, 6)
        axis = 1
        np_out = np.cumsum(x, axis)
        pd_out = paddle.cumsum(
            paddle.to_tensor(x), axis=paddle.to_tensor([axis], dtype='int32')
        )
        np.testing.assert_allclose(np_out, pd_out.numpy())

    def test_static_and_infer(self):
        paddle.enable_static()
        np_x = np.random.randn(9, 10, 11).astype('float32')
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            # run static
            x = paddle.static.data(shape=np_x.shape, name='x', dtype=np_x.dtype)
            linear = paddle.nn.Linear(np_x.shape[-1], np_x.shape[-1])
            linear_out = linear(x)
            relu_out = paddle.nn.functional.relu(linear_out)
            axis = paddle.full([1], 2, dtype='int64')
            out = paddle.cumsum(relu_out, axis=axis)
            loss = paddle.mean(out)
            sgd = paddle.optimizer.SGD(learning_rate=0.0)
            sgd.minimize(loss)

            exe = paddle.static.Executor(self.place)
            exe.run(startup_prog)
            static_out = exe.run(feed={'x': np_x}, fetch_list=[out])

            # run infer
            paddle.static.save_inference_model(self.save_path, [x], [out], exe)
            config = paddle_infer.Config(
                self.save_path + '.pdmodel', self.save_path + '.pdiparams'
            )
            if paddle.is_compiled_with_cuda():
                config.enable_use_gpu(100, 0)
            else:
                config.disable_gpu()

            predictor = paddle_infer.create_predictor(config)
            input_names = predictor.get_input_names()
            input_handle = predictor.get_input_handle(input_names[0])
            fake_input = np_x
            input_handle.reshape(np_x.shape)
            input_handle.copy_from_cpu(fake_input)
            predictor.run()
            output_names = predictor.get_output_names()
            output_handle = predictor.get_output_handle(output_names[0])
            infer_out = output_handle.copy_to_cpu()
            np.testing.assert_allclose(static_out[0], infer_out)


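# Static-graph float16 smoke test; the program is only executed when CUDA is
# available.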
class TestCumSumOpFp16(unittest.TestCase):
    def test_fp16(self):
        x_np = np.random.random((100, 100)).astype('float16')
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(shape=[100, 100], name='x', dtype='float16')
            y1 = paddle.cumsum(x)
            y2 = paddle.cumsum(x, axis=0)
            y3 = paddle.cumsum(x, axis=-1)
            y4 = paddle.cumsum(x, axis=-2)
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = paddle.static.Executor(place)
                exe.run(paddle.static.default_startup_program())
                out = exe.run(feed={'x': x_np}, fetch_list=[y1, y2, y3, y4])


if __name__ == '__main__':
    unittest.main()