#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

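# Unit tests for the "cumsum" operator: API-level checks for paddle.cumsum in
# dynamic and static graph modes, plus OpTest forward/backward kernel checks.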
from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle import to_variable


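# API tests: run_cases exercises paddle.cumsum in dynamic graph mode and
# run_static exercises it in static graph mode, comparing against np.cumsum.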
class TestCumsumOp(unittest.TestCase):
    def run_cases(self):
        data_np = np.arange(12).reshape(3, 4)
        data = to_variable(data_np)

        y = paddle.cumsum(data)
        z = np.cumsum(data_np)
        self.assertTrue(np.array_equal(z, y.numpy()))

        y = paddle.cumsum(data, axis=0)
        z = np.cumsum(data_np, axis=0)
        self.assertTrue(np.array_equal(z, y.numpy()))

        y = paddle.cumsum(data, axis=-1)
        z = np.cumsum(data_np, axis=-1)
        self.assertTrue(np.array_equal(z, y.numpy()))

        y = paddle.cumsum(data, dtype='float64')
        self.assertTrue(y.dtype == core.VarDesc.VarType.FP64)

        y = paddle.cumsum(data, dtype=np.int32)
        self.assertTrue(y.dtype == core.VarDesc.VarType.INT32)

        y = paddle.cumsum(data, axis=-2)
        z = np.cumsum(data_np, axis=-2)
        self.assertTrue(np.array_equal(z, y.numpy()))

    def run_static(self, use_gpu=False):
        with fluid.program_guard(fluid.Program()):
            data_np = np.random.random((100, 100)).astype(np.float32)
            x = paddle.nn.data('X', [100, 100])
            y = paddle.cumsum(x)
            y2 = paddle.cumsum(x, axis=0)
            y3 = paddle.cumsum(x, axis=-1)
            y4 = paddle.cumsum(x, dtype='float64')
            y5 = paddle.cumsum(x, dtype=np.int32)
            y6 = paddle.cumsum(x, axis=-2)

            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            out = exe.run(feed={'X': data_np},
                          fetch_list=[
                              y.name, y2.name, y3.name, y4.name, y5.name,
                              y6.name
                          ])

            z = np.cumsum(data_np)
            self.assertTrue(np.allclose(z, out[0]))
            z = np.cumsum(data_np, axis=0)
            self.assertTrue(np.allclose(z, out[1]))
            z = np.cumsum(data_np, axis=-1)
            self.assertTrue(np.allclose(z, out[2]))
            self.assertTrue(out[3].dtype == np.float64)
            self.assertTrue(out[4].dtype == np.int32)
            z = np.cumsum(data_np, axis=-2)
            self.assertTrue(np.allclose(z, out[5]))

    def test_cpu(self):
        paddle.disable_static(paddle.fluid.CPUPlace())
        self.run_cases()
        paddle.enable_static()

        self.run_static()

    def test_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        paddle.disable_static(paddle.fluid.CUDAPlace(0))
        self.run_cases()
        paddle.enable_static()

        self.run_static(use_gpu=True)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = paddle.nn.data('x', [3, 4])
            y = paddle.cumsum(x, name='out')
            self.assertTrue('out' in y.name)


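# Forward/backward OpTest: inclusive cumsum over axis 2 of a 3-D float64 input.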
class TestSumOp1(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


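# Reversed cumsum over the last axis; the reference output is built by flipping
# the input along that axis, accumulating, then flipping back with np.flip.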
class TestSumOp2(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': -1, 'reverse': True}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {
            'Out': np.flip(
                np.flip(
                    self.inputs['X'], axis=2).cumsum(axis=2), axis=2)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


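# Inclusive cumsum over axis 1.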
class TestSumOp3(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 1}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


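# Inclusive cumsum over axis 0.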
class TestSumOp4(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 0}
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


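# No axis attribute is set; the expected output accumulates along the last
# axis (axis=1 for this 2-D input).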
class TestSumOp5(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.inputs = {'X': np.random.random((5, 20)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
178
        self.check_grad(['X'], 'Out')
E
emailweixu 已提交
179 180 181 182 183


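# 1-D input with no axis attribute; the expected output is a plain prefix sum
# along axis 0.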
class TestSumOp7(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.inputs = {'X': np.random.random((100, )).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


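# Exclusive cumsum: the reference output is a zero slab concatenated with the
# cumulative sum of all but the last slice along the accumulated axis.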
class TestSumOpExclusive1(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 65)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (np.zeros(
                    (4, 5, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)),
                axis=2)
        }

    def test_check_output(self):
        self.check_output()


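# The remaining exclusive cases repeat the same reference construction for
# other input shapes (and, in one case, a float32 input).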
class TestSumOpExclusive2(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 888)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (np.zeros(
                    (1, 1, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)),
                axis=2)
        }

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive3(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 888)).astype("float32")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (np.zeros(
                    (4, 5, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)),
                axis=2)
        }

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive4(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 3049)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (np.zeros(
                    (1, 1, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)),
                axis=2)
        }

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive5(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 3096)).astype("float64")
        self.inputs = {'X': a}
        self.outputs = {
            'Out': np.concatenate(
                (np.zeros(
                    (4, 5, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)),
                axis=2)
        }

    def test_check_output(self):
        self.check_output()


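# Reverse + exclusive combined: the reference accumulates the flipped input
# and places the zero slab at the far end of the axis.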
class TestSumOpReverseExclusive(OpTest):
    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, 'reverse': True, "exclusive": True}
        a = np.random.random((4, 5, 6)).astype("float64")
        self.inputs = {'X': a}
        a = np.flip(a, axis=2)
        self.outputs = {
            'Out': np.concatenate(
                (np.flip(
                    a[:, :, :-1].cumsum(axis=2), axis=2), np.zeros(
                        (4, 5, 1), dtype=np.float64)),
                axis=2)
        }

    def test_check_output(self):
        self.check_output()


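# Error path: passing a plain Python list instead of a Variable should raise
# TypeError.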
class BadInputTest(unittest.TestCase):
    def test_error(self):
        with fluid.program_guard(fluid.Program()):

            def test_bad_x():
                data = [1, 2, 4]
                result = fluid.layers.cumsum(data, axis=0)

            self.assertRaises(TypeError, test_bad_x)


if __name__ == '__main__':
    unittest.main()