#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestCumsumOp(unittest.TestCase):
    """End-to-end tests for ``paddle.cumsum`` in dygraph and static modes.

    Improvements over the previous version: ``self.assertTrue(np.allclose(...))``
    is replaced with ``np.testing.assert_allclose`` (reports the mismatching
    elements on failure instead of just "False is not true"), and
    ``self.assertTrue(a == b)`` with ``self.assertEqual`` for the same reason.
    Tolerances match ``np.allclose`` defaults so the accepted error is unchanged.
    """

    def run_cases(self):
        # Dygraph-mode checks against numpy's reference implementation.
        data_np = np.arange(12).reshape(3, 4)
        data = paddle.to_tensor(data_np)

        y = paddle.cumsum(data)
        z = np.cumsum(data_np)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, axis=0)
        z = np.cumsum(data_np, axis=0)
        np.testing.assert_array_equal(z, y.numpy())

        y = paddle.cumsum(data, axis=-1)
        z = np.cumsum(data_np, axis=-1)
        np.testing.assert_array_equal(z, y.numpy())

        # dtype may be given as a string or as a numpy dtype object.
        y = paddle.cumsum(data, dtype='float64')
        self.assertEqual(y.dtype, core.VarDesc.VarType.FP64)

        y = paddle.cumsum(data, dtype=np.int32)
        self.assertEqual(y.dtype, core.VarDesc.VarType.INT32)

        y = paddle.cumsum(data, axis=-2)
        z = np.cumsum(data_np, axis=-2)
        np.testing.assert_array_equal(z, y.numpy())

    def run_static(self, use_gpu=False):
        # Static-graph checks: build all variants once, fetch them in one run.
        with fluid.program_guard(fluid.Program()):
            data_np = np.random.random((100, 100)).astype(np.float32)
            x = paddle.static.data('X', [100, 100])
            y = paddle.cumsum(x)
            y2 = paddle.cumsum(x, axis=0)
            y3 = paddle.cumsum(x, axis=-1)
            y4 = paddle.cumsum(x, dtype='float64')
            y5 = paddle.cumsum(x, dtype=np.int32)
            y6 = paddle.cumsum(x, axis=-2)

            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            out = exe.run(feed={'X': data_np},
                          fetch_list=[
                              y.name, y2.name, y3.name, y4.name, y5.name,
                              y6.name
                          ])

            # rtol/atol mirror np.allclose defaults (previous comparison).
            z = np.cumsum(data_np)
            np.testing.assert_allclose(z, out[0], rtol=1e-05, atol=1e-08)
            z = np.cumsum(data_np, axis=0)
            np.testing.assert_allclose(z, out[1], rtol=1e-05, atol=1e-08)
            z = np.cumsum(data_np, axis=-1)
            np.testing.assert_allclose(z, out[2], rtol=1e-05, atol=1e-08)
            self.assertEqual(out[3].dtype, np.float64)
            self.assertEqual(out[4].dtype, np.int32)
            z = np.cumsum(data_np, axis=-2)
            np.testing.assert_allclose(z, out[5], rtol=1e-05, atol=1e-08)

    def test_cpu(self):
        paddle.disable_static(paddle.fluid.CPUPlace())
        self.run_cases()
        paddle.enable_static()

        self.run_static()

    def test_gpu(self):
        # Skip silently on CPU-only builds.
        if not fluid.core.is_compiled_with_cuda():
            return
        paddle.disable_static(paddle.fluid.CUDAPlace(0))
        self.run_cases()
        paddle.enable_static()

        self.run_static(use_gpu=True)

    def test_name(self):
        # The user-supplied name must appear in the output variable's name.
        with fluid.program_guard(fluid.Program()):
            x = paddle.static.data('x', [3, 4])
            y = paddle.cumsum(x, name='out')
            self.assertTrue('out' in y.name)


class TestSumOp1(OpTest):
    """cumsum along the last axis (axis=2) of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2}
        x = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': x}
        # numpy cumsum is the reference implementation.
        self.outputs = {'Out': np.cumsum(x, axis=2)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp2(OpTest):
    """Reversed cumsum along the last axis (axis=-1, reverse=True)."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': -1, 'reverse': True}
        x = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': x}
        # Reference for a reversed scan: flip, accumulate, flip back.
        expected = np.flip(np.cumsum(np.flip(x, axis=2), axis=2), axis=2)
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp3(OpTest):
    """cumsum along the middle axis (axis=1) of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 1}
        x = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.cumsum(x, axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp4(OpTest):
    """cumsum along the first axis (axis=0) of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 0}
        x = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.cumsum(x, axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp5(OpTest):
    """cumsum with the op's default axis on a 2-D input.

    No 'axis' attr is set; the expected result uses axis=1, i.e. the
    default is presumed to be the last axis — matches the op definition.
    """

    def setUp(self):
        self.op_type = "cumsum"
        x = np.random.random((5, 20)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.cumsum(x, axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp7(OpTest):
    """cumsum over a 1-D input with the op's default axis."""

    def setUp(self):
        self.op_type = "cumsum"
        x = np.random.random((100)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.cumsum(x, axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOpExclusive1(OpTest):
    """Exclusive cumsum: each output element excludes its own input.

    The reference result is the inclusive scan of all-but-the-last slice,
    shifted right by one with a leading zero slice.
    """

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 65)).astype("float64")
        self.inputs = {'X': a}
        lead_zeros = np.zeros((4, 5, 1), dtype=np.float64)
        shifted = np.cumsum(a[:, :, :-1], axis=2)
        self.outputs = {'Out': np.concatenate((lead_zeros, shifted), axis=2)}

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive2(OpTest):
    """Exclusive cumsum on a long, nearly-flat shape (1, 1, 888)."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 888)).astype("float64")
        self.inputs = {'X': a}
        lead_zeros = np.zeros((1, 1, 1), dtype=np.float64)
        shifted = np.cumsum(a[:, :, :-1], axis=2)
        self.outputs = {'Out': np.concatenate((lead_zeros, shifted), axis=2)}

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive3(OpTest):
    """Exclusive cumsum with a float32 input.

    Fix: the expected output previously padded with float64 zeros, so
    ``np.concatenate`` promoted the whole reference array to float64 while
    the op's output is float32. Pad with the input's own dtype instead;
    the numeric values are identical (zeros), only the dtype mismatch in
    the reference is removed.
    """

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 888)).astype("float32")
        self.inputs = {'X': a}
        self.outputs = {
            'Out':
            np.concatenate((np.zeros(
                (4, 5, 1), dtype=a.dtype), a[:, :, :-1].cumsum(axis=2)),
                           axis=2)
        }

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive4(OpTest):
    """Exclusive cumsum on shape (1, 1, 3049) — odd, non-power-of-two length."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((1, 1, 3049)).astype("float64")
        self.inputs = {'X': a}
        lead_zeros = np.zeros((1, 1, 1), dtype=np.float64)
        shifted = np.cumsum(a[:, :, :-1], axis=2)
        self.outputs = {'Out': np.concatenate((lead_zeros, shifted), axis=2)}

    def test_check_output(self):
        self.check_output()


class TestSumOpExclusive5(OpTest):
    """Exclusive cumsum on a larger batched shape (4, 5, 3096)."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        a = np.random.random((4, 5, 3096)).astype("float64")
        self.inputs = {'X': a}
        lead_zeros = np.zeros((4, 5, 1), dtype=np.float64)
        shifted = np.cumsum(a[:, :, :-1], axis=2)
        self.outputs = {'Out': np.concatenate((lead_zeros, shifted), axis=2)}

    def test_check_output(self):
        self.check_output()


class TestSumOpReverseExclusive(OpTest):
    """Combined reverse + exclusive cumsum.

    Reference: flip the input, exclusively scan it, and express the result
    in original orientation — the shifted partial sums followed by a
    trailing zero slice.
    """

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, 'reverse': True, "exclusive": True}
        x = np.random.random((4, 5, 6)).astype("float64")
        self.inputs = {'X': x}
        flipped = np.flip(x, axis=2)
        partial = np.flip(np.cumsum(flipped[:, :, :-1], axis=2), axis=2)
        tail_zeros = np.zeros((4, 5, 1), dtype=np.float64)
        self.outputs = {'Out': np.concatenate((partial, tail_zeros), axis=2)}

    def test_check_output(self):
        self.check_output()


class BadInputTest(unittest.TestCase):
    """cumsum must reject inputs that are not Variables."""

    def test_error(self):
        with fluid.program_guard(fluid.Program()):

            def test_bad_x():
                # A plain Python list is not a Variable.
                data = [1, 2, 4]
                result = fluid.layers.cumsum(data, axis=0)

            # The layer should raise TypeError at graph-construction time.
            self.assertRaises(TypeError, test_bad_x)


# Standard unittest entry point: run every test case in this module.
if __name__ == '__main__':
    unittest.main()