#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

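# Unit tests for the reduce family of operators (reduce_sum, reduce_max,
# reduce_min, reduce_prod, reduce_all, reduce_any) and for their Python
# APIs (paddle.sum/max/min/prod/all/any) in static and dygraph modes.
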
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


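# The operator tests below follow the standard OpTest pattern: setUp()
# declares the op_type, numpy inputs, op attributes, and the expected numpy
# outputs; check_output() runs the registered operator and compares its
# results against self.outputs, while check_grad() numerically checks the
# gradient. Passing check_eager=True additionally exercises the eager
# (dygraph) code path through the callable stored in self.python_api.
# Roughly, the numpy reference computed in setUp() looks like:
#
#     x = np.random.random((5, 6, 10))
#     expected = x.sum(axis=0)    # reference output for reduce_sum, dim=[0]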
class TestSumOp(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


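# float16 variant: the inputs are drawn from uniform(0, 0.1), presumably to
# keep the reduced sums well within float16 precision, and the analytic
# gradient of a full sum (an all-ones array) is supplied via
# user_defined_grads because numeric differencing is unreliable in half
# precision.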
class TestSumOp_fp16(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(['X'],
                        'Out',
                        user_defined_grads=self.gradient,
                        check_eager=True)


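# bfloat16 variant: numpy has no native bfloat16 dtype, so tensors are stored
# as uint16 bit patterns and convert_float_to_uint16() performs the
# conversion. The test is CUDA-only and, as in the float16 case, the all-ones
# gradient of a full sum is supplied explicitly.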
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSumOp_bf16(OpTest):

    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
        self.gradient = self.calc_gradient()

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   user_defined_grads=self.gradient,
                                   check_eager=True)

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # See https://en.wikipedia.org/wiki/Half-precision_floating-point_format:
            # integer values in [0, 2048] can be represented exactly in float16.
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(['X'],
                        'Out',
                        user_defined_grads=self.gradient,
                        check_eager=True)


class TestSumOp5D(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


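# The eager-mode check (check_eager=True) invokes self.python_api with the
# operator's inputs and attributes, so this thin wrapper adapts reduce_prod's
# (x, dim, keep_dim) signature to paddle.prod.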
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = "float32" if core.is_compiled_with_rocm(
        ) else "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = "float32" if core.is_compiled_with_rocm(
        ) else "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random(
                (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = "float32" if core.is_compiled_with_rocm(
        ) else "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, )}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (5, ), 'keep_dim': True}
        self.outputs = {
            'Out':
            np.expand_dims(self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(name='input2',
                                       shape=[12, 10],
                                       dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out':
            np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out':
            np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(name='input2',
                                       shape=[12, 10],
                                       dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


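# Test1DReduce is the base case for a family of reduce_sum shape tests: each
# subclass below overrides only setUp() to vary the input rank and the
# reduced dimensions, inheriting test_check_output/test_check_grad unchanged.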
class Test1DReduce(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


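# 'keep_dim' mirrors numpy's keepdims: reduced axes are kept with size 1
# rather than squeezed out, e.g.
#
#     np.random.random((5, 6, 10)).sum(axis=(1,), keepdims=True).shape
#     # -> (5, 1, 10)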
class TestKeepDimReduce(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                 keepdims=self.attrs['keep_dim'])
        }


class TestKeepDim8DReduce(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                 keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=False)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


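# The 'in_dtype'/'out_dtype' attributes ask the operator to cast before and
# after the reduction; convert_np_dtype_to_dtype_ maps a numpy dtype to the
# framework's dtype enum, which the attributes store as plain ints.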
class TestReduceWithDtype(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update({
            'in_dtype':
            int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype':
            int(convert_np_dtype_to_dtype_(np.float64))
        })

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update({
            'in_dtype':
            int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype':
            int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceWithDtype2(TestReduceWithDtype):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update({
            'in_dtype':
            int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype':
            int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceSumOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
            # The input dtype of reduce_sum_op  must be float32 or float64 or int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)


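# The remaining tests exercise the public Python APIs rather than the raw
# operator: run_static builds a small static-graph program around paddle.sum
# for every available place (CPU, plus CUDA when compiled in) and compares
# the fetched result against np.sum.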
class API_TestSumOp(unittest.TestCase):

    def run_static(self,
                   shape,
                   x_dtype,
                   attr_axis,
                   attr_dtype=None,
                   np_axis=None):
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(x=data,
                                        axis=attr_axis,
                                        dtype=attr_dtype)

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                res, = exe.run(feed={"data": input_data},
                               fetch_list=[result_sum])

            np.testing.assert_allclose(res,
                                       np.sum(input_data.astype(attr_dtype),
                                              axis=np_axis),
                                       rtol=1e-05)

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(shape,
                        "int32", (),
                        attr_dtype="int32",
                        np_axis=(0, 1, 2))

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


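# paddle.all and paddle.any are checked in both execution modes:
# check_static_result runs a static-graph program per place, while
# test_dygraph compares eager results against np.all/np.any for the default
# (full) reduction, explicit axes, and keepdim=True.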
class TestAllAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()