#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
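"""Unit tests for the reduce ops (reduce_sum, reduce_max, reduce_min,
reduce_prod, reduce_all, reduce_any) and their Python APIs.

Each OpTest subclass builds random numpy inputs, declares the expected
outputs via the matching numpy reduction, and lets the OpTest harness
compare the operator's forward results (and, where enabled, gradients)
against those references.
"""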

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


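# check_eager=True additionally runs each check in eager (dygraph) mode via
# the declared python_api and compares against the same numpy reference.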
class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_fp16(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
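        # d(sum)/dx is 1 for every input element, so the reference gradient
        # is simply an all-ones array with the input's shape and dtype.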
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


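# bfloat16 tensors are carried as uint16 bit patterns in these tests:
# convert_float_to_uint16 maps each float32 value to its bfloat16 bits.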
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
        self.gradient = self.calc_gradient()

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # Integer values in [0, 2048] are exactly representable in
            # float16; see
            # https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


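# Adapter so OpTest's eager-mode check can call paddle.prod positionally with
# the reduce_prod op's 'dim' and 'keep_dim' attributes.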
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


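# The Test*Reduce classes below inherit test_check_output/test_check_grad from
# Test1DReduce and only vary the input shape and the 'dim' attribute.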
class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


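# keep_dim=True retains each reduced axis with size 1; the numpy references
# mirror this with keepdims=True.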
class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
718 719 720
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
736 737 738
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


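# The 'in_dtype'/'out_dtype' attributes below exercise the op's internal cast
# path; the numpy reference results are still computed in float64.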
class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


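# API-level tests: exercise paddle.all/paddle.any in both static graph and
# dygraph modes and compare the results against numpy.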
class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()