#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
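
# Every OpTest case below builds random numpy inputs and reference outputs in
# setUp(); check_output()/check_grad() then compare the compiled reduce_*
# operator (and, when check_eager=True, its eager-mode counterpart) against
# those numpy references.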


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
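        # np.random.random([]) yields a 0-d (scalar) array; reducing it over
        # an empty dim list with reduce_all=True should return that scalar.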
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_fp16(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
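        # The op is a plain summation, so d(Out)/d(X) is 1 for every element.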
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

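        # OpTest consumes bfloat16 data as uint16 bit patterns, hence the
        # float32 -> uint16 conversion of both inputs and expected outputs.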
        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # float16 can represent integers in [0, 2048] exactly; see
            # https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Max has only a subgradient; skip its gradient check so CI can pass."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Min has only a subgradient; skip its gradient check so CI can pass."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Min has only a subgradient; skip its gradient check so CI can pass."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Min has only a subgradient; skip its gradient check so CI can pass."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


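# OpTest invokes python_api with the op's attribute values, so this thin
# wrapper maps the legacy (dim, keep_dim) attributes onto paddle.prod.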
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
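        # keep_dim=True keeps the reduced axis as a dimension of size 1,
        # which np.expand_dims reproduces in the reference output.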
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Max has only a subgradient; skip its gradient check so CI can pass."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Min has only a subgradient; skip its gradient check so CI can pass."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
715 716 717
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
W
whs 已提交
718 719 720 721 722 723 724 725 726
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
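        # reduce_all=True ignores any 'dim' attribute and collapses every
        # axis, producing a scalar output.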
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
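        # convert_np_dtype_to_dtype_ maps numpy dtypes onto Paddle's dtype
        # enum; in_dtype/out_dtype exercise reduce_sum's cast-during-reduce
        # path while the reference output stays plain numpy.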
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
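        # Build a static program that sums random data with paddle.sum and
        # compare it against np.sum; np_axis supplies the axes numpy needs
        # when attr_axis is an empty tuple (i.e. reduce over all dimensions).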
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
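                # assign() materializes the numpy array as a tensor; the extra
                # cast guarantees a bool dtype before all()/any() is applied.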
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
1016
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()