#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
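
# The tests below follow the OpTest pattern from op_test.py: setUp() fills in
# self.op_type, self.inputs, optional self.attrs, and the numpy-computed
# self.outputs; check_output() runs the registered kernel and compares it
# against that reference, and check_grad() performs a finite-difference
# gradient check (or uses user_defined_grads when one is supplied). This is
# a summary of how this file uses the harness; see op_test.py for details.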


class TestSumOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
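
# The analytic gradient of reduce_sum w.r.t. X is a tensor of ones (each
# input element enters the sum with weight 1). The fp16/bf16 variants below
# therefore pass np.ones(...) through user_defined_grads instead of relying
# on a finite-difference check, which is unreliable at such low precision.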


class TestSumOp_fp16(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output()

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
        self.gradient = self.calc_gradient()

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', user_defined_grads=self.gradient)

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]
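
# convert_float_to_uint16 (imported from op_test) packs each float32 value
# into its bfloat16 bit pattern, stored as uint16: bfloat16 keeps the top 16
# bits (sign, 8-bit exponent, 7-bit mantissa) of a float32. A rough numpy
# sketch of the idea, ignoring any rounding the real helper may apply:
#
#     bits = np.asarray(x, dtype=np.float32).view(np.uint32)
#     bf16 = (bits >> 16).astype(np.uint16)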


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            # See https://en.wikipedia.org/wiki/Half-precision_floating-point_format:
            # integer values in [0, 2048] are exactly representable in float16.
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output()

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
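
# float16 has an 11-bit significand, so every integer in [0, 2048] is
# exactly representable, while 2049 already rounds back down to 2048:
#
#     assert np.float16(2048) == 2048
#     assert np.float16(2049) == 2048
#
# Drawing the inputs from [0, 30) keeps the reduced sums small enough that
# the float16 sums above are expected to stay exact in practice.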


class TestSumOp5D(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp6D(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp8D(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework.")
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)
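
# Why the gradient check is skipped for max/min: the finite-difference check
# perturbs each input element, and near ties a perturbation can change which
# element attains the extremum, so the numeric gradient need not match the
# subgradient computed by the kernel.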


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework.")
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)
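
# check_eager re-runs a test through self.python_api; paddle.prod is wrapped
# above so that the op attributes ('dim', 'keep_dim') line up with its
# positional arguments (a reading of the OpTest convention in op_test.py).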


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64")

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64")

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random(
                (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64")

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, )}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (5, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=self.attrs['keep_dim'])
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework.")
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework.")
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=False)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update({
            'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
        })

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update({
            'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update({
            'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(self,
                   shape,
                   x_dtype,
                   attr_axis,
                   attr_dtype=None,
                   np_axis=None):
        if np_axis is None:
            np_axis = attr_axis
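        # np_axis lets the numpy reference use a different axis than the op:
        # paddle.sum with axis=() reduces over every dimension, while np.sum
        # with axis=() reduces over none, so the axis=() case in test_static
        # passes np_axis=(0, 1, 2) explicitly.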

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(x=data,
                                        axis=attr_axis,
                                        dtype=attr_dtype)

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                res, = exe.run(feed={"data": input_data},
                               fetch_list=[result_sum])

            self.assertTrue(
                np.allclose(
                    res, np.sum(input_data.astype(attr_dtype), axis=np_axis)))

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2))

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], np.all(input_np)))

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], np.any(input_np)))

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    import paddle
    paddle.enable_static()
    unittest.main()