#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
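
# Every OpTest case below follows the same harness contract: setUp() fills
# self.inputs / self.attrs / self.outputs; check_output() runs the compiled
# operator and compares against the numpy reference in self.outputs;
# check_grad() differentiates numerically unless user_defined_grads supplies
# a closed form; check_eager=True repeats the check in eager mode via the
# registered python_api.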


class TestSumOp(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_fp16(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
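        # d(sum)/dX is an all-ones tensor of X's shape; this closed form is
        # passed as user_defined_grads because float16 finite differences are
        # too coarse for the default numeric gradient check.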
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(['X'],
                        'Out',
                        user_defined_grads=self.gradient,
                        check_eager=True)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSumOp_bf16(OpTest):

    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
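        # bfloat16 has no native numpy dtype, so X/Out are carried as uint16
        # values holding the upper 16 bits of each float32 bit pattern.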
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   user_defined_grads=self.gradient,
                                   check_eager=True)

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # Ref: https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            # Every integer in [0, 2048] is exactly representable in fp16, so
            # sums of these small random ints incur no rounding error.
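            # Illustration only: np.float16(2048) == 2048 holds, while
            # np.float16(2049) rounds back to 2048 (the next representable
            # step above 2048 is 2050).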
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(['X'],
                        'Out',
                        user_defined_grads=self.gradient,
                        check_eager=True)


class TestSumOp5D(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):

    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
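# Note: max routes its gradient only to the arg-max element, so a numeric
# gradient check is ill-defined at ties; only the forward output is verified.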
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


def raw_reduce_prod(x, dim=[0], keep_dim=False):
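    # Thin adapter so the eager-mode check (python_api) can call paddle.prod
    # with this op's positional (x, dim, keep_dim) attribute layout.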
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = "float32" if core.is_compiled_with_rocm(
        ) else "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = "float32" if core.is_compiled_with_rocm(
        ) else "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random(
                (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = "float32" if core.is_compiled_with_rocm(
        ) else "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
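        # reduce_all is the logical-AND reduction (reduce_any below is the
        # logical OR), so numpy's ndarray.all()/any() serve as references.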
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, )}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (5, ), 'keep_dim': True}
        self.outputs = {
            'Out':
            np.expand_dims(self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(name='input2',
                                       shape=[12, 10],
                                       dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out':
            np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):

    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out':
            np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(name='input2',
                                       shape=[12, 10],
                                       dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


class Test1DReduce(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class TestKeepDimReduce(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
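        # keep_dim=True retains each reduced axis with size 1 (numpy's
        # keepdims), e.g. (5, 6, 10) reduced over dim=[1] -> (5, 1, 10).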
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                 keepdims=self.attrs['keep_dim'])
        }


class TestKeepDim8DReduce(Test1DReduce):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                 keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(
    reason="reduce_max is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework.")
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=False)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
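        # With reduce_all set, the kernel ignores any 'dim' attribute and
        # collapses every axis, producing a single scalar.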
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype(OpTest):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update({
            'in_dtype':
            int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype':
            int(convert_np_dtype_to_dtype_(np.float64))
        })
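        # in_dtype/out_dtype ask the kernel to cast X to float32 before
        # reducing and emit the result as float64 (the dtype-cast path).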

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update({
            'in_dtype':
            int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype':
            int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceWithDtype2(TestReduceWithDtype):

    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update({
            'in_dtype':
            int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype':
            int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceSumOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)


class API_TestSumOp(unittest.TestCase):

    def run_static(self,
                   shape,
                   x_dtype,
                   attr_axis,
                   attr_dtype=None,
                   np_axis=None):
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(x=data,
                                        axis=attr_axis,
                                        dtype=attr_dtype)

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                res, = exe.run(feed={"data": input_data},
                               fetch_list=[result_sum])

            self.assertTrue(
                np.allclose(res,
                            np.sum(input_data.astype(attr_dtype),
                                   axis=np_axis)))

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(shape,
                        "int32", (),
                        attr_dtype="int32",
                        np_axis=(0, 1, 2))

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], np.all(input_np)))

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], np.any(input_np)))

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
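    # OpTest cases expect static-graph mode; eager-mode coverage comes from
    # the check_eager=True paths above.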
    unittest.main()