#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
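
# Every OpTest subclass below follows the same pattern: setUp() registers the
# op under test (op_type), its Python entry point (python_api), numpy inputs,
# op attributes, and the expected numpy outputs; test_check_output() then runs
# the kernel against that reference, and check_eager=True repeats the check in
# eager mode.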


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_fp16(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
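        # The analytic gradient of sum() is 1 for every input element.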
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
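        # bfloat16 has no native numpy dtype here, so bf16 tensors are carried
        # as uint16 bit patterns; convert_float_to_uint16 (from op_test) packs
        # the float32 arrays into that form.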
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
        self.gradient = self.calc_gradient()

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True)

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # See https://en.wikipedia.org/wiki/Half-precision_floating-point_format:
            # integer values between 0 and 2048 can be represented exactly in fp16.
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


def raw_reduce_prod(x, dim=[0], keep_dim=False):
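    # Thin wrapper so OpTest's python_api hook sees a callable whose signature
    # matches the reduce_prod op attributes ('dim', 'keep_dim').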
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        if core.is_compiled_with_rocm():
            self.data_type = "float32"
        else:
            self.data_type = "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        if core.is_compiled_with_rocm():
            self.data_type = "float32"
        else:
            self.data_type = "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random(
                (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        if core.is_compiled_with_rocm():
            self.data_type = "float32"
        else:
            self.data_type = "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, )}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
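        # keep_dim retains the reduced axis with size 1; the numpy reference
        # mirrors it with np.expand_dims.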
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (5, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2,
                                   (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
        }
        self.attrs = {'dim': (1, ), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=self.attrs['keep_dim'])
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=self.attrs['keep_dim'])
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
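        # Negative axes count from the last dimension: [-2, -1] reduces the
        # trailing two dims of the (5, 6, 10) input.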
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out':
            self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=True)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
                                        keepdims=False)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
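        # Presumably 'in_dtype'/'out_dtype' exercise reduce_sum's internal
        # cast path; paddle.sum's `dtype` argument maps onto these attributes.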
        self.attrs.update({
            'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
        })

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update({
            'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update({
            'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
            'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
        })


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(self,
                   shape,
                   x_dtype,
                   attr_axis,
                   attr_dtype=None,
                   np_axis=None):
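        # np_axis lets the numpy reference reduce over a different axis spec
        # than the op: test_static below passes attr_axis=() to paddle.sum
        # (meaning "reduce all dims") together with np_axis=(0, 1, 2).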
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(x=data,
                                        axis=attr_axis,
                                        dtype=attr_dtype)

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                res, = exe.run(feed={"data": input_data},
                               fetch_list=[result_sum])

            self.assertTrue(
                np.allclose(
                    res, np.sum(input_data.astype(attr_dtype), axis=np_axis)))

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2))

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], np.all(input_np)))

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], np.any(input_np)))

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()