#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
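
"""Unit tests for the reduce ops (reduce_sum, reduce_max, reduce_min,
reduce_prod, reduce_all, reduce_any) and their Python APIs."""
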
import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
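        # check_eager=True additionally verifies the op in eager mode through
        # the Python API registered as self.python_api.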
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_ZeroDim(OpTest):
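    """Reduce over a 0-d (scalar) tensor; np.random.random([]) yields a 0-d array."""
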
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_fp16(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
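        # The gradient of sum(X) w.r.t. X is an all-ones tensor of X's shape.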
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

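        # bfloat16 has no native numpy dtype; convert_float_to_uint16 bit-casts
        # the float32 data to bfloat16 values stored as uint16.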
        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # See https://en.wikipedia.org/wiki/Half-precision_floating-point_format:
            # integer values between 0 and 2048 are exactly representable in float16.
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check
    to keep CI passing."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMaxOp_ZeroDim(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check
    to keep CI passing."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMinOp_ZeroDim(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


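# A thin wrapper with the raw op signature (x, dim, keep_dim) so that OpTest
# can map the Python API onto the reduce_prod op's attributes.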
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProdOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.prod
        self.op_type = "reduce_prod"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.all
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.any
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


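# Base case for the reduce_sum shape/axis variations below; subclasses only
# override setUp.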
class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
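        # Exercise the optional 'in_dtype'/'out_dtype' attributes, which cast
        # around the reduction (float32 -> float64 here).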
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, paddle.sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
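        # np_axis lets the numpy reference reduce over a different axis set
        # than the op attribute (e.g. attr_axis=() is checked against
        # np_axis=(0, 1, 2) below).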
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()