#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
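# These tests drive the reduce_* operators through the OpTest harness: setUp
# declares the op type, inputs, attrs and a numpy reference output, and
# check_output/check_grad compare the registered kernels (plus, where
# check_prim=True, the composite/prim decomposition) against that reference.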


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOpFp16(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )


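# 0-D (scalar) coverage: an empty 'dim' combined with reduce_all=True reduces
# a rank-0 tensor to a rank-0 tensor.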
class TestSumOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
        self.attrs = {'dim': [], 'reduce_all': True}
        # cinn does not support float64 reduce, nor 0-D tensors.
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
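        # numpy has no native bfloat16 dtype; OpTest carries bf16 tensors as
        # uint16 bit patterns, hence convert_float_to_uint16 below.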
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
        self.gradient = self.calc_gradient()

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True, atol=0.1)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            # See https://en.wikipedia.org/wiki/Half-precision_floating-point_format:
            # fp16 represents integers between 0 and 2048 exactly.
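            # (np.float16(2049) already rounds to 2048.0, so the random
            # integers here are deliberately kept small)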
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


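# reduce_max/reduce_min select a single extremal element, so their gradient is
# undefined at ties; the decorator below skips the standard numeric gradient
# check, and where a gradient is still verified only the composite (prim)
# path is used (only_check_prim=True).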
@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        # only the composite (prim) op supports the gradient check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_eager=True,
            check_prim=True,
            only_check_prim=True,
        )

    def test_raise_error(self):
        if core.is_compiled_with_cuda():
            self.inputs = {'X': np.random.random((5, 6, 10)).astype("float16")}
            place = core.CUDAPlace(0)
            with self.assertRaises(RuntimeError) as cm:
                self.check_output_with_place(place, check_eager=True)
            error_msg = str(cm.exception).split("\n")[-2].strip().split(".")[0]
            self.assertEqual(
                error_msg,
                "NotFoundError: The kernel (reduce_max) with key (GPU, Undefined(AnyLayout), float16) is not found and GPU kernel cannot fallback to CPU one",
            )


class TestMaxOp_ZeroDim(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.enable_cinn = False
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        # only the composite (prim) op supports the gradient check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_eager=True,
            check_prim=True,
            only_check_prim=True,
        )


class TestMaxOp_FP32(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
        self.attrs = {'dim': [-1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].max(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        # only the composite (prim) op supports the gradient check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_eager=True,
            check_prim=True,
            only_check_prim=True,
        )


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMinOp_ZeroDim(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


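# OpTest calls python_api with the op's attribute layout; this thin wrapper
# adapts paddle.prod to reduce_prod's (dim, keep_dim) attribute order.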
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProdOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.prod
        self.op_type = "reduce_prod"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.all
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.any
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.any, input2)


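# The Test*Reduce* subclasses below sweep input ranks 1 through 8 and assorted
# 'dim'/'keep_dim' attrs for reduce_sum, mostly by overriding setUp only.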
class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        # only the composite (prim) op supports the gradient check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_eager=True,
            check_prim=True,
            only_check_prim=True,
        )


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=False)


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceAllFp32(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


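# Wrapper that pins out_dtype to "float64" so the dtype-casting path of
# reduce_sum is exercised by the TestReduceWithDtype* cases below.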
def reduce_sum_wrapper(x, axis=None, out_dtype=None, keepdim=False, name=None):
    return paddle.sum(x, axis, "float64", keepdim, name)


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
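        # in_dtype/out_dtype are passed as framework dtype enum values (via
        # convert_np_dtype_to_dtype_) and request a float32 -> float64 cast.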
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # cinn's op_mapper does not support the in_dtype/out_dtype attrs
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # cinn's op_mapper does not support the in_dtype/out_dtype attrs
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8")
            self.assertRaises(TypeError, paddle.sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
        if np_axis is None:
            np_axis = attr_axis
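        # np_axis lets the numpy reference reduce over a different axis spec
        # than the paddle op (e.g. attr_axis=() with np_axis=(0, 1, 2)).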

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = paddle.static.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAllZeroError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.dygraph.guard():

            def test_0_size():
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool')
                paddle.all(x, axis=1)

            self.assertRaises(ValueError, test_0_size)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()