#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
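
# This file exercises the reduce family of ops (sum/max/min/prod/all/any)
# against numpy references. As a rough sketch of the semantics under test
# (assuming a working paddle install), paddle.sum follows numpy's rules:
#
#   x = paddle.to_tensor(np.arange(6, dtype="float64").reshape(2, 3))
#   paddle.sum(x, axis=0)                # -> [3., 5., 7.]
#   paddle.sum(x, axis=1, keepdim=True)  # -> shape [2, 1]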


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOpFp32(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )


class TestSumOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
        self.attrs = {'dim': [], 'reduce_all': True}
        # reduce doesn't support float64 in cinn.
        # 0-D tensors are not supported in cinn.
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


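# bf16 tensors are carried as uint16 bit patterns (convert_float_to_uint16),
# so the reference output is computed in float32 and converted afterwards.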
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.gradient = self.calc_gradient()
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True, atol=0.1)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


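# Summing integer-valued fp16 inputs keeps the reference exact: every
# integer in [0, 2048] is exactly representable in half precision.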
class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            # Integers in [0, 2048] can be represented exactly in fp16; see
            # https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        # error occurred in cinn
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


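# max/min are only subdifferentiable at ties, so the numeric gradient
# checks for reduce_max/reduce_min are skipped via @skip_check_grad_ci.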
@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_raise_error(self):
        if core.is_compiled_with_cuda():
            self.inputs = {'X': np.random.random((5, 6, 10)).astype("float16")}
            place = core.CUDAPlace(0)
            with self.assertRaises(RuntimeError) as cm:
                self.check_output_with_place(place, check_eager=True)
            error_msg = str(cm.exception).split("\n")[-2].strip().split(".")[0]
            self.assertEqual(
                error_msg,
                "NotFoundError: The kernel (reduce_max) with key (GPU, Undefined(AnyLayout), float16) is not found and GPU kernel cannot fallback to CPU one",
            )


class TestMaxOp_ZeroDim(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMinOp_ZeroDim(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


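# paddle.prod takes (x, axis, keepdim); this wrapper adapts it to the
# dim/keep_dim naming that the reduce_prod op attributes use.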
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProdOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.prod
        self.op_type = "reduce_prod"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


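# reduce_all/reduce_any reduce boolean tensors. A quick sketch (assuming a
# working paddle install):
#
#   x = paddle.to_tensor([[True, False], [True, True]])
#   paddle.all(x)          # -> False
#   paddle.all(x, axis=0)  # -> [True, False]
#   paddle.any(x)          # -> True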
class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.all
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.any
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.any, input2)


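# The Test*Reduce classes below reuse Test1DReduce's output/grad checks and
# only override setUp, varying input rank, reduce axes, and keep_dim.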
class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


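# keep_dim=True retains each reduced axis with size 1 (numpy's keepdims),
# so the output still broadcasts against the original input.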
class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


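# check_prim=True also validates the gradient produced by the composite
# (prim) lowering of reduce_sum, not just the C++ kernel's gradient.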
class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=False)


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceAllFp32(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


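# paddle.sum accepts a dtype to cast to before reducing; the wrapper below
# pins it to "float64" so the python_api matches the in_dtype/out_dtype
# attrs set in the TestReduceWithDtype cases.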
def reduce_sum_wrapper(
    x, axis=None, dtype_rename=None, keepdim=False, name=None
):
    return paddle.sum(x, axis, "float64", keepdim, name)


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # cinn's op_mapper does not support the in_dtype/out_dtype attrs
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.python_api = reduce_sum_wrapper
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # cinn's op_mapper does not support the in_dtype/out_dtype attrs
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64,
            # int32, or int64.
            x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8")
            self.assertRaises(TypeError, paddle.sum, x2)


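# API-level checks: run paddle.sum through the static-graph (Executor) path
# for many input/output dtype combinations, and through dygraph.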
class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


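# Reducing a 0-size tensor along an axis is rejected: paddle.all raises
# ValueError rather than returning an empty result.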
class TestAllZeroError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.dygraph.guard():

            def test_0_size():
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool')
                paddle.all(x, axis=1)

            self.assertRaises(ValueError, test_0_size)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()