#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
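# Unit tests for the reduce ops: reduce_sum, reduce_max, reduce_min,
# reduce_prod, reduce_all and reduce_any, plus their Python-level APIs.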

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOpFp32(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
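        # Every element of X contributes to the sum with weight 1, so the
        # analytic gradient w.r.t. X is an all-ones tensor of X's shape.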
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )


class TestSumOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
        self.attrs = {'dim': [], 'reduce_all': True}
        # CINN supports neither float64 reduce nor 0-D tensors,
        # so it is disabled here.
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


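# bfloat16 has no native numpy dtype, so OpTest feeds and fetches the bf16
# tensors as uint16 via convert_float_to_uint16 (note self.dtype = np.uint16).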
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
        self.gradient = self.calc_gradient()

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True, atol=0.1)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            # integer values between 0 and 2048 can be represented exactly in fp16
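            # (fp16 carries an 11-bit significand, so np.float16(2048) == 2048.0
            # while np.float16(2049) rounds back to 2048.0)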
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
            check_prim=True,
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        # error occurred in cinn
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework."
)
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_raise_error(self):
        if core.is_compiled_with_cuda():
            self.inputs = {'X': np.random.random((5, 6, 10)).astype("float16")}
            place = core.CUDAPlace(0)
            with self.assertRaises(RuntimeError) as cm:
                self.check_output_with_place(place, check_eager=True)
            error_msg = str(cm.exception).split("\n")[-2].strip().split(".")[0]
            self.assertEqual(
                error_msg,
                "NotFoundError: The kernel (reduce_max) with key (GPU, Undefined(AnyLayout), float16) is not found and GPU kernel cannot fallback to CPU one",
            )


class TestMaxOp_ZeroDim(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework."
)
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMinOp_ZeroDim(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


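# Thin wrapper for OpTest's python_api hook: it maps the reduce_prod op's
# 'dim'/'keep_dim' attrs onto paddle.prod's positional arguments, whose
# keyword names differ.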
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProdOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.prod
        self.op_type = "reduce_prod"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.all
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.any
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.any, input2)


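# Most of the Test*Reduce cases below only override setUp(); test_check_output
# and test_check_grad are inherited from Test1DReduce unless redefined.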
class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


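# keep_dim=True mirrors numpy's keepdims: the reduced axes are retained in the
# output with size 1.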
class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous non-derivable function,"
    " its gradient check is not supported by unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=False)


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceAllFp32(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


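# Wrapper for OpTest's python_api hook: it hard-codes dtype="float64" to match
# the in_dtype/out_dtype attrs set by the TestReduceWithDtype cases below; the
# out_dtype parameter exists only for signature compatibility and is ignored.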
def reduce_sum_wrapper(x, axis=None, out_dtype=None, keepdim=False, name=None):
    return paddle.sum(x, axis, "float64", keepdim, name)


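# The cases below also set the reduce_sum op's in_dtype/out_dtype attrs
# directly, presumably to cover the kernel's dtype-casting path.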
class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # CINN's op_mapper does not support the in_dtype/out_dtype attrs.
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # CINN's op_mapper does not support the in_dtype/out_dtype attrs.
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.sum, x1)
            # The input dtype of reduce_sum_op must be float32, float64, int32 or int64.
            x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8")
            self.assertRaises(TypeError, paddle.sum, x2)


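# Functional API checks: compare paddle.sum against np.sum over several
# dtype/axis combinations, in static graph mode and in dygraph.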
class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = paddle.static.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


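# paddle.all / paddle.any API checks, exercised in both static graph and
# dygraph modes.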
class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAllZeroError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.dygraph.guard():

            def test_0_size():
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool')
                paddle.all(x, axis=1)

            self.assertRaises(ValueError, test_0_size)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()