#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


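# This suite covers the reduce-family operators (reduce_sum, reduce_max,
# reduce_min, reduce_prod, reduce_all, reduce_any) via OpTest, varying the
# reduced axes, keep_dim, 0-D inputs and dtypes (fp16/bf16/int/complex).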
class TestSumOp(OpTest):
    def setUp(self):
        self.init_dtype()
        self.init_input()
        self.init_attrs()
        self.calc_output()

        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.inputs = {'X': self.x}
        self.outputs = {'Out': self.out}
        self.if_enable_cinn()

    def init_dtype(self):
        self.dtype = np.float64

    def init_input(self):
        self.x = np.random.random((5, 6, 10)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': [0]}

    def if_enable_cinn(self):
        pass

    def calc_output(self):
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestComplexSumOP(TestSumOp):
    def init_dtype(self):
        self.dtype = np.complex128

    def init_input(self):
        self.x = np.random.random((3, 4)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': [0]}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=False)


class TestSumOp_ZeroDim(TestSumOp):
    def init_attrs(self):
        self.attrs = {'dim': []}

    def init_input(self):
        self.x = np.random.random([]).astype(self.dtype)

    def calc_output(self):
        self.out = self.x.sum(axis=None)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp5D(TestSumOp):
    def init_input(self):
        self.x = np.random.random((1, 2, 5, 6, 10)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': [0]}


class TestSumOp6D(TestSumOp):
    def init_input(self):
        self.x = np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': [0]}


class TestSumOp8D(TestSumOp):
    def init_input(self):
        self.x = np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': (0, 3)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestSumOp_withInt(TestSumOp):
    def init_input(self):
        # See https://en.wikipedia.org/wiki/Half-precision_floating-point_format:
        # integers between 0 and 2048 can be represented exactly in half precision.
        self.x = np.random.randint(0, 30, (10, 10)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': (0, 1)}

    def test_check_output(self):
        self.check_output()

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.calc_gradient(),
            check_prim=True,
        )


class TestSumOp3Dim(TestSumOp):
    def init_input(self):
        self.x = np.random.uniform(0, 0.1, (5, 6, 10)).astype(self.dtype)

    def init_attrs(self):
        self.attrs = {'dim': (0, 1, 2)}

    def test_check_output(self):
        self.check_output()

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=self.calc_gradient(),
            check_prim=True,
        )


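# Factory that derives an FP16 variant from each TestSumOp case above; the
# generated classes are skipped unless Paddle is compiled with CUDA.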
def create_test_fp16_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestSumOpFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            self.check_output()

        def test_check_grad(self):
            self.check_grad(
                ['X'],
                'Out',
                check_prim=True,
            )


create_test_fp16_class(TestSumOp)
create_test_fp16_class(TestSumOp_ZeroDim)
create_test_fp16_class(TestSumOp5D)
create_test_fp16_class(TestSumOp6D)
create_test_fp16_class(TestSumOp8D)
create_test_fp16_class(TestSumOp_withInt)
create_test_fp16_class(TestSumOp3Dim)


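# Same pattern for bfloat16: data is stored as uint16 bit patterns through
# convert_float_to_uint16, and the cases run only with CUDA (not ROCm).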
def create_test_bf16_class(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(),
        "core is not compiled with CUDA",
    )
    class TestSumOpBf16(parent):
        def setUp(self):
            # Run the parent's setUp first so self.x / self.out exist, then
            # re-encode them as bfloat16 (uint16 bit patterns).
            super().setUp()
            self.inputs = {'X': convert_float_to_uint16(self.x)}
            self.outputs = {'Out': convert_float_to_uint16(self.out)}
            self.enable_cinn = False

        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                ['X'],
                'Out',
                user_defined_grads=self.calc_gradient(),
                check_prim=True,
            )

        def calc_gradient(self):
            x = self.x
            grad = np.ones(x.shape, dtype=x.dtype)
            return [grad]


create_test_bf16_class(TestSumOp)
create_test_bf16_class(TestSumOp_ZeroDim)
create_test_bf16_class(TestSumOp5D)
create_test_bf16_class(TestSumOp6D)
create_test_bf16_class(TestSumOp8D)
create_test_bf16_class(TestSumOp_withInt)
create_test_bf16_class(TestSumOp3Dim)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # only the composite (prim) implementation supports the gradient
        # check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=True,
        )


class TestMaxOp_ZeroDim(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.if_enable_cinn()
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # only the composite (prim) implementation supports the gradient
        # check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=True,
        )


class TestMaxFP32Op(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.init_dtype()
        self.if_enable_cinn()
        if self.dtype == np.uint16:
            x = np.random.random((5, 6, 10)).astype(np.float32)
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.random((5, 6, 10)).astype(self.dtype)
            self.inputs = {'X': x}
        self.attrs = {'dim': [-1], 'keep_dim': True}
        out = x.max(axis=tuple(self.attrs['dim']), keepdims=True)
        if self.dtype == np.uint16:
            self.outputs = {'Out': convert_float_to_uint16(out)}
        else:
            self.outputs = {'Out': out}

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # only the composite (prim) implementation supports the gradient
        # check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=True,
        )

    def init_dtype(self):
        self.dtype = np.float32


class TestMaxFP16Op(TestMaxFP32Op):
    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.is_compiled_with_rocm()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestMaxBF16Op(TestMaxFP32Op):
    def init_dtype(self):
        self.dtype = np.uint16

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0))

    def test_check_grad(self):
        # only the composite (prim) implementation supports the gradient
        # check for reduce_max
        self.check_grad_with_place(
            core.CUDAPlace(0),
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=True,
        )


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestMinOp_ZeroDim(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestMin6DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestMin8DOp(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
@unittest.skipIf(
    paddle.is_compiled_with_rocm(), "ROCm doesn't have FP16 reduce_min kernel"
)
class TestMinFP16Op(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.public_python_api = paddle.min
        self.init_dtype()
        if self.dtype == np.uint16:
            x = np.random.random((5, 6, 10)).astype(np.float32)
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.random((5, 6, 10)).astype(self.dtype)
            self.inputs = {'X': x}
        self.attrs = {'dim': [2], 'keep_dim': True}
        out = x.min(axis=tuple(self.attrs['dim']), keepdims=True)
        if self.dtype == np.uint16:
            self.outputs = {'Out': convert_float_to_uint16(out)}
        else:
            self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output()


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.is_compiled_with_rocm()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestMinBF16Op(TestMinFP16Op):
    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0))


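# Adapter matching the reduce_prod op's attribute names and defaults
# (dim, keep_dim) so OpTest can use paddle.prod as the Python reference.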
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.public_python_api = raw_reduce_prod
        self.prim_op_type = "prim"
        self.init_data_type()
        self.init_inputs_and_outputs()
        self.if_enable_cinn()

    def init_inputs_and_outputs(self):
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestProdFP16OP(TestProdOp):
    def init_data_type(self):
        self.data_type = "float16"

    def test_check_output(self):
        self.check_output_with_place(place=paddle.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(
            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
        )


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.is_compiled_with_rocm()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestProdBFP16OP(TestProdOp):
    def init_data_type(self):
        self.data_type = np.uint16

    def init_inputs_and_outputs(self):
        x = np.random.random((5, 6, 10)).astype("float32")
        out = x.prod(axis=0)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output_with_place(place=paddle.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(
            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
        )


class TestProdOpFp64(TestProdOp):
    def init_data_type(self):
        self.data_type = "float64"


class TestProdOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = raw_reduce_prod
        self.public_python_api = raw_reduce_prod
        self.op_type = "reduce_prod"
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

        # CINN does not support 0-D tensors yet
        self.enable_cinn = False

    def init_inputs_and_outputs(self):
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.public_python_api = raw_reduce_prod
        self.prim_op_type = "prim"
        self.init_data_type()
        self.init_inputs_and_outputs()
        self.if_enable_cinn()

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def init_inputs_and_outputs(self):
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestProd6DFP16OP(TestProd6DOp):
    def init_data_type(self):
        self.data_type = "float16"

    def test_check_output(self):
        self.check_output_with_place(place=paddle.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(
            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
        )


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.is_compiled_with_rocm()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestProd6DBFP16OP(TestProd6DOp):
    def init_data_type(self):
        self.data_type = np.uint16

    def init_inputs_and_outputs(self):
        x = np.random.random((5, 6, 2, 3, 4, 2)).astype("float32")
        self.attrs = {'dim': [2, 3, 4]}
        out = x.prod(axis=tuple(self.attrs['dim']))
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output_with_place(place=paddle.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(
            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
        )


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.public_python_api = raw_reduce_prod
        self.init_data_type()
        self.init_inputs_and_outputs()

    def init_inputs_and_outputs(self):
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestProd8DFP16OP(TestProd8DOp):
    def init_data_type(self):
        self.data_type = "float16"

    def test_check_output(self):
        self.check_output_with_place(place=paddle.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(paddle.CUDAPlace(0), ['X'], 'Out')


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.is_compiled_with_rocm()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestProd8DBFP16OP(TestProd8DOp):
    def init_data_type(self):
        self.data_type = np.uint16

    def init_inputs_and_outputs(self):
        x = np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float32")
        self.attrs = {'dim': [2, 3, 4]}
        out = x.prod(axis=tuple(self.attrs['dim']))
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output_with_place(place=paddle.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(paddle.CUDAPlace(0), ['X'], 'Out')


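# The reduce_all op always carries a reduce_all attr; this wrapper accepts and
# drops it so OpTest can forward the remaining arguments to paddle.all.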
def reduce_all_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None):
    return paddle.all(x, axis, keepdim, name)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = reduce_all_wrapper
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output()


class TestAllOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.all
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'dim': []}

    def test_check_output(self):
        self.check_output()


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output()


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output()


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output()


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output()


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output()


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.all, input2)


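# Same trick as reduce_all_wrapper, but for paddle.any.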
def reduce_any_wrapper(x, axis=None, keepdim=False, reduce_all=True, name=None):
    return paddle.any(x, axis, keepdim, name)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = reduce_any_wrapper
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output()


class TestAnyOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.any
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'dim': []}

    def test_check_output(self):
        self.check_output()


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output()


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output()


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output()


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output()


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output()


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.any, input2)


class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.if_enable_cinn()


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.if_enable_cinn()


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.if_enable_cinn()


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.if_enable_cinn()


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.if_enable_cinn()


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.if_enable_cinn()


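# Thin wrapper over the C++ kernel entry paddle._C_ops.sum, used by the
# eager-mode cases below; its argument order is (x, axis, dtype, keepdim).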
def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
    return paddle._C_ops.sum(x, axis, dtype, keepdim)


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper2
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }
        self.if_enable_cinn()


class TestKeepDimReduceForEager(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper2
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper2
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.prim_op_type = "prim"
        self.python_api = paddle.max
        self.public_python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # only the composite (prim) implementation supports the gradient
        # check for reduce_max
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=True,
        )


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output()


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestKeepDimReduceSumMultiAxisesForEager(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper2
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumWithDimOneForEager(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper2
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }
        self.enable_cinn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=False)


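# Wrapper for reduce_sum with reduce_all=True: it drops the reduce_all flag and
# forwards the rest to paddle.sum, whose signature is (x, axis, dtype, keepdim,
# name).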
def reduce_sum_wrapper(
    x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None
):
    return paddle.sum(x, axis, out_dtype, keepdim, name)


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceAllFp32(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper
        self.public_python_api = reduce_sum_wrapper
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


def reduce_sum_wrapper_fp64(
    x, axis=None, keepdim=False, reduce_all=True, out_dtype=None, name=None
):
    return paddle.sum(x, axis, 'float64', keepdim, name)


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.python_api = reduce_sum_wrapper_fp64
        self.public_python_api = reduce_sum_wrapper_fp64
        self.prim_op_type = "prim"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        self.if_enable_cinn()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
1442 1443
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
1444
        self.prim_op_type = "prim"
1445 1446 1447
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
1448 1449 1450 1451 1452 1453
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
1454
        # cinn op_mapper not support in_dtype/out_dtype attr
1455 1456 1457 1458 1459 1460 1461
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)
1462 1463 1464 1465 1466


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.prim_op_type = "prim"
        self.python_api = paddle.sum
        self.public_python_api = paddle.sum
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
        # CINN's op_mapper does not support the in_dtype/out_dtype attrs
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestReduceSumOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program(), Program()):
                # The input type of reduce_sum_op must be Variable.
                x1 = fluid.create_lod_tensor(
                    np.array([[-1]]), [[1]], fluid.CPUPlace()
                )
                self.assertRaises(TypeError, paddle.sum, x1)
                # The input dtype of reduce_sum_op must be float32, float64,
                # int32 or int64.
                x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8")
                self.assertRaises(TypeError, paddle.sum, x2)


class API_TestSumOp(unittest.TestCase):
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
        if np_axis is None:
            np_axis = attr_axis

        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = paddle.static.data("data", shape=shape, dtype=x_dtype)
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )

            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )

    def test_static(self):
        shape = [10, 10]
        axis = 1

        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
        self.run_static(shape, "bool", axis, attr_dtype="float16")

        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
        self.run_static(shape, "int32", axis, attr_dtype="float64")

        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
        self.run_static(shape, "float32", axis, attr_dtype="int64")

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


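# The remaining cases exercise the user-facing paddle.all / paddle.any APIs in
# both static-graph and dygraph modes, comparing against NumPy results.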
class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = paddle.assign(np_x)
                x = paddle.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAllZeroError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.dygraph.guard():

            def test_0_size():
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [0, 0, 0]), dtype='bool')
                paddle.all(x, axis=1)

            self.assertRaises(ValueError, test_0_size)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()