#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

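# Unit tests for the reduce ops (reduce_sum/max/min/prod/all/any). Each OpTest
# case builds a NumPy reference in self.inputs/self.outputs and lets the test
# framework compare the compiled kernels against it.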
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_


class TestSumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
        self.attrs = {'dim': [0]}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


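# For float16/bfloat16, the framework's numeric gradient is too inaccurate for
# the default check, so the low-precision cases below pass the exact analytic
# gradient of sum (ones_like(x)) via user_defined_grads.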
class TestSumOp_fp16(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


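# Note: OpTest stores bfloat16 tensors as uint16 bit patterns;
# convert_float_to_uint16 packs float32 values into that representation.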
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSumOp_bf16(OpTest):
    def setUp(self):
        np.random.seed(100)
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.dtype = np.uint16
        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
        self.attrs = {'dim': [0, 1, 2]}
        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=self.gradient,
            check_eager=True,
        )

    def calc_gradient(self):
        x = self.x
        grad = np.ones(x.shape, dtype=x.dtype)
        return [grad]


class TestSumOp_fp16_withInt(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            # float16 represents integers in [0, 2048] exactly, so these sums
            # of small ints stay lossless
            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
        }
        self.attrs = {'dim': [0, 1]}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }
        self.gradient = self.calc_gradient()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def calc_gradient(self):
        x = self.inputs["X"]
        grad = np.ones(x.shape, dtype=x.dtype)
        return (grad,)

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
        )


class TestSumOp5D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp6D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSumOp8D(OpTest):
    def setUp(self):
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
        }
        self.attrs = {'dim': (0, 3)}
        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework."
)
class TestMaxOp(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMaxOp_ZeroDim(OpTest):
    """Remove Max with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework."
)
class TestMinOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMinOp_ZeroDim(OpTest):
    """Remove Min with subgradient from gradient check to confirm the success of CI."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.attrs = {'dim': []}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin6DOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
        }
        self.attrs = {'dim': [2, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestMin8DOp(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {
            'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


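# paddle.prod takes (x, axis, keepdim) positionally; this thin wrapper pins a
# default dim so OpTest can invoke it as the python_api twin of reduce_prod.
# (The mutable list default is harmless here since it is never modified.)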
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)


class TestProdOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProdOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.prod
        self.op_type = "reduce_prod"
        self.inputs = {'X': np.random.random([]).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].prod()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd6DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestProd8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_prod"
        self.python_api = raw_reduce_prod
        self.init_data_type()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(
                self.data_type
            )
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def init_data_type(self):
        self.data_type = (
            "float32" if core.is_compiled_with_rocm() else "float64"
        )

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


class TestAllOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.all
        self.op_type = "reduce_all"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].all()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1, 3, 4)}
        self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAll8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_all"
        self.python_api = paddle.all
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (5,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].all(axis=self.attrs['dim']), axis=5
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAllOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_all_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
            # The input dtype of reduce_all_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_all, input2)


class TestAnyOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOp_ZeroDim(OpTest):
    def setUp(self):
        self.python_api = paddle.any
        self.op_type = "reduce_any"
        self.inputs = {'X': np.random.randint(0, 2, []).astype("bool")}
        self.outputs = {'Out': self.inputs['X'].any()}
        self.attrs = {'dim': [], 'reduce_all': True}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOp(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': [1]}
        self.outputs = {'Out': self.inputs['X'].any(axis=1)}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (3, 6)}
        self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAny8DOpWithKeepDim(OpTest):
    def setUp(self):
        self.op_type = "reduce_any"
        self.python_api = paddle.any
        self.inputs = {
            'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype(
                "bool"
            )
        }
        self.attrs = {'dim': (1,), 'keep_dim': True}
        self.outputs = {
            'Out': np.expand_dims(
                self.inputs['X'].any(axis=self.attrs['dim']), axis=1
            )
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestAnyOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_any_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
            # The input dtype of reduce_any_op must be bool.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, fluid.layers.reduce_any, input2)


class Test1DReduce(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(120).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


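# The Test*Reduce cases below only override setUp; test_check_output and
# test_check_grad are inherited from Test1DReduce.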
class Test2DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [0]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}


class Test2DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce1(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce2(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [-2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test3DReduce3(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': [1, 2]}
        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


class Test8DReduce0(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.attrs = {'dim': (4, 2, 3)}
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.outputs = {
            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
        }


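# keep_dim=True keeps every reduced axis as a size-1 dimension, mirroring
# NumPy's keepdims semantics used to build the expected outputs.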
class TestKeepDimReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


class TestKeepDim8DReduce(Test1DReduce):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
        }
        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
            )
        }


@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework."
)
class TestReduceMaxOpMultiAxises(OpTest):
    """Max has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_max"
        self.python_api = paddle.max
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1]}
        self.outputs = {
            'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function;"
    " its gradient check is not supported by the unittest framework."
)
class TestReduceMinOpMultiAxises(OpTest):
    """Min has only a subgradient, so it is excluded from the gradient check to keep CI green."""

    def setUp(self):
        self.op_type = "reduce_min"
        self.python_api = paddle.min
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [1, 2]}
        self.outputs = {
            'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestKeepDimReduceSumMultiAxises(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithDimOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'dim': [1, 2], 'keep_dim': True}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=True
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceSumWithNumelOne(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
        self.attrs = {'dim': [1], 'keep_dim': False}
        self.outputs = {
            'Out': self.inputs['X'].sum(
                axis=tuple(self.attrs['dim']), keepdims=False
            )
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceAll(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
        self.attrs = {'reduce_all': True, 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum()}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class Test1DReduceWithAxes1(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random(100).astype("float64")}
        self.attrs = {'dim': [0], 'keep_dim': False}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype(OpTest):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
        self.attrs = {'reduce_all': True}
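        # in_dtype/out_dtype request a cast before/after the reduction;
        # convert_np_dtype_to_dtype_ maps NumPy dtypes to the framework's
        # dtype enum values.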
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
874 875 876 877 878 879 880 881 882 883 884 885 886 887

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestReduceWithDtype1(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
        self.attrs = {'dim': [1]}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
894 895 896 897 898 899 900 901


class TestReduceWithDtype2(TestReduceWithDtype):
    def setUp(self):
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
        self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
        self.attrs = {'dim': [1], 'keep_dim': True}
        self.attrs.update(
            {
                'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
                'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
            }
        )
908 909


910
class TestReduceSumOpError(unittest.TestCase):
911 912 913
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of reduce_sum_op must be Variable.
914 915 916
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
917
            self.assertRaises(TypeError, paddle.sum, x1)
918 919
            # The input dtype of reduce_sum_op  must be float32 or float64 or int32 or int64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
920
            self.assertRaises(TypeError, paddle.sum, x2)
921 922


923
class API_TestSumOp(unittest.TestCase):
924 925 926
    def run_static(
        self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None
    ):
927 928
        if np_axis is None:
            np_axis = attr_axis
929

930 931 932 933 934 935
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data("data", shape=shape, dtype=x_dtype)
936 937 938
                result_sum = paddle.sum(
                    x=data, axis=attr_axis, dtype=attr_dtype
                )
939 940 941

                exe = fluid.Executor(place)
                input_data = np.random.rand(*shape).astype(x_dtype)
942 943 944
                (res,) = exe.run(
                    feed={"data": input_data}, fetch_list=[result_sum]
                )
945

946 947 948 949 950
            np.testing.assert_allclose(
                res,
                np.sum(input_data.astype(attr_dtype), axis=np_axis),
                rtol=1e-05,
            )
951

952 953 954 955
    def test_static(self):
        shape = [10, 10]
        axis = 1

956 957 958
        self.run_static(shape, "bool", axis, attr_dtype=None)
        self.run_static(shape, "bool", axis, attr_dtype="int32")
        self.run_static(shape, "bool", axis, attr_dtype="int64")
959
        self.run_static(shape, "bool", axis, attr_dtype="float16")
960

961 962 963
        self.run_static(shape, "int32", axis, attr_dtype=None)
        self.run_static(shape, "int32", axis, attr_dtype="int32")
        self.run_static(shape, "int32", axis, attr_dtype="int64")
964
        self.run_static(shape, "int32", axis, attr_dtype="float64")
965

966 967 968 969
        self.run_static(shape, "int64", axis, attr_dtype=None)
        self.run_static(shape, "int64", axis, attr_dtype="int64")
        self.run_static(shape, "int64", axis, attr_dtype="int32")

970 971 972
        self.run_static(shape, "float32", axis, attr_dtype=None)
        self.run_static(shape, "float32", axis, attr_dtype="float32")
        self.run_static(shape, "float32", axis, attr_dtype="float64")
973
        self.run_static(shape, "float32", axis, attr_dtype="int64")
974 975 976 977

        self.run_static(shape, "float64", axis, attr_dtype=None)
        self.run_static(shape, "float64", axis, attr_dtype="float32")
        self.run_static(shape, "float64", axis, attr_dtype="float64")
978 979 980

        shape = [5, 5, 5]
        self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
981 982 983
        self.run_static(
            shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2)
        )
984 985 986

    def test_dygraph(self):
        np_x = np.random.random([2, 3, 4]).astype('int32')
987 988
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(np_x)
989 990 991 992 993 994 995 996 997
            out0 = paddle.sum(x).numpy()
            out1 = paddle.sum(x, axis=0).numpy()
            out2 = paddle.sum(x, axis=(0, 1)).numpy()
            out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()

        self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
        self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
        self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
        self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())


class TestAllAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.all(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.all(x)
                np_out1 = out1.numpy()
                expect_res1 = np.all(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.all(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.all(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.all(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.all(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.all(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.all(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


class TestAnyAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        paddle.enable_static()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
            result = paddle.any(x=input)
            input_np = np.random.randint(0, 2, [4, 4]).astype("bool")

            exe = fluid.Executor(place)
            fetches = exe.run(
                fluid.default_main_program(),
                feed={"input": input_np},
                fetch_list=[result],
            )
            np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        paddle.disable_static()
        for place in self.places:
            with fluid.dygraph.guard(place):
                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                x = fluid.layers.assign(np_x)
                x = fluid.layers.cast(x, 'bool')

                out1 = paddle.any(x)
                np_out1 = out1.numpy()
                expect_res1 = np.any(np_x)
                self.assertTrue((np_out1 == expect_res1).all())

                out2 = paddle.any(x, axis=0)
                np_out2 = out2.numpy()
                expect_res2 = np.any(np_x, axis=0)
                self.assertTrue((np_out2 == expect_res2).all())

                out3 = paddle.any(x, axis=-1)
                np_out3 = out3.numpy()
                expect_res3 = np.any(np_x, axis=-1)
                self.assertTrue((np_out3 == expect_res3).all())

                out4 = paddle.any(x, axis=1, keepdim=True)
                np_out4 = out4.numpy()
                expect_res4 = np.any(np_x, axis=1, keepdims=True)
                self.assertTrue((np_out4 == expect_res4).all())

        paddle.enable_static()


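# A minimal eager-mode sanity check of the APIs exercised above (a sketch,
# assuming a working Paddle install; not part of the test suite):
#
#   paddle.disable_static()
#   x = paddle.to_tensor(np.ones((2, 3), dtype='float32'))
#   paddle.sum(x, axis=1)            # Tensor([3., 3.])
#   paddle.all(x.astype('bool'))     # Tensor(True)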
if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()