#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16
from test_sum_op import TestReduceOPTensorAxisBase

import paddle
from paddle import base
from paddle.base import Program, core, program_guard

np.random.seed(10)


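# The wrappers below forward the legacy reduce attributes (axis, keepdim,
# reduce_all) to the public paddle.mean API so that OpTest can compare the
# compiled kernels against the Python-level implementation.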
def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False):
    if reduce_all:
        return paddle.mean(x, list(range(len(x.shape))), keepdim)
    return paddle.mean(x, axis, keepdim)


def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False):
    if reduce_all:
        return paddle.mean(x, list(range(len(x.shape))), keepdim)
    return paddle.mean(x, axis, keepdim)


class TestMeanOp(OpTest):
    def setUp(self):
        self.op_type = "mean"
        self.python_api = paddle.mean
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
        self.outputs = {'Out': np.mean(self.inputs["X"])}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_checkout_grad(self):
        self.check_grad(['X'], 'Out')


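# The mean op must also accept a 0-D (scalar) Tensor input.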
class TestMeanOp_ZeroDim(OpTest):
    def setUp(self):
        self.op_type = "mean"
        self.python_api = paddle.mean
        self.dtype = np.float64
        self.inputs = {'X': np.random.random([]).astype(self.dtype)}
        self.outputs = {'Out': np.mean(self.inputs["X"])}

    def test_check_output(self):
        self.check_output()

    def test_checkout_grad(self):
        self.check_grad(['X'], 'Out')


class TestMeanOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input type of mean_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, paddle.mean, input1)
            # The input dtype of mean_op must be float16, float32, float64.
            input2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.mean, input2)
            input3 = paddle.static.data(
                name='input3', shape=[-1, 4], dtype="float16"
            )
            paddle.nn.functional.softmax(input3)
        paddle.disable_static()


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16MeanOp(TestMeanOp):
    def init_dtype_type(self):
        self.dtype = np.float16
        self.__class__.no_need_check_grad = True

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place)

    def test_checkout_grad(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            with base.dygraph.guard():
                x_np = np.random.random((10, 10)).astype(self.dtype)
                x = paddle.to_tensor(x_np)
                x.stop_gradient = False
                y = paddle.mean(x)
                dx = paddle.grad(y, x)[0].numpy()
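                # d(mean)/dx is 1/N for every element, so the expected
                # gradient is a constant tensor filled with 1/N.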
                dx_expected = self.dtype(1.0 / np.prod(x_np.shape)) * np.ones(
                    x_np.shape
                ).astype(self.dtype)
                np.testing.assert_array_equal(dx, dx_expected)


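# bfloat16 mean on CPU; OpTest carries bf16 values as uint16 bit patterns.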
@OpTestTool.skip_if_not_cpu_bf16()
class TestBF16MeanOp(TestMeanOp):
    def init_dtype_type(self):
        self.dtype = np.uint16

    def test_check_output(self):
        paddle.enable_static()
        self.check_output_with_place(core.CPUPlace())

    def test_checkout_grad(self):
        place = core.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out')


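# NumPy reference: np.mean expects a tuple (not a list) of axes, and a full
# reduction (reduce_all) is expressed by passing axis=None.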
def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    if reduce_all:
        axis = None
    return np.mean(x, axis=axis, keepdims=keepdim)


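# reduce_mean with configurable axis/keepdim/reduce_all attributes; outputs
# and gradients are also checked against the composite (prim) implementation.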
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_float16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA",
)
class TestReduceMeanOp(OpTest):
    def setUp(self):
        self.op_type = 'reduce_mean'
        self.python_api = reduce_mean_wrapper
        self.public_python_api = reduce_mean_wrapper
        self.prim_op_type = "comp"
        self.dtype = 'float64'
        self.init_shapes()
        self.axis = [0]
        if self.shape == []:
            self.axis = []
        self.keepdim = False
        self.set_attrs()
        self.if_enable_cinn()

        np.random.seed(10)
        x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
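        # Unless a subclass pinned reduce_all explicitly, infer it: an empty
        # axis or reducing every dimension amounts to a full reduction.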
        if not hasattr(self, "reduce_all") and not x_np.shape == ():
            self.reduce_all = (not self.axis) or len(self.axis) == len(x_np.shape)
        if x_np.shape == ():
            self.reduce_all = True
        out_np = ref_reduce_mean(x_np, self.axis, self.keepdim, self.reduce_all)
        self.inputs = {'X': x_np}
        self.outputs = {'Out': out_np}
        self.attrs = {
            'dim': self.axis,
            'keep_dim': self.keepdim,
            'reduce_all': self.reduce_all,
        }

    def init_shapes(self):
        self.shape = [2, 3, 4, 5]

    def set_attrs(self):
        pass

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        if self.dtype != 'float16':
            self.check_output(check_prim=True)
        else:
            place = paddle.CUDAPlace(0)
            self.check_output_with_place(place=place, check_prim=True)

    def test_check_grad(self):
        if self.dtype != 'float16':
            self.check_grad(['X'], ['Out'], check_prim=True)
        else:
            place = paddle.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], ['Out'], numeric_grad_delta=0.5, check_prim=True
            )


class TestReduceMeanOp_ZeroDim(TestReduceMeanOp):
    def init_shapes(self):
        self.shape = []
        self.enable_cinn = False


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA and do not support bfloat16",
)
class TestReduceMeanBF16Op(OpTest):
    def setUp(self):
        self.op_type = 'reduce_mean'
        self.python_api = reduce_mean_wrapper
        self.public_python_api = reduce_mean_wrapper
        self.prim_op_type = "comp"
        self.dtype = np.uint16
        self.shape = [2, 3, 4, 5]
        self.axis = [0]
        self.keepdim = False
        self.set_attrs()
        self.if_enable_cinn()

        np.random.seed(10)
        x_np = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        if not hasattr(self, "reduce_all"):
            self.reduce_all = (not self.axis) or len(self.axis) == len(x_np.shape)

        out_np = ref_reduce_mean(x_np, self.axis, self.keepdim, self.reduce_all)
        self.inputs = {'X': convert_float_to_uint16(x_np)}
        self.outputs = {'Out': convert_float_to_uint16(out_np)}
        self.attrs = {
            'dim': self.axis,
            'keep_dim': self.keepdim,
            'reduce_all': self.reduce_all,
        }

    def if_enable_cinn(self):
        self.enable_cinn = False

    def set_attrs(self):
        pass

    def test_check_output(self):
        place = paddle.CUDAPlace(0)
        self.check_output_with_place(place, check_prim=True)

    def test_check_grad(self):
        place = paddle.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], ['Out'], numeric_grad_delta=0.05, check_prim=True
        )


class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
    def setUp(self):
        self.op_type = 'reduce_mean'
        self.python_api = reduce_mean_wrapper
        self.public_python_api = reduce_mean_wrapper
        self.prim_op_type = "comp"
        self.dtype = 'float64'
        self.shape = [2, 3, 4, 5]

        x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out_np = np.mean(x_np, axis=0)
        self.inputs = {'X': x_np}
        self.outputs = {'Out': out_np}


class TestReduceMeanOpFloat32(TestReduceMeanOp):
    def set_attrs(self):
        self.dtype = 'float32'


class TestReduceMeanOpFloat16(TestReduceMeanOp):
    def set_attrs(self):
        self.dtype = 'float16'


class TestReduceMeanOpShape1D(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [100]


class TestReduceMeanOpShape1DFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [100]
        self.dtype = 'float16'


class TestReduceMeanOpShape6D(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [2, 3, 4, 5, 6, 7]


class TestReduceMeanOpShape6DBF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.shape = [2, 3, 4, 5, 6, 7]


class TestReduceMeanOpShape6DFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [2, 3, 4, 5, 6, 7]
        self.dtype = 'float16'


class TestReduceMeanOpAxisAll(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]


class TestReduceMeanOpAxisAllFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.dtype = 'float16'


class TestReduceMeanOpAxisAllBF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]


class TestReduceMeanOpAxisTuple(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = (0, 1, 2)


class TestReduceMeanOpAxisTupleFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = (0, 1, 2)
        self.dtype = 'float16'


class TestReduceMeanOpAxisTupleBF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.axis = (0, 1, 2)


class TestReduceMeanOpAxisNegative(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [-2, -1]


class TestReduceMeanOpAxisNegativeFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [-2, -1]
        self.dtype = 'float16'


class TestReduceMeanOpAxisNegativeBF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.axis = [-2, -1]


class TestReduceMeanOpKeepdimTrue1(TestReduceMeanOp):
    def set_attrs(self):
        self.keepdim = True


class TestReduceMeanOpKeepdimTrue1FP16(TestReduceMeanOp):
    def set_attrs(self):
        self.keepdim = True
        self.dtype = 'float16'


class TestReduceMeanOpKeepdimTrue1BF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.keepdim = True


class TestReduceMeanOpKeepdimTrue2(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.keepdim = True


class TestReduceMeanOpKeepdimTrue2FP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.keepdim = True
        self.dtype = 'float16'


class TestReduceMeanOpKeepdimTrue2BF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.keepdim = True


class TestReduceMeanOpReduceAllTrue(TestReduceMeanOp):
    def set_attrs(self):
        self.reduce_all = True


class TestReduceMeanOpReduceAllTrueFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.reduce_all = True
        self.dtype = 'float16'


class TestReduceMeanOpReduceAllTrueBF16(TestReduceMeanBF16Op):
    def set_attrs(self):
        self.reduce_all = True


class TestMeanAPI(unittest.TestCase):
    # test paddle.tensor.stat.mean

    def setUp(self):
        self.x_shape = [2, 3, 4, 5]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_api_static(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_shape)
            out1 = paddle.mean(x)
            out2 = paddle.tensor.mean(x)
            out3 = paddle.tensor.stat.mean(x)
            axis = np.arange(len(self.x_shape)).tolist()
            out4 = paddle.mean(x, axis)
            out5 = paddle.mean(x, tuple(axis))

            exe = paddle.static.Executor(self.place)
            res = exe.run(
                feed={'X': self.x}, fetch_list=[out1, out2, out3, out4, out5]
            )
        out_ref = np.mean(self.x)
        for out in res:
            np.testing.assert_allclose(out, out_ref, rtol=0.0001)

    def test_api_dygraph(self):
        paddle.disable_static(self.place)

        def test_case(x, axis=None, keepdim=False):
            x_tensor = paddle.to_tensor(x)
            out = paddle.mean(x_tensor, axis, keepdim)
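            # np.mean expects a tuple of axes rather than a list, and an
            # empty axis list means "reduce over all axes" (axis=None).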
            if isinstance(axis, list):
                axis = tuple(axis)
                if len(axis) == 0:
                    axis = None
            out_ref = np.mean(x, axis, keepdims=keepdim)
            np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.0001)

        test_case(self.x)
        test_case(self.x, [])
        test_case(self.x, -1)
        test_case(self.x, keepdim=True)
        test_case(self.x, 2, keepdim=True)
        test_case(self.x, [0, 2])
        test_case(self.x, (0, 2))
        test_case(self.x, [0, 1, 2, 3])
        paddle.enable_static()

    def test_base_api(self):
        with base.program_guard(base.Program(), base.Program()):
            x = paddle.static.data("x", shape=[10, 10], dtype="float32")
            out = paddle.mean(x=x, axis=1)
            place = base.CPUPlace()
            exe = base.Executor(place)
            x_np = np.random.rand(10, 10).astype(np.float32)
            res = exe.run(feed={"x": x_np}, fetch_list=[out])
        np.testing.assert_allclose(res[0], np.mean(x_np, axis=1), rtol=1e-05)

        with base.dygraph.guard():
            x_np = np.random.rand(10, 10).astype(np.float32)
            x = base.dygraph.to_variable(x_np)
            out = paddle.mean(x=x, axis=1)
        np.testing.assert_allclose(
            out.numpy(), np.mean(x_np, axis=1), rtol=1e-05
        )

    def test_errors(self):
        paddle.disable_static()
        x = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        x = paddle.to_tensor(x)
        self.assertRaises(Exception, paddle.mean, x, -3)
        self.assertRaises(Exception, paddle.mean, x, 2)
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12], 'int32')
            self.assertRaises(TypeError, paddle.mean, x)


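# The reduction axis may also be passed as a Tensor, or as a list mixing
# Python ints with Tensors.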
class TestMeanWithTensorAxis1(TestReduceOPTensorAxisBase):
    def init_data(self):
        self.pd_api = paddle.mean
        self.np_api = np.mean
        self.x = paddle.randn([10, 5, 9, 9], dtype='float64')
        self.np_axis = np.array([1, 2], dtype='int64')
        self.tensor_axis = paddle.to_tensor([1, 2], dtype='int64')


class TestMeanWithTensorAxis2(TestReduceOPTensorAxisBase):
    def init_data(self):
        self.pd_api = paddle.mean
        self.np_api = np.mean
        self.x = paddle.randn([10, 10, 9, 9], dtype='float64')
        self.np_axis = np.array([0, 1, 2], dtype='int64')
        self.tensor_axis = [
            0,
            paddle.to_tensor([1], 'int64'),
            paddle.to_tensor([2], 'int64'),
        ]


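# gradient_checker numerically verifies the second- and third-order gradients
# of mean in both static graph and dygraph modes.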
class TestMeanDoubleGradCheck(unittest.TestCase):
    def mean_wrapper(self, x):
        return paddle.mean(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [3, 4, 5], dtype)
        data.persistable = True
        out = paddle.mean(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.mean_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [base.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(base.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestMeanTripleGradCheck(unittest.TestCase):
    def mean_wrapper(self, x):
        return paddle.mean(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [3, 4, 5], dtype)
        data.persistable = True
        out = paddle.mean(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.mean_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [base.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(base.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()