test_mean_op.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, OpTestTool
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

np.random.seed(10)


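# TestMeanOp checks the full-reduction "mean" operator against numpy.mean
# on a random 10x10 input.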
class TestMeanOp(OpTest):
    def setUp(self):
        self.op_type = "mean"
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
        self.outputs = {'Out': np.mean(self.inputs["X"])}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestMeanOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of mean_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.mean, input1)
            # The input dtype of mean_op must be float16, float32, float64.
            input2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.mean, input2)
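            # float16 is among the supported dtypes, so building ops on a
            # float16 input should not raise.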
            input3 = fluid.layers.data(
                name='input3', shape=[4], dtype="float16")
            fluid.layers.softmax(input3)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16MeanOp(TestMeanOp):
    def init_dtype_type(self):
        self.dtype = np.float16
        self.__class__.no_need_check_grad = True

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            with fluid.dygraph.guard():
                x_np = np.random.random((10, 10)).astype(self.dtype)
                x = paddle.to_tensor(x_np)
                x.stop_gradient = False
                y = fluid.layers.mean(x)
                dx = paddle.grad(y, x)[0].numpy()
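                # d(mean)/dx_i = 1/N for every element, where N = x_np.size.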
                dx_expected = self.dtype(1.0 / np.prod(x_np.shape)) * np.ones(
                    x_np.shape).astype(self.dtype)
                self.assertTrue(np.array_equal(dx, dx_expected))


@OpTestTool.skip_if_not_cpu_bf16()
class TestBF16MeanOp(TestMeanOp):
    def init_dtype_type(self):
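        # OpTest represents bfloat16 tensors as np.uint16 arrays.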
        self.dtype = np.uint16

    def test_check_output(self):
        paddle.enable_static()
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        place = core.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out')


def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
    if isinstance(axis, list):
        axis = tuple(axis)
    if reduce_all:
        axis = None
    return np.mean(x, axis=axis, keepdims=keepdim)
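
# For example, ref_reduce_mean(np.ones((2, 3)), axis=[0]) returns
# array([1., 1., 1.]), matching np.mean(np.ones((2, 3)), axis=(0,)).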


def ref_reduce_mean_grad(x, axis, dtype, reduce_all=False):
    if reduce_all:
        axis = list(range(x.ndim))

    # The gradient of a mean reduction is 1/N for every element of x,
    # where N is the number of elements being averaged over.
    reduced_size = np.prod([x.shape[i] for i in axis])
    return (np.ones(x.shape) / reduced_size).astype(dtype)


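# TestReduceMeanOp exercises the "reduce_mean" operator. Its attributes are
# 'dim' (the axes to reduce), 'keep_dim', and 'reduce_all'.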
class TestReduceMeanOp(OpTest):
    def setUp(self):
        self.op_type = 'reduce_mean'
        self.dtype = 'float64'
        self.shape = [2, 3, 4, 5]
        self.axis = [0]
        self.keepdim = False
        self.set_attrs()

        np.random.seed(10)
        x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        if not hasattr(self, "reduce_all"):
            # Reduce over all dims when axis is empty or covers every dim.
            self.reduce_all = (not self.axis) or len(self.axis) == len(
                self.shape)

        out_np = ref_reduce_mean(x_np, self.axis, self.keepdim, self.reduce_all)
        self.inputs = {'X': x_np}
        self.outputs = {'Out': out_np}
        self.attrs = {
            'dim': self.axis,
            'keep_dim': self.keepdim,
            'reduce_all': self.reduce_all
        }

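        # OpTest's numeric gradient check is skipped for float16; FP16
        # gradients are instead verified manually in test_check_grad.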
        if self.dtype == 'float16':
            self.__class__.no_need_check_grad = True

    def set_attrs(self):
        pass

    def test_check_output(self):
        if self.dtype != 'float16':
            self.check_output()
        else:
            if not core.is_compiled_with_cuda():
                return
            place = paddle.CUDAPlace(0)
            self.check_output_with_place(place=place)

    def test_check_grad(self):
        if self.dtype != 'float16':
            self.check_grad(['X'], ['Out'])
        else:
            if not core.is_compiled_with_cuda():
                return
            place = paddle.CUDAPlace(0)
            if not core.is_float16_supported(place):
                return
            with fluid.dygraph.guard(place=place):
                x = paddle.to_tensor(self.inputs['X'])
                x.stop_gradient = False
                y = paddle.mean(
                    x, axis=self.attrs['dim'], keepdim=self.attrs['keep_dim'])
                dx = paddle.grad(y, x)[0].numpy()
                dx_expected = ref_reduce_mean_grad(
                    self.inputs['X'], self.attrs['dim'], self.dtype,
                    self.attrs['reduce_all'])
                self.assertTrue(np.array_equal(dx, dx_expected))


class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
    def setUp(self):
        self.op_type = 'reduce_mean'
        self.dtype = 'float64'
        self.shape = [2, 3, 4, 5]

        x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out_np = np.mean(x_np, axis=0)
        self.inputs = {'X': x_np}
        self.outputs = {'Out': out_np}


class TestReduceMeanOpFloat32(TestReduceMeanOp):
    def set_attrs(self):
        self.dtype = 'float32'


class TestReduceMeanOpFloat16(TestReduceMeanOp):
    def set_attrs(self):
        self.dtype = 'float16'


class TestReduceMeanOpShape1D(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [100]


class TestReduceMeanOpShape1DFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [100]
        self.dtype = 'float16'


class TestReduceMeanOpShape6D(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [2, 3, 4, 5, 6, 7]


class TestReduceMeanOpShape6DFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.shape = [2, 3, 4, 5, 6, 7]
        self.dtype = 'float16'


class TestReduceMeanOpAxisAll(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]


class TestReduceMeanOpAxisAllFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.dtype = 'float16'


class TestReduceMeanOpAxisTuple(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = (0, 1, 2)


class TestReduceMeanOpAxisTupleFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = (0, 1, 2)
        self.dtype = 'float16'


class TestReduceMeanOpAxisNegative(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [-2, -1]


class TestReduceMeanOpAxisNegativeFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [-2, -1]
        self.dtype = 'float16'


class TestReduceMeanOpKeepdimTrue1(TestReduceMeanOp):
    def set_attrs(self):
        self.keepdim = True


class TestReduceMeanOpKeepdimTrue1FP16(TestReduceMeanOp):
    def set_attrs(self):
        self.keepdim = True
        self.dtype = 'float16'


class TestReduceMeanOpKeepdimTrue2(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.keepdim = True


class TestReduceMeanOpKeepdimTrue2FP16(TestReduceMeanOp):
    def set_attrs(self):
        self.axis = [0, 1, 2, 3]
        self.keepdim = True
        self.dtype = 'float16'


class TestReduceMeanOpReduceAllTrue(TestReduceMeanOp):
    def set_attrs(self):
        self.reduce_all = True


class TestReduceMeanOpReduceAllTrueFP16(TestReduceMeanOp):
    def set_attrs(self):
        self.reduce_all = True
        self.dtype = 'float16'


class TestMeanAPI(unittest.TestCase):
    # test paddle.tensor.stat.mean

    def setUp(self):
        self.x_shape = [2, 3, 4, 5]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_api_static(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_shape)
            out1 = paddle.mean(x)
            out2 = paddle.tensor.mean(x)
            out3 = paddle.tensor.stat.mean(x)
            axis = np.arange(len(self.x_shape)).tolist()
            out4 = paddle.mean(x, axis)
            out5 = paddle.mean(x, tuple(axis))

            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x},
                          fetch_list=[out1, out2, out3, out4, out5])
        out_ref = np.mean(self.x)
        for out in res:
            self.assertTrue(np.allclose(out, out_ref, rtol=1e-04))

    def test_api_dygraph(self):
        paddle.disable_static(self.place)

        def test_case(x, axis=None, keepdim=False):
            x_tensor = paddle.to_tensor(x)
            out = paddle.mean(x_tensor, axis, keepdim)
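            # paddle.mean treats an empty axis list as "reduce all", which
            # maps to axis=None for the numpy reference.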
            if isinstance(axis, list):
                axis = tuple(axis)
                if len(axis) == 0:
                    axis = None
            out_ref = np.mean(x, axis, keepdims=keepdim)
            self.assertTrue(np.allclose(out.numpy(), out_ref, rtol=1e-04))

        test_case(self.x)
        test_case(self.x, [])
        test_case(self.x, -1)
        test_case(self.x, keepdim=True)
        test_case(self.x, 2, keepdim=True)
        test_case(self.x, [0, 2])
        test_case(self.x, (0, 2))
        test_case(self.x, [0, 1, 2, 3])
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data("x", shape=[10, 10], dtype="float32")
            out = fluid.layers.reduce_mean(input=x, dim=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            x_np = np.random.rand(10, 10).astype(np.float32)
            res = exe.run(feed={"x": x_np}, fetch_list=[out])
        self.assertTrue(np.allclose(res[0], np.mean(x_np, axis=1)))

        with fluid.dygraph.guard():
            x_np = np.random.rand(10, 10).astype(np.float32)
            x = fluid.dygraph.to_variable(x_np)
            out = fluid.layers.reduce_mean(input=x, dim=1)
        self.assertTrue(np.allclose(out.numpy(), np.mean(x_np, axis=1)))

    def test_errors(self):
        paddle.disable_static()
        x = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        x = paddle.to_tensor(x)
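        # x is 2-D, so valid axes are in [-2, 1]; -3 and 2 are out of range.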
        self.assertRaises(Exception, paddle.mean, x, -3)
        self.assertRaises(Exception, paddle.mean, x, 2)
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], 'int32')
            self.assertRaises(TypeError, paddle.mean, x)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()