#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid as fluid
import six
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard


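# NumPy reference for PReLU: out = x where x > 0 and out = weight * x
# elsewhere, with the weight broadcast per channel of an NCHW input.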
def ref_prelu(x, weight):
    x_t = x.copy()
    weight = weight.reshape(1, -1, 1, 1)
    neg_indices = x <= 0
    assert x.shape == neg_indices.shape
    x_t[neg_indices] = (x_t * weight)[neg_indices]
    return x_t


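# Reference for paddle.nn.PReLU: every one of num_parameters alphas starts at
# the constant init value.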
def ref_prelu_nn(x, num_parameters, init):
    weight_np = np.full((num_parameters), init)
    return ref_prelu(x, weight_np)


class TestFunctionalPReluAPI(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
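        # Random input plus two alpha variants: a single shared alpha and one
        # alpha per channel.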
        self.x_np = np.random.uniform(-1., 1., [1, 2, 3, 4]).astype('float32')
        self.weight_np_0 = np.random.randn(1).astype('float32')
        self.weight_np_1 = np.random.randn(self.x_np.shape[1]).astype('float32')

    def static_check(self, weight_np):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, 'float32')
            weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32')
            out = F.prelu(x, weight)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={
                'X': self.x_np,
                'Alpha': weight_np
            },
                          fetch_list=[out])
        out_ref = ref_prelu(self.x_np, weight_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def dygraph_check(self, weight_np):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        weight = paddle.to_tensor(weight_np)
        out = F.prelu(x, weight)
        out_ref = ref_prelu(self.x_np, weight_np)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        self.static_check(self.weight_np_0)
        self.static_check(self.weight_np_1)

    def test_dygraph_api(self):
        self.dygraph_check(self.weight_np_0)
        self.dygraph_check(self.weight_np_1)

    def test_dygraph_api_eager(self):
        with _test_eager_guard():
            self.test_dygraph_api()

    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
            weight_fp32 = paddle.fluid.data(name='weight_fp32',
                                            shape=[1],
                                            dtype='float32')
            # The input type must be Variable.
            self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[2, 3],
                                        dtype='int32')
            self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[2, 3],
                                       dtype='float16')
            F.prelu(x=x_fp16, weight=weight_fp32)


class TestNNPReluAPI(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.ones([1, 2, 3, 4]).astype('float32')

    def test_static_api(self):
        startup_program = paddle.static.Program()
        train_program = paddle.static.Program()
        with paddle.static.program_guard(train_program, startup_program):
            x = paddle.fluid.data(name='X',
                                  shape=self.x_np.shape,
                                  dtype='float32')
            m = paddle.nn.PReLU()
            out = m(x)
            exe = paddle.static.Executor(self.place)
            exe.run(startup_program)
            res = exe.run(train_program,
                          feed={'X': self.x_np},
                          fetch_list=[out])
        out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_dygraph_api(self):
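        # Exercise nn.PReLU with its default settings, then with
        # num_parameters, init, and weight_attr overridden.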
        paddle.disable_static(self.place)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU()
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(num_parameters=self.x_np.shape[1])
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, self.x_np.shape[1], 0.25)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(init=0.5)
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(name="weight"))
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(0.5)))
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()


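# Wrapper used as python_api for the eager checks in OpTest: the Alpha tensor
# created by the op tests is flattened because F.prelu expects a 1-D weight.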
def prelu_api_wrapper(x, weight, data_format="NCHW"):
    weight = weight.reshape([-1])
    return paddle.nn.functional.prelu(x, weight, data_format, name=None)


class PReluTest(OpTest):

    def setUp(self):
        self.init_dtype()
        self.init_input_shape()
        self.eager_mode = True
        self.init_attr()
        self.op_type = "prelu"
        self.python_api = prelu_api_wrapper

        x_np = np.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
        # Since prelu is not differentiable at zero, avoid random values that
        # are (close to) zero.
        x_np[np.abs(x_np) < 0.005] = 0.02

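        # Alpha shape depends on the mode: 'all' uses a single scalar,
        # 'channel' uses one alpha per channel (axis 1 for NCHW, the last
        # axis for NHWC), and 'element' uses one alpha per element of a sample.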
        if (self.attrs == {'mode': "all", "data_format": "NCHW"}
                or self.attrs == {'mode': "all", "data_format": "NHWC"}):
            alpha_np = np.random.uniform(-1, -0.5, (1))
        elif self.attrs == {'mode': "channel", "data_format": "NCHW"}:
            alpha_np = np.random.uniform(-1, -0.5, [1, self.x_shape[1], 1, 1])
        elif self.attrs == {'mode': "channel", "data_format": "NHWC"}:
            alpha_np = np.random.uniform(-1, -0.5, [1, 1, 1, self.x_shape[-1]])
        else:
            alpha_np = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:])
            # The eager check goes through prelu_api_wrapper (F.prelu), which
            # does not support this mode, so disable it.
            self.eager_mode = False
        alpha_np = alpha_np.astype(self.dtype)

        self.inputs = {'X': x_np, 'Alpha': alpha_np}

        # NOTE(zhiqu): reshape inputs['Alpha'] from [1, 100, 1, 1] to [1, 100] + [1]*len(x.shape[2:]),
        # since numpy operands cannot be broadcast together with shapes like (1,100,2,2,2,3) and (1,100,1,1).
        reshaped_alpha = self.inputs['Alpha']
        if self.attrs == {'mode': "channel", "data_format": "NCHW"}:
            reshaped_alpha = np.reshape(self.inputs['Alpha'],
                                        [1, self.x_shape[1]] +
                                        [1] * len(self.x_shape[2:]))
        elif self.attrs == {'mode': "channel", "data_format": "NHWC"}:
            reshaped_alpha = np.reshape(self.inputs['Alpha'],
                                        [1] + [1] * len(self.x_shape[1:-1]) +
                                        [self.x_shape[-1]])
        out_np = np.maximum(self.inputs['X'], 0.)
        out_np = out_np + np.minimum(self.inputs['X'], 0.) * reshaped_alpha
        assert out_np is not self.inputs['X']
        self.outputs = {'Out': out_np}

    def init_dtype(self):
        self.dtype = np.float64

    def init_input_shape(self):
        self.x_shape = [2, 100, 3, 4]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NCHW"}

    def test_check_output(self):
        self.check_output(check_eager=self.eager_mode)

    def test_check_grad(self):
        self.check_grad(['X', 'Alpha'], 'Out', check_eager=self.eager_mode)


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAll(PReluTest):

    def init_input_shape(self):
        self.x_shape = [2, 3, 4, 5]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NCHW"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllNHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [2, 3, 4, 50]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NHWC"}


class TestModeElt(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 5, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NCHW"}


class TestModeEltNHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 5, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NHWC"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank3(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 200, 3]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NCHW"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank3NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 200, 3]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NHWC"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank6(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 2, 3, 4, 5, 6]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NCHW"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank6NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 2, 3, 4, 5, 6]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NHWC"}


class TestModeChannelRank3(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 200, 3]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NCHW"}


class TestModeChannelRank3NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 3, 100]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NHWC"}


class TestModeChannelRank6(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 100, 2, 2, 2, 2]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NCHW"}


class TestModeChannelRank6NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 2, 2, 2, 2, 100]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NHWC"}


class TestModeElementRank3(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 10, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NCHW"}


class TestModeElementRank3NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 10, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NHWC"}


class TestModeElementRank6(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 2, 4, 5, 2]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NCHW"}


class TestModeElementRank6NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 2, 4, 5, 2]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NHWC"}


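# Build an FP16 variant of a PReluTest subclass; the generated case only runs
# its checks on CUDA devices that support float16.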
def create_test_fp16_class(parent,
                           check_grad=True,
                           atol=1e-3,
                           max_relative_error=0.05):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestPReluFp16Case(parent):

        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place,
                                                 atol=atol,
                                                 check_eager=self.eager_mode)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and check_grad:
                self.check_grad_with_place(
                    place, ['X', 'Alpha'],
                    'Out',
                    max_relative_error=max_relative_error,
                    check_eager=self.eager_mode)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
    TestPReluFp16Case.__name__ = cls_name
    globals()[cls_name] = TestPReluFp16Case


create_test_fp16_class(TestModeElt)
create_test_fp16_class(TestModeAllRank3)
create_test_fp16_class(TestModeAllRank6)
create_test_fp16_class(TestModeChannelRank3)
create_test_fp16_class(TestModeChannelRank6)
create_test_fp16_class(TestModeElementRank3)
create_test_fp16_class(TestModeElementRank6)
create_test_fp16_class(TestModeEltNHWC)
create_test_fp16_class(TestModeAllRank3NHWC)
create_test_fp16_class(TestModeAllRank6NHWC)
create_test_fp16_class(TestModeChannelRank3NHWC)
create_test_fp16_class(TestModeChannelRank6NHWC)
create_test_fp16_class(TestModeElementRank3NHWC)
create_test_fp16_class(TestModeElementRank6NHWC)


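# Build the prelu op directly through LayerHelper so that invalid 'mode' and
# 'data_format' values reach the operator's own checks, which raise
# InvalidArgument errors.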
def prelu_t(x, mode, param_attr=None, name=None, data_format='NCHW'):
    helper = fluid.layer_helper.LayerHelper('prelu', **locals())
    alpha_shape = [1, x.shape[1], 1, 1]
    dtype = helper.input_dtype(input_param_name='x')
    alpha = helper.create_parameter(
        attr=helper.param_attr,
        shape=alpha_shape,
        dtype='float32',
        is_bias=False,
        default_initializer=fluid.initializer.ConstantInitializer(0.25))
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="prelu",
                     inputs={
                         "X": x,
                         'Alpha': alpha
                     },
                     attrs={
                         "mode": mode,
                         'data_format': data_format
                     },
                     outputs={"Out": out})
    return out


# Error message tests: mode must be one of 'all', 'channel', 'element', and
# data_format must be valid.
class TestModeError(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.ones([1, 2, 3, 4]).astype('float32')

    def test_mode_error(self):
        main_program = Program()
        with fluid.program_guard(main_program, Program()):
            x = fluid.data(name='x', shape=[2, 3, 4, 5])
            try:
                y = prelu_t(x, 'any')
            except Exception as e:
                assert (e.args[0].find('InvalidArgument') != -1)

    def test_data_format_error1(self):
        main_program = Program()
        with fluid.program_guard(main_program, Program()):
            x = fluid.data(name='x', shape=[2, 3, 4, 5])
            try:
                y = prelu_t(x, 'channel', data_format='N')
            except Exception as e:
                assert (e.args[0].find('InvalidArgument') != -1)

    def test_data_format_error2(self):
        main_program = Program()
        with fluid.program_guard(main_program, Program()):
            x = fluid.data(name='x', shape=[2, 3, 4, 5])
            try:
                y = paddle.static.nn.prelu(x, 'channel', data_format='N')
            except ValueError as e:
                pass


if __name__ == "__main__":
    unittest.main()