#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard


def ref_prelu(x, weight):
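    # NumPy reference: out = max(x, 0) + weight * min(x, 0), with the
    # per-channel weight broadcast over an NCHW input.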
    x_t = x.copy()
    weight = weight.reshape(1, -1, 1, 1)
    neg_indices = x <= 0
    assert x.shape == neg_indices.shape
    x_t[neg_indices] = (x_t * weight)[neg_indices]
    return x_t


def ref_prelu_nn(x, num_parameters, init):
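    # Reference for paddle.nn.PReLU: a constant weight of value `init`
    # repeated `num_parameters` times.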
    weight_np = np.full((num_parameters), init)
    return ref_prelu(x, weight_np)


class TestFunctionalPReluAPI(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.random.uniform(-1., 1., [1, 2, 3, 4]).astype('float32')
        self.weight_np_0 = np.random.randn(1).astype('float32')
        self.weight_np_1 = np.random.randn(self.x_np.shape[1]).astype('float32')

    def static_check(self, weight_np):
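        # Run F.prelu in a static-graph program and compare the result with
        # the NumPy reference.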
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, 'float32')
            weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32')
            out = F.prelu(x, weight)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={
                'X': self.x_np,
                'Alpha': weight_np
            },
                          fetch_list=[out])
        out_ref = ref_prelu(self.x_np, weight_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def dygraph_check(self, weight_np):
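        # Run F.prelu in dygraph mode and compare the result with the NumPy
        # reference.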
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        weight = paddle.to_tensor(weight_np)
        out = F.prelu(x, weight)
        out_ref = ref_prelu(self.x_np, weight_np)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        self.static_check(self.weight_np_0)
        self.static_check(self.weight_np_1)

    def test_dygraph_api(self):
        self.dygraph_check(self.weight_np_0)
        self.dygraph_check(self.weight_np_1)

    def test_dygraph_api_eager(self):
        with _test_eager_guard():
            self.test_dygraph_api()

    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
            weight_fp32 = paddle.fluid.data(name='weight_fp32',
                                            shape=[1],
                                            dtype='float32')
            # The input type must be Variable.
            self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[2, 3],
                                        dtype='int32')
            self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[2, 3],
                                       dtype='float16')
            F.prelu(x=x_fp16, weight=weight_fp32)


class TestNNPReluAPI(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.ones([1, 2, 3, 4]).astype('float32')

    def test_static_api(self):
        startup_program = paddle.static.Program()
        train_program = paddle.static.Program()
        with paddle.static.program_guard(train_program, startup_program):
            x = paddle.fluid.data(name='X',
                                  shape=self.x_np.shape,
                                  dtype='float32')
            m = paddle.nn.PReLU()
            out = m(x)
            exe = paddle.static.Executor(self.place)
            exe.run(startup_program)
            res = exe.run(train_program,
                          feed={'X': self.x_np},
                          fetch_list=[out])
        out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU()
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(num_parameters=self.x_np.shape[1])
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, self.x_np.shape[1], 0.25)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(init=0.5)
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(name="weight"))
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(0.5)))
        out = m(x)
        out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

        paddle.enable_static()


def prelu_api_wrapper(x, weight, data_format="NCHW"):
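    # Flatten alpha to 1-D; paddle.nn.functional.prelu expects a 1-D weight,
    # while the OpTest Alpha input may carry extra singleton dimensions.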
    weight = weight.reshape([-1])
    return paddle.nn.functional.prelu(x, weight, data_format, name=None)


class PReluTest(OpTest):
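    # Base OpTest case for the prelu operator; subclasses override
    # init_input_shape() and init_attr() to cover the 'all', 'channel' and
    # 'element' modes in both NCHW and NHWC layouts.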

    def setUp(self):
        self.init_dtype()
        self.init_input_shape()
        self.eager_mode = True
        self.init_attr()
        self.op_type = "prelu"
        self.python_api = prelu_api_wrapper

        x_np = np.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
        # Since prelu is not differentiable at zero, keep the random inputs
        # away from zero.
        x_np[np.abs(x_np) < 0.005] = 0.02

        if self.attrs == {
                'mode': "all",
                "data_format": "NCHW"
        } or self.attrs == {
                'mode': "all",
                "data_format": "NHWC"
        }:
            alpha_np = np.random.uniform(-1, -0.5, (1))
        elif self.attrs == {'mode': "channel", "data_format": "NCHW"}:
            alpha_np = np.random.uniform(-1, -0.5, [1, self.x_shape[1], 1, 1])
        elif self.attrs == {'mode': "channel", "data_format": "NHWC"}:
            alpha_np = np.random.uniform(-1, -0.5, [1, 1, 1, self.x_shape[-1]])
        else:
            alpha_np = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:])
            # The eager check does not support 'element' mode, so disable it.
            self.eager_mode = False
        alpha_np = alpha_np.astype(self.dtype)

        self.inputs = {'X': x_np, 'Alpha': alpha_np}

        # NOTE(zhiqu): reshape inputs['Alpha'] from [1, 100, 1, 1] to [1, 100] + [1]*len(x.shape[2:])
        # since np operands could not be broadcast together with shapes (1,100,2,2,2,3) (1,100,1,1)
        reshaped_alpha = self.inputs['Alpha']
        if self.attrs == {'mode': "channel", "data_format": "NCHW"}:
            reshaped_alpha = np.reshape(self.inputs['Alpha'],
                                        [1, self.x_shape[1]] +
                                        [1] * len(self.x_shape[2:]))
        elif self.attrs == {'mode': "channel", "data_format": "NHWC"}:
            reshaped_alpha = np.reshape(self.inputs['Alpha'],
                                        [1] + [1] * len(self.x_shape[1:-1]) +
                                        [self.x_shape[-1]])
        out_np = np.maximum(self.inputs['X'], 0.)
        out_np = out_np + np.minimum(self.inputs['X'], 0.) * reshaped_alpha
        assert out_np is not self.inputs['X']
        self.outputs = {'Out': out_np}

    def init_dtype(self):
        self.dtype = np.float64

    def init_input_shape(self):
        self.x_shape = [2, 100, 3, 4]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NCHW"}

    def test_check_output(self):
        self.check_output(check_eager=self.eager_mode)

    def test_check_grad(self):
        self.check_grad(['X', 'Alpha'], 'Out', check_eager=self.eager_mode)


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAll(PReluTest):

    def init_input_shape(self):
        self.x_shape = [2, 3, 4, 5]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NCHW"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllNHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [2, 3, 4, 50]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NHWC"}


class TestModeElt(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 5, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NCHW"}


class TestModeEltNHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 5, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NHWC"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank3(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 200, 3]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NCHW"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank3NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 200, 3]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NHWC"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank6(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 2, 3, 4, 5, 6]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NCHW"}


@skip_check_grad_ci(
    reason=
    "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
)
class TestModeAllRank6NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 2, 3, 4, 5, 6]

    def init_attr(self):
        self.attrs = {'mode': "all", "data_format": "NHWC"}


class TestModeChannelRank3(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 200, 3]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NCHW"}


class TestModeChannelRank3NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 3, 100]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NHWC"}


class TestModeChannelRank6(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 100, 2, 2, 2, 2]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NCHW"}


class TestModeChannelRank6NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [1, 2, 2, 2, 2, 100]

    def init_attr(self):
        self.attrs = {'mode': "channel", "data_format": "NHWC"}


class TestModeElementRank3(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 10, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NCHW"}


class TestModeElementRank3NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 10, 10]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NHWC"}


class TestModeElementRank6(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 2, 4, 5, 2]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NCHW"}


class TestModeElementRank6NHWC(PReluTest):

    def init_input_shape(self):
        self.x_shape = [3, 2, 2, 4, 5, 2]

    def init_attr(self):
        self.attrs = {'mode': "element", "data_format": "NHWC"}


def create_test_fp16_class(parent,
                           check_grad=True,
                           atol=1e-3,
                           max_relative_error=0.05):
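    # Register an FP16 variant of `parent` in the module namespace; it only
    # runs on CUDA devices with float16 support.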

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestPReluFp16Case(parent):

        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
                    self.check_output_with_place(place,
                                                 atol=atol,
                                                 check_eager=self.eager_mode)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and check_grad:
                self.check_grad_with_place(
                    place, ['X', 'Alpha'],
                    'Out',
                    max_relative_error=max_relative_error,
                    check_eager=self.eager_mode)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
    TestPReluFp16Case.__name__ = cls_name
    globals()[cls_name] = TestPReluFp16Case


create_test_fp16_class(TestModeElt)
create_test_fp16_class(TestModeAllRank3)
create_test_fp16_class(TestModeAllRank6)
create_test_fp16_class(TestModeChannelRank3)
create_test_fp16_class(TestModeChannelRank6)
create_test_fp16_class(TestModeElementRank3)
create_test_fp16_class(TestModeElementRank6)
create_test_fp16_class(TestModeEltNHWC)
create_test_fp16_class(TestModeAllRank3NHWC)
create_test_fp16_class(TestModeAllRank6NHWC)
create_test_fp16_class(TestModeChannelRank3NHWC)
create_test_fp16_class(TestModeChannelRank6NHWC)
create_test_fp16_class(TestModeElementRank3NHWC)
create_test_fp16_class(TestModeElementRank6NHWC)


def prelu_t(x, mode, param_attr=None, name=None, data_format='NCHW'):
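    # Build a prelu op directly through LayerHelper so that invalid mode or
    # data_format values reach the operator's own attribute checks.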
    helper = fluid.layer_helper.LayerHelper('prelu', **locals())
    alpha_shape = [1, x.shape[1], 1, 1]
    dtype = helper.input_dtype(input_param_name='x')
    alpha = helper.create_parameter(
        attr=helper.param_attr,
        shape=alpha_shape,
        dtype='float32',
        is_bias=False,
        default_initializer=fluid.initializer.ConstantInitializer(0.25))
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="prelu",
                     inputs={
                         "X": x,
                         'Alpha': alpha
                     },
                     attrs={
                         "mode": mode,
                         'data_format': data_format
                     },
                     outputs={"Out": out})
    return out


# Error message tests when mode is not one of 'all', 'channel', 'element'
# or the data_format is invalid.
class TestModeError(unittest.TestCase):

    def setUp(self):
        self.place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        self.x_np = np.ones([1, 2, 3, 4]).astype('float32')

    def test_mode_error(self):
        main_program = Program()
        with fluid.program_guard(main_program, Program()):
            x = fluid.data(name='x', shape=[2, 3, 4, 5])
            try:
                y = prelu_t(x, 'any')
            except Exception as e:
                assert (e.args[0].find('InvalidArgument') != -1)

    def test_data_format_error1(self):
        main_program = Program()
        with fluid.program_guard(main_program, Program()):
            x = fluid.data(name='x', shape=[2, 3, 4, 5])
            try:
                y = prelu_t(x, 'channel', data_format='N')
            except Exception as e:
                assert (e.args[0].find('InvalidArgument') != -1)

    def test_data_format_error2(self):
        main_program = Program()
        with fluid.program_guard(main_program, Program()):
            x = fluid.data(name='x', shape=[2, 3, 4, 5])
            try:
                y = paddle.static.nn.prelu(x, 'channel', data_format='N')
            except ValueError as e:
                pass


if __name__ == "__main__":
    unittest.main()