#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as functional
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
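    # Base case shared by the activation OpTests below: setUp builds a NumPy
    # reference for the "exp" op, test_check_output compares the kernel result
    # against it, and test_check_grad runs the numeric gradient check (skipped
    # for float16). Subclasses override setUp/init_dtype/init_kernel_type.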
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


class TestParameter(object):
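    # Mixin used together with TestActivation subclasses: it calls the
    # paddle.<self.op_type> Python API with the out= and name= arguments in
    # static mode, and compares the dygraph result against the NumPy function
    # of the same name.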
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO: if dtype is float64, the output (Out) differs at CPUPlace
        # depending on whether inplace is used. Therefore, keep dtype as
        # float32 for now.
        self.dtype = np.float32


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            self.assertTrue(np.allclose(z, z_expected))

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32")

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_sinh_res = exe.run(fluid.default_main_program(),
                                  feed={"data_x": input_x},
                                  fetch_list=[pd_sinh_out])

        expected_res = np.sinh(input_x)
        self.assertTrue(np.allclose(np_sinh_res, expected_res))

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            self.assertTrue(np.allclose(z, z_expected))

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32")

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_cosh_res = exe.run(fluid.default_main_program(),
                                  feed={"data_x": input_x},
                                  fetch_list=[pd_cosh_out])

        expected_res = np.cosh(input_x)
        self.assertTrue(np.allclose(np_cosh_res, expected_res))

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrinkOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_shrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.hard_shrink, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.hard_shrink(x_fp16)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrinkOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.softshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 when computing the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
        # be 0.007, so the numeric gradient is inaccurate.
        # We should avoid such values here.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of ceil is undefined; see the note in TestFloor below.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined:
    # the op returns zero as the gradient, while NumPy would return nan,
    # so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.relu(x_fp16)


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.leaky_relu, x_int32)
            # support the input dtype is float32
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float32')
            fluid.layers.leaky_relu(x_fp16)


def gelu(x, approximate):
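    # NumPy reference for GELU: the tanh approximation
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))) when
    # approximate=True, and the exact form 0.5 * x * (1 + erf(x / sqrt(2)))
    # otherwise.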
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.brelu(x_fp16)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6OpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.relu6(x_fp16)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwishOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.hard_swish, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.hard_swish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.soft_relu(x_fp16)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU variants, the standard ELU (alpha = 1) is
        # differentiable at 0, so we can skip adjustments such as
        # x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(
                name="data_x",
                shape=[11, 17],
                append_batch_size=False,
                dtype="float64")

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res1 = exe.run(fluid.default_main_program(),
                           feed={"data_x": input_x},
                           fetch_list=[out1])
        expected_res = np.log1p(input_x)
        self.assertTrue(np.allclose(res1, expected_res))

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        self.assertTrue(np.allclose(np_z, z_expected))


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")
        in3 = fluid.layers.data(
            name="in3",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")
        in4 = fluid.layers.data(
            name="in4",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float64")

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.stanh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.stanh(x_fp16)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.thresholded_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.thresholded_relu(x_fp16)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

Z
zhupengyang 已提交
1193 1194
        self.delta = 0.005

1195
        # Same reason as TestAbs
Z
zhupengyang 已提交
1196 1197
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02
1198 1199

        temp = X * slope + offset
1200 1201 1202 1203
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}
1204 1205

    def test_check_grad(self):
1206 1207
        if self.dtype == np.float16:
            return
Z
zhupengyang 已提交
1208
        self.check_grad(['X'], 'Out')
1209

1210

1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223
class TestHardSigmoidOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.hard_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.hard_sigmoid(x_fp16)


C
chengduo 已提交
1224
class TestSwish(TestActivation):
A
        self.op_type = "swish"
1227 1228 1229 1230 1231 1232 1233 1234 1235
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}
A
    def test_check_grad(self):
1238 1239
        if self.dtype == np.float16:
            return
F
fengjiayi 已提交
1240
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
A
1242

1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255
class TestSwishOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.swish, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.swish(x_fp16)


#------------------ Test Error Activation----------------------
def create_test_error_class(op_type):
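    # Factory that builds and registers a TestCase checking that the given
    # fluid.layers.<op_type> rejects int32/int64 inputs with a TypeError.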
    class TestOpErrors(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                op = getattr(fluid.layers, op_type)
                # The input dtype of op_type must be float32, float64.
                in1 = fluid.layers.data(
                    name='input2', shape=[12, 10], dtype="int32")
                in2 = fluid.layers.data(
                    name='input3', shape=[12, 10], dtype="int64")
                self.assertRaises(TypeError, op, in1)
                self.assertRaises(TypeError, op, in2)

    cls_name = "{0}_{1}".format(op_type, "test_errors")
    TestOpErrors.__name__ = cls_name
    globals()[cls_name] = TestOpErrors


create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
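    # Wraps an existing activation test so the op runs with the use_cudnn
    # attribute set; the derived case is skipped when CUDA is unavailable.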
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
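    # Derives a float16 variant of an existing activation test: it only runs
    # on CUDA devices that support float16, compares outputs with tolerance
    # `atol`, and (when grad_check is True) checks gradients with `grad_atol`
    # as the maximum relative error.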
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)


class TestNNReluAPI(unittest.TestCase):
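    # Checks the paddle.nn.ReLU layer in both static graph mode (forward and
    # backward, via append_backward) and dygraph mode, on CPU and, when
    # available, CUDA.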
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.x_shape = [10, 12]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.y = self.ref_forward(self.x)

    def ref_forward(self, x):
        return np.maximum(x, 0)

    def ref_backward(self, y, dy):
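        # dReLU/dx is 1 where the output is positive and 0 elsewhere, so the
        # input gradient is the mask applied to the upstream gradient dy.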
        y_t = y.copy()
        y_t[y_t > 0] = 1
        return y_t * dy

    def check_api(self, place=fluid.CPUPlace(), inplace=False):
        main_program = Program()
        myrelu = nn.ReLU(inplace)
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            x.stop_gradient = False
            y = myrelu(x)
            fluid.backward.append_backward(fluid.layers.mean(y))
        exe = fluid.Executor(place)
        out = exe.run(main_program,
                      feed={'x': self.x},
                      fetch_list=[y, y.grad_name, x.grad_name])
        self.assertTrue(np.allclose(out[0], self.y))
        self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1])))

        with fluid.dygraph.guard(place):
            x = fluid.dygraph.to_variable(self.x)
            y = myrelu(x)
        self.assertTrue(np.allclose(y.numpy(), self.y))

    def test_check_api(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            for inplace in [True, False]:
                self.check_api(place, inplace)


class TestNNFunctionalReluAPI(unittest.TestCase):
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.x_shape = [10, 12]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.y = self.ref_forward(self.x)

    def ref_forward(self, x):
        return np.maximum(x, 0)

    def test_check_api(self):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            y = functional.relu(x)
        exe = fluid.Executor(fluid.CPUPlace())
        out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
        self.assertTrue(np.allclose(out[0], self.y))


class TestNNSigmoidAPI(unittest.TestCase):
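    # Same structure as TestNNReluAPI above, but for the paddle.nn.Sigmoid
    # layer.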
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.x_shape = [10, 15]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.y = self.ref_forward(self.x)

    def ref_forward(self, x):
        return 1 / (1 + np.exp(-x))

    def ref_backward(self, y, dy):
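        # dsigmoid/dx = sigmoid(x) * (1 - sigmoid(x)), so the input gradient
        # can be computed from the forward output y alone.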
        return dy * y * (1 - y)

    def check_api(self, place=fluid.CPUPlace(), inplace=False):
        main_program = Program()
        mysigmoid = nn.Sigmoid(inplace)
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            x.stop_gradient = False
            y = mysigmoid(x)
            fluid.backward.append_backward(fluid.layers.mean(y))
        exe = fluid.Executor(place)
        out = exe.run(main_program,
                      feed={'x': self.x},
                      fetch_list=[y, y.grad_name, x.grad_name])
        self.assertTrue(np.allclose(out[0], self.y))
        self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1])))

        with fluid.dygraph.guard(place):
            x = fluid.dygraph.to_variable(self.x)
            y = mysigmoid(x)
        self.assertTrue(np.allclose(y.numpy(), self.y))

    def test_check_api(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            for inplace in [True, False]:
                self.check_api(place, inplace)


class TestNNFunctionalSigmoidAPI(unittest.TestCase):
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.x_shape = [10, 15]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.y = self.ref_forward(self.x)

    def ref_forward(self, x):
        return 1 / (1 + np.exp(-x))

    def test_check_api(self):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            y = functional.sigmoid(x)
        exe = fluid.Executor(fluid.CPUPlace())
        out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
        self.assertTrue(np.allclose(out[0], self.y))


if __name__ == "__main__":
    unittest.main()