#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


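# Base class for the activation op tests below: it checks the "exp" op and lets
# subclasses override op_type, the test data, init_dtype() and init_kernel_type().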
class TestActivation(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


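# Mixin that adds name-argument and dygraph checks; it calls paddle.<op_type>
# via eval() and compares the result against the matching numpy function.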
class TestParameter(object):
    def test_out_name(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            self.assertTrue(np.allclose(out_ref, r))

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            self.assertTrue(np.allclose(out_ref, r.numpy()))
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [11, 17])
            out = paddle.fluid.layers.logsigmoid(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        self.assertTrue(np.allclose(out_ref, res[0]))

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        #TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12], self.dtype)
            out1 = F.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_variable(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12], self.dtype)
            out = fluid.layers.tanh(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tanh(self.x_np)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.tanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.tanh(x_fp16)


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestSinh(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "sinh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            self.assertTrue(np.allclose(z, z_expected))

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32")

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_sinh_res = exe.run(fluid.default_main_program(),
                                  feed={"data_x": input_x},
                                  fetch_list=[pd_sinh_out])

        expected_res = np.sinh(input_x)
        self.assertTrue(np.allclose(np_sinh_res, expected_res))

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "cosh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            self.assertTrue(np.allclose(z, z_expected))

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32")

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_cosh_res = exe.run(fluid.default_main_program(),
                                  feed={"data_x": input_x},
                                  fetch_list=[pd_cosh_out])

        expected_res = np.cosh(input_x)
        self.assertTrue(np.allclose(np_cosh_res, expected_res))

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(10, 20, [10, 17]).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.tanh_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_tanhshrink(self.x_np)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "hard_shrink"
        self.init_dtype()

        self.threshold = 0.5
        self.set_attrs()
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        paddle.enable_static()
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_variable(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.hard_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_variable(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "softshrink"
        self.init_dtype()

        threshold = 0.8

        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softshrink(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "rsqrt"
        self.init_dtype()

Z
zhupengyang 已提交
793
        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
Z
zhoukunsheng 已提交
794 795 796 797 798 799 800 801 802 803 804
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined;
    # we return zero as the gradient, whereas numpy returns nan.
    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12])
            out1 = F.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu(x)
        m = paddle.nn.ReLU()
        out2 = m(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
            self.assertRaises(TypeError, F.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
            F.relu(x_fp16)


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        paddle.enable_static()
        self.op_type = "leaky_relu"
        self.init_dtype()
        alpha = self.get_alpha()

        np.random.seed(10)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    # fluid.layers.leaky_relu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_variable(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.leaky_relu(x, 0.01)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_leaky_relu(self.x_np)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.brelu(x_fp16)


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.relu6(x_fp16)


class TestHardSwish(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwishOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.hard_swish, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.hard_swish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftReluOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
            # The float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.soft_relu(x_fp16)


def elu(x, alpha):
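    """Reference ELU: x for x > 0, alpha * (exp(x) - 1) otherwise."""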
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = elu(x, alpha)
        # Note: unlike other ReLU variants, the standard ELU (alpha = 1) is
        # differentiable at x = 0, so adjustments like x[np.abs(x) < 0.005] = 0.02 can be skipped here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12])
            out1 = F.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.elu(x)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)

        out1 = F.elu(x, 0.2)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
            self.assertRaises(TypeError, F.elu, x_int32)
            # The float16 input dtype is supported
            x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
            F.elu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')
    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)

class TestLog1p(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "log1p"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(
                name="data_x",
                shape=[11, 17],
                append_batch_size=False,
                dtype="float64")

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res1 = exe.run(fluid.default_main_program(),
                           feed={"data_x": input_x},
                           fetch_list=[out1])
        expected_res = np.log1p(input_x)
        self.assertTrue(np.allclose(res1, expected_res))

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        self.assertTrue(np.allclose(np_z, z_expected))


class TestSquare(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
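        # Unlike TestPow, the exponent is passed through the FactorTensor
        # input instead of the 'factor' attribute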
        paddle.enable_static()
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_6, np.power(input, 3))
    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")
        in3 = fluid.layers.data(
            name="in3",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")
        in4 = fluid.layers.data(
            name="in4",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float64")

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)

class TestSTanh(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
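        # Reference scaled tanh: out = scale_b * tanh(scale_a * x)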
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.stanh, x_int32)
            # The float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.stanh(x_fp16)


def ref_softplus(x, beta=1, threshold=20):
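    """Reference softplus: log(1 + exp(beta * x)) / beta, falling back to the
    identity once beta * x exceeds threshold to avoid overflow in exp."""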
    x_beta = beta * x
    out = np.select([x_beta <= threshold, x_beta > threshold],
                    [np.log(1 + np.exp(x_beta)) / beta, x])
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softplus(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softplus(self.x_np)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.softplus, x_int32)
            # The float16 input dtype is supported
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.softplus(x_fp16)


def ref_softsign(x):
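    """Reference softsign: x / (1 + |x|)."""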
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softsign(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softsign(self.x_np)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.softsign, x_int32)
            # The float16 input dtype is supported
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.softsign(x_fp16)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
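        # Reference thresholded_relu: keep x where x > threshold, zero elsewhere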
        out = (X > threshold) * X
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedReluOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.thresholded_relu, x_int32)
            # The float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.thresholded_relu(x_fp16)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
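        # Reference hard_sigmoid: out = clip(slope * x + offset, 0, 1)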
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoidOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.hard_sigmoid, x_int32)
            # The float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.hard_sigmoid(x_fp16)


class TestSwish(TestActivation):
    def setUp(self):
        paddle.enable_static()
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
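        # Reference swish: out = x * sigmoid(beta * x)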
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestSwishOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.swish, x_int32)
            # The float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.swish(x_fp16)


#------------------ Test Error Activation----------------------
def create_test_error_class(op_type):
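    """Register a test case checking that int32/int64 inputs to op_type raise TypeError."""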
    class TestOpErrors(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                op = getattr(fluid.layers, op_type)
                # The input dtype of op_type must be float32, float64.
                in1 = fluid.layers.data(
                    name='input2', shape=[12, 10], dtype="int32")
                in2 = fluid.layers.data(
                    name='input3', shape=[12, 10], dtype="int64")
                self.assertRaises(TypeError, op, in1)
                self.assertRaises(TypeError, op, in2)

    cls_name = "{0}_{1}".format(op_type, "test_errors")
    TestOpErrors.__name__ = cls_name
    globals()[cls_name] = TestOpErrors


create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
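    """Register a CUDA-only variant of `parent` that runs the op with use_cudnn=True."""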
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
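    """Register a CUDA-only float16 variant of `parent` with relaxed output and
    gradient tolerances, since fp16 arithmetic is less precise."""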
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16
        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)
        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)
if __name__ == "__main__":
    unittest.main()